From 9b5d41b63697ea9f126b57d28e8d3940d09ce55a Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Tue, 30 Jan 2018 17:37:26 +0800 Subject: [PATCH 001/138] make inference_lib_dist --- cmake/external/eigen.cmake | 12 +++++++----- cmake/external/gflags.cmake | 9 ++++++++- cmake/external/glog.cmake | 9 ++++++++- cmake/external/protobuf.cmake | 9 ++++++++- paddle/framework/CMakeLists.txt | 13 +++++++------ paddle/inference/CMakeLists.txt | 13 +++++++++---- paddle/memory/CMakeLists.txt | 12 ++++++------ paddle/platform/CMakeLists.txt | 14 +++++++------- paddle/string/CMakeLists.txt | 11 ++++++----- 9 files changed, 66 insertions(+), 36 deletions(-) diff --git a/cmake/external/eigen.cmake b/cmake/external/eigen.cmake index d49c8d60110..eb6c0cef574 100644 --- a/cmake/external/eigen.cmake +++ b/cmake/external/eigen.cmake @@ -29,8 +29,10 @@ add_dependencies(eigen3 extern_eigen3) LIST(APPEND external_project_dependencies eigen3) -IF(NOT WITH_C_API AND WITH_FLUID) - INSTALL(FILES ${EIGEN_INCLUDE_DIR}/Eigen/Core DESTINATION third_party/eigen3/Eigen) - INSTALL(DIRECTORY ${EIGEN_INCLUDE_DIR}/Eigen/src DESTINATION third_party/eigen3/Eigen) - INSTALL(DIRECTORY ${EIGEN_INCLUDE_DIR}/unsupported/Eigen DESTINATION third_party/eigen3/unsupported) -ENDIF() +set(lib_dir "${CMAKE_INSTALL_PREFIX}/third_party/eigen3") +add_custom_target(eigen3_lib + COMMAND mkdir -p "${lib_dir}/Eigen" "${lib_dir}/unsupported" + COMMAND cp "${EIGEN_INCLUDE_DIR}/Eigen/Core" "${lib_dir}/Eigen" + COMMAND cp -r "${EIGEN_INCLUDE_DIR}/Eigen/src" "${lib_dir}/Eigen" + COMMAND cp -r "${EIGEN_INCLUDE_DIR}/unsupported/Eigen" "${lib_dir}/unsupported" +) diff --git a/cmake/external/gflags.cmake b/cmake/external/gflags.cmake index 60946304541..9cbc376ba0e 100644 --- a/cmake/external/gflags.cmake +++ b/cmake/external/gflags.cmake @@ -52,7 +52,7 @@ ADD_DEPENDENCIES(gflags extern_gflags) LIST(APPEND external_project_dependencies gflags) -IF(WITH_C_API OR WITH_FLUID) +IF(WITH_C_API) INSTALL(DIRECTORY ${GFLAGS_INCLUDE_DIR} DESTINATION third_party/gflags) IF(ANDROID) INSTALL(FILES ${GFLAGS_LIBRARIES} DESTINATION third_party/gflags/lib/${ANDROID_ABI}) @@ -60,3 +60,10 @@ IF(WITH_C_API OR WITH_FLUID) INSTALL(FILES ${GFLAGS_LIBRARIES} DESTINATION third_party/gflags/lib) ENDIF() ENDIF() + +set(lib_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/gflags") +add_custom_target(gflags_lib + COMMAND mkdir -p "${lib_dir}/lib" + COMMAND cp -r "${GFLAGS_INCLUDE_DIR}" "${lib_dir}" + COMMAND cp "${GFLAGS_LIBRARIES}" "${lib_dir}/lib" +) diff --git a/cmake/external/glog.cmake b/cmake/external/glog.cmake index 382fbda3b5c..0031225a6cb 100644 --- a/cmake/external/glog.cmake +++ b/cmake/external/glog.cmake @@ -68,7 +68,7 @@ LINK_LIBRARIES(glog gflags) LIST(APPEND external_project_dependencies glog) -IF(WITH_C_API OR WITH_FLUID) +IF(WITH_C_API) INSTALL(DIRECTORY ${GLOG_INCLUDE_DIR} DESTINATION third_party/glog) IF(ANDROID) INSTALL(FILES ${GLOG_LIBRARIES} DESTINATION third_party/glog/lib/${ANDROID_ABI}) @@ -76,3 +76,10 @@ IF(WITH_C_API OR WITH_FLUID) INSTALL(FILES ${GLOG_LIBRARIES} DESTINATION third_party/glog/lib) ENDIF() ENDIF() + +set(lib_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/glog") +add_custom_target(glog_lib + COMMAND mkdir -p "${lib_dir}/lib" + COMMAND cp -r "${GLOG_INCLUDE_DIR}" "${lib_dir}" + COMMAND cp "${GLOG_LIBRARIES}" "${lib_dir}/lib" +) diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake index 365a370a9cf..ff3d38a691a 100644 --- a/cmake/external/protobuf.cmake +++ b/cmake/external/protobuf.cmake @@ -250,7 +250,7 @@ IF(NOT PROTOBUF_FOUND) SET(PROTOBUF_PROTOC_LIBRARY ${extern_protobuf_PROTOC_LIBRARY} CACHE FILEPATH "protoc library." FORCE) - IF(WITH_C_API OR WITH_FLUID) + IF(WITH_C_API) INSTALL(DIRECTORY ${PROTOBUF_INCLUDE_DIR} DESTINATION third_party/protobuf) IF(ANDROID) INSTALL(FILES ${PROTOBUF_LITE_LIBRARY} DESTINATION third_party/protobuf/lib/${ANDROID_ABI}) @@ -259,6 +259,13 @@ IF(NOT PROTOBUF_FOUND) ENDIF() ENDIF() + set(lib_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/protobuf") + add_custom_target(protobuf_lib + COMMAND mkdir -p "${lib_dir}/lib" + COMMAND cp -r "${PROTOBUF_INCLUDE_DIR}" "${lib_dir}" + COMMAND cp "${PROTOBUF_LITE_LIBRARY}" "${lib_dir}/lib" + ) + IF(CMAKE_CROSSCOMPILING) PROMPT_PROTOBUF_LIB(protobuf_host extern_protobuf) ELSE() diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 8c28709a68b..d394fa5d10d 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -92,11 +92,12 @@ cc_test(init_test SRCS init_test.cc DEPS init) cc_test(op_kernel_type_test SRCS op_kernel_type_test.cc DEPS place device_context framework_proto) cc_test(cow_ptr_tests SRCS details/cow_ptr_test.cc) -if(NOT WITH_C_API AND WITH_FLUID) - file(GLOB FRAMEWORK_HEADERS *.h) - install(FILES ${FRAMEWORK_HEADERS} DESTINATION include/paddle/framework) - install(FILES ${CMAKE_CURRENT_BINARY_DIR}/framework.pb.h DESTINATION include/paddle/framework) - install(FILES details/cow_ptr.h details/op_registry.h DESTINATION include/paddle/framework/details) -endif() +set(lib_dir "${CMAKE_INSTALL_PREFIX}/paddle/framework") +add_custom_target(framework_lib DEPENDS framework_py_proto + COMMAND mkdir -p "${lib_dir}/details" + COMMAND cp "${CMAKE_CURRENT_SOURCE_DIR}/*.h" "${lib_dir}" + COMMAND cp "${CMAKE_CURRENT_SOURCE_DIR}/details/*.h" "${lib_dir}/details" + COMMAND cp "${CMAKE_CURRENT_BINARY_DIR}/framework.pb.h" "${lib_dir}" +) cc_test(channel_test SRCS channel_test.cc) diff --git a/paddle/inference/CMakeLists.txt b/paddle/inference/CMakeLists.txt index 683aaee42a4..58c0c59380e 100644 --- a/paddle/inference/CMakeLists.txt +++ b/paddle/inference/CMakeLists.txt @@ -19,10 +19,15 @@ target_circle_link_libraries(paddle_fluid_shared SET_TARGET_PROPERTIES(paddle_fluid_shared PROPERTIES OUTPUT_NAME paddle_fluid) # install library & headers -if(NOT WITH_C_API AND WITH_FLUID) - install(FILES io.h DESTINATION include/paddle/inference) - install(TARGETS paddle_fluid_shared DESTINATION lib) -endif() +set(lib_dir "${CMAKE_INSTALL_PREFIX}/paddle/inference") +add_custom_target(inference_lib DEPENDS paddle_fluid_shared + COMMAND mkdir -p "${lib_dir}" + COMMAND cp "${CMAKE_CURRENT_SOURCE_DIR}/*.h" "${lib_dir}" + COMMAND cp "${CMAKE_CURRENT_BINARY_DIR}/libpaddle_fluid.so" "${lib_dir}" +) +add_custom_target(inference_lib_dist DEPENDS + inference_lib framework_lib memory_lib platform_lib string_lib + gflags_lib glog_lib protobuf_lib eigen3_lib) add_executable(example example.cc) if(APPLE) diff --git a/paddle/memory/CMakeLists.txt b/paddle/memory/CMakeLists.txt index 496098f8042..fad49346f23 100644 --- a/paddle/memory/CMakeLists.txt +++ b/paddle/memory/CMakeLists.txt @@ -15,9 +15,9 @@ cc_library(paddle_memory cc_test(memory_test SRCS memory_test.cc DEPS place paddle_memory) -if(NOT WITH_C_API AND WITH_FLUID) - file(GLOB MEMORY_HEADERS *.h) - file(GLOB MEMORY_DETAIL_HEADERS detail/*.h) - install(FILES ${MEMORY_HEADERS} DESTINATION include/paddle/memory) - install(FILES ${MEMORY_DETAIL_HEADERS} DESTINATION include/paddle/memory/detail) -endif() +set(lib_dir "${CMAKE_INSTALL_PREFIX}/paddle/memory") +add_custom_target(memory_lib + COMMAND mkdir -p "${lib_dir}/detail" + COMMAND cp "${CMAKE_CURRENT_SOURCE_DIR}/*.h" "${lib_dir}" + COMMAND cp "${CMAKE_CURRENT_SOURCE_DIR}/detail/*.h" "${lib_dir}/detail" +) diff --git a/paddle/platform/CMakeLists.txt b/paddle/platform/CMakeLists.txt index d68caea9971..d70530aadb8 100644 --- a/paddle/platform/CMakeLists.txt +++ b/paddle/platform/CMakeLists.txt @@ -40,10 +40,10 @@ nv_test(nccl_test SRCS nccl_test.cu DEPS dynload_cuda gpu_info device_context) cc_library(profiler SRCS profiler.cc DEPS device_context) cc_test(profiler_test SRCS profiler_test.cc DEPS profiler) -if(NOT WITH_C_API AND WITH_FLUID) - file(GLOB PLATFORM_HEADERS *.h) - file(GLOB PLATFORM_dynload_HEADERS dynload/*.h) - install(FILES ${PLATFORM_HEADERS} DESTINATION include/paddle/platform) - install(FILES ${PLATFORM_HEADERS} DESTINATION include/paddle/platform/dynload) - install(FILES details/device_ptr_cast.h DESTINATION include/paddle/platform/details) -endif() +set(lib_dir "${CMAKE_INSTALL_PREFIX}/paddle/platform") +add_custom_target(platform_lib + COMMAND mkdir -p "${lib_dir}/dynload" "${lib_dir}/details" + COMMAND cp "${CMAKE_CURRENT_SOURCE_DIR}/*.h" "${lib_dir}" + COMMAND cp "${CMAKE_CURRENT_SOURCE_DIR}/dynload/*.h" "${lib_dir}/dynload" + COMMAND cp "${CMAKE_CURRENT_SOURCE_DIR}/details/*.h" "${lib_dir}/details" +) diff --git a/paddle/string/CMakeLists.txt b/paddle/string/CMakeLists.txt index 751776dbb5c..234a9a6d036 100644 --- a/paddle/string/CMakeLists.txt +++ b/paddle/string/CMakeLists.txt @@ -3,8 +3,9 @@ cc_test(stringpiece_test SRCS piece_test.cc DEPS stringpiece glog gflags) cc_test(stringprintf_test SRCS printf_test.cc DEPS glog gflags) cc_test(to_string_test SRCS to_string_test.cc) -if(NOT WITH_C_API AND WITH_FLUID) - file(GLOB STRING_HEADERS *.h) - install(FILES ${STRING_HEADERS} DESTINATION include/paddle/string) - install(FILES tinyformat/tinyformat.h DESTINATION include/paddle/string/tinyformat) -endif() +set(lib_dir "${CMAKE_INSTALL_PREFIX}/paddle/string") +add_custom_target(string_lib + COMMAND mkdir -p "${lib_dir}/tinyformat" + COMMAND cp "${CMAKE_CURRENT_SOURCE_DIR}/*.h" "${lib_dir}" + COMMAND cp "${CMAKE_CURRENT_SOURCE_DIR}/tinyformat/*.h" "${lib_dir}/tinyformat" +) -- GitLab From 1acad21bbf7a7eea1dc5cb9a68057d35210f7cdb Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 30 Jan 2018 20:27:38 +0800 Subject: [PATCH 002/138] init reader.h and reader.cc files --- paddle/framework/reader.cc | 51 ++++++++++++++++++++++++++++++ paddle/framework/reader.h | 65 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 116 insertions(+) create mode 100644 paddle/framework/reader.cc create mode 100644 paddle/framework/reader.h diff --git a/paddle/framework/reader.cc b/paddle/framework/reader.cc new file mode 100644 index 00000000000..7f80dd7fc10 --- /dev/null +++ b/paddle/framework/reader.cc @@ -0,0 +1,51 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/framework/reader.h" + +namespace paddle { +namespace framework { + +DDim Reader::shape(int idx) const { + PADDLE_ENFORCE_LT( + idx, shapes_.size(), + "Cannot get the %d'th shape, 'shapes_' only has %d elements.", idx, + shapes_.size()); +} + +int RandomReader::ReadNext(std::vector* outs) { + PADDLE_ENFORCE_EQ( + shapes_.size(), outs.size(), + "shapes_.size() is %d, while outs.size() is %d. They are not equal.", + shapes_.size(), outs.size()); + std::minstd_rand engine; + unsigned int seed = std::random_device()(); + engine.seed(seed); + std::uniform_real_distribution dist(min_, max_); + for (int idx = 0; idx < shapes_.size(); ++idx) { + DDim shape = shapes_[idx]; + LoDTensor* out = outs[idx]; + int64_t numel = out->numel(); + PADDLE_ENFORCE_EQ(product(shape), numel, + "The product of %d'th shape is %lld, while the " + "corresponding out's numel is %lld. They are not equal.", + idx, product(shape), numel); + for (int64_t i = 0; i < numel, ++i) { + out[i] = dist(engine); + } + } + return 0; +} +} // namespace framework +} // namespace paddle diff --git a/paddle/framework/reader.h b/paddle/framework/reader.h new file mode 100644 index 00000000000..eed9c18d087 --- /dev/null +++ b/paddle/framework/reader.h @@ -0,0 +1,65 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/framework/ddim.h" +#include "paddle/framework/lod_tensor.h" + +namespace paddle { +namespace framework { + +class Reader { + public: + virtual int ReadNext(std::vector* outs) = 0; + DDim shape(int idx) const; + + private: + std::vector shapes_; +}; + +// file readers + +class RandomReader : public Reader { + public: + RandomReader(const std::vector& shapes, float min, float max) + : shapes_(shapes), min_(min), max_(max) {} + int ReadNext(std::vector* outs) override; + + private: + float min_; + float max_; +}; + +// decorators + +class BatchReader : public Reader { + public: + BatchReader(const Reader* reader) : reader_(reader) {} + int ReadNext(std::vector* outs) override; + + private: + const Reader* reader_; +}; + +class ShuffleReader : public Reader { + public: + ShuffleReader(const Reader* reader) : reader_(reader) {} + int ReadNext(std::vector* outs) override; + + private: + const Reader* reader_; +}; +} // namespace framework +} // namespace paddle -- GitLab From 55b5f29ea44cc5e94061dd4a92e6cbf11d7f2346 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Tue, 30 Jan 2018 21:14:48 +0800 Subject: [PATCH 003/138] refine paddle_fluid_shared library --- paddle/inference/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/inference/CMakeLists.txt b/paddle/inference/CMakeLists.txt index 58c0c59380e..3f587fa790d 100644 --- a/paddle/inference/CMakeLists.txt +++ b/paddle/inference/CMakeLists.txt @@ -13,8 +13,8 @@ add_library(paddle_fluid_shared SHARED io.cc) target_circle_link_libraries(paddle_fluid_shared ARCHIVE_START ${GLOB_OP_LIB} - ARCHIVE_END - ${FLUID_CORE_MODULES}) + ${FLUID_CORE_MODULES} + ARCHIVE_END) SET_TARGET_PROPERTIES(paddle_fluid_shared PROPERTIES OUTPUT_NAME paddle_fluid) -- GitLab From f32ca6369099f5d3776ae87d431b9b39ea8eba3e Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 31 Jan 2018 18:46:45 +0800 Subject: [PATCH 004/138] draft of Reader classes --- paddle/framework/CMakeLists.txt | 2 + paddle/framework/reader.cc | 107 +++++++++++++++++++++++++------- paddle/framework/reader.h | 83 +++++++++++++++++++++---- 3 files changed, 159 insertions(+), 33 deletions(-) diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 8c28709a68b..7eec91f9070 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -24,6 +24,8 @@ cc_library(lod_tensor SRCS lod_tensor.cc DEPS ddim place tensor framework_proto) cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor paddle_memory) nv_test(lod_tensor_gpu_test SRCS lod_tensor_test.cu DEPS lod_tensor) +cc_library(reader SRCS reader.cc DEPS lod_tensor ddim) + cc_test(variable_test SRCS variable_test.cc) cc_library(threadpool SRCS threadpool.cc DEPS enforce) diff --git a/paddle/framework/reader.cc b/paddle/framework/reader.cc index 7f80dd7fc10..e11662166c6 100644 --- a/paddle/framework/reader.cc +++ b/paddle/framework/reader.cc @@ -17,35 +17,100 @@ namespace paddle { namespace framework { -DDim Reader::shape(int idx) const { +DDim Reader::shape(size_t idx) const { PADDLE_ENFORCE_LT( idx, shapes_.size(), "Cannot get the %d'th shape, 'shapes_' only has %d elements.", idx, shapes_.size()); + return shapes_[idx]; } -int RandomReader::ReadNext(std::vector* outs) { - PADDLE_ENFORCE_EQ( - shapes_.size(), outs.size(), - "shapes_.size() is %d, while outs.size() is %d. They are not equal.", - shapes_.size(), outs.size()); - std::minstd_rand engine; - unsigned int seed = std::random_device()(); - engine.seed(seed); - std::uniform_real_distribution dist(min_, max_); - for (int idx = 0; idx < shapes_.size(); ++idx) { - DDim shape = shapes_[idx]; - LoDTensor* out = outs[idx]; - int64_t numel = out->numel(); - PADDLE_ENFORCE_EQ(product(shape), numel, - "The product of %d'th shape is %lld, while the " - "corresponding out's numel is %lld. They are not equal.", - idx, product(shape), numel); - for (int64_t i = 0; i < numel, ++i) { - out[i] = dist(engine); +std::vector ShuffleReader::ReadNext() { + if (iteration_pos_ >= buffer_.size()) { + // Reload buffer with new data + buffer_.clear(); + for (int i = 0; i < buffer_size_; ++i) { + if (reader_->HasNext()) { + buffer_.push_back(reader_->ReadNext()); + } else { + break; + } } + std::random_shuffle(buffer_.begin(), buffer_.end()); + iteration_pos_ = 0; } - return 0; + if (buffer_.empty()) { + std::vector empty_res; + return empty_res; + } + return buffer_[iteration_pos_++]; +} + +std::vector BatchReader::ReadNext() { + buffer_.clear(); + for (int i = 0; i < batch_size_; ++i) { + if (reader_->HasNext()) { + buffer_.push_back(reader_->ReadNext()); + } else { + break; + } + } + // Concat instances + std::vector res; + if (buffer_.empty()) { + return res; + } + int out_num = buffer_[0].size(); + res.reserve(out_num); + for (int j = 0; j < out_num; ++j) { + // Merge shape and check date type + std::type_index batch_type = buffer_[0][j].type(); + DDim batch_shape = buffer_[0][j].dims(); + for (size_t i = 1; i < buffer_.size(); ++i) { + std::type_index ins_type = buffer_[i][j].type(); + DDim ins_shape = buffer_[i][j].dims(); + PADDLE_ENFORCE_EQ(batch_type, ins_type); + PADDLE_ENFORCE_EQ(slice_ddim(batch_shape, 1, batch_shape.size()), + slice_ddim(ins_shape, 1, ins_shape.size())); + PADDLE_ENFORCE_GT(ins_shape[0], 0); + batch_shape[0] += ins_shape[0]; + } + + LoDTensor out; + out.Resize(batch_shape); + out.mutable_data(platform::CPUPlace(), batch_type); + int64_t dst_offset = 0; + + // Merge lod and data + LoD batch_lod; + std::vector top_level_lod({0}); + for (size_t i = 0; i < buffer_.size(); ++i) { + DDim ins_shape = buffer_[i][j].dims(); + LoD ins_lod = buffer_[i][j].lod(); + if (i == 0) { + batch_lod = ins_lod; + } else { + PADDLE_ENFORCE_EQ(batch_lod.size(), ins_lod.size()); + for (size_t level_idx = 0; level_idx < batch_lod.size(); ++level_idx) { + auto& lod_level = batch_lod[level_idx]; + for (size_t k = 1; k < ins_lod[level_idx].size(); ++k) { + lod_level.push_back(ins_lod[level_idx][k] + lod_level.back()); + } + } + } + top_level_lod.push_back( + top_level_lod.back() + + (ins_lod.empty() ? ins_shape[0] : (ins_lod[0].size() - 1))); + + Tensor dst = out.Slice(dst_offset, dst_offset + ins_shape[0]); + Copy(buffer_[i][j], platform::CPUPlace(), &dst); + dst_offset += ins_shape[0]; + } + batch_lod.insert(batch_lod.begin(), top_level_lod); + out.set_lod(batch_lod); + res.push_back(out); + } + return res; } } // namespace framework } // namespace paddle diff --git a/paddle/framework/reader.h b/paddle/framework/reader.h index eed9c18d087..58675863e56 100644 --- a/paddle/framework/reader.h +++ b/paddle/framework/reader.h @@ -22,20 +22,61 @@ namespace framework { class Reader { public: - virtual int ReadNext(std::vector* outs) = 0; - DDim shape(int idx) const; + Reader() {} + explicit Reader(const std::vector& shapes) : shapes_(shapes) {} + + virtual std::vector ReadNext() = 0; + virtual bool HasNext() const = 0; + + virtual DDim shape(size_t idx) const; + virtual std::vector shapes() const { return shapes_; } + + virtual ~Reader() {} private: + // set private to prevent directly access in decorators + // a decorator should access its underlying reader_'s shape, not its own. std::vector shapes_; }; // file readers +template class RandomReader : public Reader { public: RandomReader(const std::vector& shapes, float min, float max) - : shapes_(shapes), min_(min), max_(max) {} - int ReadNext(std::vector* outs) override; + : Reader(shapes), min_(min), max_(max) { + PADDLE_ENFORCE_LE(min, max, + "'min' should be less than or equal to 'max'.(%f vs %f)", + min, max); + } + + std::vector ReadNext() override { + std::minstd_rand engine; + unsigned int seed = std::random_device()(); + engine.seed(seed); + std::uniform_real_distribution dist(min_, max_); + + std::vector res; + res.reserve(shapes().size()); + for (const DDim& shape : shapes()) { + PADDLE_ENFORCE_GE( + shape.size(), 2, + "The rank of input data should be 2 at least.(Now it's %d)", + shape.size()); + LoDTensor out; + out.Resize(shape); + T* data = out.mutable_data(platform::CPUPlace()); + int64_t numel = product(shape); + for (int64_t i = 0; i < numel; ++i) { + data[i] = dist(engine); + } + res.push_back(out); + } + return res; + } + + bool HasNext() const override { return true; } private: float min_; @@ -44,22 +85,40 @@ class RandomReader : public Reader { // decorators -class BatchReader : public Reader { +class ShuffleReader : public Reader { public: - BatchReader(const Reader* reader) : reader_(reader) {} - int ReadNext(std::vector* outs) override; + ShuffleReader(Reader* reader, int buffer_size) + : reader_(reader), buffer_size_(buffer_size), iteration_pos_(0) { + buffer_.reserve(buffer_size); + } + std::vector ReadNext() override; + bool HasNext() const override { return reader_->HasNext(); } + + DDim shape(size_t idx) const override { return reader_->shape(idx); } + std::vector shapes() const override { return reader_->shapes(); } private: - const Reader* reader_; + Reader* reader_; + int buffer_size_; + std::vector> buffer_; + size_t iteration_pos_; }; -class ShuffleReader : public Reader { +class BatchReader : public Reader { public: - ShuffleReader(const Reader* reader) : reader_(reader) {} - int ReadNext(std::vector* outs) override; + BatchReader(Reader* reader, int batch_size) + : reader_(reader), batch_size_(batch_size) {} + std::vector ReadNext() override; + bool HasNext() const override { return reader_->HasNext(); }; + + DDim shape(size_t idx) const override { return reader_->shape(idx); } + std::vector shapes() const override { return reader_->shapes(); } private: - const Reader* reader_; + Reader* reader_; + int batch_size_; + std::vector> buffer_; }; + } // namespace framework } // namespace paddle -- GitLab From d8cc21da53e1113aaee3b43ea77d136bbbd204bb Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 1 Feb 2018 12:58:14 +0800 Subject: [PATCH 005/138] refine inheritance relationship --- paddle/framework/reader.cc | 2 +- paddle/framework/reader.h | 66 +++++++++++++++++++++----------------- 2 files changed, 37 insertions(+), 31 deletions(-) diff --git a/paddle/framework/reader.cc b/paddle/framework/reader.cc index e11662166c6..a05bef42ffa 100644 --- a/paddle/framework/reader.cc +++ b/paddle/framework/reader.cc @@ -17,7 +17,7 @@ namespace paddle { namespace framework { -DDim Reader::shape(size_t idx) const { +DDim FileReader::shape(size_t idx) const { PADDLE_ENFORCE_LT( idx, shapes_.size(), "Cannot get the %d'th shape, 'shapes_' only has %d elements.", idx, diff --git a/paddle/framework/reader.h b/paddle/framework/reader.h index 58675863e56..3954a1bea8a 100644 --- a/paddle/framework/reader.h +++ b/paddle/framework/reader.h @@ -20,32 +20,48 @@ namespace paddle { namespace framework { -class Reader { +class ReaderBase { public: - Reader() {} - explicit Reader(const std::vector& shapes) : shapes_(shapes) {} - virtual std::vector ReadNext() = 0; virtual bool HasNext() const = 0; - virtual DDim shape(size_t idx) const; - virtual std::vector shapes() const { return shapes_; } + virtual DDim shape(size_t idx) const = 0; + virtual std::vector shapes() const = 0; - virtual ~Reader() {} + virtual ~ReaderBase() {} +}; - private: - // set private to prevent directly access in decorators - // a decorator should access its underlying reader_'s shape, not its own. +class FileReader : public ReaderBase { + public: + explicit FileReader(const std::vector& shapes) : shapes_(shapes) {} + + DDim shape(size_t idx) const override; + std::vector shapes() const override { return shapes_; } + + protected: std::vector shapes_; }; +class ReaderDecorator : public ReaderBase { + public: + explicit ReaderDecorator(ReaderBase* reader) : reader_(reader) {} + + bool HasNext() const override { return reader_->HasNext(); } + + DDim shape(size_t idx) const override { return reader_->shape(idx); } + std::vector shapes() const override { return reader_->shapes(); } + + protected: + ReaderBase* reader_; +}; + // file readers template -class RandomReader : public Reader { +class RandomReader : public FileReader { public: RandomReader(const std::vector& shapes, float min, float max) - : Reader(shapes), min_(min), max_(max) { + : FileReader(shapes), min_(min), max_(max) { PADDLE_ENFORCE_LE(min, max, "'min' should be less than or equal to 'max'.(%f vs %f)", min, max); @@ -58,8 +74,8 @@ class RandomReader : public Reader { std::uniform_real_distribution dist(min_, max_); std::vector res; - res.reserve(shapes().size()); - for (const DDim& shape : shapes()) { + res.reserve(shapes_.size()); + for (const DDim& shape : shapes_) { PADDLE_ENFORCE_GE( shape.size(), 2, "The rank of input data should be 2 at least.(Now it's %d)", @@ -85,37 +101,27 @@ class RandomReader : public Reader { // decorators -class ShuffleReader : public Reader { +class ShuffleReader : public ReaderDecorator { public: - ShuffleReader(Reader* reader, int buffer_size) - : reader_(reader), buffer_size_(buffer_size), iteration_pos_(0) { + ShuffleReader(ReaderBase* reader, int buffer_size) + : ReaderDecorator(reader), buffer_size_(buffer_size), iteration_pos_(0) { buffer_.reserve(buffer_size); } std::vector ReadNext() override; - bool HasNext() const override { return reader_->HasNext(); } - - DDim shape(size_t idx) const override { return reader_->shape(idx); } - std::vector shapes() const override { return reader_->shapes(); } private: - Reader* reader_; int buffer_size_; std::vector> buffer_; size_t iteration_pos_; }; -class BatchReader : public Reader { +class BatchReader : public ReaderDecorator { public: - BatchReader(Reader* reader, int batch_size) - : reader_(reader), batch_size_(batch_size) {} + BatchReader(ReaderBase* reader, int batch_size) + : ReaderDecorator(reader), batch_size_(batch_size) {} std::vector ReadNext() override; - bool HasNext() const override { return reader_->HasNext(); }; - - DDim shape(size_t idx) const override { return reader_->shape(idx); } - std::vector shapes() const override { return reader_->shapes(); } private: - Reader* reader_; int batch_size_; std::vector> buffer_; }; -- GitLab From 93cab64185edf722dc493d1a00db5032014d836e Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 1 Feb 2018 17:38:57 +0800 Subject: [PATCH 006/138] Complete CreateRandomReaderOp --- paddle/framework/reader.h | 37 +++++++----- paddle/operators/create_reader_op.cc | 90 ++++++++++++++++++++++++++++ 2 files changed, 111 insertions(+), 16 deletions(-) create mode 100644 paddle/operators/create_reader_op.cc diff --git a/paddle/framework/reader.h b/paddle/framework/reader.h index 3954a1bea8a..0669a7c7c75 100644 --- a/paddle/framework/reader.h +++ b/paddle/framework/reader.h @@ -33,8 +33,6 @@ class ReaderBase { class FileReader : public ReaderBase { public: - explicit FileReader(const std::vector& shapes) : shapes_(shapes) {} - DDim shape(size_t idx) const override; std::vector shapes() const override { return shapes_; } @@ -44,8 +42,6 @@ class FileReader : public ReaderBase { class ReaderDecorator : public ReaderBase { public: - explicit ReaderDecorator(ReaderBase* reader) : reader_(reader) {} - bool HasNext() const override { return reader_->HasNext(); } DDim shape(size_t idx) const override { return reader_->shape(idx); } @@ -60,19 +56,19 @@ class ReaderDecorator : public ReaderBase { template class RandomReader : public FileReader { public: - RandomReader(const std::vector& shapes, float min, float max) - : FileReader(shapes), min_(min), max_(max) { + void Initialize(const std::vector& shapes, float min, float max) { PADDLE_ENFORCE_LE(min, max, "'min' should be less than or equal to 'max'.(%f vs %f)", min, max); + shapes_ = shapes; + min_ = min; + max_ = max; + unsigned int seed = std::random_device()(); + engine_.seed(seed); + dist_ = std::uniform_real_distribution(min_, max_); } std::vector ReadNext() override { - std::minstd_rand engine; - unsigned int seed = std::random_device()(); - engine.seed(seed); - std::uniform_real_distribution dist(min_, max_); - std::vector res; res.reserve(shapes_.size()); for (const DDim& shape : shapes_) { @@ -85,7 +81,7 @@ class RandomReader : public FileReader { T* data = out.mutable_data(platform::CPUPlace()); int64_t numel = product(shape); for (int64_t i = 0; i < numel; ++i) { - data[i] = dist(engine); + data[i] = dist_(engine_); } res.push_back(out); } @@ -97,16 +93,21 @@ class RandomReader : public FileReader { private: float min_; float max_; + std::minstd_rand engine_; + std::uniform_real_distribution dist_; }; // decorators class ShuffleReader : public ReaderDecorator { public: - ShuffleReader(ReaderBase* reader, int buffer_size) - : ReaderDecorator(reader), buffer_size_(buffer_size), iteration_pos_(0) { + void Initialize(ReaderBase* reader, int buffer_size) { + reader_ = reader; + buffer_size_ = buffer_size; + iteration_pos_ = 0; buffer_.reserve(buffer_size); } + std::vector ReadNext() override; private: @@ -117,8 +118,12 @@ class ShuffleReader : public ReaderDecorator { class BatchReader : public ReaderDecorator { public: - BatchReader(ReaderBase* reader, int batch_size) - : ReaderDecorator(reader), batch_size_(batch_size) {} + void Initialize(ReaderBase* reader, int batch_size) { + reader_ = reader; + batch_size_ = batch_size; + buffer_.reserve(batch_size_); + } + std::vector ReadNext() override; private: diff --git a/paddle/operators/create_reader_op.cc b/paddle/operators/create_reader_op.cc new file mode 100644 index 00000000000..abdc12087e0 --- /dev/null +++ b/paddle/operators/create_reader_op.cc @@ -0,0 +1,90 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/framework/op_registry.h" +#include "paddle/framework/reader.h" + +namespace paddle { +namespace operators { + +// general infershape +class CreateReaderInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of CreateReaderOp should not be null."); + } +}; + +template +class CreateRandomReaderOp : public framework::OperatorBase { + public: + using framework::OperatorBase::OperatorBase; + void Run(const framework::Scope& scope, + const platform::Place& dev_place) const override { + const auto& shape_concat = Attr>("shape_concat"); + const auto& ranks = Attr>("ranks"); + PADDLE_ENFORCE_EQ(std::accumulate(ranks.begin(), ranks.end(), 0), + int(shape_concat.size()), + "The accumulate of all ranks should be equal to the " + "shape concat's length."); + std::vector shapes; + int offset = 0; + for (int len : ranks) { + auto start_it = shape_concat.begin() + offset; + auto end_it = start_it + len; + shapes.push_back( + framework::make_ddim(std::vector(start_it, end_it))); + offset += len; + } + auto* out = scope.FindVar(Output("Out")) + ->template GetMutable>(); + out->Initialize(shapes, Attr("min"), Attr("max")); + } +}; + +class CreateRandomReaderOpMaker : public framework::OpProtoAndCheckerMaker { + public: + CreateRandomReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(op_proto, op_checker) { + AddOutput("Out", "(RandomReader) The created random reader."); + AddAttr>("shape_concat", + "The concat of all data's shapes."); + AddAttr>( + "ranks", + "The ranks of each data." + "e.g." + "shape_concat = [2,3,4,5,6]" + "ranks = [3,2]" + "It means the reader will generate two data each time," + "whose shapes are [2,3,4] and [5,6] respectively."); + AddAttr("min", "The lower bound of reader's uniform distribution."); + AddAttr("max", "The upper bound of reader's uniform distribution."); + AddComment(R"DOC( + CreateRandomReader Operator + + This Op creates a random reader. + The reader generates random data instead of really reading from files. + Generated data follow an uniform distribution between 'min' and 'max'. + )DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(create_random_reader, ops::CreateRandomReaderOp, + ops::CreateReaderInferShape, ops::CreateRandomReaderOpMaker, + paddle::framework::EmptyGradOpMaker); \ No newline at end of file -- GitLab From 1696cb0e510a8d52427b6ca96900bab4e03b5af1 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 1 Feb 2018 21:10:16 +0800 Subject: [PATCH 007/138] Complete CreateShuffleReaderOp --- paddle/framework/reader.h | 41 +++++++++++++------ paddle/operators/CMakeLists.txt | 5 ++- paddle/operators/create_reader_op.cc | 59 +++++++++++++++++++++++++--- 3 files changed, 87 insertions(+), 18 deletions(-) diff --git a/paddle/framework/reader.h b/paddle/framework/reader.h index 0669a7c7c75..18a34bfd170 100644 --- a/paddle/framework/reader.h +++ b/paddle/framework/reader.h @@ -33,6 +33,10 @@ class ReaderBase { class FileReader : public ReaderBase { public: + explicit FileReader(const std::vector& shapes) : shapes_(shapes) { + PADDLE_ENFORCE(!shapes_.empty()); + } + DDim shape(size_t idx) const override; std::vector shapes() const override { return shapes_; } @@ -42,6 +46,10 @@ class FileReader : public ReaderBase { class ReaderDecorator : public ReaderBase { public: + explicit ReaderDecorator(ReaderBase* reader) : reader_(reader) { + PADDLE_ENFORCE_NOT_NULL(reader_); + } + bool HasNext() const override { return reader_->HasNext(); } DDim shape(size_t idx) const override { return reader_->shape(idx); } @@ -56,13 +64,11 @@ class ReaderDecorator : public ReaderBase { template class RandomReader : public FileReader { public: - void Initialize(const std::vector& shapes, float min, float max) { + RandomReader(const std::vector& shapes, float min, float max) + : FileReader(shapes), min_(min), max_(max) { PADDLE_ENFORCE_LE(min, max, "'min' should be less than or equal to 'max'.(%f vs %f)", min, max); - shapes_ = shapes; - min_ = min; - max_ = max; unsigned int seed = std::random_device()(); engine_.seed(seed); dist_ = std::uniform_real_distribution(min_, max_); @@ -101,10 +107,8 @@ class RandomReader : public FileReader { class ShuffleReader : public ReaderDecorator { public: - void Initialize(ReaderBase* reader, int buffer_size) { - reader_ = reader; - buffer_size_ = buffer_size; - iteration_pos_ = 0; + ShuffleReader(ReaderBase* reader, int buffer_size) + : ReaderDecorator(reader), buffer_size_(buffer_size), iteration_pos_(0) { buffer_.reserve(buffer_size); } @@ -118,9 +122,8 @@ class ShuffleReader : public ReaderDecorator { class BatchReader : public ReaderDecorator { public: - void Initialize(ReaderBase* reader, int batch_size) { - reader_ = reader; - batch_size_ = batch_size; + BatchReader(ReaderBase* reader, int batch_size) + : ReaderDecorator(reader), batch_size_(batch_size) { buffer_.reserve(batch_size_); } @@ -131,5 +134,21 @@ class BatchReader : public ReaderDecorator { std::vector> buffer_; }; +class ReaderHolder { + public: + void Reset(ReaderBase* reader) { reader_.reset(reader); } + + ReaderBase* Get() const { return reader_.get(); } + + std::vector ReadNext() { return reader_->ReadNext(); } + bool HasNext() const { return reader_->HasNext(); } + + DDim shape(size_t idx) const { return reader_->shape(idx); } + std::vector shapes() const { return reader_->shapes(); } + + private: + std::unique_ptr reader_; +}; + } // namespace framework } // namespace paddle diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index 48cf5816cce..3684eb0dcca 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -62,7 +62,7 @@ function(op_library TARGET) endif() # Define operators that don't need pybind here. - foreach(manual_pybind_op "net_op" "compare_op" "logical_op" "nccl_op" "tensor_array_read_write_op") + foreach(manual_pybind_op "net_op" "compare_op" "logical_op" "nccl_op" "tensor_array_read_write_op" "create_reader_op") if ("${TARGET}" STREQUAL "${manual_pybind_op}") set(pybind_flag 1) endif() @@ -153,6 +153,7 @@ op_library(recurrent_op DEPS executor) op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale math_function) op_library(cos_sim_op DEPS cos_sim_functor) op_library(parallel_do_op DEPS executor) +op_library(create_reader_op DEPS reader) # Regist multiple Kernel to pybind if (WITH_GPU) @@ -178,7 +179,7 @@ list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS}) foreach(src ${GENERAL_OPS}) op_library(${src}) endforeach() -file(APPEND ${pybind_file} "USE_OP(less_than);\nUSE_OP(logical_and);\nUSE_NO_KERNEL_OP(read_from_array);\n") +file(APPEND ${pybind_file} "USE_OP(less_than);\nUSE_OP(logical_and);\nUSE_NO_KERNEL_OP(read_from_array);\nUSE_NO_KERNEL_OP(create_random_reader);\n") set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library") diff --git a/paddle/operators/create_reader_op.cc b/paddle/operators/create_reader_op.cc index abdc12087e0..29b487e10b5 100644 --- a/paddle/operators/create_reader_op.cc +++ b/paddle/operators/create_reader_op.cc @@ -18,7 +18,7 @@ namespace paddle { namespace operators { -// general infershape +// general infershape for file readers class CreateReaderInferShape : public framework::InferShapeBase { public: void operator()(framework::InferShapeContext* ctx) const override { @@ -35,6 +35,7 @@ class CreateRandomReaderOp : public framework::OperatorBase { const platform::Place& dev_place) const override { const auto& shape_concat = Attr>("shape_concat"); const auto& ranks = Attr>("ranks"); + PADDLE_ENFORCE(!shape_concat.empty() && !ranks.empty()); PADDLE_ENFORCE_EQ(std::accumulate(ranks.begin(), ranks.end(), 0), int(shape_concat.size()), "The accumulate of all ranks should be equal to the " @@ -49,8 +50,9 @@ class CreateRandomReaderOp : public framework::OperatorBase { offset += len; } auto* out = scope.FindVar(Output("Out")) - ->template GetMutable>(); - out->Initialize(shapes, Attr("min"), Attr("max")); + ->template GetMutable(); + out->Reset(new framework::RandomReader(shapes, Attr("min"), + Attr("max"))); } }; @@ -58,7 +60,7 @@ class CreateRandomReaderOpMaker : public framework::OpProtoAndCheckerMaker { public: CreateRandomReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(op_proto, op_checker) { - AddOutput("Out", "(RandomReader) The created random reader."); + AddOutput("Out", "(ReaderHolder) The created random reader."); AddAttr>("shape_concat", "The concat of all data's shapes."); AddAttr>( @@ -81,10 +83,57 @@ class CreateRandomReaderOpMaker : public framework::OpProtoAndCheckerMaker { } }; +class CreateShuffleReaderInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Underlying_reader"), + "Input(Underlying_reader) of CreateShuffleReaderOp should " + "not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of CreateShuffleReaderOp should not be null."); + } +}; + +class CreateShuffleReaderOp : public framework::OperatorBase { + public: + using framework::OperatorBase::OperatorBase; + void Run(const framework::Scope& scope, + const platform::Place& dev_place) const override { + const auto& underlying_reader = scope.FindVar(Input("Underlying_reader")) + ->Get(); + auto* out = scope.FindVar(Output("Out")) + ->template GetMutable(); + out->Reset(new framework::ShuffleReader(underlying_reader.Get(), + Attr("buffer_size"))); + } +}; + +class CreateShuffleReaderOpMaker : public framework::OpProtoAndCheckerMaker { + public: + CreateShuffleReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(op_proto, op_checker) { + AddInput( + "Underlying_reader", + "(ReaderHolder) The underlying reader for creating a shuffle reader."); + AddOutput("Out", "(ReaderHolder) The created shuffle reader."); + AddAttr("buffer_size", "The shuffle buffer size.").GreaterThan(0); + AddComment(R"DOC( + CreateShuffleReader Operator + + A shuffle reader takes another reader as its 'underlying reader' + and output the underlying reader's outputs in a shuffled order. + )DOC"); + } +}; + } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OPERATOR(create_random_reader, ops::CreateRandomReaderOp, ops::CreateReaderInferShape, ops::CreateRandomReaderOpMaker, - paddle::framework::EmptyGradOpMaker); \ No newline at end of file + paddle::framework::EmptyGradOpMaker); +REGISTER_OPERATOR(create_shuffle_reader, ops::CreateShuffleReaderOp, + ops::CreateShuffleReaderInferShape, + ops::CreateShuffleReaderOpMaker, + paddle::framework::EmptyGradOpMaker); -- GitLab From 3dfd1da138805e0c98be4c57f3ea73d62865cd18 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Thu, 1 Feb 2018 23:43:33 +0800 Subject: [PATCH 008/138] Complete CreateBatchReaderOp --- paddle/framework/reader.h | 12 ++--- paddle/operators/create_reader_op.cc | 71 +++++++++++++++++++++------- 2 files changed, 61 insertions(+), 22 deletions(-) diff --git a/paddle/framework/reader.h b/paddle/framework/reader.h index 18a34bfd170..8275ea474b4 100644 --- a/paddle/framework/reader.h +++ b/paddle/framework/reader.h @@ -44,9 +44,9 @@ class FileReader : public ReaderBase { std::vector shapes_; }; -class ReaderDecorator : public ReaderBase { +class DecoratedReader : public ReaderBase { public: - explicit ReaderDecorator(ReaderBase* reader) : reader_(reader) { + explicit DecoratedReader(ReaderBase* reader) : reader_(reader) { PADDLE_ENFORCE_NOT_NULL(reader_); } @@ -105,10 +105,10 @@ class RandomReader : public FileReader { // decorators -class ShuffleReader : public ReaderDecorator { +class ShuffleReader : public DecoratedReader { public: ShuffleReader(ReaderBase* reader, int buffer_size) - : ReaderDecorator(reader), buffer_size_(buffer_size), iteration_pos_(0) { + : DecoratedReader(reader), buffer_size_(buffer_size), iteration_pos_(0) { buffer_.reserve(buffer_size); } @@ -120,10 +120,10 @@ class ShuffleReader : public ReaderDecorator { size_t iteration_pos_; }; -class BatchReader : public ReaderDecorator { +class BatchReader : public DecoratedReader { public: BatchReader(ReaderBase* reader, int batch_size) - : ReaderDecorator(reader), batch_size_(batch_size) { + : DecoratedReader(reader), batch_size_(batch_size) { buffer_.reserve(batch_size_); } diff --git a/paddle/operators/create_reader_op.cc b/paddle/operators/create_reader_op.cc index 29b487e10b5..9cf27bbfc69 100644 --- a/paddle/operators/create_reader_op.cc +++ b/paddle/operators/create_reader_op.cc @@ -19,11 +19,22 @@ namespace paddle { namespace operators { // general infershape for file readers -class CreateReaderInferShape : public framework::InferShapeBase { +class CreateFileReaderInferShape : public framework::InferShapeBase { public: void operator()(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of CreateReaderOp should not be null."); + "The output file reader should not be null."); + } +}; + +// general infershape for decorated readers +class CreateDecoratedReaderInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Underlying_reader"), + "Input(Underlying_reader) should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "The output decorated reader should not be null."); } }; @@ -83,17 +94,6 @@ class CreateRandomReaderOpMaker : public framework::OpProtoAndCheckerMaker { } }; -class CreateShuffleReaderInferShape : public framework::InferShapeBase { - public: - void operator()(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Underlying_reader"), - "Input(Underlying_reader) of CreateShuffleReaderOp should " - "not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of CreateShuffleReaderOp should not be null."); - } -}; - class CreateShuffleReaderOp : public framework::OperatorBase { public: using framework::OperatorBase::OperatorBase; @@ -121,7 +121,41 @@ class CreateShuffleReaderOpMaker : public framework::OpProtoAndCheckerMaker { CreateShuffleReader Operator A shuffle reader takes another reader as its 'underlying reader' - and output the underlying reader's outputs in a shuffled order. + and yields the underlying reader's outputs in a shuffled order. + )DOC"); + } +}; + +class CreateBatchReaderOp : public framework::OperatorBase { + public: + using framework::OperatorBase::OperatorBase; + void Run(const framework::Scope& scope, + const platform::Place& dev_place) const override { + const auto& underlying_reader = scope.FindVar(Input("Underlying_reader")) + ->Get(); + auto* out = scope.FindVar(Output("Out")) + ->template GetMutable(); + out->Reset(new framework::BatchReader(underlying_reader.Get(), + Attr("batch_size"))); + } +}; + +class CreateBatchReaderOpMaker : public framework::OpProtoAndCheckerMaker { + public: + CreateBatchReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(op_proto, op_checker) { + AddInput( + "Underlying_reader", + "(ReaderHolder) The underlying reader for creating a batch reader."); + AddOutput("Out", "(ReaderHolder) The created batch reader."); + AddAttr("batch_size", + "How many instances the batch reader yields each time.") + .GreaterThan(0); + AddComment(R"DOC( + CreateBatchReader Operator + + A batch reader takes another reader as its 'underlying reader', + gathers the underlying reader's outputs and then yields them in batches. )DOC"); } }; @@ -131,9 +165,14 @@ class CreateShuffleReaderOpMaker : public framework::OpProtoAndCheckerMaker { namespace ops = paddle::operators; REGISTER_OPERATOR(create_random_reader, ops::CreateRandomReaderOp, - ops::CreateReaderInferShape, ops::CreateRandomReaderOpMaker, + ops::CreateFileReaderInferShape, + ops::CreateRandomReaderOpMaker, paddle::framework::EmptyGradOpMaker); REGISTER_OPERATOR(create_shuffle_reader, ops::CreateShuffleReaderOp, - ops::CreateShuffleReaderInferShape, + ops::CreateDecoratedReaderInferShape, ops::CreateShuffleReaderOpMaker, paddle::framework::EmptyGradOpMaker); +REGISTER_OPERATOR(create_batch_reader, ops::CreateBatchReaderOp, + ops::CreateDecoratedReaderInferShape, + ops::CreateBatchReaderOpMaker, + paddle::framework::EmptyGradOpMaker); -- GitLab From 53e697c11d30a84e59fab7d1c1d54718eed14f66 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Fri, 2 Feb 2018 00:06:46 +0800 Subject: [PATCH 009/138] refine code --- paddle/framework/reader.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/paddle/framework/reader.h b/paddle/framework/reader.h index 8275ea474b4..f450e67689a 100644 --- a/paddle/framework/reader.h +++ b/paddle/framework/reader.h @@ -66,9 +66,8 @@ class RandomReader : public FileReader { public: RandomReader(const std::vector& shapes, float min, float max) : FileReader(shapes), min_(min), max_(max) { - PADDLE_ENFORCE_LE(min, max, - "'min' should be less than or equal to 'max'.(%f vs %f)", - min, max); + PADDLE_ENFORCE_LE( + min, max, "'min' shouldn't be greater than 'max'.(%f vs %f)", min, max); unsigned int seed = std::random_device()(); engine_.seed(seed); dist_ = std::uniform_real_distribution(min_, max_); @@ -103,7 +102,7 @@ class RandomReader : public FileReader { std::uniform_real_distribution dist_; }; -// decorators +// decorated readers class ShuffleReader : public DecoratedReader { public: @@ -134,6 +133,8 @@ class BatchReader : public DecoratedReader { std::vector> buffer_; }; +// The ReaderHolder is used as readers' unified wrapper, +// making it easier to access different type readers in Variables. class ReaderHolder { public: void Reset(ReaderBase* reader) { reader_.reset(reader); } -- GitLab From dc8390d8c3038173d70d4c7cc9f4e76bb1ddc587 Mon Sep 17 00:00:00 2001 From: Kexin Zhao Date: Thu, 1 Feb 2018 13:46:35 -0800 Subject: [PATCH 010/138] initial commit --- .../tests/book/test_rnn_encoder_decoder.py | 51 +++++++++++++++++-- 1 file changed, 47 insertions(+), 4 deletions(-) diff --git a/python/paddle/v2/fluid/tests/book/test_rnn_encoder_decoder.py b/python/paddle/v2/fluid/tests/book/test_rnn_encoder_decoder.py index fdc60861760..593d0013c9d 100644 --- a/python/paddle/v2/fluid/tests/book/test_rnn_encoder_decoder.py +++ b/python/paddle/v2/fluid/tests/book/test_rnn_encoder_decoder.py @@ -145,7 +145,7 @@ def seq_to_seq_net(): cost = fluid.layers.cross_entropy(input=prediction, label=label) avg_cost = fluid.layers.mean(x=cost) - return avg_cost + return avg_cost, prediction def to_lodtensor(data, place): @@ -163,8 +163,8 @@ def to_lodtensor(data, place): return res -def main(): - avg_cost = seq_to_seq_net() +def train(save_dirname=None): + [avg_cost, prediction] = seq_to_seq_net() optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4) optimizer.minimize(avg_cost) @@ -196,9 +196,52 @@ def main(): print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) + " avg_cost=" + str(avg_cost_val)) if batch_id > 3: + if save_dirname is not None: + fluid.io.save_inference_model(save_dirname, [ + 'source_sequence', 'target_sequence', 'label_sequence' + ], [prediction], exe) exit(0) batch_id += 1 +def inference(save_dirname=None): + if save_dirname is None: + return + + place = fluid.CPUPlace() + exe = fluid.Executor(place) + + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + + data = [[0, 1, 0, 1], [0, 1, 1, 0, 0, 1]] + word_data = to_lodtensor(data, place) + trg_word = to_lodtensor(data, place) + trg_word_next = to_lodtensor(data, place) + + # Construct feed as a dictionary of {feed_target_name: feed_target_data} + # and results will contain a list of data corresponding to fetch_targets. + print(feed_target_names) + assert feed_target_names[0] == 'source_sequence' + assert feed_target_names[1] == 'target_sequence' + assert feed_target_names[2] == 'label_sequence' + results = exe.run(inference_program, + feed={ + feed_target_names[0]: word_data, + feed_target_names[1]: trg_word, + feed_target_names[2]: trg_word_next + }, + fetch_list=fetch_targets) + + print("Inference Shape: ", results[0].shape) + print("infer results: ", results[0]) + + if __name__ == '__main__': - main() + save_dirname = "rnn_encoder_decoder.inference.model" + train(save_dirname) + infer(save_dirname) -- GitLab From 6695a204cd739a000ea1d647143d5145c0e6974f Mon Sep 17 00:00:00 2001 From: xuwei06 Date: Wed, 10 Jan 2018 14:38:15 -0800 Subject: [PATCH 011/138] helper functions fetch_var and get_var fetch_var for getting the values of a variable with given name get_var for getting the Variable with given name --- python/paddle/v2/fluid/executor.py | 48 ++++++++++++++----- python/paddle/v2/fluid/framework.py | 20 ++++++++ python/paddle/v2/fluid/layers/tensor.py | 8 ++-- .../paddle/v2/fluid/tests/test_fetch_var.py | 23 +++++++++ 4 files changed, 85 insertions(+), 14 deletions(-) create mode 100644 python/paddle/v2/fluid/tests/test_fetch_var.py diff --git a/python/paddle/v2/fluid/executor.py b/python/paddle/v2/fluid/executor.py index 9f48815b8b8..af69ce2abcd 100644 --- a/python/paddle/v2/fluid/executor.py +++ b/python/paddle/v2/fluid/executor.py @@ -17,7 +17,9 @@ import contextlib from framework import Program, default_main_program from . import core -__all__ = ['Executor', 'global_scope', 'scope_guard', 'switch_scope'] +__all__ = [ + 'Executor', 'global_scope', 'scope_guard', 'switch_scope', 'fetch_var' +] g_scope = core.Scope() @@ -80,12 +82,12 @@ def has_feed_operators(block, feed_targets, feed_holder_name): Args: block: a block instance (typically global block of a program) feed_targets: a dictionary of {feed_target_name: feed_target_data} - feed_holder_name: the name of the variable that holds the data of - all feed targets. The type of this feed_holder variable is + feed_holder_name: the name of the variable that holds the data of + all feed targets. The type of this feed_holder variable is FEED_MINIBATCH, which is essentially vector. Returns: - A boolean value that indicates whether a block has feed operators + A boolean value that indicates whether a block has feed operators that match the info contained in feed_targets and feed_holder_name. """ @@ -108,7 +110,7 @@ def has_feed_operators(block, feed_targets, feed_holder_name): def has_fetch_operators(block, fetch_targets, fetch_holder_name): """ Check whether the block already has fetch operators. - + Return false if the block does not have any fetch operators. If some fetch operators have been appended to the block, check that the info contained in these fetch operators matches the fetch_targets @@ -118,13 +120,13 @@ def has_fetch_operators(block, fetch_targets, fetch_holder_name): Args: block: a block instance (typically global block of a program) fetch_targets: a dictionary of {fetch_target_name: fetch_target_data} - fetch_holder_name: the name of the variable that holds the data of - all fetch targets. The type of this fetch_holder variable is - FETCH_LIST, which is essentially vector. + fetch_holder_name: the name of the variable that holds the data of + all fetch targets. The type of this fetch_holder variable is + FETCH_LIST, which is essentially vector. - Return: - A boolean value that indicates whether a block has fetch operators - that match the info contained in fetch_targets and fetch_holder_name. + Return: + A boolean value that indicates whether a block has fetch operators + that match the info contained in fetch_targets and fetch_holder_name. """ fetch_count = 0 @@ -146,6 +148,30 @@ def has_fetch_operators(block, fetch_targets, fetch_holder_name): return fetch_count > 0 +def fetch_var(name, scope=None, return_numpy=True): + """ + Fetch the value of the variable with the given name from the given scope + Args: + name(str): name of the variable + scope(core.Scope|None): scope object. + If None, global_scope() will be used. + return_numpy(bool): whether convert the tensor to numpy.ndarray + Returns: + LodTensor|numpy.ndarray + """ + assert isinstance(name, str) + if scope is None: + scope = global_scope() + assert isinstance(scope, core.Scope) + + var = global_scope().find_var(name) + assert var is not None, "Cannot find '%s' in scope." % name + tensor = var.get_tensor() + if return_numpy: + tensor = as_numpy(tensor) + return tensor + + class Executor(object): def __init__(self, places): if not isinstance(places, list) and not isinstance(places, tuple): diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py index 7f5187d2998..7fcd19b215f 100644 --- a/python/paddle/v2/fluid/framework.py +++ b/python/paddle/v2/fluid/framework.py @@ -31,6 +31,7 @@ __all__ = [ 'program_guard', 'switch_startup_program', 'switch_main_program', + 'get_var', ] EMPTY_VAR_NAME = core.kEmptyVarName() @@ -1124,3 +1125,22 @@ def program_guard(main_program, startup_program=None): switch_main_program(main_program) if startup_program is not None: switch_startup_program(startup_program) + + +def get_var(name, program=None): + """ + Get a variable by name from the global block of a program + Args: + name(str): name of the variable + program(Program|None): program object. + If None, default_global_program() will be used. + + Returns: + Variable + """ + if program is None: + program = default_main_program() + assert isinstance(name, str) + assert isinstance(name, Program) + + return program.global_block().var(name) diff --git a/python/paddle/v2/fluid/layers/tensor.py b/python/paddle/v2/fluid/layers/tensor.py index c435c5206d1..27067d458d4 100644 --- a/python/paddle/v2/fluid/layers/tensor.py +++ b/python/paddle/v2/fluid/layers/tensor.py @@ -35,13 +35,15 @@ __all__ = [ ] -def create_tensor(dtype, name=None): +def create_tensor(dtype, name=None, persistable=False): helper = LayerHelper("create_tensor", **locals()) - return helper.create_variable(name=helper.name, dtype=dtype) + return helper.create_variable( + name=helper.name, dtype=dtype, persistable=persistable) def create_parameter(shape, dtype, + name=None, attr=None, is_bias=False, default_initializer=None): @@ -62,7 +64,7 @@ def create_parameter(shape, """ helper = LayerHelper("create_parameter", **locals()) if attr is None: - attr = ParamAttr() + attr = ParamAttr(name=name) return helper.create_parameter(attr, shape, dtype, is_bias, default_initializer) diff --git a/python/paddle/v2/fluid/tests/test_fetch_var.py b/python/paddle/v2/fluid/tests/test_fetch_var.py new file mode 100644 index 00000000000..670ab54f517 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_fetch_var.py @@ -0,0 +1,23 @@ +import paddle.v2.fluid as fluid +import paddle.v2.fluid.layers as layers +import op_test +import numpy +import unittest + + +class TestFetchVar(op_test.OpTest): + def test_fetch_var(self): + val = numpy.array([1, 3, 5]).astype(numpy.int32) + x = layers.create_tensor(dtype="int32", persistable=True, name="x") + layers.assign(input=val, output=x) + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(fluid.default_main_program(), feed={}, fetch_list=[]) + fetched_x = fluid.fetch_var("x") + self.assertTrue( + numpy.array_equal(fetched_x, val), + "fetch_x=%s val=%s" % (fetched_x, val)) + self.assertEqual(fetched_x.dtype, val.dtype) + + +if __name__ == '__main__': + unittest.main() -- GitLab From 7208190701d9a3c6d1e4dc507940f5d89d12024f Mon Sep 17 00:00:00 2001 From: xuwei06 Date: Fri, 12 Jan 2018 09:27:38 -0800 Subject: [PATCH 012/138] More informative comment and error message for fetch_var() --- python/paddle/v2/fluid/executor.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/python/paddle/v2/fluid/executor.py b/python/paddle/v2/fluid/executor.py index af69ce2abcd..0eddcc3a5ab 100644 --- a/python/paddle/v2/fluid/executor.py +++ b/python/paddle/v2/fluid/executor.py @@ -152,8 +152,10 @@ def fetch_var(name, scope=None, return_numpy=True): """ Fetch the value of the variable with the given name from the given scope Args: - name(str): name of the variable - scope(core.Scope|None): scope object. + name(str): name of the variable. Typically, only persistable variables + can be found in the scope used for running the program. + scope(core.Scope|None): scope object. It should be the scope where + you pass to Executor.run() when running your program. If None, global_scope() will be used. return_numpy(bool): whether convert the tensor to numpy.ndarray Returns: @@ -165,7 +167,10 @@ def fetch_var(name, scope=None, return_numpy=True): assert isinstance(scope, core.Scope) var = global_scope().find_var(name) - assert var is not None, "Cannot find '%s' in scope." % name + assert var is not None, ( + "Cannot find " + name + " in scope. Perhaps you need to make the" + " variable persistable by using var.persistable = True in your" + " program.") tensor = var.get_tensor() if return_numpy: tensor = as_numpy(tensor) -- GitLab From 901cab9ed3e0838954f0015221093fc1d64b5795 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Fri, 2 Feb 2018 13:52:41 +0800 Subject: [PATCH 013/138] Add `make clean` in docker/build.sh --- paddle/scripts/docker/build.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index df7310d6b70..59f3af03986 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -79,6 +79,7 @@ function run_build() { Building in /paddle/build ... ============================================ EOF + make clean make -j `nproc` } -- GitLab From 71a70f209ac63b6f351bd3c399ddff804da090f7 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Wed, 31 Jan 2018 17:07:51 +0800 Subject: [PATCH 014/138] refine gradient --- paddle/operators/layer_norm_op.cc | 42 ++++++++++++------------------- 1 file changed, 16 insertions(+), 26 deletions(-) diff --git a/paddle/operators/layer_norm_op.cc b/paddle/operators/layer_norm_op.cc index 1c6d2ae4d05..8fcac00e08f 100644 --- a/paddle/operators/layer_norm_op.cc +++ b/paddle/operators/layer_norm_op.cc @@ -291,32 +291,28 @@ class LayerNormGradKernel auto d_x_map = EigenMatrixMapRowMajor(d_x->data(), left, right); auto triple_product_func = [](T ele) { return ele * ele * ele; }; auto inv_std_func = [](T ele) { return std::sqrt(1 / ele); }; + + auto inv_std_map = var_map.unaryExpr(inv_std_func).eval(); // TODO(zcd): these code can be refined if (d_scale) { auto scale_map = ConstEigenMatrixMapRowMajor(scale->data(), 1, right); // dy_dx - auto dx_end = var_map.unaryExpr(inv_std_func) - .replicate(1, right) - .cwiseProduct(d_y_map) - .cwiseProduct(scale_map.replicate(left, 1)); + auto dx_end = + inv_std_map.replicate(1, right).cwiseProduct(d_y_map).cwiseProduct( + scale_map.replicate(left, 1)); + // dy_dmean_dx - auto dx_mean = (T(-1.0) / right) * - var_map.unaryExpr(inv_std_func) - .replicate(1, right) - .cwiseProduct(d_y_map) - .cwiseProduct(scale_map.replicate(left, 1)) - .rowwise() - .sum() - .replicate(1, right); + auto dx_mean = + (T(-1.0) / right) * dx_end.rowwise().sum().replicate(1, right); + // dy_var_dx auto dvar_end_part = (x_map - mean_map.replicate(1, right)) .cwiseProduct(scale_map.replicate(left, 1)) .cwiseProduct(d_y_map) .rowwise() .sum(); - auto dvar_end = var_map.unaryExpr(inv_std_func) - .unaryExpr(triple_product_func) + auto dvar_end = inv_std_map.unaryExpr(triple_product_func) .cwiseProduct(dvar_end_part) .replicate(1, right); auto dx_var = @@ -326,24 +322,18 @@ class LayerNormGradKernel d_x_map = dx_end + dx_mean + dx_var; } else { // dy_dx - auto dx_end = var_map.unaryExpr(inv_std_func) - .replicate(1, right) - .cwiseProduct(d_y_map); + auto dx_end = inv_std_map.replicate(1, right).cwiseProduct(d_y_map); + // dy_dmean_dx - auto dx_mean = (T(-1.0) / right) * - var_map.unaryExpr(inv_std_func) - .replicate(1, right) - .cwiseProduct(d_y_map) - .rowwise() - .sum() - .replicate(1, right); + auto dx_mean = + (T(-1.0) / right) * dx_end.rowwise().sum().replicate(1, right); + // dy_var_dx auto dvar_end_part = (x_map - mean_map.replicate(1, right)) .cwiseProduct(d_y_map) .rowwise() .sum(); - auto dvar_end = var_map.unaryExpr(inv_std_func) - .unaryExpr(triple_product_func) + auto dvar_end = inv_std_map.unaryExpr(triple_product_func) .cwiseProduct(dvar_end_part) .replicate(1, right); auto dx_var = -- GitLab From 37a251ebafce61776b2fea7a2fb2ee16defd14ea Mon Sep 17 00:00:00 2001 From: xuwei06 Date: Fri, 2 Feb 2018 15:46:51 -0800 Subject: [PATCH 015/138] Fix copyright for test_fetch_var.py --- python/paddle/v2/fluid/tests/test_fetch_var.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/python/paddle/v2/fluid/tests/test_fetch_var.py b/python/paddle/v2/fluid/tests/test_fetch_var.py index 670ab54f517..ed75a350b0b 100644 --- a/python/paddle/v2/fluid/tests/test_fetch_var.py +++ b/python/paddle/v2/fluid/tests/test_fetch_var.py @@ -1,3 +1,17 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import paddle.v2.fluid as fluid import paddle.v2.fluid.layers as layers import op_test -- GitLab From 76e188e5c6b39fc7cccab9d5b64bd19163fafb77 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Fri, 2 Feb 2018 15:09:23 +0800 Subject: [PATCH 016/138] Add layer norm [GPU] --- paddle/operators/compare_op.h | 2 +- paddle/operators/elementwise_add_op.h | 3 +- paddle/operators/elementwise_div_op.h | 3 +- paddle/operators/elementwise_max_op.h | 3 +- paddle/operators/elementwise_min_op.h | 3 +- paddle/operators/elementwise_mul_op.h | 3 +- paddle/operators/elementwise_op_function.h | 4 +- paddle/operators/elementwise_pow_op.h | 3 +- paddle/operators/elementwise_sub_op.h | 3 +- paddle/operators/layer_norm_op.cc | 9 +- paddle/operators/layer_norm_op.cu | 245 +++++++++++++++++++++ paddle/operators/math/math_function.cc | 6 + paddle/operators/math/math_function.cu | 25 +++ paddle/operators/math/math_function.h | 12 + paddle/operators/math/math_function_impl.h | 82 +++++++ 15 files changed, 393 insertions(+), 13 deletions(-) create mode 100644 paddle/operators/layer_norm_op.cu diff --git a/paddle/operators/compare_op.h b/paddle/operators/compare_op.h index b275fd75b35..79b8c6f59c7 100644 --- a/paddle/operators/compare_op.h +++ b/paddle/operators/compare_op.h @@ -62,7 +62,7 @@ class CompareOpKernel z->mutable_data(context.GetPlace()); int axis = context.Attr("axis"); ElementwiseComputeEx(context, x, y, axis, - z); + Functor(), z); } }; diff --git a/paddle/operators/elementwise_add_op.h b/paddle/operators/elementwise_add_op.h index c32288d6984..c24f97a8509 100644 --- a/paddle/operators/elementwise_add_op.h +++ b/paddle/operators/elementwise_add_op.h @@ -35,7 +35,8 @@ class ElementwiseAddKernel : public framework::OpKernel { auto* z = ctx.Output("Out"); z->mutable_data(ctx.GetPlace()); int axis = ctx.Attr("axis"); - ElementwiseComputeEx, DeviceContext, T>(ctx, x, y, axis, z); + ElementwiseComputeEx, DeviceContext, T>(ctx, x, y, axis, + AddFunctor(), z); } }; diff --git a/paddle/operators/elementwise_div_op.h b/paddle/operators/elementwise_div_op.h index 07ebade31ff..dc863cc598e 100644 --- a/paddle/operators/elementwise_div_op.h +++ b/paddle/operators/elementwise_div_op.h @@ -35,7 +35,8 @@ class ElementwiseDivKernel : public framework::OpKernel { auto* z = ctx.Output("Out"); z->mutable_data(ctx.GetPlace()); int axis = ctx.Attr("axis"); - ElementwiseComputeEx, DeviceContext, T>(ctx, x, y, axis, z); + ElementwiseComputeEx, DeviceContext, T>(ctx, x, y, axis, + DivFunctor(), z); } }; diff --git a/paddle/operators/elementwise_max_op.h b/paddle/operators/elementwise_max_op.h index 717e45ab31d..67efe4e1511 100644 --- a/paddle/operators/elementwise_max_op.h +++ b/paddle/operators/elementwise_max_op.h @@ -35,7 +35,8 @@ class ElementwiseMaxKernel : public framework::OpKernel { auto* z = ctx.Output("Out"); z->mutable_data(ctx.GetPlace()); int axis = ctx.Attr("axis"); - ElementwiseComputeEx, DeviceContext, T>(ctx, x, y, axis, z); + ElementwiseComputeEx, DeviceContext, T>(ctx, x, y, axis, + MaxFunctor(), z); } }; diff --git a/paddle/operators/elementwise_min_op.h b/paddle/operators/elementwise_min_op.h index 0de9a91c52b..cf11759404d 100644 --- a/paddle/operators/elementwise_min_op.h +++ b/paddle/operators/elementwise_min_op.h @@ -35,7 +35,8 @@ class ElementwiseMinKernel : public framework::OpKernel { auto* z = ctx.Output("Out"); z->mutable_data(ctx.GetPlace()); int axis = ctx.Attr("axis"); - ElementwiseComputeEx, DeviceContext, T>(ctx, x, y, axis, z); + ElementwiseComputeEx, DeviceContext, T>(ctx, x, y, axis, + MinFunctor(), z); } }; diff --git a/paddle/operators/elementwise_mul_op.h b/paddle/operators/elementwise_mul_op.h index ae7a71e0244..773125f5ca5 100644 --- a/paddle/operators/elementwise_mul_op.h +++ b/paddle/operators/elementwise_mul_op.h @@ -34,7 +34,8 @@ class ElementwiseMulKernel : public framework::OpKernel { auto* z = ctx.Output("Out"); z->mutable_data(ctx.GetPlace()); int axis = ctx.Attr("axis"); - ElementwiseComputeEx, DeviceContext, T>(ctx, x, y, axis, z); + ElementwiseComputeEx, DeviceContext, T>(ctx, x, y, axis, + MulFunctor(), z); } }; diff --git a/paddle/operators/elementwise_op_function.h b/paddle/operators/elementwise_op_function.h index 213fe1f5a81..74abf7c4a58 100644 --- a/paddle/operators/elementwise_op_function.h +++ b/paddle/operators/elementwise_op_function.h @@ -365,10 +365,10 @@ template void ElementwiseComputeEx(const framework::ExecutionContext& ctx, const framework::Tensor* x, - const framework::Tensor* y, int axis, + const framework::Tensor* y, int axis, Functor func, framework::Tensor* z) { TransformFunctor functor( - x, y, z, ctx.template device_context(), Functor()); + x, y, z, ctx.template device_context(), func); auto x_dims = x->dims(); auto y_dims = y->dims(); diff --git a/paddle/operators/elementwise_pow_op.h b/paddle/operators/elementwise_pow_op.h index 874fd3f09f2..0c5dd031ec4 100644 --- a/paddle/operators/elementwise_pow_op.h +++ b/paddle/operators/elementwise_pow_op.h @@ -36,7 +36,8 @@ class ElementwisePowKernel : public framework::OpKernel { auto* z = ctx.Output("Out"); z->mutable_data(ctx.GetPlace()); int axis = ctx.Attr("axis"); - ElementwiseComputeEx, DeviceContext, T>(ctx, x, y, axis, z); + ElementwiseComputeEx, DeviceContext, T>(ctx, x, y, axis, + PowFunctor(), z); } }; diff --git a/paddle/operators/elementwise_sub_op.h b/paddle/operators/elementwise_sub_op.h index c2749a8e6ba..6a88c5f6b4c 100644 --- a/paddle/operators/elementwise_sub_op.h +++ b/paddle/operators/elementwise_sub_op.h @@ -34,7 +34,8 @@ class ElementwiseSubKernel : public framework::OpKernel { auto* z = ctx.Output("Out"); z->mutable_data(ctx.GetPlace()); int axis = ctx.Attr("axis"); - ElementwiseComputeEx, DeviceContext, T>(ctx, x, y, axis, z); + ElementwiseComputeEx, DeviceContext, T>(ctx, x, y, axis, + SubFunctor(), z); } }; diff --git a/paddle/operators/layer_norm_op.cc b/paddle/operators/layer_norm_op.cc index 8fcac00e08f..6dd18277c9c 100644 --- a/paddle/operators/layer_norm_op.cc +++ b/paddle/operators/layer_norm_op.cc @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/operators/layer_norm_op.h" +#include "paddle/operators/elementwise_op_function.h" +#include "paddle/operators/math/math_function.h" namespace paddle { namespace operators { @@ -353,8 +355,9 @@ namespace ops = paddle::operators; REGISTER_OP(layer_norm, ops::LayerNormOp, ops::LayerNormOpMaker, layer_norm_grad, ops::LayerNormGradOp); REGISTER_OP_CPU_KERNEL( - layer_norm, - ops::LayerNormKernel); + layer_norm, ops::LayerNormKernel, + ops::LayerNormKernel); REGISTER_OP_CPU_KERNEL( layer_norm_grad, - ops::LayerNormGradKernel); + ops::LayerNormGradKernel, + ops::LayerNormGradKernel); diff --git a/paddle/operators/layer_norm_op.cu b/paddle/operators/layer_norm_op.cu new file mode 100644 index 00000000000..a84f5a41eae --- /dev/null +++ b/paddle/operators/layer_norm_op.cu @@ -0,0 +1,245 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/elementwise_op_function.h" +#include "paddle/operators/layer_norm_op.h" +#include "paddle/operators/math/math_function.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +using LoDTensor = framework::LoDTensor; +using DataLayout = framework::DataLayout; + +namespace { +template +struct SubAndSquareFunctor { + inline HOSTDEVICE T operator()(T a, T b) const { return (a - b) * (a - b); } +}; + +template +struct DivAndSqrtFunctor { + explicit DivAndSqrtFunctor(T epsilon) { epsilon_ = epsilon; } + inline HOSTDEVICE T operator()(T a, T b) const { + return a / (sqrt(b) + epsilon_); + } + + private: + T epsilon_; +}; + +template +struct MulFunctor { + inline HOSTDEVICE T operator()(T a, T b) const { return a * b; } +}; + +template +struct AddFunctor { + inline HOSTDEVICE T operator()(T a, T b) const { return a + b; } +}; + +template +struct SubFunctor { + inline HOSTDEVICE T operator()(T a, T b) const { return a - b; } +}; + +template +struct MulInvVarFunctor { + inline HOSTDEVICE T operator()(T a, T b) const { + return a * std::sqrt(1.0 / b); + } +}; +} // namespace + +template +class LayerNormCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &ctx) const override { + const float epsilon = ctx.Attr("epsilon"); + auto *scale = ctx.Input("Scale"); + auto *bias = ctx.Input("Bias"); + auto x = *ctx.Input("X"); + + auto *y = ctx.Output("Y"); + auto *mean = ctx.Output("Mean"); + auto *var = ctx.Output("Variance"); + const auto begin_norm_axis = ctx.Attr("begin_norm_axis"); + + const auto &x_dims = x.dims(); + + y->mutable_data(ctx.GetPlace()); + mean->mutable_data(ctx.GetPlace()); + var->mutable_data(ctx.GetPlace()); + + auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); + int left = static_cast(matrix_dim[0]); + int right = static_cast(matrix_dim[1]); + + framework::DDim matrix_shape({left, right}); + + x.Resize(matrix_shape); + y->Resize(matrix_shape); + + auto &dev_ctx = ctx.template device_context(); + math::RowwiseMean row_mean; + + // functor-> get mean + row_mean(dev_ctx, x, mean); + + // functor-> get variance + ElementwiseComputeEx, DeviceContext, T>( + ctx, &x, mean, /*axis*/ 0, SubAndSquareFunctor(), y); + row_mean(dev_ctx, *y, var); + + // functor-> get norm_out + ElementwiseComputeEx, DeviceContext, T>( + ctx, &x, mean, /*axis*/ 0, SubFunctor(), y); + ElementwiseComputeEx, DeviceContext, T>( + ctx, y, var, /*axis*/ 0, DivAndSqrtFunctor(static_cast(epsilon)), + y); + + framework::DDim scale_shape({right}); + if (scale) { + Tensor scale_matrix = *scale; + scale_matrix.Resize(scale_shape); + ElementwiseComputeEx, DeviceContext, T>( + ctx, y, &scale_matrix, /*axis*/ 1, MulFunctor(), y); + } + if (bias) { + Tensor bias_matrix = *bias; + bias_matrix.Resize(scale_shape); + ElementwiseComputeEx, DeviceContext, T>( + ctx, y, &bias_matrix, /*axis*/ 1, AddFunctor(), y); + } + y->Resize(x_dims); + } +}; + +template +class LayerNormCUDAGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &ctx) const override { + const float epsilon = ctx.Attr("epsilon"); + auto x = *ctx.Input("X"); + auto mean = *ctx.Input("Mean"); + auto var = *ctx.Input("Variance"); + auto scale = *ctx.Input("Scale"); + auto d_y = *ctx.Input(framework::GradVarName("Y")); + const auto begin_norm_axis = ctx.Attr("begin_norm_axis"); + + // init output + auto *d_x = ctx.Output(framework::GradVarName("X")); + auto *d_scale = ctx.Output(framework::GradVarName("Scale")); + auto *d_bias = ctx.Output(framework::GradVarName("Bias")); + + const auto &x_dims = x.dims(); + auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); + int left = static_cast(matrix_dim[0]); + int right = static_cast(matrix_dim[1]); + framework::DDim matrix_shape({left, right}); + + d_y.Resize(matrix_shape); + auto &dev_ctx = ctx.template device_context(); + math::ColwiseSum colwise_sum; + + Tensor temp; + Tensor temp_norm; + if (d_scale || d_x) { + x.Resize(matrix_shape); + temp.mutable_data(matrix_shape, ctx.GetPlace()); + temp_norm.mutable_data(matrix_shape, ctx.GetPlace()); + + // get x_norm + ElementwiseComputeEx, DeviceContext, T>( + ctx, &x, &mean, /*axis*/ 0, SubFunctor(), &temp_norm); + ElementwiseComputeEx, DeviceContext, T>( + ctx, &temp_norm, &var, /*axis*/ 0, + DivAndSqrtFunctor(static_cast(epsilon)), &temp_norm); + } + + if (d_bias) { + d_bias->mutable_data(ctx.GetPlace()); + colwise_sum(dev_ctx, d_y, d_bias); + } + if (d_scale) { + d_scale->mutable_data(ctx.GetPlace()); + ElementwiseComputeEx, DeviceContext, T>( + ctx, &temp_norm, &d_y, /*axis*/ 0, MulFunctor(), &temp); + colwise_sum(dev_ctx, temp, d_scale); + } + + if (d_x) { + framework::DDim vec_shape({left}); + d_x->mutable_data(ctx.GetPlace()); + Tensor temp_vec; + temp_vec.mutable_data(vec_shape, ctx.GetPlace()); + + auto &dev_ctx = ctx.template device_context(); + math::RowwiseMean row_mean; + + if (d_scale) { + // dy_dx + ElementwiseComputeEx, DeviceContext, T>( + ctx, &d_y, &scale, /*axis*/ 1, MulFunctor(), &temp); + framework::Copy(temp, ctx.GetPlace(), ctx.device_context(), d_x); + + // dy_dmean_dx + row_mean(dev_ctx, temp, &temp_vec); + ElementwiseComputeEx, DeviceContext, T>( + ctx, d_x, &temp_vec, /*axis*/ 0, SubFunctor(), d_x); + + // dy_var_dx + ElementwiseComputeEx, DeviceContext, T>( + ctx, &temp, &temp_norm, /*axis*/ 0, MulFunctor(), &temp); + + } else { + // dy_dx + framework::Copy(d_y, ctx.GetPlace(), ctx.device_context(), d_x); + + // dy_dmean_dx + row_mean(dev_ctx, d_y, &temp_vec); + ElementwiseComputeEx, DeviceContext, T>( + ctx, d_x, &temp_vec, /*axis*/ 0, SubFunctor(), d_x); + + // dy_var_dx + ElementwiseComputeEx, DeviceContext, T>( + ctx, &d_y, &temp_norm, /*axis*/ 0, MulFunctor(), &temp); + } + // dy_var_dx + row_mean(dev_ctx, temp, &temp_vec); + ElementwiseComputeEx, DeviceContext, T>( + ctx, &temp_norm, &temp_vec, /*axis*/ 0, MulFunctor(), &temp_norm); + ElementwiseComputeEx, DeviceContext, T>( + ctx, d_x, &temp_norm, /*axis*/ 0, SubFunctor(), d_x); + + ElementwiseComputeEx, DeviceContext, T>( + ctx, d_x, &var, /*axis*/ 0, + DivAndSqrtFunctor(static_cast(epsilon)), d_x); + } + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL( + layer_norm, + ops::LayerNormCUDAKernel, + ops::LayerNormCUDAKernel); +REGISTER_OP_CUDA_KERNEL( + layer_norm_grad, + ops::LayerNormCUDAGradKernel, + ops::LayerNormCUDAGradKernel); diff --git a/paddle/operators/math/math_function.cc b/paddle/operators/math/math_function.cc index dcf4b85e1aa..ce0a5f6cff8 100644 --- a/paddle/operators/math/math_function.cc +++ b/paddle/operators/math/math_function.cc @@ -331,6 +331,12 @@ template struct RowwiseAdd; template struct ColwiseSum; template struct ColwiseSum; +template struct RowwiseSum; +template struct RowwiseSum; + +template struct RowwiseMean; +template struct RowwiseMean; + } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/operators/math/math_function.cu b/paddle/operators/math/math_function.cu index d47a7f818de..c0a107470a4 100644 --- a/paddle/operators/math/math_function.cu +++ b/paddle/operators/math/math_function.cu @@ -325,6 +325,31 @@ void ColwiseSum::operator()( vector->data()); } +template struct RowwiseSum; +// template struct RowwiseSum; +// TODO(zcd): Following ColwiseSum format, need to confirm. +// The RowwiseSum failed in debug mode, +// and only failed for this case. So reimplemented it. +template <> +void RowwiseSum::operator()( + const platform::CUDADeviceContext& context, const framework::Tensor& input, + framework::Tensor* vector) { + auto in_dims = input.dims(); + auto size = input.numel() / in_dims[0]; + PADDLE_ENFORCE_EQ(vector->numel(), in_dims[0]); + framework::Tensor one; + one.mutable_data({size}, context.GetPlace()); + SetConstant set; + set(context, &one, static_cast(1.0)); + gemv( + context, true, static_cast(in_dims[1]), static_cast(in_dims[0]), + 1.0, one.data(), input.data(), 0.0, + vector->data()); +} + +template struct RowwiseMean; +template struct RowwiseMean; + } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/operators/math/math_function.h b/paddle/operators/math/math_function.h index 8cc03c2ba0f..cb14d1e5746 100644 --- a/paddle/operators/math/math_function.h +++ b/paddle/operators/math/math_function.h @@ -128,6 +128,18 @@ struct ColwiseSum { framework::Tensor* vec); }; +template +struct RowwiseSum { + void operator()(const DeviceContext& context, const framework::Tensor& input, + framework::Tensor* vec); +}; + +template +struct RowwiseMean { + void operator()(const DeviceContext& context, const framework::Tensor& input, + framework::Tensor* vec); +}; + } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/operators/math/math_function_impl.h b/paddle/operators/math/math_function_impl.h index de591626df2..af4127788af 100644 --- a/paddle/operators/math/math_function_impl.h +++ b/paddle/operators/math/math_function_impl.h @@ -87,6 +87,88 @@ class ColwiseSum { } }; +template +void RowwiseMean::operator()(const DeviceContext& context, + const framework::Tensor& input, + framework::Tensor* out) { + auto in_dims = input.dims(); + PADDLE_ENFORCE_EQ(in_dims.size(), 2U); + PADDLE_ENFORCE_EQ(out->numel(), in_dims[0]); + + auto in = framework::EigenMatrix::From(input); + auto vec = framework::EigenVector::Flatten(*out); + + vec.device(*context.eigen_device()) = in.mean(Eigen::array({{1}})); +} +// TODO(zcd): Following ColwiseSum format, need to confirm. +// Specialize for CPU, since Eigen implement a general reduce. However, +// rowwise-sum can be easily implemented. General reduce has a huge overhead in +// CPU +template +class RowwiseMean { + public: + void operator()(const platform::CPUDeviceContext& context, + const framework::Tensor& input, framework::Tensor* out) { + auto& in_dims = input.dims(); + PADDLE_ENFORCE_EQ(in_dims.size(), 2U); + auto height = in_dims[0]; + auto size = in_dims[1]; + PADDLE_ENFORCE_EQ(out->numel(), height); + auto inv_size = 1.0 / size; + T* out_buf = out->mutable_data(out->place()); + const T* in_buf = input.data(); + + for (size_t i = 0; i < static_cast(height); ++i) { + T sum = 0; + for (size_t j = 0; j < static_cast(size); ++j) { + sum += in_buf[i * size + j]; + } + out_buf[i] = sum * inv_size; + } + } +}; + +template +void RowwiseSum::operator()(const DeviceContext& context, + const framework::Tensor& input, + framework::Tensor* out) { + auto in_dims = input.dims(); + PADDLE_ENFORCE_EQ(in_dims.size(), 2U); + PADDLE_ENFORCE_EQ(out->numel(), in_dims[0]); + + auto in = framework::EigenMatrix::From(input); + auto vec = framework::EigenVector::Flatten(*out); + + vec.device(*context.eigen_device()) = in.sum(Eigen::array({{1}})); +} +// TODO(zcd): Following ColwiseSum format, need to confirm. +// Specialize for CPU, since Eigen implement a general reduce. However, +// rowwise-sum can be easily implemented. General reduce has a huge overhead in +// CPU +template +class RowwiseSum { + public: + void operator()(const platform::CPUDeviceContext& context, + const framework::Tensor& input, framework::Tensor* out) { + auto& in_dims = input.dims(); + PADDLE_ENFORCE_EQ(in_dims.size(), 2U); + auto height = in_dims[0]; + auto size = in_dims[1]; + PADDLE_ENFORCE_EQ(out->numel(), size); + + T* out_buf = out->mutable_data(out->place()); + const T* in_buf = input.data(); + + for (size_t i = 0; i < static_cast(height); ++i) { + T sum = 0; + for (size_t j = 0; j < static_cast(size); ++j) { + sum += in_buf[i * size + j]; + } + out_buf[i] = sum; + } + } +}; + } // namespace math } // namespace operators } // namespace paddle -- GitLab From e03337350356626f36cdff371a3e82292e25a1c8 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Sat, 3 Feb 2018 14:34:29 +0800 Subject: [PATCH 017/138] unifid GPU and CPU implementation --- paddle/operators/layer_norm_op.cc | 187 ------------------------ paddle/operators/layer_norm_op.cu | 228 +----------------------------- paddle/operators/layer_norm_op.h | 204 +++++++++++++++++++++++++- 3 files changed, 206 insertions(+), 413 deletions(-) diff --git a/paddle/operators/layer_norm_op.cc b/paddle/operators/layer_norm_op.cc index 6dd18277c9c..edc26dfb96f 100644 --- a/paddle/operators/layer_norm_op.cc +++ b/paddle/operators/layer_norm_op.cc @@ -13,8 +13,6 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/operators/layer_norm_op.h" -#include "paddle/operators/elementwise_op_function.h" -#include "paddle/operators/math/math_function.h" namespace paddle { namespace operators { @@ -23,13 +21,6 @@ using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; using DataLayout = framework::DataLayout; -template -using EigenMatrixMapRowMajor = Eigen::Map< - Eigen::Matrix>; -template -using ConstEigenMatrixMapRowMajor = Eigen::Map< - const Eigen::Matrix>; - class LayerNormOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; @@ -118,75 +109,6 @@ https://arxiv.org/abs/1607.06450 } }; -template -class LayerNormKernel - : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext &ctx) const override { - const float epsilon = ctx.Attr("epsilon"); - const auto *scale = ctx.Input("Scale"); - const auto *bias = ctx.Input("Bias"); - const auto *x = ctx.Input("X"); - const auto &x_dims = x->dims(); - const auto begin_norm_axis = ctx.Attr("begin_norm_axis"); - - auto *output = ctx.Output("Y"); - auto *mean = ctx.Output("Mean"); - auto *var = ctx.Output("Variance"); - output->mutable_data(ctx.GetPlace()); - mean->mutable_data(ctx.GetPlace()); - var->mutable_data(ctx.GetPlace()); - - auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); - int left = static_cast(matrix_dim[0]); - int right = static_cast(matrix_dim[1]); - - auto input_map = ConstEigenMatrixMapRowMajor(x->data(), left, right); - - auto mean_map = EigenMatrixMapRowMajor(mean->data(), left, 1); - auto var_map = EigenMatrixMapRowMajor(var->data(), left, 1); - auto output_map = EigenMatrixMapRowMajor(output->data(), left, right); - - auto squre = [](T ele) { return ele * ele; }; - auto add_epslion = [epsilon](T ele) { return ele + epsilon; }; - - mean_map = input_map.rowwise().mean(); - var_map = (input_map - mean_map.replicate(1, right)) - .unaryExpr(squre) - .rowwise() - .mean() - .unaryExpr(add_epslion); - - auto inv_std_func = [](T ele) { return std::sqrt(1 / ele); }; - // TODO(zcd): Some thinking about output_map, is it appropriate that - // `output_map` and `input_map` point to the same memory. - auto inv_std = var_map.unaryExpr(inv_std_func); - if (scale && bias) { - auto scale_map = - ConstEigenMatrixMapRowMajor(scale->data(), 1, right); - auto bias_map = ConstEigenMatrixMapRowMajor(bias->data(), 1, right); - output_map = (input_map - mean_map.replicate(1, right)) - .cwiseProduct(inv_std.replicate(1, right)) - .cwiseProduct(scale_map.replicate(left, 1)) + - bias_map.replicate(left, 1); - } else if (scale) { - auto scale_map = - ConstEigenMatrixMapRowMajor(scale->data(), 1, right); - output_map = (input_map - mean_map.replicate(1, right)) - .cwiseProduct(inv_std.replicate(1, right)) - .cwiseProduct(scale_map.replicate(left, 1)); - } else if (bias) { - auto bias_map = ConstEigenMatrixMapRowMajor(bias->data(), 1, right); - output_map = (input_map - mean_map.replicate(1, right)) - .cwiseProduct(inv_std.replicate(1, right)) + - bias_map.replicate(left, 1); - } else { - output_map = (input_map - mean_map.replicate(1, right)) - .cwiseProduct(inv_std.replicate(1, right)); - } - } -}; - class LayerNormGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; @@ -239,115 +161,6 @@ class LayerNormGradOp : public framework::OperatorWithKernel { } }; -template -class LayerNormGradKernel - : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext &ctx) const override { - const auto *x = ctx.Input("X"); - const auto *mean = ctx.Input("Mean"); - const auto *var = ctx.Input("Variance"); - const auto *scale = ctx.Input("Scale"); - const auto *d_y = ctx.Input(framework::GradVarName("Y")); - - const auto &x_dims = x->dims(); - - const auto begin_norm_axis = ctx.Attr("begin_norm_axis"); - auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); - int left = static_cast(matrix_dim[0]); - int right = static_cast(matrix_dim[1]); - - // init output - auto *d_x = ctx.Output(framework::GradVarName("X")); - auto *d_scale = ctx.Output(framework::GradVarName("Scale")); - auto *d_bias = ctx.Output(framework::GradVarName("Bias")); - - auto x_map = ConstEigenMatrixMapRowMajor(x->data(), left, right); - auto d_y_map = ConstEigenMatrixMapRowMajor(d_y->data(), left, right); - auto mean_map = ConstEigenMatrixMapRowMajor(mean->data(), left, 1); - auto var_map = ConstEigenMatrixMapRowMajor(var->data(), left, 1); - - if (d_bias) { - d_bias->mutable_data(ctx.GetPlace()); - auto d_bias_map = EigenMatrixMapRowMajor(d_bias->data(), 1, right); - d_bias_map = d_y_map.colwise().sum(); - } - if (d_scale) { - d_scale->mutable_data(ctx.GetPlace()); - auto d_scale_map = - EigenMatrixMapRowMajor(d_scale->data(), 1, right); - auto inv_std_func = [](T ele) { return std::sqrt(1 / ele); }; - // There are two equation to compute d_scale. One uses "Y" and the other - // does not use "Y" - d_scale_map = - ((x_map - mean_map.replicate(1, right)) - .cwiseProduct( - var_map.unaryExpr(inv_std_func).replicate(1, right)) - .cwiseProduct(d_y_map)) - .colwise() - .sum(); - } - - if (d_x) { - d_x->mutable_data(ctx.GetPlace()); - auto d_x_map = EigenMatrixMapRowMajor(d_x->data(), left, right); - auto triple_product_func = [](T ele) { return ele * ele * ele; }; - auto inv_std_func = [](T ele) { return std::sqrt(1 / ele); }; - - auto inv_std_map = var_map.unaryExpr(inv_std_func).eval(); - // TODO(zcd): these code can be refined - if (d_scale) { - auto scale_map = - ConstEigenMatrixMapRowMajor(scale->data(), 1, right); - // dy_dx - auto dx_end = - inv_std_map.replicate(1, right).cwiseProduct(d_y_map).cwiseProduct( - scale_map.replicate(left, 1)); - - // dy_dmean_dx - auto dx_mean = - (T(-1.0) / right) * dx_end.rowwise().sum().replicate(1, right); - - // dy_var_dx - auto dvar_end_part = (x_map - mean_map.replicate(1, right)) - .cwiseProduct(scale_map.replicate(left, 1)) - .cwiseProduct(d_y_map) - .rowwise() - .sum(); - auto dvar_end = inv_std_map.unaryExpr(triple_product_func) - .cwiseProduct(dvar_end_part) - .replicate(1, right); - auto dx_var = - (T(-1.0) / right) * - (x_map - mean_map.replicate(1, right)).cwiseProduct(dvar_end); - - d_x_map = dx_end + dx_mean + dx_var; - } else { - // dy_dx - auto dx_end = inv_std_map.replicate(1, right).cwiseProduct(d_y_map); - - // dy_dmean_dx - auto dx_mean = - (T(-1.0) / right) * dx_end.rowwise().sum().replicate(1, right); - - // dy_var_dx - auto dvar_end_part = (x_map - mean_map.replicate(1, right)) - .cwiseProduct(d_y_map) - .rowwise() - .sum(); - auto dvar_end = inv_std_map.unaryExpr(triple_product_func) - .cwiseProduct(dvar_end_part) - .replicate(1, right); - auto dx_var = - (T(-1.0) / right) * - (x_map - mean_map.replicate(1, right)).cwiseProduct(dvar_end); - - d_x_map = dx_end + dx_mean + dx_var; - } - } - } -}; - } // namespace operators } // namespace paddle diff --git a/paddle/operators/layer_norm_op.cu b/paddle/operators/layer_norm_op.cu index a84f5a41eae..77d13b216f0 100644 --- a/paddle/operators/layer_norm_op.cu +++ b/paddle/operators/layer_norm_op.cu @@ -12,234 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/elementwise_op_function.h" #include "paddle/operators/layer_norm_op.h" -#include "paddle/operators/math/math_function.h" - -namespace paddle { -namespace operators { - -using Tensor = framework::Tensor; -using LoDTensor = framework::LoDTensor; -using DataLayout = framework::DataLayout; - -namespace { -template -struct SubAndSquareFunctor { - inline HOSTDEVICE T operator()(T a, T b) const { return (a - b) * (a - b); } -}; - -template -struct DivAndSqrtFunctor { - explicit DivAndSqrtFunctor(T epsilon) { epsilon_ = epsilon; } - inline HOSTDEVICE T operator()(T a, T b) const { - return a / (sqrt(b) + epsilon_); - } - - private: - T epsilon_; -}; - -template -struct MulFunctor { - inline HOSTDEVICE T operator()(T a, T b) const { return a * b; } -}; - -template -struct AddFunctor { - inline HOSTDEVICE T operator()(T a, T b) const { return a + b; } -}; - -template -struct SubFunctor { - inline HOSTDEVICE T operator()(T a, T b) const { return a - b; } -}; - -template -struct MulInvVarFunctor { - inline HOSTDEVICE T operator()(T a, T b) const { - return a * std::sqrt(1.0 / b); - } -}; -} // namespace - -template -class LayerNormCUDAKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext &ctx) const override { - const float epsilon = ctx.Attr("epsilon"); - auto *scale = ctx.Input("Scale"); - auto *bias = ctx.Input("Bias"); - auto x = *ctx.Input("X"); - - auto *y = ctx.Output("Y"); - auto *mean = ctx.Output("Mean"); - auto *var = ctx.Output("Variance"); - const auto begin_norm_axis = ctx.Attr("begin_norm_axis"); - - const auto &x_dims = x.dims(); - - y->mutable_data(ctx.GetPlace()); - mean->mutable_data(ctx.GetPlace()); - var->mutable_data(ctx.GetPlace()); - - auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); - int left = static_cast(matrix_dim[0]); - int right = static_cast(matrix_dim[1]); - - framework::DDim matrix_shape({left, right}); - - x.Resize(matrix_shape); - y->Resize(matrix_shape); - - auto &dev_ctx = ctx.template device_context(); - math::RowwiseMean row_mean; - - // functor-> get mean - row_mean(dev_ctx, x, mean); - - // functor-> get variance - ElementwiseComputeEx, DeviceContext, T>( - ctx, &x, mean, /*axis*/ 0, SubAndSquareFunctor(), y); - row_mean(dev_ctx, *y, var); - - // functor-> get norm_out - ElementwiseComputeEx, DeviceContext, T>( - ctx, &x, mean, /*axis*/ 0, SubFunctor(), y); - ElementwiseComputeEx, DeviceContext, T>( - ctx, y, var, /*axis*/ 0, DivAndSqrtFunctor(static_cast(epsilon)), - y); - - framework::DDim scale_shape({right}); - if (scale) { - Tensor scale_matrix = *scale; - scale_matrix.Resize(scale_shape); - ElementwiseComputeEx, DeviceContext, T>( - ctx, y, &scale_matrix, /*axis*/ 1, MulFunctor(), y); - } - if (bias) { - Tensor bias_matrix = *bias; - bias_matrix.Resize(scale_shape); - ElementwiseComputeEx, DeviceContext, T>( - ctx, y, &bias_matrix, /*axis*/ 1, AddFunctor(), y); - } - y->Resize(x_dims); - } -}; - -template -class LayerNormCUDAGradKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext &ctx) const override { - const float epsilon = ctx.Attr("epsilon"); - auto x = *ctx.Input("X"); - auto mean = *ctx.Input("Mean"); - auto var = *ctx.Input("Variance"); - auto scale = *ctx.Input("Scale"); - auto d_y = *ctx.Input(framework::GradVarName("Y")); - const auto begin_norm_axis = ctx.Attr("begin_norm_axis"); - - // init output - auto *d_x = ctx.Output(framework::GradVarName("X")); - auto *d_scale = ctx.Output(framework::GradVarName("Scale")); - auto *d_bias = ctx.Output(framework::GradVarName("Bias")); - - const auto &x_dims = x.dims(); - auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); - int left = static_cast(matrix_dim[0]); - int right = static_cast(matrix_dim[1]); - framework::DDim matrix_shape({left, right}); - - d_y.Resize(matrix_shape); - auto &dev_ctx = ctx.template device_context(); - math::ColwiseSum colwise_sum; - - Tensor temp; - Tensor temp_norm; - if (d_scale || d_x) { - x.Resize(matrix_shape); - temp.mutable_data(matrix_shape, ctx.GetPlace()); - temp_norm.mutable_data(matrix_shape, ctx.GetPlace()); - - // get x_norm - ElementwiseComputeEx, DeviceContext, T>( - ctx, &x, &mean, /*axis*/ 0, SubFunctor(), &temp_norm); - ElementwiseComputeEx, DeviceContext, T>( - ctx, &temp_norm, &var, /*axis*/ 0, - DivAndSqrtFunctor(static_cast(epsilon)), &temp_norm); - } - - if (d_bias) { - d_bias->mutable_data(ctx.GetPlace()); - colwise_sum(dev_ctx, d_y, d_bias); - } - if (d_scale) { - d_scale->mutable_data(ctx.GetPlace()); - ElementwiseComputeEx, DeviceContext, T>( - ctx, &temp_norm, &d_y, /*axis*/ 0, MulFunctor(), &temp); - colwise_sum(dev_ctx, temp, d_scale); - } - - if (d_x) { - framework::DDim vec_shape({left}); - d_x->mutable_data(ctx.GetPlace()); - Tensor temp_vec; - temp_vec.mutable_data(vec_shape, ctx.GetPlace()); - - auto &dev_ctx = ctx.template device_context(); - math::RowwiseMean row_mean; - - if (d_scale) { - // dy_dx - ElementwiseComputeEx, DeviceContext, T>( - ctx, &d_y, &scale, /*axis*/ 1, MulFunctor(), &temp); - framework::Copy(temp, ctx.GetPlace(), ctx.device_context(), d_x); - - // dy_dmean_dx - row_mean(dev_ctx, temp, &temp_vec); - ElementwiseComputeEx, DeviceContext, T>( - ctx, d_x, &temp_vec, /*axis*/ 0, SubFunctor(), d_x); - - // dy_var_dx - ElementwiseComputeEx, DeviceContext, T>( - ctx, &temp, &temp_norm, /*axis*/ 0, MulFunctor(), &temp); - - } else { - // dy_dx - framework::Copy(d_y, ctx.GetPlace(), ctx.device_context(), d_x); - - // dy_dmean_dx - row_mean(dev_ctx, d_y, &temp_vec); - ElementwiseComputeEx, DeviceContext, T>( - ctx, d_x, &temp_vec, /*axis*/ 0, SubFunctor(), d_x); - - // dy_var_dx - ElementwiseComputeEx, DeviceContext, T>( - ctx, &d_y, &temp_norm, /*axis*/ 0, MulFunctor(), &temp); - } - // dy_var_dx - row_mean(dev_ctx, temp, &temp_vec); - ElementwiseComputeEx, DeviceContext, T>( - ctx, &temp_norm, &temp_vec, /*axis*/ 0, MulFunctor(), &temp_norm); - ElementwiseComputeEx, DeviceContext, T>( - ctx, d_x, &temp_norm, /*axis*/ 0, SubFunctor(), d_x); - - ElementwiseComputeEx, DeviceContext, T>( - ctx, d_x, &var, /*axis*/ 0, - DivAndSqrtFunctor(static_cast(epsilon)), d_x); - } - } -}; - -} // namespace operators -} // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( layer_norm, - ops::LayerNormCUDAKernel, - ops::LayerNormCUDAKernel); + ops::LayerNormKernel, + ops::LayerNormKernel); REGISTER_OP_CUDA_KERNEL( layer_norm_grad, - ops::LayerNormCUDAGradKernel, - ops::LayerNormCUDAGradKernel); + ops::LayerNormGradKernel, + ops::LayerNormGradKernel); diff --git a/paddle/operators/layer_norm_op.h b/paddle/operators/layer_norm_op.h index bca35b91e6f..309f1b87a26 100644 --- a/paddle/operators/layer_norm_op.h +++ b/paddle/operators/layer_norm_op.h @@ -16,19 +16,219 @@ limitations under the License. */ #include "paddle/framework/eigen.h" #include "paddle/framework/op_registry.h" +#include "paddle/operators/elementwise_op_function.h" +#include "paddle/operators/math/math_function.h" + namespace paddle { namespace operators { +template +struct SubAndSquareFunctor { + inline HOSTDEVICE T operator()(T a, T b) const { return (a - b) * (a - b); } +}; + +template +struct DivAndSqrtFunctor { + explicit DivAndSqrtFunctor(T epsilon) { epsilon_ = epsilon; } + inline HOSTDEVICE T operator()(T a, T b) const { + return a / (sqrt(b) + epsilon_); + } + + private: + T epsilon_; +}; + +template +struct MulFunctor { + inline HOSTDEVICE T operator()(T a, T b) const { return a * b; } +}; + +template +struct AddFunctor { + inline HOSTDEVICE T operator()(T a, T b) const { return a + b; } +}; + +template +struct SubFunctor { + inline HOSTDEVICE T operator()(T a, T b) const { return a - b; } +}; + +template +struct MulInvVarFunctor { + inline HOSTDEVICE T operator()(T a, T b) const { + return a * std::sqrt(1.0 / b); + } +}; + +using Tensor = framework::Tensor; +using LoDTensor = framework::LoDTensor; +using DataLayout = framework::DataLayout; + template class LayerNormKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& ctx) const override; + void Compute(const framework::ExecutionContext &ctx) const override { + const float epsilon = ctx.Attr("epsilon"); + auto *scale = ctx.Input("Scale"); + auto *bias = ctx.Input("Bias"); + auto x = *ctx.Input("X"); + + auto *y = ctx.Output("Y"); + auto *mean = ctx.Output("Mean"); + auto *var = ctx.Output("Variance"); + const auto begin_norm_axis = ctx.Attr("begin_norm_axis"); + + const auto &x_dims = x.dims(); + + y->mutable_data(ctx.GetPlace()); + mean->mutable_data(ctx.GetPlace()); + var->mutable_data(ctx.GetPlace()); + + auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); + int left = static_cast(matrix_dim[0]); + int right = static_cast(matrix_dim[1]); + + framework::DDim matrix_shape({left, right}); + + x.Resize(matrix_shape); + y->Resize(matrix_shape); + + auto &dev_ctx = ctx.template device_context(); + math::RowwiseMean row_mean; + + // functor-> get mean + row_mean(dev_ctx, x, mean); + + // functor-> get variance + ElementwiseComputeEx, DeviceContext, T>( + ctx, &x, mean, /*axis*/ 0, SubAndSquareFunctor(), y); + row_mean(dev_ctx, *y, var); + + // functor-> get norm_out + ElementwiseComputeEx, DeviceContext, T>( + ctx, &x, mean, /*axis*/ 0, SubFunctor(), y); + ElementwiseComputeEx, DeviceContext, T>( + ctx, y, var, /*axis*/ 0, DivAndSqrtFunctor(static_cast(epsilon)), + y); + + framework::DDim scale_shape({right}); + if (scale) { + Tensor scale_matrix = *scale; + scale_matrix.Resize(scale_shape); + ElementwiseComputeEx, DeviceContext, T>( + ctx, y, &scale_matrix, /*axis*/ 1, MulFunctor(), y); + } + if (bias) { + Tensor bias_matrix = *bias; + bias_matrix.Resize(scale_shape); + ElementwiseComputeEx, DeviceContext, T>( + ctx, y, &bias_matrix, /*axis*/ 1, AddFunctor(), y); + } + y->Resize(x_dims); + } }; template class LayerNormGradKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& ctx) const override; + void Compute(const framework::ExecutionContext &ctx) const override { + const float epsilon = ctx.Attr("epsilon"); + auto x = *ctx.Input("X"); + auto mean = *ctx.Input("Mean"); + auto var = *ctx.Input("Variance"); + auto scale = *ctx.Input("Scale"); + auto d_y = *ctx.Input(framework::GradVarName("Y")); + const auto begin_norm_axis = ctx.Attr("begin_norm_axis"); + + // init output + auto *d_x = ctx.Output(framework::GradVarName("X")); + auto *d_scale = ctx.Output(framework::GradVarName("Scale")); + auto *d_bias = ctx.Output(framework::GradVarName("Bias")); + + const auto &x_dims = x.dims(); + auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); + int left = static_cast(matrix_dim[0]); + int right = static_cast(matrix_dim[1]); + framework::DDim matrix_shape({left, right}); + + d_y.Resize(matrix_shape); + auto &dev_ctx = ctx.template device_context(); + math::ColwiseSum colwise_sum; + + Tensor temp; + Tensor temp_norm; + if (d_scale || d_x) { + x.Resize(matrix_shape); + temp.mutable_data(matrix_shape, ctx.GetPlace()); + temp_norm.mutable_data(matrix_shape, ctx.GetPlace()); + + // get x_norm + ElementwiseComputeEx, DeviceContext, T>( + ctx, &x, &mean, /*axis*/ 0, SubFunctor(), &temp_norm); + ElementwiseComputeEx, DeviceContext, T>( + ctx, &temp_norm, &var, /*axis*/ 0, + DivAndSqrtFunctor(static_cast(epsilon)), &temp_norm); + } + + if (d_bias) { + d_bias->mutable_data(ctx.GetPlace()); + colwise_sum(dev_ctx, d_y, d_bias); + } + if (d_scale) { + d_scale->mutable_data(ctx.GetPlace()); + ElementwiseComputeEx, DeviceContext, T>( + ctx, &temp_norm, &d_y, /*axis*/ 0, MulFunctor(), &temp); + colwise_sum(dev_ctx, temp, d_scale); + } + + if (d_x) { + framework::DDim vec_shape({left}); + d_x->mutable_data(ctx.GetPlace()); + Tensor temp_vec; + temp_vec.mutable_data(vec_shape, ctx.GetPlace()); + + math::RowwiseMean row_mean; + + if (d_scale) { + // dy_dx + ElementwiseComputeEx, DeviceContext, T>( + ctx, &d_y, &scale, /*axis*/ 1, MulFunctor(), &temp); + framework::Copy(temp, ctx.GetPlace(), ctx.device_context(), d_x); + + // dy_dmean_dx + row_mean(dev_ctx, temp, &temp_vec); + ElementwiseComputeEx, DeviceContext, T>( + ctx, d_x, &temp_vec, /*axis*/ 0, SubFunctor(), d_x); + + // dy_var_dx + ElementwiseComputeEx, DeviceContext, T>( + ctx, &temp, &temp_norm, /*axis*/ 0, MulFunctor(), &temp); + + } else { + // dy_dx + framework::Copy(d_y, ctx.GetPlace(), ctx.device_context(), d_x); + + // dy_dmean_dx + row_mean(dev_ctx, d_y, &temp_vec); + ElementwiseComputeEx, DeviceContext, T>( + ctx, d_x, &temp_vec, /*axis*/ 0, SubFunctor(), d_x); + + // dy_var_dx + ElementwiseComputeEx, DeviceContext, T>( + ctx, &d_y, &temp_norm, /*axis*/ 0, MulFunctor(), &temp); + } + // dy_var_dx + row_mean(dev_ctx, temp, &temp_vec); + ElementwiseComputeEx, DeviceContext, T>( + ctx, &temp_norm, &temp_vec, /*axis*/ 0, MulFunctor(), &temp_norm); + ElementwiseComputeEx, DeviceContext, T>( + ctx, d_x, &temp_norm, /*axis*/ 0, SubFunctor(), d_x); + + ElementwiseComputeEx, DeviceContext, T>( + ctx, d_x, &var, /*axis*/ 0, + DivAndSqrtFunctor(static_cast(epsilon)), d_x); + } + } }; } // namespace operators -- GitLab From b60da6729fa2484506869bc29271761de91676b7 Mon Sep 17 00:00:00 2001 From: chengduo Date: Sat, 3 Feb 2018 23:32:56 +0800 Subject: [PATCH 018/138] Refine buffer channel (#8098) * refine buffer channel * refine Receive and Send * follow comments --- paddle/framework/channel.h | 4 +-- paddle/framework/details/buffered_channel.h | 25 ++++++++----------- paddle/framework/details/unbuffered_channel.h | 14 ++++++++--- 3 files changed, 23 insertions(+), 20 deletions(-) diff --git a/paddle/framework/channel.h b/paddle/framework/channel.h index 0570980c5a4..b679387b112 100644 --- a/paddle/framework/channel.h +++ b/paddle/framework/channel.h @@ -23,8 +23,8 @@ namespace framework { template class Channel { public: - virtual void Send(T*) = 0; - virtual void Receive(T*) = 0; + virtual bool Send(T*) = 0; + virtual bool Receive(T*) = 0; virtual size_t Cap() = 0; virtual void Close() = 0; virtual ~Channel() {} diff --git a/paddle/framework/details/buffered_channel.h b/paddle/framework/details/buffered_channel.h index 9c806461aa5..7ac234b8d42 100644 --- a/paddle/framework/details/buffered_channel.h +++ b/paddle/framework/details/buffered_channel.h @@ -30,8 +30,8 @@ class Buffered : public paddle::framework::Channel { friend void paddle::framework::CloseChannel(Channel*); public: - virtual void Send(T*); - virtual void Receive(T*); + virtual bool Send(T*); + virtual bool Receive(T*); virtual size_t Cap() { return cap_; } virtual void Close(); virtual ~Buffered(); @@ -48,33 +48,36 @@ class Buffered : public paddle::framework::Channel { PADDLE_ENFORCE_GT(cap, 0); } - void NotifyAllSenders(std::unique_lock*); void NotifyAllParticipants(std::unique_lock*); }; template -void Buffered::Send(T* item) { +bool Buffered::Send(T* item) { std::unique_lock lock(mu_); full_cond_var_.wait(lock, [this]() { return channel_.size() < cap_ || closed_; }); + bool ret = false; if (!closed_) { channel_.push_back(std::move(*item)); lock.unlock(); empty_cond_var_.notify_one(); + ret = true; } + return ret; } template -void Buffered::Receive(T* item) { +bool Buffered::Receive(T* item) { std::unique_lock lock(mu_); empty_cond_var_.wait(lock, [this]() { return !channel_.empty() || closed_; }); + bool ret = false; if (!closed_) { *item = std::move(channel_.front()); channel_.pop_front(); - NotifyAllSenders(&lock); - } else { - item = nullptr; + full_cond_var_.notify_one(); + ret = true; } + return ret; } template @@ -92,12 +95,6 @@ Buffered::~Buffered() { NotifyAllParticipants(&lock); } -template -void Buffered::NotifyAllSenders(std::unique_lock* lock) { - lock->unlock(); - full_cond_var_.notify_all(); -} - template void Buffered::NotifyAllParticipants(std::unique_lock* lock) { lock->unlock(); diff --git a/paddle/framework/details/unbuffered_channel.h b/paddle/framework/details/unbuffered_channel.h index 0dc5afd7e57..f86a894bb4a 100644 --- a/paddle/framework/details/unbuffered_channel.h +++ b/paddle/framework/details/unbuffered_channel.h @@ -29,8 +29,8 @@ class UnBuffered : public paddle::framework::Channel { friend void paddle::framework::CloseChannel(Channel*); public: - virtual void Send(T*); - virtual void Receive(T*); + virtual bool Send(T*); + virtual bool Receive(T*); virtual size_t Cap() { return 0; } virtual void Close(); virtual ~UnBuffered(); @@ -57,7 +57,7 @@ class UnBuffered : public paddle::framework::Channel { // This function implements the concept of how data should // be sent from a writer to a reader. template -void UnBuffered::Send(T* data) { +bool UnBuffered::Send(T* data) { // Prevent other writers from entering std::unique_lock writer_lock(mu_write_); writer_found_ = true; @@ -66,6 +66,7 @@ void UnBuffered::Send(T* data) { cv_writer_.wait(cv_lock, [this]() { return reader_found_ == true || closed_; }); cv_reader_.notify_one(); + bool ret = false; if (!closed_) { std::unique_lock channel_lock(mu_ch_); item = data; @@ -74,14 +75,16 @@ void UnBuffered::Send(T* data) { channel_lock.lock(); cv_channel_.wait(channel_lock, [this]() { return item == nullptr || closed_; }); + ret = true; } writer_found_ = false; + return ret; } // This function implements the concept of how // data that was sent by a writer is read from a reader. template -void UnBuffered::Receive(T* data) { +bool UnBuffered::Receive(T* data) { // Prevent other readers from entering std::unique_lock read_lock{mu_read_}; reader_found_ = true; @@ -90,6 +93,7 @@ void UnBuffered::Receive(T* data) { cv_reader_.wait(cv_lock, [this]() { return writer_found_ == true || closed_; }); cv_writer_.notify_one(); + bool ret = false; if (!closed_) { std::unique_lock lock_ch{mu_ch_}; // Reader should wait for the writer to first write its data @@ -98,10 +102,12 @@ void UnBuffered::Receive(T* data) { *data = std::move(*item); item = nullptr; lock_ch.unlock(); + ret = true; } cv_channel_.notify_one(); } reader_found_ = false; + return ret; } // This function implements the sequence of events -- GitLab From c3d27b15b7575a48e02a24eae6dff6b58d23cf70 Mon Sep 17 00:00:00 2001 From: Kexin Zhao Date: Sun, 4 Feb 2018 01:43:01 -0800 Subject: [PATCH 019/138] modify prune.cc for multiple blocks --- paddle/framework/prune.cc | 71 +++++++++++++++++++++++++++++++-------- 1 file changed, 57 insertions(+), 14 deletions(-) diff --git a/paddle/framework/prune.cc b/paddle/framework/prune.cc index bff8e0bceac..6a3882f199e 100644 --- a/paddle/framework/prune.cc +++ b/paddle/framework/prune.cc @@ -49,11 +49,28 @@ bool IsTarget(const proto::OpDesc& op_desc) { return false; } -void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, - int block_id) { - // TODO(tonyyang-svail): - // - will change to use multiple blocks for RNN op and Cond Op +int GetSubBlockIndex(const proto::OpDesc& op_desc) { + for (auto& attr : op_desc.attrs()) { + if (attr.type() == proto::AttrType::BLOCK) { + PADDLE_ENFORCE(attr.has_block_idx()); + return attr.block_idx(); + } + } + return -1; +} +bool HasSubBlock(const proto::OpDesc& op_desc) { + return GetSubBlockIndex(op_desc) > 0; +} + +// block_id is the idx of the current block in the input desc +// parent_block_id is the idx of the parent of the current block +// in the output desc, -1 means the current block is global block +// dependent_vars is passed recursively from the parent block to +// the child block to help pruning +void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, + int block_id, int parent_block_id, + std::set& dependent_vars) { auto& block = input.blocks(block_id); auto& ops = block.ops(); @@ -72,11 +89,9 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, expect_fetch = (op_desc.type() == kFetchOpType); } - std::set dependent_vars; std::vector should_run; for (auto op_iter = ops.rbegin(); op_iter != ops.rend(); ++op_iter) { auto& op_desc = *op_iter; - if (IsTarget(op_desc) || HasDependentVar(op_desc, dependent_vars)) { // insert its input to the dependency graph for (auto& var : op_desc.inputs()) { @@ -84,7 +99,6 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, dependent_vars.insert(argu); } } - should_run.push_back(true); } else { should_run.push_back(false); @@ -95,19 +109,48 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, // we reverse the should_run vector std::reverse(should_run.begin(), should_run.end()); - *output = input; - auto* op_field = output->mutable_blocks(block_id)->mutable_ops(); + //*output = input; + // copy the current block from input to output + auto* block_field = output->mutable_blocks(); + *block_field->Add() = input.blocks(block_id); + + int output_block_id = output->blocks_size() - 1; + auto* output_block = output->mutable_blocks(output_block_id); + output_block->set_idx = output_block_id; + output_block->set_parent_idx = parent_block_id; + + auto* op_field = output_block->mutable_ops(); op_field->Clear(); for (size_t i = 0; i < should_run.size(); ++i) { if (should_run[i]) { - *op_field->Add() = input.blocks(block_id).ops(i); + auto* op = op_field->Add(); + *op = input.blocks(block_id).ops(i); + if (HasSubBlock(*op)) { + // create sub_block_dependent_vars here to help prune the sub block + std::set sub_block_dependent_vars; + for (auto& var : op.inputs()) { + for (auto& argu : var.arguments()) { + sub_block_dependent_vars.insert(argu); + } + } + for (auto& var : op.outputs()) { + for (auto& argu : var.arguments()) { + sub_block_dependent_vars.insert(argu); + } + } + + // GetSubBlockIndex(*op) is the idx of the sub_block in the input desc + // output_block_id is the idx of the current block in the output desc + prune_impl(input, output, GetSubBlockIndex(*op), output_block_id, + sub_block_dependent_vars); + } } } // remove the VarDescs in BlockDesc that are not referenced in // the pruned OpDescs std::unordered_map var_map; - auto* var_field = output->mutable_blocks(block_id)->mutable_vars(); + auto* var_field = output->mutable_blocks(output_block_id)->mutable_vars(); for (const auto& var : *var_field) { var_map[var.name()] = var; } @@ -118,14 +161,14 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, auto& input_field = op.inputs(); for (auto& input_var : input_field) { for (auto& arg : input_var.arguments()) { - *var_field->Add() = var_map[arg]; + *var_field->Add() = var_map.at(arg); } } // add VarDescs of all output arguments for each OpDesc auto& output_field = op.outputs(); for (auto& output_var : output_field) { for (auto& arg : output_var.arguments()) { - *var_field->Add() = var_map[arg]; + *var_field->Add() = var_map.at(arg); } } } @@ -133,7 +176,7 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, // TODO(fengjiayi): Prune() could be inplaced to avoid unnecessary copies void Prune(const proto::ProgramDesc& input, proto::ProgramDesc* output) { - prune_impl(input, output, 0); + prune_impl(input, output, 0, -1, {}); } void inference_optimize_impl(const proto::ProgramDesc& input, -- GitLab From 326fa176ea6401f171e9325aa29fb0b5cf6f7a29 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Sun, 4 Feb 2018 22:45:47 +0800 Subject: [PATCH 020/138] Fix empty output tensor and add an unitest case --- paddle/operators/ctc_align_op.cu | 8 ++++++++ paddle/operators/ctc_align_op.h | 9 ++++++++- python/paddle/v2/fluid/tests/test_ctc_align.py | 11 +++++++++++ 3 files changed, 27 insertions(+), 1 deletion(-) diff --git a/paddle/operators/ctc_align_op.cu b/paddle/operators/ctc_align_op.cu index 2a970cd9fa9..918df83effb 100644 --- a/paddle/operators/ctc_align_op.cu +++ b/paddle/operators/ctc_align_op.cu @@ -80,6 +80,14 @@ class CTCAlignOpCUDAKernel : public framework::OpKernel { // resize output dims output->Resize({static_cast(host_out_lod0.back()), 1}); + + if (host_out_lod0.back() == 0) { + output->Resize({1}); + output->mutable_data(ctx.GetPlace()); + math::SetConstant set_constant; + set_constant(ctx.template device_context(), + output, -1); + } } }; diff --git a/paddle/operators/ctc_align_op.h b/paddle/operators/ctc_align_op.h index fed89aa1e89..7a063870f3c 100644 --- a/paddle/operators/ctc_align_op.h +++ b/paddle/operators/ctc_align_op.h @@ -16,6 +16,8 @@ limitations under the License. */ #include #include "paddle/framework/op_registry.h" +#include "paddle/operators/math/math_function.h" + namespace paddle { namespace operators { @@ -65,9 +67,14 @@ class CTCAlignKernel : public framework::OpKernel { framework::LoD output_lod; output_lod.push_back(output_lod0); output->set_lod(output_lod); - // resize output dims output->Resize({static_cast(output_lod0.back()), 1}); + // for empty sequence + if (output_lod0.back() == 0) { + output->Resize({1}); + output_data = output->mutable_data(ctx.GetPlace()); + output_data[0] = -1; + } } }; diff --git a/python/paddle/v2/fluid/tests/test_ctc_align.py b/python/paddle/v2/fluid/tests/test_ctc_align.py index 773c69d1ad0..cc815d8e9e1 100644 --- a/python/paddle/v2/fluid/tests/test_ctc_align.py +++ b/python/paddle/v2/fluid/tests/test_ctc_align.py @@ -31,6 +31,8 @@ def CTCAlign(input, lod, blank, merge_repeated): result.append(token) prev_token = token result = np.array(result).reshape([len(result), 1]).astype("int32") + if len(result) == 0: + result = np.array([-1]) return result @@ -72,5 +74,14 @@ class TestCTCAlignOpCase1(TestCTCAlignOp): [19, 1]).astype("int32") +class TestCTCAlignOpCase2(TestCTCAlignOp): + def config(self): + self.op_type = "ctc_align" + self.input_lod = [[0, 4]] + self.blank = 0 + self.merge_repeated = True + self.input = np.array([0, 0, 0, 0]).reshape([4, 1]).astype("int32") + + if __name__ == "__main__": unittest.main() -- GitLab From be65516876ae32fe2f8cfde1aaa2d22926ccc583 Mon Sep 17 00:00:00 2001 From: Yibing Liu Date: Sun, 4 Feb 2018 16:37:02 +0000 Subject: [PATCH 021/138] Fix the error when sorted_key is none in profiler --- paddle/platform/profiler.cc | 2 +- python/paddle/v2/fluid/profiler.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/paddle/platform/profiler.cc b/paddle/platform/profiler.cc index 2a8afc94039..6df087d154c 100644 --- a/paddle/platform/profiler.cc +++ b/paddle/platform/profiler.cc @@ -233,7 +233,7 @@ void ParseEvents(std::vector>& events, }; break; default: - sorted_domain = "event end time"; + sorted_domain = "event first end time"; } std::vector> events_table; diff --git a/python/paddle/v2/fluid/profiler.py b/python/paddle/v2/fluid/profiler.py index d4a2cd7eeab..d33a4c52a88 100644 --- a/python/paddle/v2/fluid/profiler.py +++ b/python/paddle/v2/fluid/profiler.py @@ -103,10 +103,10 @@ def profiler(state, sorted_key=None): core.enable_profiler(prof_state) yield - if sorted_key not in ['calls', 'total', 'max', 'min', 'ave']: - raise ValueError("The state must be in 'calls', 'total', " - "'max', 'min', 'ave'") sorted_key = 'default' if sorted_key is None else sorted_key + if sorted_key not in ['default', 'calls', 'total', 'max', 'min', 'ave']: + raise ValueError("The sorted_key must be None or in 'calls', 'total', " + "'max', 'min' and 'ave'") key_map = { 'default': core.EventSortingKey.kDefault, 'calls': core.EventSortingKey.kCalls, -- GitLab From 1d2dd9c4a5b99074cec3cb642f64bfd2124e6412 Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Sun, 4 Feb 2018 10:04:53 -0800 Subject: [PATCH 022/138] Close buffered channel should unblock the blocked senders and receivers (#8109) --- paddle/framework/channel_test.cc | 113 +++++++++++++++++++++++++++++-- 1 file changed, 106 insertions(+), 7 deletions(-) diff --git a/paddle/framework/channel_test.cc b/paddle/framework/channel_test.cc index c3533bbb1ac..444d68498c9 100644 --- a/paddle/framework/channel_test.cc +++ b/paddle/framework/channel_test.cc @@ -48,12 +48,12 @@ TEST(Channel, SufficientBufferSizeDoesntBlock) { const size_t buffer_size = 10; auto ch = MakeChannel(buffer_size); for (size_t i = 0; i < buffer_size; ++i) { - ch->Send(&i); // should not block + EXPECT_EQ(ch->Send(&i), true); // should not block } size_t out; for (size_t i = 0; i < buffer_size; ++i) { - ch->Receive(&out); // should not block + EXPECT_EQ(ch->Receive(&out), true); // should not block EXPECT_EQ(out, i); } CloseChannel(ch); @@ -67,7 +67,10 @@ TEST(Channel, ConcurrentSendNonConcurrentReceiveWithSufficientBufferSize) { std::thread t([&]() { // Try to write more than buffer size. for (size_t i = 0; i < 2 * buffer_size; ++i) { - ch->Send(&i); // should block after 10 iterations + if (i < buffer_size) + EXPECT_EQ(ch->Send(&i), true); // should block after 10 iterations + else + EXPECT_EQ(ch->Send(&i), false); sum += i; } }); @@ -84,13 +87,13 @@ TEST(Channel, SimpleUnbufferedChannelTest) { unsigned sum_send = 0; std::thread t([&]() { for (int i = 0; i < 5; i++) { - ch->Send(&i); + EXPECT_EQ(ch->Send(&i), true); sum_send += i; } }); for (int i = 0; i < 5; i++) { int recv; - ch->Receive(&recv); + EXPECT_EQ(ch->Receive(&recv), true); EXPECT_EQ(recv, i); } @@ -100,6 +103,102 @@ TEST(Channel, SimpleUnbufferedChannelTest) { delete ch; } +// This tests that closing a buffered channel also unblocks +// any receivers waiting on the channel +TEST(Channel, BufferedChannelCloseUnblocksReceiversTest) { + auto ch = MakeChannel(1); + size_t num_threads = 5; + std::thread t[num_threads]; + bool thread_ended[num_threads]; + + // Launches threads that try to read and are blocked because of no writers + for (size_t i = 0; i < num_threads; i++) { + thread_ended[i] = false; + t[i] = std::thread( + [&](bool *p) { + int data; + // All reads should return false + EXPECT_EQ(ch->Receive(&data), false); + *p = true; + }, + &thread_ended[i]); + } + std::this_thread::sleep_for(std::chrono::milliseconds(100)); // wait + + // Verify that all threads are blocked + for (size_t i = 0; i < num_threads; i++) { + EXPECT_EQ(thread_ended[i], false); + } + + // Explicitly close the channel + // This should unblock all receivers + CloseChannel(ch); + + std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait + + // Verify that all threads got unblocked + for (size_t i = 0; i < num_threads; i++) { + EXPECT_EQ(thread_ended[i], true); + } + + for (size_t i = 0; i < num_threads; i++) t[i].join(); + delete ch; +} + +// This tests that closing a buffered channel also unblocks +// any senders waiting for channel to have write space +TEST(Channel, BufferedChannelCloseUnblocksSendersTest) { + auto ch = MakeChannel(1); + size_t num_threads = 5; + std::thread t[num_threads]; + bool thread_ended[num_threads]; + bool send_success[num_threads]; + + // Launches threads that try to write and are blocked because of no readers + for (size_t i = 0; i < num_threads; i++) { + thread_ended[i] = false; + send_success[i] = false; + t[i] = std::thread( + [&](bool *ended, bool *success) { + int data = 10; + *success = ch->Send(&data); + *ended = true; + }, + &thread_ended[i], &send_success[i]); + } + std::this_thread::sleep_for(std::chrono::milliseconds(100)); // wait + + // Verify that atleast 4 threads are blocked + int ct = 0; + for (size_t i = 0; i < num_threads; i++) { + if (thread_ended[i] == false) ct++; + } + // Atleast 4 threads must be blocked + EXPECT_GE(ct, 4); + + // Explicitly close the thread + // This should unblock all senders + CloseChannel(ch); + + std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait + + // Verify that all threads got unblocked + for (size_t i = 0; i < num_threads; i++) { + EXPECT_EQ(thread_ended[i], true); + } + + // Verify that only 1 send was successful + ct = 0; + for (size_t i = 0; i < num_threads; i++) { + if (send_success[i]) ct++; + } + // Only 1 send must be successful + EXPECT_EQ(ct, 1); + + for (size_t i = 0; i < num_threads; i++) t[i].join(); + delete ch; +} + // This tests that closing an unbuffered channel also unblocks // unblocks any receivers waiting for senders TEST(Channel, UnbufferedChannelCloseUnblocksReceiversTest) { @@ -114,7 +213,7 @@ TEST(Channel, UnbufferedChannelCloseUnblocksReceiversTest) { t[i] = std::thread( [&](bool *p) { int data; - ch->Receive(&data); + EXPECT_EQ(ch->Receive(&data), false); *p = true; }, &thread_ended[i]); @@ -155,7 +254,7 @@ TEST(Channel, UnbufferedChannelCloseUnblocksSendersTest) { t[i] = std::thread( [&](bool *p) { int data = 10; - ch->Send(&data); + EXPECT_EQ(ch->Send(&data), false); *p = true; }, &thread_ended[i]); -- GitLab From 6f28084b4d062100336fd3889012b91c6e278bcc Mon Sep 17 00:00:00 2001 From: Yan Chunwei Date: Mon, 5 Feb 2018 10:53:26 +0800 Subject: [PATCH 023/138] debug/format protobuf to human-readable codes (#8086) --- python/paddle/v2/fluid/debuger.py | 192 ++++++++++++++++++++++++++++++ 1 file changed, 192 insertions(+) diff --git a/python/paddle/v2/fluid/debuger.py b/python/paddle/v2/fluid/debuger.py index d3793524429..db1808c6474 100644 --- a/python/paddle/v2/fluid/debuger.py +++ b/python/paddle/v2/fluid/debuger.py @@ -12,10 +12,202 @@ # See the License for the specific language governing permissions and # limitations under the License. +import sys import re from graphviz import GraphPreviewGenerator import proto.framework_pb2 as framework_pb2 +_vartype2str_ = [ + "UNK", + "LoDTensor", + "SelectedRows", + "FeedMinibatch", + "FetchList", + "StepScopes", + "LodRankTable", + "LoDTensorArray", + "PlaceList", +] +_dtype2str_ = [ + "bool", + "int16", + "int32", + "int64", + "float16", + "float32", + "float64", +] + + +def repr_data_type(type): + return _dtype2str_[type] + + +def repr_tensor(proto): + return "tensor(type={}, shape={})".format(_dtype2str_[int(proto.data_type)], + str(proto.dims)) + + +reprtpl = "{ttype} {name} ({reprs})" + + +def repr_lodtensor(proto): + if not proto.lod_tensor: return + level = proto.lod_tensor.lod_level + reprs = repr_tensor(proto.lod_tensor.tensor) + return reprtpl.format( + ttype="LoDTensor" if level > 0 else "Tensor", + name=proto.name, + reprs="level=%d, %s" % (level, reprs) if level > 0 else reprs) + + +def repr_selected_rows(proto): + if not proto.selected_rows: return + return reprtpl.format( + ttype="SelectedRows", + name=proto.name, + reprs=repr_tensor(proto.selected_rows)) + + +def repr_tensor_array(proto): + if not proto.tensor_array: return + return reprtpl.format( + ttype="TensorArray", + name=proto.name, + reprs="level=%d, %s" % (proto.tensor_array.lod_level, + repr_tensor(proto.lod_tensor))) + + +type_handlers = [ + repr_lodtensor, + repr_selected_rows, + repr_tensor_array, +] + + +def repr_var(vardesc): + for handler in type_handlers: + res = handler(vardesc) + if res: + return res + + +def pprint_program_codes(program_desc): + reprs = [] + for block_idx in range(program_desc.num_blocks()): + block_desc = program_desc.block(block_idx) + block_repr = pprint_block_codes(block_desc) + reprs.append(block_repr) + return '\n'.join(reprs) + + +def pprint_block_codes(block_desc, show_backward=False): + def is_op_backward(op_desc): + if op_desc.type.endswith('_grad'): return True + + def is_var_backward(var): + if "@GRAD" in var.parameter: return True + for arg in var.arguments: + if "@GRAD" in arg: return True + + for var in op_desc.inputs: + if is_var_backward(var): return True + for var in op_desc.outputs: + if is_var_backward(var): return True + return False + + def is_var_backward(var_desc): + return "@GRAD" in var_desc.name + + if type(block_desc) is not framework_pb2.BlockDesc: + block_desc = framework_pb2.BlockDesc.FromString( + block_desc.serialize_to_string()) + var_reprs = [] + op_reprs = [] + for var in block_desc.vars: + if not show_backward and is_var_backward(var): + continue + var_reprs.append(repr_var(var)) + + for op in block_desc.ops: + if not show_backward and is_op_backward(op): continue + op_reprs.append(repr_op(op)) + + tpl = "// block-{idx} parent-{pidx}\n// variables\n{vars}\n\n// operators\n{ops}\n" + return tpl.format( + idx=block_desc.idx, + pidx=block_desc.parent_idx, + vars='\n'.join(var_reprs), + ops='\n'.join(op_reprs), ) + + +def repr_attr(desc): + tpl = "{key}={value}" + valgetter = [ + lambda attr: attr.i, + lambda attr: attr.f, + lambda attr: attr.s, + lambda attr: attr.ints, + lambda attr: attr.floats, + lambda attr: attr.strings, + lambda attr: attr.b, + lambda attr: attr.bools, + lambda attr: attr.block_idx, + lambda attr: attr.l, + ] + key = desc.name + value = valgetter[desc.type](desc) + if key == "dtype": + value = repr_data_type(value) + return tpl.format(key=key, value=str(value)), (key, value) + + +def _repr_op_fill_constant(optype, inputs, outputs, attrs): + if optype == "fill_constant": + return "{output} = {data} [shape={shape}]".format( + output=','.join(outputs), + data=attrs['value'], + shape=str(attrs['shape'])) + + +op_repr_handlers = [_repr_op_fill_constant, ] + + +def repr_op(opdesc): + optype = None + attrs = [] + attr_dict = {} + is_target = None + inputs = [] + outputs = [] + + tpl = "{outputs} = {optype}({inputs}{is_target}) [{attrs}]" + args2value = lambda args: args[0] if len(args) == 1 else str(list(args)) + for var in opdesc.inputs: + key = var.parameter + value = args2value(var.arguments) + inputs.append("%s=%s" % (key, value)) + for var in opdesc.outputs: + value = args2value(var.arguments) + outputs.append(value) + for attr in opdesc.attrs: + attr_repr, attr_pair = repr_attr(attr) + attrs.append(attr_repr) + attr_dict[attr_pair[0]] = attr_pair[1] + + is_target = opdesc.is_target + + for handler in op_repr_handlers: + res = handler(opdesc.type, inputs, outputs, attr_dict) + if res: return res + + return tpl.format( + outputs=', '.join(outputs), + optype=opdesc.type, + inputs=', '.join(inputs), + attrs="{%s}" % ','.join(attrs), + is_target=", is_target" if is_target else "") + def draw_block_graphviz(block, highlights=None, path="./temp.dot"): ''' -- GitLab From 5092f5291c17e46b4c5e176c00b46a69f5e0d466 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Sat, 3 Feb 2018 15:05:55 +0800 Subject: [PATCH 024/138] Separate GPU and CPU implementation --- paddle/operators/layer_norm_op.cc | 186 +++++++++++++++++- paddle/operators/layer_norm_op.h | 29 ++- .../v2/fluid/tests/test_layer_norm_op.py | 11 +- 3 files changed, 202 insertions(+), 24 deletions(-) diff --git a/paddle/operators/layer_norm_op.cc b/paddle/operators/layer_norm_op.cc index edc26dfb96f..910b8ec0a4d 100644 --- a/paddle/operators/layer_norm_op.cc +++ b/paddle/operators/layer_norm_op.cc @@ -21,6 +21,13 @@ using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; using DataLayout = framework::DataLayout; +template +using EigenMatrixMapRowMajor = Eigen::Map< + Eigen::Matrix>; +template +using ConstEigenMatrixMapRowMajor = Eigen::Map< + const Eigen::Matrix>; + class LayerNormOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; @@ -101,7 +108,6 @@ class LayerNormOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( Layer Normalization. - Layer Norm has been implemented as discussed in the paper: https://arxiv.org/abs/1607.06450 ... @@ -109,6 +115,75 @@ https://arxiv.org/abs/1607.06450 } }; +template +class LayerNormKernel + : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &ctx) const override { + const float epsilon = ctx.Attr("epsilon"); + const auto *scale = ctx.Input("Scale"); + const auto *bias = ctx.Input("Bias"); + const auto *x = ctx.Input("X"); + const auto &x_dims = x->dims(); + const auto begin_norm_axis = ctx.Attr("begin_norm_axis"); + + auto *output = ctx.Output("Y"); + auto *mean = ctx.Output("Mean"); + auto *var = ctx.Output("Variance"); + output->mutable_data(ctx.GetPlace()); + mean->mutable_data(ctx.GetPlace()); + var->mutable_data(ctx.GetPlace()); + + auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); + int left = static_cast(matrix_dim[0]); + int right = static_cast(matrix_dim[1]); + + auto input_map = ConstEigenMatrixMapRowMajor(x->data(), left, right); + + auto mean_map = EigenMatrixMapRowMajor(mean->data(), left, 1); + auto var_map = EigenMatrixMapRowMajor(var->data(), left, 1); + auto output_map = EigenMatrixMapRowMajor(output->data(), left, right); + + auto squre = [](T ele) { return ele * ele; }; + auto add_epslion = [epsilon](T ele) { return ele + epsilon; }; + + mean_map = input_map.rowwise().mean(); + var_map = (input_map - mean_map.replicate(1, right)) + .unaryExpr(squre) + .rowwise() + .mean() + .unaryExpr(add_epslion); + + auto inv_std_func = [](T ele) { return std::sqrt(1 / ele); }; + // TODO(zcd): Some thinking about output_map, is it appropriate that + // `output_map` and `input_map` point to the same memory. + auto inv_std = var_map.unaryExpr(inv_std_func); + if (scale && bias) { + auto scale_map = + ConstEigenMatrixMapRowMajor(scale->data(), 1, right); + auto bias_map = ConstEigenMatrixMapRowMajor(bias->data(), 1, right); + output_map = (input_map - mean_map.replicate(1, right)) + .cwiseProduct(inv_std.replicate(1, right)) + .cwiseProduct(scale_map.replicate(left, 1)) + + bias_map.replicate(left, 1); + } else if (scale) { + auto scale_map = + ConstEigenMatrixMapRowMajor(scale->data(), 1, right); + output_map = (input_map - mean_map.replicate(1, right)) + .cwiseProduct(inv_std.replicate(1, right)) + .cwiseProduct(scale_map.replicate(left, 1)); + } else if (bias) { + auto bias_map = ConstEigenMatrixMapRowMajor(bias->data(), 1, right); + output_map = (input_map - mean_map.replicate(1, right)) + .cwiseProduct(inv_std.replicate(1, right)) + + bias_map.replicate(left, 1); + } else { + output_map = (input_map - mean_map.replicate(1, right)) + .cwiseProduct(inv_std.replicate(1, right)); + } + } +}; + class LayerNormGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; @@ -161,6 +236,115 @@ class LayerNormGradOp : public framework::OperatorWithKernel { } }; +template +class LayerNormGradKernel + : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &ctx) const override { + const auto *x = ctx.Input("X"); + const auto *mean = ctx.Input("Mean"); + const auto *var = ctx.Input("Variance"); + const auto *scale = ctx.Input("Scale"); + const auto *d_y = ctx.Input(framework::GradVarName("Y")); + + const auto &x_dims = x->dims(); + + const auto begin_norm_axis = ctx.Attr("begin_norm_axis"); + auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); + int left = static_cast(matrix_dim[0]); + int right = static_cast(matrix_dim[1]); + + // init output + auto *d_x = ctx.Output(framework::GradVarName("X")); + auto *d_scale = ctx.Output(framework::GradVarName("Scale")); + auto *d_bias = ctx.Output(framework::GradVarName("Bias")); + + auto x_map = ConstEigenMatrixMapRowMajor(x->data(), left, right); + auto d_y_map = ConstEigenMatrixMapRowMajor(d_y->data(), left, right); + auto mean_map = ConstEigenMatrixMapRowMajor(mean->data(), left, 1); + auto var_map = ConstEigenMatrixMapRowMajor(var->data(), left, 1); + + if (d_bias) { + d_bias->mutable_data(ctx.GetPlace()); + auto d_bias_map = EigenMatrixMapRowMajor(d_bias->data(), 1, right); + d_bias_map = d_y_map.colwise().sum(); + } + if (d_scale) { + d_scale->mutable_data(ctx.GetPlace()); + auto d_scale_map = + EigenMatrixMapRowMajor(d_scale->data(), 1, right); + auto inv_std_func = [](T ele) { return std::sqrt(1 / ele); }; + // There are two equation to compute d_scale. One uses "Y" and the other + // does not use "Y" + d_scale_map = + ((x_map - mean_map.replicate(1, right)) + .cwiseProduct( + var_map.unaryExpr(inv_std_func).replicate(1, right)) + .cwiseProduct(d_y_map)) + .colwise() + .sum(); + } + + if (d_x) { + d_x->mutable_data(ctx.GetPlace()); + auto d_x_map = EigenMatrixMapRowMajor(d_x->data(), left, right); + auto triple_product_func = [](T ele) { return ele * ele * ele; }; + auto inv_std_func = [](T ele) { return std::sqrt(1 / ele); }; + + auto inv_std_map = var_map.unaryExpr(inv_std_func).eval(); + // TODO(zcd): these code can be refined + if (d_scale) { + auto scale_map = + ConstEigenMatrixMapRowMajor(scale->data(), 1, right); + // dy_dx + auto dx_end = + inv_std_map.replicate(1, right).cwiseProduct(d_y_map).cwiseProduct( + scale_map.replicate(left, 1)); + + // dy_dmean_dx + auto dx_mean = + (T(-1.0) / right) * dx_end.rowwise().sum().replicate(1, right); + + // dy_var_dx + auto dvar_end_part = (x_map - mean_map.replicate(1, right)) + .cwiseProduct(scale_map.replicate(left, 1)) + .cwiseProduct(d_y_map) + .rowwise() + .sum(); + auto dvar_end = inv_std_map.unaryExpr(triple_product_func) + .cwiseProduct(dvar_end_part) + .replicate(1, right); + auto dx_var = + (T(-1.0) / right) * + (x_map - mean_map.replicate(1, right)).cwiseProduct(dvar_end); + + d_x_map = dx_end + dx_mean + dx_var; + } else { + // dy_dx + auto dx_end = inv_std_map.replicate(1, right).cwiseProduct(d_y_map); + + // dy_dmean_dx + auto dx_mean = + (T(-1.0) / right) * dx_end.rowwise().sum().replicate(1, right); + + // dy_var_dx + auto dvar_end_part = (x_map - mean_map.replicate(1, right)) + .cwiseProduct(d_y_map) + .rowwise() + .sum(); + auto dvar_end = inv_std_map.unaryExpr(triple_product_func) + .cwiseProduct(dvar_end_part) + .replicate(1, right); + auto dx_var = + (T(-1.0) / right) * + (x_map - mean_map.replicate(1, right)).cwiseProduct(dvar_end); + + d_x_map = dx_end + dx_mean + dx_var; + } + } + } +}; + } // namespace operators } // namespace paddle diff --git a/paddle/operators/layer_norm_op.h b/paddle/operators/layer_norm_op.h index 309f1b87a26..2de58186fbd 100644 --- a/paddle/operators/layer_norm_op.h +++ b/paddle/operators/layer_norm_op.h @@ -78,7 +78,7 @@ class LayerNormKernel : public framework::OpKernel { auto *var = ctx.Output("Variance"); const auto begin_norm_axis = ctx.Attr("begin_norm_axis"); - const auto &x_dims = x.dims(); + const auto x_dims = x.dims(); y->mutable_data(ctx.GetPlace()); mean->mutable_data(ctx.GetPlace()); @@ -87,11 +87,12 @@ class LayerNormKernel : public framework::OpKernel { auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); int left = static_cast(matrix_dim[0]); int right = static_cast(matrix_dim[1]); - framework::DDim matrix_shape({left, right}); x.Resize(matrix_shape); - y->Resize(matrix_shape); + Tensor out; + out.ShareDataWith(*y); + out.Resize(matrix_shape); auto &dev_ctx = ctx.template device_context(); math::RowwiseMean row_mean; @@ -101,30 +102,24 @@ class LayerNormKernel : public framework::OpKernel { // functor-> get variance ElementwiseComputeEx, DeviceContext, T>( - ctx, &x, mean, /*axis*/ 0, SubAndSquareFunctor(), y); - row_mean(dev_ctx, *y, var); + ctx, &x, mean, /*axis*/ 0, SubAndSquareFunctor(), &out); + row_mean(dev_ctx, out, var); // functor-> get norm_out ElementwiseComputeEx, DeviceContext, T>( - ctx, &x, mean, /*axis*/ 0, SubFunctor(), y); + ctx, &x, mean, /*axis*/ 0, SubFunctor(), &out); ElementwiseComputeEx, DeviceContext, T>( - ctx, y, var, /*axis*/ 0, DivAndSqrtFunctor(static_cast(epsilon)), - y); + ctx, &out, var, /*axis*/ 0, + DivAndSqrtFunctor(static_cast(epsilon)), &out); - framework::DDim scale_shape({right}); if (scale) { - Tensor scale_matrix = *scale; - scale_matrix.Resize(scale_shape); ElementwiseComputeEx, DeviceContext, T>( - ctx, y, &scale_matrix, /*axis*/ 1, MulFunctor(), y); + ctx, &out, scale, /*axis*/ 1, MulFunctor(), &out); } if (bias) { - Tensor bias_matrix = *bias; - bias_matrix.Resize(scale_shape); ElementwiseComputeEx, DeviceContext, T>( - ctx, y, &bias_matrix, /*axis*/ 1, AddFunctor(), y); + ctx, &out, bias, /*axis*/ 1, AddFunctor(), &out); } - y->Resize(x_dims); } }; @@ -184,6 +179,7 @@ class LayerNormGradKernel : public framework::OpKernel { if (d_x) { framework::DDim vec_shape({left}); d_x->mutable_data(ctx.GetPlace()); + auto dx_dim = d_x->dims(); Tensor temp_vec; temp_vec.mutable_data(vec_shape, ctx.GetPlace()); @@ -227,6 +223,7 @@ class LayerNormGradKernel : public framework::OpKernel { ElementwiseComputeEx, DeviceContext, T>( ctx, d_x, &var, /*axis*/ 0, DivAndSqrtFunctor(static_cast(epsilon)), d_x); + d_x->Resize(dx_dim); } } }; diff --git a/python/paddle/v2/fluid/tests/test_layer_norm_op.py b/python/paddle/v2/fluid/tests/test_layer_norm_op.py index 68cf8673cd4..f456b1194c5 100644 --- a/python/paddle/v2/fluid/tests/test_layer_norm_op.py +++ b/python/paddle/v2/fluid/tests/test_layer_norm_op.py @@ -62,9 +62,9 @@ def _reference_layer_norm_grad(x, grad_y, scale, mean, var, begin_norm_axis=1): grad_x = dx_end + d_mean + d_std - grad_y.shape = x_shape - x.shape = x_shape + grad_x.shape, x.shape, grad_y.shape = x_shape, x_shape, x_shape scale.shape = scale_shape + var.shape, mean.shape = [N, ], [N, ] return grad_x, d_scale, d_bias @@ -112,10 +112,7 @@ def set_output_grad(scope, outputs, place, feed_dict=None): class TestLayerNormdOp(OpTest): def __assert_close(self, tensor, np_array, msg, atol=1e-4): - self.assertTrue( - np.allclose( - np.array(tensor).reshape(np_array.shape), np_array, atol=atol), - msg) + self.assertTrue(np.allclose(np.array(tensor), np_array, atol=atol), msg) def __assert_grad_close(self, tensor, @@ -123,7 +120,7 @@ class TestLayerNormdOp(OpTest): name, place, max_relative_error=0.02): - a = np.array(tensor).reshape(np_array.shape) + a = np.array(tensor) b = np_array abs_a = np.abs(a) abs_a[abs_a < 1e-5] = 1 -- GitLab From 96d4bf5337c985feff01a549c26133e3ed1c3bde Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Mon, 5 Feb 2018 12:38:37 +0800 Subject: [PATCH 025/138] prevent make clean from cleaning ExternalProject boost --- CMakeLists.txt | 2 +- cmake/external/boost.cmake | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index e8ea828dd2a..49334279f6d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -137,7 +137,7 @@ include(external/openblas) # download, build, install openblas include(external/mkldnn) # download, build, install mkldnn include(external/swig) # download, build, install swig include(external/warpctc) # download, build, install warpctc -include(external/boost) # download, build, install boost +include(external/boost) # download boost include(external/any) # download libn::any include(external/eigen) # download eigen3 include(external/pybind11) # download pybind11 diff --git a/cmake/external/boost.cmake b/cmake/external/boost.cmake index c70d83b3f4b..dbc676bdac3 100644 --- a/cmake/external/boost.cmake +++ b/cmake/external/boost.cmake @@ -21,6 +21,7 @@ set(BOOST_URL "http://sourceforge.net/projects/boost/files/boost/${BOO set(BOOST_SOURCES_DIR ${THIRD_PARTY_PATH}/boost) set(BOOST_DOWNLOAD_DIR "${BOOST_SOURCES_DIR}/src/${BOOST_PROJECT}") set(BOOST_INCLUDE_DIR "${BOOST_DOWNLOAD_DIR}/${BOOST_TAR}" CACHE PATH "boost include directory." FORCE) +set_directory_properties(PROPERTIES CLEAN_NO_CUSTOM 1) include_directories(${BOOST_INCLUDE_DIR}) -- GitLab From eef381d07482f845a875269f1b963f1d135e2cdc Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Mon, 5 Feb 2018 12:47:25 +0800 Subject: [PATCH 026/138] remove duplicated mobile index --- doc/index_cn.rst | 1 - doc/index_en.rst | 1 - doc/mobile/index_cn.rst | 9 --------- doc/mobile/index_en.rst | 9 --------- 4 files changed, 20 deletions(-) delete mode 100644 doc/mobile/index_cn.rst delete mode 100644 doc/mobile/index_en.rst diff --git a/doc/index_cn.rst b/doc/index_cn.rst index ada51c2d732..9279bac7f4b 100644 --- a/doc/index_cn.rst +++ b/doc/index_cn.rst @@ -8,4 +8,3 @@ PaddlePaddle 文档 howto/index_cn.rst api/index_cn.rst faq/index_cn.rst - mobile/index_cn.rst diff --git a/doc/index_en.rst b/doc/index_en.rst index 23b64b6cadf..64684b8b9b2 100644 --- a/doc/index_en.rst +++ b/doc/index_en.rst @@ -7,4 +7,3 @@ PaddlePaddle Documentation getstarted/index_en.rst howto/index_en.rst api/index_en.rst - mobile/index_en.rst diff --git a/doc/mobile/index_cn.rst b/doc/mobile/index_cn.rst deleted file mode 100644 index 1d99666e58b..00000000000 --- a/doc/mobile/index_cn.rst +++ /dev/null @@ -1,9 +0,0 @@ -MOBILE -====== - -.. toctree:: - :maxdepth: 1 - - cross_compiling_for_android_cn.md - cross_compiling_for_ios_cn.md - cross_compiling_for_raspberry_cn.md diff --git a/doc/mobile/index_en.rst b/doc/mobile/index_en.rst deleted file mode 100644 index ef421dacad4..00000000000 --- a/doc/mobile/index_en.rst +++ /dev/null @@ -1,9 +0,0 @@ -MOBILE -====== - -.. toctree:: - :maxdepth: 1 - - cross_compiling_for_android_en.md - cross_compiling_for_ios_en.md - cross_compiling_for_raspberry_en.md -- GitLab From df0e74dba0fcbb894eeefa727d7a8a4d50025ccb Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Mon, 5 Feb 2018 11:28:22 +0800 Subject: [PATCH 027/138] unifid GPU and CPU implementation --- paddle/operators/layer_norm_op.cc | 185 ------------------ paddle/operators/layer_norm_op.h | 2 +- .../v2/fluid/tests/test_layer_norm_op.py | 4 +- 3 files changed, 4 insertions(+), 187 deletions(-) diff --git a/paddle/operators/layer_norm_op.cc b/paddle/operators/layer_norm_op.cc index 910b8ec0a4d..76d5d571c31 100644 --- a/paddle/operators/layer_norm_op.cc +++ b/paddle/operators/layer_norm_op.cc @@ -21,13 +21,6 @@ using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; using DataLayout = framework::DataLayout; -template -using EigenMatrixMapRowMajor = Eigen::Map< - Eigen::Matrix>; -template -using ConstEigenMatrixMapRowMajor = Eigen::Map< - const Eigen::Matrix>; - class LayerNormOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; @@ -115,75 +108,6 @@ https://arxiv.org/abs/1607.06450 } }; -template -class LayerNormKernel - : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext &ctx) const override { - const float epsilon = ctx.Attr("epsilon"); - const auto *scale = ctx.Input("Scale"); - const auto *bias = ctx.Input("Bias"); - const auto *x = ctx.Input("X"); - const auto &x_dims = x->dims(); - const auto begin_norm_axis = ctx.Attr("begin_norm_axis"); - - auto *output = ctx.Output("Y"); - auto *mean = ctx.Output("Mean"); - auto *var = ctx.Output("Variance"); - output->mutable_data(ctx.GetPlace()); - mean->mutable_data(ctx.GetPlace()); - var->mutable_data(ctx.GetPlace()); - - auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); - int left = static_cast(matrix_dim[0]); - int right = static_cast(matrix_dim[1]); - - auto input_map = ConstEigenMatrixMapRowMajor(x->data(), left, right); - - auto mean_map = EigenMatrixMapRowMajor(mean->data(), left, 1); - auto var_map = EigenMatrixMapRowMajor(var->data(), left, 1); - auto output_map = EigenMatrixMapRowMajor(output->data(), left, right); - - auto squre = [](T ele) { return ele * ele; }; - auto add_epslion = [epsilon](T ele) { return ele + epsilon; }; - - mean_map = input_map.rowwise().mean(); - var_map = (input_map - mean_map.replicate(1, right)) - .unaryExpr(squre) - .rowwise() - .mean() - .unaryExpr(add_epslion); - - auto inv_std_func = [](T ele) { return std::sqrt(1 / ele); }; - // TODO(zcd): Some thinking about output_map, is it appropriate that - // `output_map` and `input_map` point to the same memory. - auto inv_std = var_map.unaryExpr(inv_std_func); - if (scale && bias) { - auto scale_map = - ConstEigenMatrixMapRowMajor(scale->data(), 1, right); - auto bias_map = ConstEigenMatrixMapRowMajor(bias->data(), 1, right); - output_map = (input_map - mean_map.replicate(1, right)) - .cwiseProduct(inv_std.replicate(1, right)) - .cwiseProduct(scale_map.replicate(left, 1)) + - bias_map.replicate(left, 1); - } else if (scale) { - auto scale_map = - ConstEigenMatrixMapRowMajor(scale->data(), 1, right); - output_map = (input_map - mean_map.replicate(1, right)) - .cwiseProduct(inv_std.replicate(1, right)) - .cwiseProduct(scale_map.replicate(left, 1)); - } else if (bias) { - auto bias_map = ConstEigenMatrixMapRowMajor(bias->data(), 1, right); - output_map = (input_map - mean_map.replicate(1, right)) - .cwiseProduct(inv_std.replicate(1, right)) + - bias_map.replicate(left, 1); - } else { - output_map = (input_map - mean_map.replicate(1, right)) - .cwiseProduct(inv_std.replicate(1, right)); - } - } -}; - class LayerNormGradOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; @@ -236,115 +160,6 @@ class LayerNormGradOp : public framework::OperatorWithKernel { } }; -template -class LayerNormGradKernel - : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext &ctx) const override { - const auto *x = ctx.Input("X"); - const auto *mean = ctx.Input("Mean"); - const auto *var = ctx.Input("Variance"); - const auto *scale = ctx.Input("Scale"); - const auto *d_y = ctx.Input(framework::GradVarName("Y")); - - const auto &x_dims = x->dims(); - - const auto begin_norm_axis = ctx.Attr("begin_norm_axis"); - auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); - int left = static_cast(matrix_dim[0]); - int right = static_cast(matrix_dim[1]); - - // init output - auto *d_x = ctx.Output(framework::GradVarName("X")); - auto *d_scale = ctx.Output(framework::GradVarName("Scale")); - auto *d_bias = ctx.Output(framework::GradVarName("Bias")); - - auto x_map = ConstEigenMatrixMapRowMajor(x->data(), left, right); - auto d_y_map = ConstEigenMatrixMapRowMajor(d_y->data(), left, right); - auto mean_map = ConstEigenMatrixMapRowMajor(mean->data(), left, 1); - auto var_map = ConstEigenMatrixMapRowMajor(var->data(), left, 1); - - if (d_bias) { - d_bias->mutable_data(ctx.GetPlace()); - auto d_bias_map = EigenMatrixMapRowMajor(d_bias->data(), 1, right); - d_bias_map = d_y_map.colwise().sum(); - } - if (d_scale) { - d_scale->mutable_data(ctx.GetPlace()); - auto d_scale_map = - EigenMatrixMapRowMajor(d_scale->data(), 1, right); - auto inv_std_func = [](T ele) { return std::sqrt(1 / ele); }; - // There are two equation to compute d_scale. One uses "Y" and the other - // does not use "Y" - d_scale_map = - ((x_map - mean_map.replicate(1, right)) - .cwiseProduct( - var_map.unaryExpr(inv_std_func).replicate(1, right)) - .cwiseProduct(d_y_map)) - .colwise() - .sum(); - } - - if (d_x) { - d_x->mutable_data(ctx.GetPlace()); - auto d_x_map = EigenMatrixMapRowMajor(d_x->data(), left, right); - auto triple_product_func = [](T ele) { return ele * ele * ele; }; - auto inv_std_func = [](T ele) { return std::sqrt(1 / ele); }; - - auto inv_std_map = var_map.unaryExpr(inv_std_func).eval(); - // TODO(zcd): these code can be refined - if (d_scale) { - auto scale_map = - ConstEigenMatrixMapRowMajor(scale->data(), 1, right); - // dy_dx - auto dx_end = - inv_std_map.replicate(1, right).cwiseProduct(d_y_map).cwiseProduct( - scale_map.replicate(left, 1)); - - // dy_dmean_dx - auto dx_mean = - (T(-1.0) / right) * dx_end.rowwise().sum().replicate(1, right); - - // dy_var_dx - auto dvar_end_part = (x_map - mean_map.replicate(1, right)) - .cwiseProduct(scale_map.replicate(left, 1)) - .cwiseProduct(d_y_map) - .rowwise() - .sum(); - auto dvar_end = inv_std_map.unaryExpr(triple_product_func) - .cwiseProduct(dvar_end_part) - .replicate(1, right); - auto dx_var = - (T(-1.0) / right) * - (x_map - mean_map.replicate(1, right)).cwiseProduct(dvar_end); - - d_x_map = dx_end + dx_mean + dx_var; - } else { - // dy_dx - auto dx_end = inv_std_map.replicate(1, right).cwiseProduct(d_y_map); - - // dy_dmean_dx - auto dx_mean = - (T(-1.0) / right) * dx_end.rowwise().sum().replicate(1, right); - - // dy_var_dx - auto dvar_end_part = (x_map - mean_map.replicate(1, right)) - .cwiseProduct(d_y_map) - .rowwise() - .sum(); - auto dvar_end = inv_std_map.unaryExpr(triple_product_func) - .cwiseProduct(dvar_end_part) - .replicate(1, right); - auto dx_var = - (T(-1.0) / right) * - (x_map - mean_map.replicate(1, right)).cwiseProduct(dvar_end); - - d_x_map = dx_end + dx_mean + dx_var; - } - } - } -}; - } // namespace operators } // namespace paddle diff --git a/paddle/operators/layer_norm_op.h b/paddle/operators/layer_norm_op.h index 2de58186fbd..608447b1ff8 100644 --- a/paddle/operators/layer_norm_op.h +++ b/paddle/operators/layer_norm_op.h @@ -31,7 +31,7 @@ template struct DivAndSqrtFunctor { explicit DivAndSqrtFunctor(T epsilon) { epsilon_ = epsilon; } inline HOSTDEVICE T operator()(T a, T b) const { - return a / (sqrt(b) + epsilon_); + return a / (sqrt(b + epsilon_)); } private: diff --git a/python/paddle/v2/fluid/tests/test_layer_norm_op.py b/python/paddle/v2/fluid/tests/test_layer_norm_op.py index f456b1194c5..4460ffaf9c4 100644 --- a/python/paddle/v2/fluid/tests/test_layer_norm_op.py +++ b/python/paddle/v2/fluid/tests/test_layer_norm_op.py @@ -20,6 +20,8 @@ import paddle.v2.fluid.core as core from paddle.v2.fluid.op import Operator from paddle.v2.fluid.framework import grad_var_name +np.random.random(123) + def _reference_layer_norm_naive(x, scale, beta, epsilon, begin_norm_axis=1): x_shape = x.shape @@ -148,7 +150,7 @@ class TestLayerNormdOp(OpTest): x_shape = shape D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1) scale_shape = [D] - np.random.random(123) + x_val = np.random.random_sample(x_shape).astype(np.float32) scale_val = np.random.random_sample(scale_shape).astype(np.float32) bias_val = np.random.random_sample(scale_shape).astype(np.float32) -- GitLab From 677312973516832a495a0f83fbcc8b5c55e977f6 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Mon, 5 Feb 2018 13:40:11 +0800 Subject: [PATCH 028/138] code refine --- paddle/operators/layer_norm_op.h | 42 ++++++++++++++++++-------------- 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/paddle/operators/layer_norm_op.h b/paddle/operators/layer_norm_op.h index 608447b1ff8..3c436b89263 100644 --- a/paddle/operators/layer_norm_op.h +++ b/paddle/operators/layer_norm_op.h @@ -97,15 +97,15 @@ class LayerNormKernel : public framework::OpKernel { auto &dev_ctx = ctx.template device_context(); math::RowwiseMean row_mean; - // functor-> get mean + // get mean row_mean(dev_ctx, x, mean); - // functor-> get variance + // get variance ElementwiseComputeEx, DeviceContext, T>( ctx, &x, mean, /*axis*/ 0, SubAndSquareFunctor(), &out); row_mean(dev_ctx, out, var); - // functor-> get norm_out + // get x_norm ElementwiseComputeEx, DeviceContext, T>( ctx, &x, mean, /*axis*/ 0, SubFunctor(), &out); ElementwiseComputeEx, DeviceContext, T>( @@ -129,9 +129,11 @@ class LayerNormGradKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext &ctx) const override { const float epsilon = ctx.Attr("epsilon"); auto x = *ctx.Input("X"); - auto mean = *ctx.Input("Mean"); - auto var = *ctx.Input("Variance"); - auto scale = *ctx.Input("Scale"); + auto *y = ctx.Input("Y"); + auto *mean = ctx.Input("Mean"); + auto *var = ctx.Input("Variance"); + auto *scale = ctx.Input("Scale"); + auto *bias = ctx.Input("Bias"); auto d_y = *ctx.Input(framework::GradVarName("Y")); const auto begin_norm_axis = ctx.Attr("begin_norm_axis"); @@ -155,14 +157,19 @@ class LayerNormGradKernel : public framework::OpKernel { if (d_scale || d_x) { x.Resize(matrix_shape); temp.mutable_data(matrix_shape, ctx.GetPlace()); - temp_norm.mutable_data(matrix_shape, ctx.GetPlace()); - // get x_norm - ElementwiseComputeEx, DeviceContext, T>( - ctx, &x, &mean, /*axis*/ 0, SubFunctor(), &temp_norm); - ElementwiseComputeEx, DeviceContext, T>( - ctx, &temp_norm, &var, /*axis*/ 0, - DivAndSqrtFunctor(static_cast(epsilon)), &temp_norm); + if (!(bias && scale)) { + temp_norm.ShareDataWith(*y); + temp_norm.Resize(matrix_shape); + } else { + temp_norm.mutable_data(matrix_shape, ctx.GetPlace()); + // get x_norm + ElementwiseComputeEx, DeviceContext, T>( + ctx, &x, mean, /*axis*/ 0, SubFunctor(), &temp_norm); + ElementwiseComputeEx, DeviceContext, T>( + ctx, &temp_norm, var, /*axis*/ 0, + DivAndSqrtFunctor(static_cast(epsilon)), &temp_norm); + } } if (d_bias) { @@ -188,7 +195,7 @@ class LayerNormGradKernel : public framework::OpKernel { if (d_scale) { // dy_dx ElementwiseComputeEx, DeviceContext, T>( - ctx, &d_y, &scale, /*axis*/ 1, MulFunctor(), &temp); + ctx, &d_y, scale, /*axis*/ 1, MulFunctor(), &temp); framework::Copy(temp, ctx.GetPlace(), ctx.device_context(), d_x); // dy_dmean_dx @@ -199,7 +206,6 @@ class LayerNormGradKernel : public framework::OpKernel { // dy_var_dx ElementwiseComputeEx, DeviceContext, T>( ctx, &temp, &temp_norm, /*axis*/ 0, MulFunctor(), &temp); - } else { // dy_dx framework::Copy(d_y, ctx.GetPlace(), ctx.device_context(), d_x); @@ -216,12 +222,12 @@ class LayerNormGradKernel : public framework::OpKernel { // dy_var_dx row_mean(dev_ctx, temp, &temp_vec); ElementwiseComputeEx, DeviceContext, T>( - ctx, &temp_norm, &temp_vec, /*axis*/ 0, MulFunctor(), &temp_norm); + ctx, &temp_norm, &temp_vec, /*axis*/ 0, MulFunctor(), &temp); ElementwiseComputeEx, DeviceContext, T>( - ctx, d_x, &temp_norm, /*axis*/ 0, SubFunctor(), d_x); + ctx, d_x, &temp, /*axis*/ 0, SubFunctor(), d_x); ElementwiseComputeEx, DeviceContext, T>( - ctx, d_x, &var, /*axis*/ 0, + ctx, d_x, var, /*axis*/ 0, DivAndSqrtFunctor(static_cast(epsilon)), d_x); d_x->Resize(dx_dim); } -- GitLab From 7dabee27960b5e043b85aca3ee51568443b326f4 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Mon, 5 Feb 2018 15:00:03 +0800 Subject: [PATCH 029/138] Add type Reader for VarDesc Add a new type `Reader` for `VarDesc`, which can holds more than one LoDTensor. --- paddle/framework/backward.cc | 4 +- paddle/framework/framework.proto | 10 +- paddle/framework/op_desc.cc | 4 +- paddle/framework/program_desc_test.cc | 4 +- paddle/framework/var_desc.cc | 174 ++++++++++++++++-- paddle/framework/var_desc.h | 20 +- paddle/inference/io.cc | 2 +- paddle/pybind/protobuf.cc | 14 +- .../v2/fluid/tests/test_protobuf_descs.py | 38 ++++ 9 files changed, 246 insertions(+), 24 deletions(-) diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc index 85e693434af..f52a51519fc 100644 --- a/paddle/framework/backward.cc +++ b/paddle/framework/backward.cc @@ -534,7 +534,7 @@ ParamGradInfoMap AppendBackward( auto root_block = program_desc.MutableBlock(root_block_idx); std::string fill_one_op_out = GradVarName(target.Name()); - bool is_scalar = target.Shape() == std::vector{1}; + bool is_scalar = target.GetShape() == std::vector{1}; PADDLE_ENFORCE(is_scalar, "target should be scalar"); VLOG(3) << "backward from loss=" << target.Name() << " data_type=" << target.GetDataType(); @@ -565,7 +565,7 @@ ParamGradInfoMap AppendBackward( auto var = root_block->Var(fill_one_op_out); var->SetDataType(target.GetDataType()); - var->SetShape(target.Shape()); + var->SetShape(target.GetShape()); auto& target_grad = retv[target.Name()]; target_grad.name_ = fill_one_op_out; target_grad.block_idx_ = root_block_idx; diff --git a/paddle/framework/framework.proto b/paddle/framework/framework.proto index 5b6ef03f610..f65ccae6e6a 100644 --- a/paddle/framework/framework.proto +++ b/paddle/framework/framework.proto @@ -116,6 +116,8 @@ message LoDTensorArrayDesc { optional int32 lod_level = 2 [ default = 0 ]; } +message Reader { repeated LoDTensorDesc lod_tensor = 1; } + message VarDesc { enum VarType { LOD_TENSOR = 1; @@ -126,13 +128,15 @@ message VarDesc { LOD_RANK_TABLE = 6; LOD_TENSOR_ARRAY = 7; PLACE_LIST = 8; + READER = 9; } required string name = 1; required VarType type = 2; - optional LoDTensorDesc lod_tensor = 3; - optional TensorDesc selected_rows = 4; + optional bool persistable = 3 [ default = false ]; + optional LoDTensorDesc lod_tensor = 4; + optional TensorDesc selected_rows = 5; optional LoDTensorArrayDesc tensor_array = 6; - optional bool persistable = 5 [ default = false ]; + optional Reader reader = 7; } message BlockDesc { diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index f554c778450..ad361852ec9 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -458,11 +458,11 @@ DDim CompileTimeInferShapeContext::GetDim(const std::string &name) const { auto var = block_.FindVarRecursive(name); PADDLE_ENFORCE(var != nullptr, "Cannot find variable %s", name); try { - auto shape = var->Shape(); + auto shape = var->GetShape(); if (shape.empty()) { return framework::make_ddim({0UL}); } else { - return framework::make_ddim(var->Shape()); + return framework::make_ddim(var->GetShape()); } } catch (...) { VLOG(5) << "GetDim of variable " << name << " error"; diff --git a/paddle/framework/program_desc_test.cc b/paddle/framework/program_desc_test.cc index 59947c9f218..9945aee31b6 100644 --- a/paddle/framework/program_desc_test.cc +++ b/paddle/framework/program_desc_test.cc @@ -53,7 +53,7 @@ TEST(ProgramDesc, copy_ctor) { ASSERT_NE(copy, var_before); ASSERT_EQ(copy->Name(), var_before->Name()); ASSERT_EQ(copy->GetType(), var_before->GetType()); - ASSERT_EQ(copy->Shape(), var_before->Shape()); + ASSERT_EQ(copy->GetShape(), var_before->GetShape()); ASSERT_EQ(copy->Proto()->SerializeAsString(), var_before->Proto()->SerializeAsString()); }; @@ -117,7 +117,7 @@ TEST(ProgramDescBind, serialize_and_deserialize) { ASSERT_NE(restored, var_before); ASSERT_EQ(restored->Name(), var_before->Name()); ASSERT_EQ(restored->GetType(), var_before->GetType()); - ASSERT_EQ(restored->Shape(), var_before->Shape()); + ASSERT_EQ(restored->GetShape(), var_before->GetShape()); ASSERT_EQ(restored->Proto()->SerializeAsString(), var_before->Proto()->SerializeAsString()); }; diff --git a/paddle/framework/var_desc.cc b/paddle/framework/var_desc.cc index 62ab6593ef2..44bd2363c83 100644 --- a/paddle/framework/var_desc.cc +++ b/paddle/framework/var_desc.cc @@ -26,18 +26,91 @@ void VarDesc::SetShape(const std::vector &dims) { VectorToRepeated(dims, mutable_tensor_desc()->mutable_dims()); } +void VarDesc::SetTensorDescNum(size_t num) { + switch (desc_.type()) { + case proto::VarDesc::READER: { + auto *lod_tensors_ptr = desc_.mutable_reader()->mutable_lod_tensor(); + lod_tensors_ptr->Clear(); + for (size_t i = 0; i < num; ++i) { + lod_tensors_ptr->Add(); + } + return; + } break; + default: + PADDLE_THROW( + "Setting 'sub_tensor_number' is not supported by the type of var %s.", + this->Name()); + } +} + +size_t VarDesc::GetTensorDescNum() const { + switch (desc_.type()) { + case proto::VarDesc::READER: + return desc_.reader().lod_tensor_size(); + break; + default: + PADDLE_THROW( + "Getting 'sub_tensor_number' is not supported by the type of var %s.", + this->Name()); + } +} + +void VarDesc::SetShapes( + const std::vector> &multiple_dims) { + PADDLE_ENFORCE_EQ(multiple_dims.size(), GetTensorDescNum(), + "The number of given shapes(%d) doesn't equal to the " + "number of sub tensor.", + multiple_dims.size(), GetTensorDescNum()); + std::vector tensors = mutable_tensor_descs(); + for (size_t i = 0; i < multiple_dims.size(); ++i) { + VectorToRepeated(multiple_dims[i], tensors[i]->mutable_dims()); + } +} + +std::vector VarDesc::GetShape() const { + return RepeatedToVector(tensor_desc().dims()); +} + +std::vector> VarDesc::GetShapes() const { + std::vector descs = tensor_descs(); + std::vector> res; + res.reserve(descs.size()); + for (const auto &tensor_desc : descs) { + res.push_back(RepeatedToVector(tensor_desc.dims())); + } + return res; +} + void VarDesc::SetDataType(proto::DataType data_type) { mutable_tensor_desc()->set_data_type(data_type); } -std::vector VarDesc::Shape() const { - return RepeatedToVector(tensor_desc().dims()); +void VarDesc::SetDataTypes( + const std::vector &multiple_data_type) { + PADDLE_ENFORCE_EQ(multiple_data_type.size(), GetTensorDescNum(), + "The number of given data types(%d) doesn't equal to the " + "number of sub tensor.", + multiple_data_type.size(), GetTensorDescNum()); + std::vector tensor_descs = mutable_tensor_descs(); + for (size_t i = 0; i < multiple_data_type.size(); ++i) { + tensor_descs[i]->set_data_type(multiple_data_type[i]); + } } proto::DataType VarDesc::GetDataType() const { return tensor_desc().data_type(); } +std::vector VarDesc::GetDataTypes() const { + std::vector descs = tensor_descs(); + std::vector res; + res.reserve(descs.size()); + for (const auto &tensor_desc : descs) { + res.push_back(tensor_desc.data_type()); + } + return res; +} + void VarDesc::SetLoDLevel(int32_t lod_level) { switch (desc_.type()) { case proto::VarDesc::LOD_TENSOR: @@ -47,8 +120,28 @@ void VarDesc::SetLoDLevel(int32_t lod_level) { desc_.mutable_tensor_array()->set_lod_level(lod_level); break; default: - PADDLE_THROW("Tensor type=%d does not support LoDLevel", - desc_.tensor_array().lod_level()); + PADDLE_THROW( + "Setting 'lod_level' is not supported by the type of var %s.", + this->Name()); + } +} + +void VarDesc::SetLoDLevels(const std::vector &multiple_lod_level) { + PADDLE_ENFORCE_EQ(multiple_lod_level.size(), GetTensorDescNum(), + "The number of given data types(%d) doesn't equal to the " + "number of sub tensor.", + multiple_lod_level.size(), GetTensorDescNum()); + switch (desc_.type()) { + case proto::VarDesc::READER: { + size_t i = 0; + for (auto &lod_tensor : *desc_.mutable_reader()->mutable_lod_tensor()) { + lod_tensor.set_lod_level(multiple_lod_level[i++]); + } + } break; + default: + PADDLE_THROW( + "Setting 'lod_levels' is not supported by the type of var %s.", + this->Name()); } } @@ -59,13 +152,31 @@ int32_t VarDesc::GetLoDLevel() const { case proto::VarDesc::LOD_TENSOR_ARRAY: return desc_.tensor_array().lod_level(); default: - PADDLE_THROW("Tensor type=%d does not support LoDLevel", - desc_.tensor_array().lod_level()); + PADDLE_THROW( + "Getting 'lod_level' is not supported by the type of var %s.", + this->Name()); + } +} + +std::vector VarDesc::GetLoDLevels() const { + std::vector res; + switch (desc_.type()) { + case proto::VarDesc::READER: + res.reserve(desc_.reader().lod_tensor_size()); + for (auto &lod_tensor : desc_.reader().lod_tensor()) { + res.push_back(lod_tensor.lod_level()); + } + return res; + break; + default: + PADDLE_THROW( + "Getting 'lod_levels' is not supported by the type of var %s.", + this->Name()); } } const proto::TensorDesc &VarDesc::tensor_desc() const { - PADDLE_ENFORCE(desc_.has_type(), "invoke TensorDesc must after set type"); + PADDLE_ENFORCE(desc_.has_type(), "The var's type hasn't been set."); switch (desc_.type()) { case proto::VarDesc::SELECTED_ROWS: return desc_.selected_rows(); @@ -74,13 +185,32 @@ const proto::TensorDesc &VarDesc::tensor_desc() const { case proto::VarDesc::LOD_TENSOR_ARRAY: return desc_.tensor_array().tensor(); default: - PADDLE_THROW("The type of var %s is unsupported.", this->Name()); + PADDLE_THROW( + "Getting 'tensor_desc' is not supported by the type of var %s.", + this->Name()); + } +} + +std::vector VarDesc::tensor_descs() const { + PADDLE_ENFORCE(desc_.has_type(), "The var type hasn't been set."); + std::vector res; + res.reserve(GetTensorDescNum()); + switch (desc_.type()) { + case proto::VarDesc::READER: + for (const auto &lod_tensor : desc_.reader().lod_tensor()) { + res.push_back(lod_tensor.tensor()); + } + return res; + default: + PADDLE_THROW( + "Getting 'tensor_descs' is not supported by the type of var " + "%s.", + this->Name()); } } proto::TensorDesc *VarDesc::mutable_tensor_desc() { - PADDLE_ENFORCE(desc_.has_type(), - "invoke MutableTensorDesc must after set type"); + PADDLE_ENFORCE(desc_.has_type(), "The var type hasn't been set."); switch (desc_.type()) { case proto::VarDesc::SELECTED_ROWS: return desc_.mutable_selected_rows(); @@ -89,8 +219,30 @@ proto::TensorDesc *VarDesc::mutable_tensor_desc() { case proto::VarDesc::LOD_TENSOR_ARRAY: return desc_.mutable_tensor_array()->mutable_tensor(); default: - PADDLE_THROW("Unexpected branch."); + PADDLE_THROW( + "Getting 'mutable_tensor_desc' is not supported by the type of var " + "%s.", + this->Name()); } } + +std::vector VarDesc::mutable_tensor_descs() { + PADDLE_ENFORCE(desc_.has_type(), "The var type hasn't been set."); + std::vector res; + res.reserve(GetTensorDescNum()); + switch (desc_.type()) { + case proto::VarDesc::READER: + for (auto &lod_tensor : *desc_.mutable_reader()->mutable_lod_tensor()) { + res.push_back(lod_tensor.mutable_tensor()); + } + return res; + default: + PADDLE_THROW( + "Getting 'tensor_descs' is not supported by the type of var " + "%s.", + this->Name()); + } +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/var_desc.h b/paddle/framework/var_desc.h index 9316b14bb69..862b9a5d807 100644 --- a/paddle/framework/var_desc.h +++ b/paddle/framework/var_desc.h @@ -68,18 +68,34 @@ class VarDesc { void SetName(std::string name) { desc_.set_name(name); } + void SetTensorDescNum(size_t num); + + size_t GetTensorDescNum() const; + void SetShape(const std::vector &dims); + void SetShapes(const std::vector> &multiple_dims); + + std::vector GetShape() const; + + std::vector> GetShapes() const; + void SetDataType(proto::DataType data_type); - std::vector Shape() const; + void SetDataTypes(const std::vector &multiple_data_type); proto::DataType GetDataType() const; + std::vector GetDataTypes() const; + void SetLoDLevel(int32_t lod_level); + void SetLoDLevels(const std::vector &multiple_lod_level); + int32_t GetLoDLevel() const; + std::vector GetLoDLevels() const; + proto::VarDesc::VarType GetType() const; void SetType(proto::VarDesc::VarType type); @@ -90,7 +106,9 @@ class VarDesc { private: const proto::TensorDesc &tensor_desc() const; + std::vector tensor_descs() const; proto::TensorDesc *mutable_tensor_desc(); + std::vector mutable_tensor_descs(); proto::VarDesc desc_; }; diff --git a/paddle/inference/io.cc b/paddle/inference/io.cc index 60ad7af1c0a..1ed14b69c83 100644 --- a/paddle/inference/io.cc +++ b/paddle/inference/io.cc @@ -55,7 +55,7 @@ void LoadPersistables(framework::Executor& executor, VLOG(3) << "parameter's name: " << var->Name(); framework::VarDesc* new_var = load_block->Var(var->Name()); - new_var->SetShape(var->Shape()); + new_var->SetShape(var->GetShape()); new_var->SetDataType(var->GetDataType()); new_var->SetType(var->GetType()); new_var->SetLoDLevel(var->GetLoDLevel()); diff --git a/paddle/pybind/protobuf.cc b/paddle/pybind/protobuf.cc index 371d6119d4a..0f1953abe08 100644 --- a/paddle/pybind/protobuf.cc +++ b/paddle/pybind/protobuf.cc @@ -214,11 +214,20 @@ void BindVarDsec(py::module &m) { py::return_value_policy::reference) .def("set_name", &VarDesc::SetName) .def("set_shape", &VarDesc::SetShape) + .def("set_shapes", &VarDesc::SetShapes) .def("set_dtype", &VarDesc::SetDataType) - .def("shape", &VarDesc::Shape, py::return_value_policy::reference) + .def("set_dtypes", &VarDesc::SetDataTypes) + .def("set_tensor_num", &VarDesc::SetTensorDescNum) + .def("tensor_num", &VarDesc::GetTensorDescNum) + .def("shape", &VarDesc::GetShape, py::return_value_policy::reference) + .def("shapes", &VarDesc::GetShapes, py::return_value_policy::reference) .def("dtype", &VarDesc::GetDataType, py::return_value_policy::reference) + .def("dtypes", &VarDesc::GetDataTypes, py::return_value_policy::reference) .def("lod_level", &VarDesc::GetLoDLevel) + .def("lod_levels", &VarDesc::GetLoDLevels, + py::return_value_policy::reference) .def("set_lod_level", &VarDesc::SetLoDLevel) + .def("set_lod_levels", &VarDesc::SetLoDLevels) .def("type", &VarDesc::GetType) .def("set_type", &VarDesc::SetType) .def("serialize_to_string", SerializeMessage) @@ -233,7 +242,8 @@ void BindVarDsec(py::module &m) { .value("STEP_SCOPES", proto::VarDesc::STEP_SCOPES) .value("LOD_RANK_TABLE", proto::VarDesc::LOD_RANK_TABLE) .value("LOD_TENSOR_ARRAY", proto::VarDesc::LOD_TENSOR_ARRAY) - .value("PLACE_LIST", proto::VarDesc::PLACE_LIST); + .value("PLACE_LIST", proto::VarDesc::PLACE_LIST) + .value("READER", proto::VarDesc::READER); } void BindOpDesc(py::module &m) { diff --git a/python/paddle/v2/fluid/tests/test_protobuf_descs.py b/python/paddle/v2/fluid/tests/test_protobuf_descs.py index 9034b2f4ef1..ac6de68b5fe 100644 --- a/python/paddle/v2/fluid/tests/test_protobuf_descs.py +++ b/python/paddle/v2/fluid/tests/test_protobuf_descs.py @@ -115,6 +115,20 @@ class TestVarDesc(unittest.TestCase): self.assertEqual(src_shape, res_shape) self.assertEqual(core.VarDesc.VarType.SELECTED_ROWS, var.type()) + def test_multiple_shape(self): + program_desc = core.ProgramDesc() + block = program_desc.block(0) + var = block.var('my_reader') + var.set_type(core.VarDesc.VarType.READER) + var.set_tensor_num(3) + src_shapes = [[2, 3, 3], [4, 5], [6, 7, 8, 9]] + var.set_shapes(src_shapes) + #import pdb + # pdb.set_trace() + res_shapes = var.shapes() + self.assertEqual(src_shapes, res_shapes) + self.assertEqual(core.VarDesc.VarType.READER, var.type()) + def test_dtype(self): program_desc = core.ProgramDesc() block = program_desc.block(0) @@ -124,6 +138,30 @@ class TestVarDesc(unittest.TestCase): self.assertEqual(core.DataType.INT32, var.dtype()) self.assertEqual(core.VarDesc.VarType.LOD_TENSOR, var.type()) + def test_multiple_dtype(self): + program_desc = core.ProgramDesc() + block = program_desc.block(0) + var = block.var('my_reader') + var.set_type(core.VarDesc.VarType.READER) + var.set_tensor_num(3) + src_types = [ + core.DataType.INT32, core.DataType.FP64, core.DataType.FP32 + ] + var.set_dtypes(src_types) + self.assertEqual(src_types, var.dtypes()) + self.assertEqual(core.VarDesc.VarType.READER, var.type()) + + def test_multiple_lod_level(self): + program_desc = core.ProgramDesc() + block = program_desc.block(0) + var = block.var('my_reader') + var.set_type(core.VarDesc.VarType.READER) + var.set_tensor_num(3) + src_types = [3, 1, 2] + var.set_lod_levels(src_types) + self.assertEqual(src_types, var.lod_levels()) + self.assertEqual(core.VarDesc.VarType.READER, var.type()) + class TestBlockDesc(unittest.TestCase): def test_add_var(self): -- GitLab From 0d03cab5e9b16dba434ed4a25b5dff887d60a897 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Mon, 5 Feb 2018 15:18:10 +0800 Subject: [PATCH 030/138] fix a compile error --- paddle/framework/var_desc.cc | 2 +- paddle/framework/var_desc.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/framework/var_desc.cc b/paddle/framework/var_desc.cc index 44bd2363c83..6d83e2e4112 100644 --- a/paddle/framework/var_desc.cc +++ b/paddle/framework/var_desc.cc @@ -56,7 +56,7 @@ size_t VarDesc::GetTensorDescNum() const { } void VarDesc::SetShapes( - const std::vector> &multiple_dims) { + const std::vector> &multiple_dims) { PADDLE_ENFORCE_EQ(multiple_dims.size(), GetTensorDescNum(), "The number of given shapes(%d) doesn't equal to the " "number of sub tensor.", diff --git a/paddle/framework/var_desc.h b/paddle/framework/var_desc.h index 862b9a5d807..72da2fbb0a6 100644 --- a/paddle/framework/var_desc.h +++ b/paddle/framework/var_desc.h @@ -74,7 +74,7 @@ class VarDesc { void SetShape(const std::vector &dims); - void SetShapes(const std::vector> &multiple_dims); + void SetShapes(const std::vector> &multiple_dims); std::vector GetShape() const; -- GitLab From 63320f722cc718e69ddaa4aa5921e7fd047097df Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 5 Feb 2018 01:17:00 -0800 Subject: [PATCH 031/138] "add some interfaces" --- paddle/framework/lod_tensor.h | 22 ++++++- paddle/framework/mixed_vector.h | 102 ++++++++++++++++++++------------ paddle/memory/memory.h | 18 ++++++ 3 files changed, 103 insertions(+), 39 deletions(-) diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h index d0ab640485b..ab289241610 100644 --- a/paddle/framework/lod_tensor.h +++ b/paddle/framework/lod_tensor.h @@ -48,12 +48,26 @@ namespace framework { */ struct LoD : public std::vector> { using std::vector>::vector; + platform::Place place() const { + if (this->size() == 0) { + // Not Initialze Yet. + return platform::CPUPlace(); + } else { + return this->front().place(); + } + } void CopyFromCUDA() { for (auto it = this->begin(); it != this->end(); ++it) { it->CopyFromCUDA(); } } + + void CopyToPeer(platform::Place place) { + for (auto it = this->begin(); it != this->end(); ++it) { + it->mutable_data(place); + } + } }; std::ostream& operator<<(std::ostream& os, const LoD& lod); @@ -115,7 +129,13 @@ class LoDTensor : public Tensor { explicit LoDTensor(const LoD& lod) : lod_(lod) {} - void set_lod(const LoD& lod) { lod_ = lod; } + void set_lod(const LoD& lod) { + lod_ = lod; + if (holder_ != nullptr && + platform::is_same_place(holder_->place(), lod.place())) { + lod_.CopyToPeer(holder_->place()); + } + } const LoD& lod() const { return lod_; } diff --git a/paddle/framework/mixed_vector.h b/paddle/framework/mixed_vector.h index 85caac8dcd9..d86899bc631 100644 --- a/paddle/framework/mixed_vector.h +++ b/paddle/framework/mixed_vector.h @@ -40,14 +40,15 @@ class Vector : public std::vector { Vector() {} Vector(const std::vector &v) : std::vector(v) {} // NOLINT - virtual ~Vector() { -#ifdef PADDLE_WITH_CUDA - if (cuda_ptr_ != nullptr) { - memory::Free(place_, cuda_ptr_); - } -#endif - } + inline platform::Place place() const { return place_; } + /*! Return a pointer to constant memory block. */ + inline const T *data(platform::Place place) const; + + /*! Return a pointer to mutable memory block. */ + inline T *mutable_data(platform::Place place); + + // TODO(dzhwinter): below interfaces should be removed /* Get device vector */ T *cuda_data() { CopyToCUDA(); @@ -68,25 +69,71 @@ class Vector : public std::vector { void CopyToPeer(platform::Place); private: - void *cuda_ptr_ = nullptr; + std::shared_ptr cuda_ptr_; size_t cuda_size_ = 0; // device vector numel platform::CUDAPlace place_; }; template -void Vector::CopyToCUDA() { +inline const T *Vector::data(platform::Place place) const { + if (platform::is_cpu_place(place)) { + return std::vector::data(); + } else if (platform::is_gpu_place(place)) { + if (cuda_ptr_ == nullptr) { + return nullptr; + } + if (platform::is_same_place(place, place_)) { + return static_cast(cuda_ptr_.get()); + } else { + PADDLE_THROW( + "Unmatched place. Please use `mutable_data` copy lod to the target " + "Place first."); + } + } else { + PADDLE_THROW("Unsupport Place."); + } +} + +template +inline T *Vector::mutable_data(platform::Place place) { + if (platform::is_cpu_place(place)) { + return std::vector::data(); + } else if (platform::is_gpu_place(place)) { + if (!platform::is_same_place(place, place_)) { + place_ = boost::get(place); + } #ifdef PADDLE_WITH_CUDA - if (cuda_size_ < this->size()) { - if (cuda_ptr_ != nullptr) { - memory::Free(place_, cuda_ptr_); + if (cuda_size_ < this->size() || cuda_ptr_ == nullptr) { + cuda_ptr_.reset( + memory::Alloc(place_, this->size() * sizeof(T)), + memory::PlainDeleter(place_)); } - cuda_ptr_ = - memory::Alloc(place_, this->size() * sizeof(T)); + cuda_size_ = this->size(); + platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); + auto *ctx = pool.GetByPlace(place_); + memory::Copy(place_, cuda_ptr_.get(), platform::CPUPlace(), + static_cast(this->data()), + this->size() * sizeof(T), ctx->stream()); + ctx->Wait(); + return static_cast(cuda_ptr_.get()); +#endif + } else { + PADDLE_THROW("Unsupport Place."); + } +} + +template +void Vector::CopyToCUDA() { +#ifdef PADDLE_WITH_CUDA + if (cuda_size_ < this->size() || cuda_ptr_ == nullptr) { + cuda_ptr_.reset( + memory::Alloc(this->size() * sizeof(T)), + memory::PlainDeleter(place_)); } cuda_size_ = this->size(); platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto *ctx = pool.GetByPlace(place_); - memory::Copy(place_, cuda_ptr_, platform::CPUPlace(), + memory::Copy(place_, cuda_ptr_.get(), platform::CPUPlace(), static_cast(this->data()), this->size() * sizeof(T), ctx->stream()); ctx->Wait(); @@ -104,32 +151,11 @@ void Vector::CopyFromCUDA() { platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto *ctx = pool.GetByPlace(place_); memory::Copy(platform::CPUPlace(), static_cast(this->data()), place_, - static_cast(cuda_ptr_), this->size() * sizeof(T), - ctx->stream()); - ctx->Wait(); -#endif -} - -template -void Vector::CopyToPeer(platform::Place peer_place) { -#ifdef PADDLE_WITH_CUDA - auto *ctx = platform::DeviceContextPool::Instance().GetByPlace(place_); - void *peer_cuda_ptr = memory::Alloc( - boost::get(peer_place), this->size() * sizeof(T)); - memory::Copy(boost::get(peer_place), peer_cuda_ptr, - place_, cuda_ptr_, this->size() * sizeof(T), ctx->stream()); + static_cast(cuda_ptr_.get()), + this->size() * sizeof(T), ctx->stream()); ctx->Wait(); - - memory::Free(place_, cuda_ptr_); - place_ = boost::get(peer_place); - cuda_ptr_ = peer_cuda_ptr; #endif } -template class Vector; -template class Vector; -template class Vector; -template class Vector; - } // namespace framework } // namespace paddle diff --git a/paddle/memory/memory.h b/paddle/memory/memory.h index 7012b6d331d..30ed68c6e0e 100644 --- a/paddle/memory/memory.h +++ b/paddle/memory/memory.h @@ -81,5 +81,23 @@ class PODDeleter { Place place_; }; +/** + * \brief Free memory block in one place does not meet POD + * + * \note In some cases, custom deleter is used to + * deallocate the memory automatically for + * std::unique_ptr in tensor.h. + * + */ +template +class PlainDeleter { + public: + explicit PlainDeleter(Place place) : place_(place) {} + void operator()(T* ptr) { Free(place_, reinterpret_cast(ptr)); } + + private: + Place place_; +}; + } // namespace memory } // namespace paddle -- GitLab From a402d2b39257ae58345998ed5edd6b87b09e9a1b Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 5 Feb 2018 01:22:13 -0800 Subject: [PATCH 032/138] "fix condition" --- paddle/framework/lod_tensor.h | 2 +- paddle/framework/selected_rows.h | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h index ab289241610..3465e02c826 100644 --- a/paddle/framework/lod_tensor.h +++ b/paddle/framework/lod_tensor.h @@ -132,7 +132,7 @@ class LoDTensor : public Tensor { void set_lod(const LoD& lod) { lod_ = lod; if (holder_ != nullptr && - platform::is_same_place(holder_->place(), lod.place())) { + !platform::is_same_place(holder_->place(), lod.place())) { lod_.CopyToPeer(holder_->place()); } } diff --git a/paddle/framework/selected_rows.h b/paddle/framework/selected_rows.h index 30d3dfc1e89..11323442446 100644 --- a/paddle/framework/selected_rows.h +++ b/paddle/framework/selected_rows.h @@ -42,7 +42,13 @@ class SelectedRows { Vector* mutable_rows() { return &rows_; } - void set_rows(const Vector& rows) { rows_ = rows; } + void set_rows(const Vector& rows) { + rows_ = rows; + if (value_ != nullptr && + !platform::is_same_place(value_->place(), rows.place())) { + rows_.mutable_data(value_->place()); + } + } DDim GetCompleteDims() const { std::vector dims = vectorize(value_->dims()); -- GitLab From 07dd3d25b39878b6ccc4736e189c015cfd2265d2 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 5 Feb 2018 01:53:43 -0800 Subject: [PATCH 033/138] "fix const warning" --- paddle/framework/CMakeLists.txt | 1 + paddle/framework/lod_tensor_test.cu | 22 -------- paddle/framework/mixed_vector_test.cu | 72 +++++++++++++++++++++++++++ 3 files changed, 73 insertions(+), 22 deletions(-) create mode 100644 paddle/framework/mixed_vector_test.cu diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index 8b71f73c36c..7c4ba3afb96 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -20,6 +20,7 @@ endif() cc_test(eigen_test SRCS eigen_test.cc DEPS tensor) +nv_test(mixed_vector_test SRCS mixed_vector_test.cu DEPS place paddle_memory device_context init) cc_library(lod_tensor SRCS lod_tensor.cc DEPS ddim place tensor framework_proto) cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor paddle_memory) nv_test(lod_tensor_gpu_test SRCS lod_tensor_test.cu DEPS lod_tensor init) diff --git a/paddle/framework/lod_tensor_test.cu b/paddle/framework/lod_tensor_test.cu index d4c9f00bd9c..adea02e3b3f 100644 --- a/paddle/framework/lod_tensor_test.cu +++ b/paddle/framework/lod_tensor_test.cu @@ -28,28 +28,6 @@ __global__ void test(size_t* a, int size) { } } -TEST(Vector, Normal) { - using namespace paddle::framework; - using namespace paddle::platform; - using namespace paddle::memory; - - paddle::framework::InitDevices(); - - paddle::framework::Vector vec({1, 2, 3}); - size_t* ptr = vec.data(); - for (size_t i = 0; i < vec.size(); ++i) { - EXPECT_EQ(vec[i], *(ptr + i)); - } - - vec.clear(); - vec.CopyFromCUDA(); - - std::vector v = {1, 2, 3}; - for (size_t i = 0; i < v.size(); ++i) { - EXPECT_EQ(v[i], vec[i]); - } -} - TEST(LoD, data) { paddle::framework::InitDevices(); diff --git a/paddle/framework/mixed_vector_test.cu b/paddle/framework/mixed_vector_test.cu new file mode 100644 index 00000000000..7b571788ad1 --- /dev/null +++ b/paddle/framework/mixed_vector_test.cu @@ -0,0 +1,72 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ +#include +#include +#include "gtest/gtest.h" + +#include "paddle/framework/init.h" +#include "paddle/framework/mixed_vector.h" + +using namespace paddle::framework; +using namespace paddle::platform; +using namespace paddle::memory; + +template +__global__ void test(T* data, int size) { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; + i += blockDim.x * gridDim.x) { + data[i] *= 2; + } +} + +TEST(Vector, Normal) { + // fill the device context pool. + InitDevices(); + + Vector vec({1, 2, 3}); + size_t* ptr = vec.data(); + for (size_t i = 0; i < vec.size(); ++i) { + EXPECT_EQ(vec[i], *(ptr + i)); + } + + vec.clear(); + vec.CopyFromCUDA(); + + std::vector v = {1, 2, 3}; + for (size_t i = 0; i < v.size(); ++i) { + EXPECT_EQ(v[i], vec[i]); + } +} + +TEST(Vector, MultipleCopy) { + InitDevices(); + Vector vec({1, 2, 3}); + CUDAPlace place(0); + vec.mutable_data(place); + auto vec2 = Vector(vec); + { + const size_t* ptr = vec2.data(CPUPlace()); + for (size_t i = 0; i < vec2.size(); ++i) { + EXPECT_EQ(*(ptr + i), vec[i]); + } + } + test<<<3, 3>>>(vec2.mutable_data(place), vec2.size()); + vec2.CopyFromCUDA(); + { + const size_t* ptr = vec2.data(CPUPlace()); + for (size_t i = 0; i < vec2.size(); ++i) { + EXPECT_EQ(*(ptr + i), vec[i] * 2); + } + } +} -- GitLab From 4e5202647684f4ff6525775ce62a6dd674257917 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Mon, 5 Feb 2018 16:55:53 +0800 Subject: [PATCH 034/138] add independent sphinx tree for api --- doc/CMakeLists.txt | 2 ++ doc/api/CMakeLists.txt | 20 ++++++++++++++++++++ paddle/scripts/docker/build.sh | 2 +- paddle/scripts/travis/build_doc.sh | 6 ++++-- 4 files changed, 27 insertions(+), 3 deletions(-) create mode 100644 doc/api/CMakeLists.txt diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt index 94dd3457fb5..58ce5d61c95 100644 --- a/doc/CMakeLists.txt +++ b/doc/CMakeLists.txt @@ -47,3 +47,5 @@ sphinx_add_target(paddle_docs_cn ${SPHINX_CACHE_DIR_CN} ${CMAKE_CURRENT_SOURCE_DIR} ${SPHINX_HTML_DIR_CN}) + +add_subdirectory(api) diff --git a/doc/api/CMakeLists.txt b/doc/api/CMakeLists.txt new file mode 100644 index 00000000000..4e0bc1d5b8e --- /dev/null +++ b/doc/api/CMakeLists.txt @@ -0,0 +1,20 @@ +# configured documentation tools and intermediate build results +set(BINARY_BUILD_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_build") + +# Sphinx cache with pickled ReST documents +set(SPHINX_CACHE_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_doctrees") + +# HTML output director +set(SPHINX_HTML_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/html") + +configure_file( + "${CMAKE_CURRENT_SOURCE_DIR}/../templates/conf.py.en.in" + "${BINARY_BUILD_DIR_EN}/conf.py" + @ONLY) + +sphinx_add_target(paddle_api_docs + html + ${BINARY_BUILD_DIR_EN} + ${SPHINX_CACHE_DIR_EN} + ${CMAKE_CURRENT_SOURCE_DIR} + ${SPHINX_HTML_DIR_EN}) diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index 59f3af03986..ba496db5f83 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -117,7 +117,7 @@ EOF -DWITH_STYLE_CHECK=OFF make -j `nproc` gen_proto_py make -j `nproc` paddle_python - make -j `nproc` paddle_docs paddle_docs_cn + make -j `nproc` paddle_docs paddle_docs_cn paddle_api_docs make -j `nproc` print_operators_doc paddle/pybind/print_operators_doc > doc/en/html/operators.json popd diff --git a/paddle/scripts/travis/build_doc.sh b/paddle/scripts/travis/build_doc.sh index 0db8d33bbcb..4af4ac4f5e4 100755 --- a/paddle/scripts/travis/build_doc.sh +++ b/paddle/scripts/travis/build_doc.sh @@ -9,13 +9,14 @@ cd $TRAVIS_BUILD_DIR/build cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_MKL=OFF -DWITH_DOC=ON make -j `nproc` gen_proto_py make -j `nproc` paddle_python -make -j `nproc` paddle_docs paddle_docs_cn +make -j `nproc` paddle_docs paddle_docs_cn paddle_api_docs make -j `nproc` print_operators_doc paddle/pybind/print_operators_doc > doc/en/html/operators.json # check websites for broken links linkchecker doc/en/html/index.html linkchecker doc/cn/html/index.html +linkchecker doc/api/en/html/index.html # Parse Github URL REPO=`git config remote.origin.url` @@ -54,10 +55,11 @@ function deploy_docs() { mkdir -p ${DIR} # remove old docs. mv new docs. set +e - rm -rf ${DIR}/doc ${DIR}/doc_cn + rm -rf ${DIR}/doc ${DIR}/doc_cn ${DIR}/api_doc set -e cp -r ../doc/cn/html ${DIR}/doc_cn cp -r ../doc/en/html ${DIR}/doc + cp -r ../doc/api/en/html ${DIR}/api_doc git add . } -- GitLab From 239fafb0d31618a1aee2ac814ed662f18c48cc9c Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 5 Feb 2018 02:37:52 -0800 Subject: [PATCH 035/138] "test on parallel do op" --- paddle/operators/parallel_do_op.cc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/paddle/operators/parallel_do_op.cc b/paddle/operators/parallel_do_op.cc index 67f9854c02f..d662878592a 100644 --- a/paddle/operators/parallel_do_op.cc +++ b/paddle/operators/parallel_do_op.cc @@ -79,6 +79,7 @@ inline void CopyOrShare(const framework::Variable &src, } else { Copy(src.Get(), dst_place, dst->GetMutable()); } + dst->set_lod(src.lod()); } else if (src.IsType()) { auto &src_sr = src.Get(); auto *dst_sr = dst->GetMutable(); @@ -89,6 +90,7 @@ inline void CopyOrShare(const framework::Variable &src, } else { Copy(src_sr.value(), dst_place, dst_sr->mutable_value()); } + dst_sr->set_rows(src_sr.rows()); } else { PADDLE_THROW("Expect LoDTensor/SelectedRows, get %s", src.Type().name()); } @@ -145,6 +147,7 @@ class ParallelDoOp : public framework::OperatorBase { auto *sub_scope = sub_scopes[i]; auto *dst = sub_scope->Var(param)->GetMutable(); framework::Copy(src, place, dst); + dst->set_lod(src.lod()); } } WaitOnPlaces(places); -- GitLab From f18f3826dc5d59f49908f2c232ff81b15c0abd9a Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 5 Feb 2018 03:04:39 -0800 Subject: [PATCH 036/138] "parallel op set lod after copy " --- paddle/framework/mixed_vector.h | 4 ++-- paddle/operators/parallel_do_op.cc | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/paddle/framework/mixed_vector.h b/paddle/framework/mixed_vector.h index d86899bc631..aade7d83918 100644 --- a/paddle/framework/mixed_vector.h +++ b/paddle/framework/mixed_vector.h @@ -54,7 +54,7 @@ class Vector : public std::vector { CopyToCUDA(); PADDLE_ENFORCE_NOT_NULL( cuda_ptr_, "No data or Insufficient CUDA memory to allocation"); - return static_cast(cuda_ptr_); + return static_cast(cuda_ptr_.get()); } /* Get host vector */ @@ -127,7 +127,7 @@ void Vector::CopyToCUDA() { #ifdef PADDLE_WITH_CUDA if (cuda_size_ < this->size() || cuda_ptr_ == nullptr) { cuda_ptr_.reset( - memory::Alloc(this->size() * sizeof(T)), + memory::Alloc(place_, this->size() * sizeof(T)), memory::PlainDeleter(place_)); } cuda_size_ = this->size(); diff --git a/paddle/operators/parallel_do_op.cc b/paddle/operators/parallel_do_op.cc index d662878592a..87678decde1 100644 --- a/paddle/operators/parallel_do_op.cc +++ b/paddle/operators/parallel_do_op.cc @@ -79,7 +79,7 @@ inline void CopyOrShare(const framework::Variable &src, } else { Copy(src.Get(), dst_place, dst->GetMutable()); } - dst->set_lod(src.lod()); + dst->GetMutable()->set_lod(src.Get().lod()); } else if (src.IsType()) { auto &src_sr = src.Get(); auto *dst_sr = dst->GetMutable(); -- GitLab From 93734a79138945e6a603b1c9b28ea8cb1b32569e Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Mon, 5 Feb 2018 19:01:26 +0800 Subject: [PATCH 037/138] fix bug --- paddle/operators/prior_box_op.cc | 69 ++++++++++++++++++-------------- 1 file changed, 40 insertions(+), 29 deletions(-) diff --git a/paddle/operators/prior_box_op.cc b/paddle/operators/prior_box_op.cc index 105ff4ac3e3..f35273bf41a 100644 --- a/paddle/operators/prior_box_op.cc +++ b/paddle/operators/prior_box_op.cc @@ -44,12 +44,6 @@ class PriorBoxOp : public framework::OperatorWithKernel { auto aspect_ratios = ctx->Attrs().Get>("aspect_ratios"); bool flip = ctx->Attrs().Get("flip"); - PADDLE_ENFORCE_GT(min_sizes.size(), 0, - "Size of min_sizes must be at least 1."); - for (size_t i = 0; i < min_sizes.size(); ++i) { - PADDLE_ENFORCE_GT(min_sizes[i], 0, "min_sizes[%d] must be positive.", i); - } - std::vector aspect_ratios_vec; ExpandAspectRatios(aspect_ratios, flip, aspect_ratios_vec); @@ -65,17 +59,6 @@ class PriorBoxOp : public framework::OperatorWithKernel { } } - PADDLE_ENFORCE_EQ(variances.size(), 4, "Must and only provide 4 variance."); - for (size_t i = 0; i < variances.size(); ++i) { - PADDLE_ENFORCE_GT(variances[i], 0.0, - "variance[%d] must be greater than 0.", i); - } - - const float step_h = ctx->Attrs().Get("step_h"); - PADDLE_ENFORCE_GT(step_h, 0.0, "step_h should be larger than 0."); - const float step_w = ctx->Attrs().Get("step_w"); - PADDLE_ENFORCE_GT(step_w, 0.0, "step_w should be larger than 0."); - std::vector dim_vec(4); dim_vec[0] = input_dims[2]; dim_vec[1] = input_dims[3]; @@ -106,26 +89,54 @@ class PriorBoxOpMaker : public framework::OpProtoAndCheckerMaker { "PriorBoxOp. The layout is [H, W, num_priors, 4]. " "H is the height of input, W is the width of input, num_priors " "is the box count of each position."); - AddAttr>("min_sizes", "(vector) ", - "List of min sizes of generated prior boxes."); - AddAttr>("max_sizes", "(vector) ", - "List of max sizes of generated prior boxes."); + + AddAttr>("min_sizes", + "(vector) List of min sizes " + "of generated prior boxes.") + .AddCustomChecker([](const std::vector& min_sizes) { + PADDLE_ENFORCE_GT(min_sizes.size(), 0, + "Size of min_sizes must be at least 1."); + for (size_t i = 0; i < min_sizes.size(); ++i) { + PADDLE_ENFORCE_GT(min_sizes[i], 0, + "min_sizes[%d] must be positive.", i); + } + }); + AddAttr>( + "max_sizes", + "(vector) List of max sizes of generated prior boxes."); AddAttr>( - "aspect_ratios", "(vector) ", - "List of aspect ratios of generated prior boxes."); + "aspect_ratios", + "(vector) List of aspect ratios of generated prior boxes."); + AddAttr>( - "variances", "(vector) ", - "List of variances to be encoded in prior boxes."); - AddAttr("flip", "(bool) ", "Whether to flip aspect ratios.") + "variances", + "(vector) List of variances to be encoded in prior boxes.") + .AddCustomChecker([](const std::vector& variances) { + PADDLE_ENFORCE_EQ(variances.size(), 4, + "Must and only provide 4 variance."); + for (size_t i = 0; i < variances.size(); ++i) { + PADDLE_ENFORCE_GT(variances[i], 0.0, + "variance[%d] must be greater than 0.", i); + } + }); + AddAttr("flip", "(bool) Whether to flip aspect ratios.") .SetDefault(true); - AddAttr("clip", "(bool) ", "Whether to clip out-of-boundary boxes.") + AddAttr("clip", "(bool) Whether to clip out-of-boundary boxes.") .SetDefault(true); + AddAttr("step_w", "Prior boxes step across width, 0 for auto calculation.") - .SetDefault(0.0); + .SetDefault(0.0) + .AddCustomChecker([](const float& step_w) { + PADDLE_ENFORCE_GT(step_w, 0.0, "step_h should be larger than 0."); + }); AddAttr("step_h", "Prior boxes step across height, 0 for auto calculation.") - .SetDefault(0.0); + .SetDefault(0.0) + .AddCustomChecker([](const float& step_h) { + PADDLE_ENFORCE_GT(step_h, 0.0, "step_h should be larger than 0."); + }); + AddAttr("offset", "(float) " "Prior boxes center offset.") -- GitLab From d7a371cbf25f4dcc5dcbfbf0a043e6dc98ae322a Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Mon, 5 Feb 2018 19:51:42 +0800 Subject: [PATCH 038/138] follow comments --- paddle/operators/prior_box_op.cc | 2 +- paddle/operators/prior_box_op.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/paddle/operators/prior_box_op.cc b/paddle/operators/prior_box_op.cc index f35273bf41a..1dc4b288559 100644 --- a/paddle/operators/prior_box_op.cc +++ b/paddle/operators/prior_box_op.cc @@ -128,7 +128,7 @@ class PriorBoxOpMaker : public framework::OpProtoAndCheckerMaker { "Prior boxes step across width, 0 for auto calculation.") .SetDefault(0.0) .AddCustomChecker([](const float& step_w) { - PADDLE_ENFORCE_GT(step_w, 0.0, "step_h should be larger than 0."); + PADDLE_ENFORCE_GT(step_w, 0.0, "step_w should be larger than 0."); }); AddAttr("step_h", "Prior boxes step across height, 0 for auto calculation.") diff --git a/paddle/operators/prior_box_op.h b/paddle/operators/prior_box_op.h index e0a663ace8f..12ff1623560 100644 --- a/paddle/operators/prior_box_op.h +++ b/paddle/operators/prior_box_op.h @@ -25,7 +25,7 @@ inline void ExpandAspectRatios(const std::vector& input_aspect_ratior, std::vector& output_aspect_ratior) { constexpr float epsilon = 1e-6; output_aspect_ratior.clear(); - output_aspect_ratior.push_back(1.); + output_aspect_ratior.push_back(1.0f); for (size_t i = 0; i < input_aspect_ratior.size(); ++i) { float ar = input_aspect_ratior[i]; bool already_exist = false; @@ -38,7 +38,7 @@ inline void ExpandAspectRatios(const std::vector& input_aspect_ratior, if (!already_exist) { output_aspect_ratior.push_back(ar); if (flip) { - output_aspect_ratior.push_back(1. / ar); + output_aspect_ratior.push_back(1.0f / ar); } } } -- GitLab From f367ad6c6cae825c46b7262c77fa0cf6f8394796 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Mon, 5 Feb 2018 20:03:50 +0800 Subject: [PATCH 039/138] add "inline" for ClipFunctor and refine code --- paddle/operators/prior_box_op.h | 39 ++++++++++++++++++++++----------- 1 file changed, 26 insertions(+), 13 deletions(-) diff --git a/paddle/operators/prior_box_op.h b/paddle/operators/prior_box_op.h index 12ff1623560..6b221cb74eb 100644 --- a/paddle/operators/prior_box_op.h +++ b/paddle/operators/prior_box_op.h @@ -46,7 +46,7 @@ inline void ExpandAspectRatios(const std::vector& input_aspect_ratior, template struct ClipFunctor { - HOSTDEVICE T operator()(T in) const { + HOSTDEVICE inline T operator()(T in) const { return std::min(std::max(in, 0.), 1.); } }; @@ -97,6 +97,9 @@ class PriorBoxOpKernel : public framework::OpKernel { boxes->mutable_data(ctx.GetPlace()); vars->mutable_data(ctx.GetPlace()); + T inv_img_width = 1.0 / img_width; + T inv_img_height = 1.0 / img_height; + auto e_boxes = framework::EigenTensor::From(*boxes); for (int h = 0; h < feature_height; ++h) { for (int w = 0; w < feature_width; ++w) { @@ -109,13 +112,15 @@ class PriorBoxOpKernel : public framework::OpKernel { // first prior: aspect_ratio = 1, size = min_size box_width = box_height = min_size; // xmin - e_boxes(h, w, idx, 0) = (center_x - box_width / 2.) / img_width; + e_boxes(h, w, idx, 0) = (center_x - box_width * 0.5) * inv_img_width; // ymin - e_boxes(h, w, idx, 1) = (center_y - box_height / 2.) / img_height; + e_boxes(h, w, idx, 1) = + (center_y - box_height * 0.5) * inv_img_height; // xmax - e_boxes(h, w, idx, 2) = (center_x + box_width / 2.) / img_width; + e_boxes(h, w, idx, 2) = (center_x + box_width * 0.5) * inv_img_width; // ymax - e_boxes(h, w, idx, 3) = (center_y + box_height / 2.) / img_height; + e_boxes(h, w, idx, 3) = + (center_y + box_height * 0.5) * inv_img_height; idx++; if (max_sizes.size() > 0) { @@ -124,13 +129,17 @@ class PriorBoxOpKernel : public framework::OpKernel { // size = sqrt(min_size * max_size) box_width = box_height = sqrt(min_size * max_size); // xmin - e_boxes(h, w, idx, 0) = (center_x - box_width / 2.) / img_width; + e_boxes(h, w, idx, 0) = + (center_x - box_width * 0.5) * inv_img_width; // ymin - e_boxes(h, w, idx, 1) = (center_y - box_height / 2.) / img_height; + e_boxes(h, w, idx, 1) = + (center_y - box_height * 0.5) * inv_img_height; // xmax - e_boxes(h, w, idx, 2) = (center_x + box_width / 2.) / img_width; + e_boxes(h, w, idx, 2) = + (center_x + box_width * 0.5) * inv_img_width; // ymax - e_boxes(h, w, idx, 3) = (center_y + box_height / 2.) / img_height; + e_boxes(h, w, idx, 3) = + (center_y + box_height * 0.5) * inv_img_height; idx++; } @@ -143,13 +152,17 @@ class PriorBoxOpKernel : public framework::OpKernel { box_width = min_size * sqrt(ar); box_height = min_size / sqrt(ar); // xmin - e_boxes(h, w, idx, 0) = (center_x - box_width / 2.) / img_width; + e_boxes(h, w, idx, 0) = + (center_x - box_width * 0.5) * inv_img_width; // ymin - e_boxes(h, w, idx, 1) = (center_y - box_height / 2.) / img_height; + e_boxes(h, w, idx, 1) = + (center_y - box_height * 0.5) * inv_img_height; // xmax - e_boxes(h, w, idx, 2) = (center_x + box_width / 2.) / img_width; + e_boxes(h, w, idx, 2) = + (center_x + box_width * 0.5) * inv_img_width; // ymax - e_boxes(h, w, idx, 3) = (center_y + box_height / 2.) / img_height; + e_boxes(h, w, idx, 3) = + (center_y + box_height * 0.5) * inv_img_height; idx++; } } -- GitLab From e9e24249217c1b234a9ce8f8d0d9c1e6e18fd2d3 Mon Sep 17 00:00:00 2001 From: qingqing01 Date: Mon, 5 Feb 2018 21:38:53 +0800 Subject: [PATCH 040/138] Fix warnings in multiclass_nms_op.cc. --- paddle/operators/multiclass_nms_op.cc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/paddle/operators/multiclass_nms_op.cc b/paddle/operators/multiclass_nms_op.cc index 8a65fe69f15..41b9335fb8f 100644 --- a/paddle/operators/multiclass_nms_op.cc +++ b/paddle/operators/multiclass_nms_op.cc @@ -85,7 +85,7 @@ static inline void GetMaxScoreIndex( std::stable_sort(sorted_indices->begin(), sorted_indices->end(), SortScorePairDescend); // Keep top_k scores if needed. - if (top_k > -1 && top_k < sorted_indices->size()) { + if (top_k > -1 && top_k < static_cast(sorted_indices->size())) { sorted_indices->resize(top_k); } } @@ -151,7 +151,7 @@ class MultiClassNMSKernel : public framework::OpKernel { while (sorted_indices.size() != 0) { const int idx = sorted_indices.front().second; bool keep = true; - for (int k = 0; k < selected_indices->size(); ++k) { + for (size_t k = 0; k < selected_indices->size(); ++k) { if (keep) { const int kept_idx = (*selected_indices)[k]; T overlap = JaccardOverlap(bbox_data + idx * box_size, @@ -201,7 +201,7 @@ class MultiClassNMSKernel : public framework::OpKernel { int label = it.first; const T* sdata = scores_data + label * predict_dim; const std::vector& label_indices = it.second; - for (int j = 0; j < label_indices.size(); ++j) { + for (size_t j = 0; j < label_indices.size(); ++j) { int idx = label_indices[j]; PADDLE_ENFORCE_LT(idx, predict_dim); score_index_pairs.push_back( @@ -215,7 +215,7 @@ class MultiClassNMSKernel : public framework::OpKernel { // Store the new indices. std::map> new_indices; - for (int j = 0; j < score_index_pairs.size(); ++j) { + for (size_t j = 0; j < score_index_pairs.size(); ++j) { int label = score_index_pairs[j].second.first; int idx = score_index_pairs[j].second.second; new_indices[label].push_back(idx); @@ -238,7 +238,7 @@ class MultiClassNMSKernel : public framework::OpKernel { int label = it.first; const T* sdata = scores_data + label * predict_dim; const std::vector& indices = it.second; - for (int j = 0; j < indices.size(); ++j) { + for (size_t j = 0; j < indices.size(); ++j) { int idx = indices[j]; const T* bdata = bboxes_data + idx * kBBoxSize; odata[count * kOutputDim] = label; // label -- GitLab From 497a131e53316fc3d81cf92e68845d2fd33243e3 Mon Sep 17 00:00:00 2001 From: kavyasrinet Date: Mon, 5 Feb 2018 10:45:43 -0800 Subject: [PATCH 041/138] Proposing Python syntax for send and recv in design doc (#8093) * Adding send and recv in design doc * fix typo * fixed code * Adding threading --- doc/design/csp.md | 76 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 73 insertions(+), 3 deletions(-) diff --git a/doc/design/csp.md b/doc/design/csp.md index ba9cacfdea7..2f6ce8d6fa8 100644 --- a/doc/design/csp.md +++ b/doc/design/csp.md @@ -71,14 +71,14 @@ ch1 := make(chan int, 100) // a channel that can buffer 100 ints. In Fluid, we should be able to do the same: ```python -ch = fluid.make_chan(dtype=INT) -ch1 = fluid.make_chan(dtype=INT, 100) +ch = fluid.make_channel(dtype=INT) +ch1 = fluid.make_channel(dtype=INT, 100) ``` In addition to that, we want channels that can hold more complex element types, e.g., Tensors of float16: ```python -ch = fluid.make_chan(dtype=Tensor, etype=float16) +ch = fluid.make_channel(dtype=Tensor, etype=float16) ``` or Tensors of Tensors of float16 etc. @@ -87,6 +87,76 @@ The point here is that we need a consistent way to compose types, like in C++ we ### Send and Recv +In Go, we first create a channel as explained in the section above and then perform read and write operations on top of the channels. + +```go +ch1 := make(chan int) +ch2 := make(chan int, 100) +``` + +To write (or perform a `Send` operation) the value of a variable `x`, to channel `ch1` above, we perform the following: + +```go +ch1 <- x +fmt.Println("Written to the channel") +``` +Now to read (or perform a `Recv` operation) the value stored in `ch2` into a variable `y`, we perform the following: + +```go +y <- ch2 +fmt.Println("Received from channel") +``` + +In Fluid, we should be able to perform the above operations on the channel objects as well. As of now, we support two different kinds of channels : [Buffered Channel](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/details/buffered_channel.h) and [UnBuffered Channel](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/details/unbuffered_channel.h) + +Send and Receive can be performed as following on a buffered channel: + +```python +import threading + +def send_to_channel(channel, num_time=1): + for i in xrange(num_time): + channel.send(i) + +# Create a buffered channel of capacity 10 +buffer_size = 10; +ch = fluid.make_channel(dtype=INT, buffer_size) + +# Now write three elements to the channel +thread = threading.Thread(target=send_to_channel, args=(ch, 3, )) +thread.daemon = True +thread.start() + +# Read all the data from the channel +for i in xrange(3): + y = ch.recv() + +# Done receiving , now close the channel +ch.close() +``` + +The send and receive operations will be similar for unbuffered channel as well, except for the fact that there is no buffer in an unbuffered channel, so the operations are completely synchronized. For example: + +```python +import threading + +def send_to_channel(channel, data): + channel.send(data) + +# Create an unbuffered channel +ch = fluid.make_channel(dtype=INT) + +# Writes and Reads are synchronous otherwise the calls will block. +thread = threading.Thread(target=send_to_channel, args=(ch, 10, )) +thread.daemon = True +thread.start() + +y = ch.recv() + +# Done receiving , now close the channel +ch.close() +``` + ### Select ## Example Programs -- GitLab From 1ead6c2691be09f34303c06d119c17ba4e4aeab7 Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Mon, 5 Feb 2018 11:06:02 -0800 Subject: [PATCH 042/138] Add proposed fluid syntax for select statement in Fluid's implementation of CSP (#7908) * Add proposed fluid syntax for select statement in Fluid's implementation of CSP * Fix Typo --- doc/design/csp.md | 49 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/doc/design/csp.md b/doc/design/csp.md index 2f6ce8d6fa8..36422d8236f 100644 --- a/doc/design/csp.md +++ b/doc/design/csp.md @@ -159,6 +159,55 @@ ch.close() ### Select +In Go, the `select` statement lets a goroutine wait on multiple communication operations. A `select` blocks untill one of its cases can run, then it executes that case. It chooses one at random if multiple are ready. + +```go + +ch1 := make(chan int) +ch2 := make(chan int, 100) + +x := 0 + +for { + select { + case ch1 <- x: + x := x + 1 + case y <- ch2: + fmt.Println("Received on channel") + default: + fmt.Println("Default") + } + } + +``` + +In Fluid, we should be able to do the same: + +```python +ch1 = fluid.make_chan(dtype=INT) +ch2 = fluid.make_chan(dtype=INT, 100) + +sel = fluid.select() + +with sel.case(ch1, 'w', X): + fluid.layers.increment(X) + +with sel.case(ch2, 'r', Y): + fluid.print("Received on Channel") + +with sel.default(): + fluid.print("Default") + +``` + +In the above code snippet, `X` and `Y` are variables. Now let us look at each of these statements one by one. + +- `sel.case(ch1, 'w', X)` : This specifies that we are writing to `ch1` and we want to write the integer in variable `X` to the channel. The character `w` is used here to make the syntax familar to write syntax in Python I/O. + +- `sel.case(ch2, 'r', Y)` : This specifies that we would like to read the result from `ch2` into variable `Y`. The character `r` is used here to make the syntax familar to read syntax in Python I/O. + +- `sel.default()` : This is equivalent to the default in Go `select`. If none of the channels are ready for read or write, then the fluid code in the default block will be executed. + ## Example Programs ### 1. RPC between Trainers and Parameter Servers -- GitLab From 6f0e630c5ce67bef5e87e26441c60870d1ab207e Mon Sep 17 00:00:00 2001 From: Kexin Zhao Date: Mon, 5 Feb 2018 13:25:20 -0800 Subject: [PATCH 043/138] fix prune and program desc constructor --- paddle/framework/block_desc.cc | 2 ++ paddle/framework/op_desc.cc | 17 +++++++-- paddle/framework/program_desc.cc | 18 ++++++++++ paddle/framework/prune.cc | 36 ++++++++++++------- python/paddle/v2/fluid/io.py | 6 ++++ .../tests/book/test_rnn_encoder_decoder.py | 31 ++++++++++------ 6 files changed, 85 insertions(+), 25 deletions(-) diff --git a/paddle/framework/block_desc.cc b/paddle/framework/block_desc.cc index dd2ed872521..ca3d03e5541 100644 --- a/paddle/framework/block_desc.cc +++ b/paddle/framework/block_desc.cc @@ -155,6 +155,8 @@ BlockDesc::BlockDesc(ProgramDesc *prog, proto::BlockDesc *desc) for (const proto::OpDesc &op_desc : desc_->ops()) { ops_.emplace_back(new OpDesc(op_desc, prog, this)); } + std::cout << "Constructed block idx " << desc->idx() << " from protobuf str" + << std::endl; } BlockDesc::BlockDesc(const BlockDesc &other, proto::BlockDesc *desc, diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index f8df2cf97ad..5ebd2b3ad5e 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -124,11 +124,24 @@ OpDesc::OpDesc(const proto::OpDesc &desc, ProgramDesc *prog, BlockDesc *block) // restore attrs_ for (const proto::OpDesc::Attr &attr : desc_.attrs()) { std::string attr_name = attr.name(); + // we use a trick to handle attr.type() is BLOCK here, because at this + // moment the sub_block hasn't beed added to ProgramDesc's vector + // so we cast the block_idx to a dummy BlockDesc pointer if (attr.type() != proto::AttrType::BLOCK) { attrs_[attr_name] = GetAttrValue(attr); } else { - auto bid = attr.block_idx(); - attrs_[attr_name] = prog->MutableBlock(bid); + size_t blk_idx = attr.block_idx(); + if (blk_idx < prog->Size()) { + attrs_[attr_name] = prog->MutableBlock(blk_idx); + } else { + std::cout << "Setting blockdesc attribute for id " << blk_idx + << std::endl; + attrs_[attr_name] = reinterpret_cast(blk_idx); + std::cout << "Testing reinterpret_cast result is " + << reinterpret_cast( + boost::get(attrs_[attr_name])) + << std::endl; + } } } this->block_ = block; diff --git a/paddle/framework/program_desc.cc b/paddle/framework/program_desc.cc index 15ea4035c6e..9124607623b 100644 --- a/paddle/framework/program_desc.cc +++ b/paddle/framework/program_desc.cc @@ -52,9 +52,27 @@ ProgramDesc::ProgramDesc(const ProgramDesc &o) { ProgramDesc::ProgramDesc(const proto::ProgramDesc &desc) { desc_ = desc; + std::cout << std::endl << "starting in ProgDesc constructor" << std::endl; for (auto &block_desc : *desc_.mutable_blocks()) { blocks_.emplace_back(new BlockDesc(this, &block_desc)); + std::cout << "Done constructing block idx " << block_desc.idx() + << " parent idx " << block_desc.parent_idx() << std::endl; } + for (auto &block : blocks_) { + for (auto *op : block->AllOps()) { + for (auto &name : op->AttrNames()) { + if (op->GetAttrType(name) == proto::AttrType::BLOCK) { + auto attr = op->GetAttr(name); + size_t blk_idx = + reinterpret_cast(boost::get(attr)); + op->SetBlockAttr(name, *this->MutableBlock(blk_idx)); + std::cout << "Update attr name " << name << " for block idx " + << blk_idx << std::endl; + } + } + } + } + std::cout << "Done ProgDesc construction" << std::endl << std::endl; } ProgramDesc::ProgramDesc(const std::string &binary_str) { diff --git a/paddle/framework/prune.cc b/paddle/framework/prune.cc index 6a3882f199e..3c3ec875851 100644 --- a/paddle/framework/prune.cc +++ b/paddle/framework/prune.cc @@ -109,15 +109,14 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, // we reverse the should_run vector std::reverse(should_run.begin(), should_run.end()); - //*output = input; // copy the current block from input to output auto* block_field = output->mutable_blocks(); *block_field->Add() = input.blocks(block_id); int output_block_id = output->blocks_size() - 1; auto* output_block = output->mutable_blocks(output_block_id); - output_block->set_idx = output_block_id; - output_block->set_parent_idx = parent_block_id; + output_block->set_idx(output_block_id); + output_block->set_parent_idx(parent_block_id); auto* op_field = output_block->mutable_ops(); op_field->Clear(); @@ -128,17 +127,18 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, if (HasSubBlock(*op)) { // create sub_block_dependent_vars here to help prune the sub block std::set sub_block_dependent_vars; - for (auto& var : op.inputs()) { + for (auto& var : op->inputs()) { for (auto& argu : var.arguments()) { sub_block_dependent_vars.insert(argu); } } - for (auto& var : op.outputs()) { + for (auto& var : op->outputs()) { for (auto& argu : var.arguments()) { sub_block_dependent_vars.insert(argu); } } - + std::cout << "pruning the next block, the current output_block_id is " + << output_block_id << std::endl; // GetSubBlockIndex(*op) is the idx of the sub_block in the input desc // output_block_id is the idx of the current block in the output desc prune_impl(input, output, GetSubBlockIndex(*op), output_block_id, @@ -147,6 +147,8 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, } } + std::cout << "Starting to remove unreferenced variables" + << " for block idx " << output_block_id << std::endl; // remove the VarDescs in BlockDesc that are not referenced in // the pruned OpDescs std::unordered_map var_map; @@ -155,28 +157,38 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, var_map[var.name()] = var; } - var_field->Clear(); + std::set var_names; for (const auto& op : *op_field) { - // add VarDescs of all input arguments for each OpDesc auto& input_field = op.inputs(); for (auto& input_var : input_field) { for (auto& arg : input_var.arguments()) { - *var_field->Add() = var_map.at(arg); + if (var_map.count(arg) != 0) { + var_names.insert(arg); + } } } - // add VarDescs of all output arguments for each OpDesc auto& output_field = op.outputs(); for (auto& output_var : output_field) { for (auto& arg : output_var.arguments()) { - *var_field->Add() = var_map.at(arg); + if (var_map.count(arg) != 0) { + var_names.insert(arg); + } } } } + + var_field->Clear(); + for (const auto& name : var_names) { + *var_field->Add() = var_map[name]; + } } // TODO(fengjiayi): Prune() could be inplaced to avoid unnecessary copies void Prune(const proto::ProgramDesc& input, proto::ProgramDesc* output) { - prune_impl(input, output, 0, -1, {}); + std::set dependent_vars; + std::cout << std::endl << "Start C++ framework::prune" << std::endl; + prune_impl(input, output, 0, -1, dependent_vars); + std::cout << "Finished C++ framework::prune" << std::endl << std::endl; } void inference_optimize_impl(const proto::ProgramDesc& input, diff --git a/python/paddle/v2/fluid/io.py b/python/paddle/v2/fluid/io.py index 613dc20b6ea..e410549f8a5 100644 --- a/python/paddle/v2/fluid/io.py +++ b/python/paddle/v2/fluid/io.py @@ -342,6 +342,12 @@ def save_inference_model(dirname, prepend_feed_ops(inference_program, feeded_var_names) append_fetch_ops(inference_program, fetch_var_names) + # save for checking + curstr = inference_program.to_string(True) + f = open("save_inf_prog_after_feed_fetch.txt", 'w') + f.write(curstr) + f.close() + model_file_name = dirname + "/__model__" with open(model_file_name, "wb") as f: f.write(inference_program.desc.serialize_to_string()) diff --git a/python/paddle/v2/fluid/tests/book/test_rnn_encoder_decoder.py b/python/paddle/v2/fluid/tests/book/test_rnn_encoder_decoder.py index 593d0013c9d..15f00f95d40 100644 --- a/python/paddle/v2/fluid/tests/book/test_rnn_encoder_decoder.py +++ b/python/paddle/v2/fluid/tests/book/test_rnn_encoder_decoder.py @@ -197,14 +197,15 @@ def train(save_dirname=None): " avg_cost=" + str(avg_cost_val)) if batch_id > 3: if save_dirname is not None: - fluid.io.save_inference_model(save_dirname, [ - 'source_sequence', 'target_sequence', 'label_sequence' - ], [prediction], exe) + fluid.io.save_inference_model( + save_dirname, ['source_sequence', + 'target_sequence'], [prediction], exe) + return exit(0) batch_id += 1 -def inference(save_dirname=None): +def infer(save_dirname=None): if save_dirname is None: return @@ -221,24 +222,32 @@ def inference(save_dirname=None): data = [[0, 1, 0, 1], [0, 1, 1, 0, 0, 1]] word_data = to_lodtensor(data, place) trg_word = to_lodtensor(data, place) - trg_word_next = to_lodtensor(data, place) # Construct feed as a dictionary of {feed_target_name: feed_target_data} # and results will contain a list of data corresponding to fetch_targets. + print("Print feed fetch target names as follows") print(feed_target_names) assert feed_target_names[0] == 'source_sequence' assert feed_target_names[1] == 'target_sequence' - assert feed_target_names[2] == 'label_sequence' + print([var.name for var in fetch_targets]) + + # save for checking + curstr = inference_program.to_string(True) + f = open("loaded_infer_prog.txt", 'w') + f.write(curstr) + f.close() + results = exe.run(inference_program, feed={ feed_target_names[0]: word_data, feed_target_names[1]: trg_word, - feed_target_names[2]: trg_word_next }, - fetch_list=fetch_targets) - - print("Inference Shape: ", results[0].shape) - print("infer results: ", results[0]) + fetch_list=fetch_targets, + return_numpy=False) + print(results[0].lod()) + np_data = np.array(results[0]) + print("Inference shape: ", np_data.shape) + print("Inference results: ", np_data) if __name__ == '__main__': -- GitLab From dc68e7c44b198acbdf588e97d219822602dc90db Mon Sep 17 00:00:00 2001 From: Kexin Zhao Date: Mon, 5 Feb 2018 15:36:54 -0800 Subject: [PATCH 044/138] fix constructor bug --- paddle/framework/op_desc.cc | 9 +++------ paddle/framework/program_desc.cc | 26 ++++++++++++++++++-------- 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index 5ebd2b3ad5e..7859c391fa3 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -133,13 +133,10 @@ OpDesc::OpDesc(const proto::OpDesc &desc, ProgramDesc *prog, BlockDesc *block) size_t blk_idx = attr.block_idx(); if (blk_idx < prog->Size()) { attrs_[attr_name] = prog->MutableBlock(blk_idx); - } else { - std::cout << "Setting blockdesc attribute for id " << blk_idx + std::cout << "In OpDesc: set up attr block idx " << blk_idx << std::endl; - attrs_[attr_name] = reinterpret_cast(blk_idx); - std::cout << "Testing reinterpret_cast result is " - << reinterpret_cast( - boost::get(attrs_[attr_name])) + } else { + std::cout << "In OpDesc: We don't have this block idx " << blk_idx << std::endl; } } diff --git a/paddle/framework/program_desc.cc b/paddle/framework/program_desc.cc index 9124607623b..ba461b09339 100644 --- a/paddle/framework/program_desc.cc +++ b/paddle/framework/program_desc.cc @@ -48,6 +48,18 @@ ProgramDesc::ProgramDesc(const ProgramDesc &o) { auto *block = desc_.mutable_blocks(i); blocks_.emplace_back(new BlockDesc(*o.blocks_[i], block, this)); } + for (auto &block : blocks_) { + for (auto *op : block->AllOps()) { + for (const auto &attr : op->Proto()->attrs()) { + if (attr.type() == proto::AttrType::BLOCK) { + size_t blk_idx = attr.block_idx(); + op->SetBlockAttr(attr.name(), *this->MutableBlock(blk_idx)); + std::cout << "In ProgramDesc 1: set block attr idx " << blk_idx + << std::endl; + } + } + } + } } ProgramDesc::ProgramDesc(const proto::ProgramDesc &desc) { @@ -60,14 +72,12 @@ ProgramDesc::ProgramDesc(const proto::ProgramDesc &desc) { } for (auto &block : blocks_) { for (auto *op : block->AllOps()) { - for (auto &name : op->AttrNames()) { - if (op->GetAttrType(name) == proto::AttrType::BLOCK) { - auto attr = op->GetAttr(name); - size_t blk_idx = - reinterpret_cast(boost::get(attr)); - op->SetBlockAttr(name, *this->MutableBlock(blk_idx)); - std::cout << "Update attr name " << name << " for block idx " - << blk_idx << std::endl; + for (const auto &attr : op->Proto()->attrs()) { + if (attr.type() == proto::AttrType::BLOCK) { + size_t blk_idx = attr.block_idx(); + op->SetBlockAttr(attr.name(), *this->MutableBlock(blk_idx)); + std::cout << "In ProgramDesc 2: set block attr idx " << blk_idx + << std::endl; } } } -- GitLab From b0ecb36583ed97737bd5c43cbafbdc8fa29cbd68 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Mon, 5 Feb 2018 17:11:11 -0800 Subject: [PATCH 045/138] Rewrite the Send/Recv part of csp.md (#8164) * Update csp.md * Update csp.md * Update csp.md --- doc/design/csp.md | 110 +++++++++++++++++++++++++--------------------- 1 file changed, 59 insertions(+), 51 deletions(-) diff --git a/doc/design/csp.md b/doc/design/csp.md index 36422d8236f..ae2e3e1b998 100644 --- a/doc/design/csp.md +++ b/doc/design/csp.md @@ -42,7 +42,7 @@ The type *channel* is conceptually the blocking queue. In Go, its implemented i The `select` operation has been in OS kernels long before Go language. All Unix kernels implement system calls *poll* and *select*. They monitor multiple file descriptors to see if I/O is possible on any of them. This takes O(N) time. Since Linux 2.6, a new system call, *epoll*, can do the same in O(1) time. In BSD systems, there is a similar system call *kqueue*. Go's Linux implementation uses epoll. -It might be a good idea to implement Fluid's select using epoll too. In this design doc, we start from the O(N) way, so we could focus on Python binding and the syntax. +It might be a good idea to implement Fluid's select using epoll too. In this design doc, we start from the O(N) way so that we could focus on Python binding and the syntax. ### Type Channel @@ -87,79 +87,87 @@ The point here is that we need a consistent way to compose types, like in C++ we ### Send and Recv -In Go, we first create a channel as explained in the section above and then perform read and write operations on top of the channels. +Go's CSP implementation depends on data type *channel*. There are two types of channels: -```go -ch1 := make(chan int) -ch2 := make(chan int, 100) -``` +1. The unblocked channel, or buffered channel, is a blocking queue with a non-zero sized buffer. The sending to buffered channel blocks if the buffer is full, and the receive operation blocks if the buffer is empty. +1. blocked channel, or unbuffered channel, is a blocking queue with no buffer. Both sending and receiving block with unbuffered channels. -To write (or perform a `Send` operation) the value of a variable `x`, to channel `ch1` above, we perform the following: +There are four types of actions with a channel: -```go -ch1 <- x -fmt.Println("Written to the channel") -``` -Now to read (or perform a `Recv` operation) the value stored in `ch2` into a variable `y`, we perform the following: +1. Create a channel -```go -y <- ch2 -fmt.Println("Received from channel") -``` + ```go + ch := make(chan int) // this is an unbuffered channel + ch := make(chan int, 100) // this is a buffered channel of 100 ints. + ``` -In Fluid, we should be able to perform the above operations on the channel objects as well. As of now, we support two different kinds of channels : [Buffered Channel](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/details/buffered_channel.h) and [UnBuffered Channel](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/details/unbuffered_channel.h) +1. Send -Send and Receive can be performed as following on a buffered channel: + ```go + ch <- 111 + ``` -```python -import threading +1. Recv -def send_to_channel(channel, num_time=1): - for i in xrange(num_time): - channel.send(i) + ```go + y, ok <- ch + ``` -# Create a buffered channel of capacity 10 -buffer_size = 10; -ch = fluid.make_channel(dtype=INT, buffer_size) +1. Close -# Now write three elements to the channel -thread = threading.Thread(target=send_to_channel, args=(ch, 3, )) -thread.daemon = True -thread.start() + ```go + close(ch) + ``` + + Please be aware that a closed channel is not a nil channel, which is `var ch chan int`. + +There are some [axioms with channels](https://dave.cheney.net/2014/03/19/channel-axioms): -# Read all the data from the channel -for i in xrange(3): - y = ch.recv() +1. A send to a nil channel blocks forever -# Done receiving , now close the channel -ch.close() -``` +1. A receive from a nil channel blocks forever + +1. A send to a closed channel panics + +1. A receive from a closed channel returns the residual values and then zeros. -The send and receive operations will be similar for unbuffered channel as well, except for the fact that there is no buffer in an unbuffered channel, so the operations are completely synchronized. For example: +In Fluid, we have [buffered channels](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/details/buffered_channel.h) and [unbuffered channels](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/details/unbuffered_channel.h) + +The following program illustrates the Python syntax for accessing Fluid buffers. ```python -import threading +import fluid + +buffer_size = 10 +ch = fluid.make_channel(dtype=INT, buffer_size) -def send_to_channel(channel, data): - channel.send(data) +# Now write three elements to the channel +with fluid.while(steps=buffer_size): + fluid.send(ch, step) + fluid.close_channel(ch) + +with fluid.while(steps=buffer_size): + fluid.print(fluid.recv(ch)) +``` + +The following example shows that to avoid the always-blocking behavior of unbuffered channels, we need to use Fluid's goroutines. + +```python +import fluid -# Create an unbuffered channel ch = fluid.make_channel(dtype=INT) -# Writes and Reads are synchronous otherwise the calls will block. -thread = threading.Thread(target=send_to_channel, args=(ch, 10, )) -thread.daemon = True -thread.start() +with fluid.go(): + fluid.send(ch) -y = ch.recv() +y = fluid.recv(ch) -# Done receiving , now close the channel -ch.close() +fluid.close_channel(ch) ``` ### Select -In Go, the `select` statement lets a goroutine wait on multiple communication operations. A `select` blocks untill one of its cases can run, then it executes that case. It chooses one at random if multiple are ready. +In Go, the `select` statement lets a goroutine wait on multiple communication operations. A `select` blocks until one of its cases can run, then it executes that case. It chooses one at random if multiple are ready. ```go @@ -202,9 +210,9 @@ with sel.default(): In the above code snippet, `X` and `Y` are variables. Now let us look at each of these statements one by one. -- `sel.case(ch1, 'w', X)` : This specifies that we are writing to `ch1` and we want to write the integer in variable `X` to the channel. The character `w` is used here to make the syntax familar to write syntax in Python I/O. +- `sel.case(ch1, 'w', X)` : This specifies that we are writing to `ch1` and we want to write the integer in variable `X` to the channel. The character `w` is used here to make the syntax familiar to write syntax in Python I/O. -- `sel.case(ch2, 'r', Y)` : This specifies that we would like to read the result from `ch2` into variable `Y`. The character `r` is used here to make the syntax familar to read syntax in Python I/O. +- `sel.case(ch2, 'r', Y)` : This specifies that we would like to read the result from `ch2` into variable `Y`. The character `r` is used here to make the syntax familiar to read syntax in Python I/O. - `sel.default()` : This is equivalent to the default in Go `select`. If none of the channels are ready for read or write, then the fluid code in the default block will be executed. -- GitLab From 165450ff6ca5bc0f02ffe63ec11f50ed4c240f09 Mon Sep 17 00:00:00 2001 From: Yiqun Liu Date: Tue, 6 Feb 2018 09:52:18 +0800 Subject: [PATCH 046/138] Refine the inference unittest recognize_digits. (#8147) --- .../book/test_inference_recognize_digits.cc | 63 ++++++++++++++----- .../fluid/tests/book/test_recognize_digits.py | 4 +- 2 files changed, 49 insertions(+), 18 deletions(-) diff --git a/paddle/inference/tests/book/test_inference_recognize_digits.cc b/paddle/inference/tests/book/test_inference_recognize_digits.cc index 26dc2aee042..ce8772587f3 100644 --- a/paddle/inference/tests/book/test_inference_recognize_digits.cc +++ b/paddle/inference/tests/book/test_inference_recognize_digits.cc @@ -58,6 +58,47 @@ void TestInference(const std::string& dirname, delete scope; } +template +void SetupTensor(paddle::framework::LoDTensor& input, + paddle::framework::DDim dims, + T lower, + T upper) { + srand(time(0)); + float* input_ptr = input.mutable_data(dims, paddle::platform::CPUPlace()); + for (int i = 0; i < input.numel(); ++i) { + input_ptr[i] = + (static_cast(rand()) / static_cast(RAND_MAX)) * (upper - lower) + + lower; + } +} + +template +void CheckError(paddle::framework::LoDTensor& output1, + paddle::framework::LoDTensor& output2) { + // Check lod information + EXPECT_EQ(output1.lod(), output2.lod()); + + EXPECT_EQ(output1.dims(), output2.dims()); + EXPECT_EQ(output1.numel(), output2.numel()); + + T err = static_cast(0); + if (typeid(T) == typeid(float)) { + err = 1E-3; + } else if (typeid(T) == typeid(double)) { + err = 1E-6; + } else { + err = 0; + } + + size_t count = 0; + for (int64_t i = 0; i < output1.numel(); ++i) { + if (fabs(output1.data()[i] - output2.data()[i]) > err) { + count++; + } + } + EXPECT_EQ(count, 0) << "There are " << count << " different elements."; +} + TEST(inference, recognize_digits) { if (FLAGS_dirname.empty()) { LOG(FATAL) << "Usage: ./example --dirname=path/to/your/model"; @@ -70,12 +111,10 @@ TEST(inference, recognize_digits) { // In unittests, this is done in paddle/testing/paddle_gtest_main.cc paddle::framework::LoDTensor input; - srand(time(0)); - float* input_ptr = - input.mutable_data({1, 28, 28}, paddle::platform::CPUPlace()); - for (int i = 0; i < 784; ++i) { - input_ptr[i] = rand() / (static_cast(RAND_MAX)); - } + // Use normilized image pixels as input data, + // which should be in the range [-1.0, 1.0]. + SetupTensor( + input, {1, 28, 28}, static_cast(-1), static_cast(1)); std::vector cpu_feeds; cpu_feeds.push_back(&input); @@ -98,16 +137,6 @@ TEST(inference, recognize_digits) { dirname, cpu_feeds, cpu_fetchs2); LOG(INFO) << output2.dims(); - EXPECT_EQ(output1.dims(), output2.dims()); - EXPECT_EQ(output1.numel(), output2.numel()); - - float err = 1E-3; - int count = 0; - for (int64_t i = 0; i < output1.numel(); ++i) { - if (fabs(output1.data()[i] - output2.data()[i]) > err) { - count++; - } - } - EXPECT_EQ(count, 0) << "There are " << count << " different elements."; + CheckError(output1, output2); #endif } diff --git a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py index b8f55c813b6..fb6b1f7192d 100644 --- a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py @@ -166,7 +166,9 @@ def infer(use_cuda, save_dirname=None): fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) # The input's dimension of conv should be 4-D or 5-D. - tensor_img = numpy.random.rand(1, 1, 28, 28).astype("float32") + # Use normilized image pixels as input data, which should be in the range [-1.0, 1.0]. + tensor_img = numpy.random.uniform(-1.0, 1.0, + [1, 1, 28, 28]).astype("float32") # Construct feed as a dictionary of {feed_target_name: feed_target_data} # and results will contain a list of data corresponding to fetch_targets. -- GitLab From 863cd9c766e30b487d88ddd0b797a3b59a421282 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Tue, 6 Feb 2018 09:54:14 +0800 Subject: [PATCH 047/138] Add comments to explain the empty result --- python/paddle/v2/fluid/layers/nn.py | 39 +++++++++++++++-------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index a79479f469a..2209625344e 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -410,12 +410,12 @@ def dynamic_lstmp(input, """ **Dynamic LSTMP Layer** - LSTMP (LSTM with recurrent projection) layer has a separate projection - layer after the LSTM layer, projecting the original hidden state to a - lower-dimensional one, which is proposed to reduce the number of total - parameters and furthermore computational complexity for the LSTM, - espeacially for the case that the size of output units is relative - large (https://research.google.com/pubs/archive/43905.pdf). + LSTMP (LSTM with recurrent projection) layer has a separate projection + layer after the LSTM layer, projecting the original hidden state to a + lower-dimensional one, which is proposed to reduce the number of total + parameters and furthermore computational complexity for the LSTM, + espeacially for the case that the size of output units is relative + large (https://research.google.com/pubs/archive/43905.pdf). The formula is as follows: @@ -441,27 +441,27 @@ def dynamic_lstmp(input, the matrix of weights from the input gate to the input). * :math:`W_{ic}`, :math:`W_{fc}`, :math:`W_{oc}`: Diagonal weight \ matrices for peephole connections. In our implementation, \ - we use vectors to reprenset these diagonal weight matrices. + we use vectors to reprenset these diagonal weight matrices. * :math:`b`: Denotes bias vectors (e.g. :math:`b_i` is the input gate \ - bias vector). + bias vector). * :math:`\sigma`: The activation, such as logistic sigmoid function. * :math:`i, f, o` and :math:`c`: The input gate, forget gate, output \ gate, and cell activation vectors, respectively, all of which have \ - the same size as the cell output activation vector :math:`h`. + the same size as the cell output activation vector :math:`h`. * :math:`h`: The hidden state. - * :math:`r`: The recurrent projection of the hidden state. + * :math:`r`: The recurrent projection of the hidden state. * :math:`\\tilde{c_t}`: The candidate hidden state, whose \ computation is based on the current input and previous hidden state. - * :math:`\odot`: The element-wise product of the vectors. + * :math:`\odot`: The element-wise product of the vectors. * :math:`act_g` and :math:`act_h`: The cell input and cell output \ - activation functions and `tanh` is usually used for them. + activation functions and `tanh` is usually used for them. * :math:`\overline{act_h}`: The activation function for the projection \ output, usually using `identity` or same as :math:`act_h`. Set `use_peepholes` to `False` to disable peephole connection. The formula is omitted here, please refer to the paper http://www.bioinf.jku.at/publications/older/2604.pdf for details. - + Note that these :math:`W_{xi}x_{t}, W_{xf}x_{t}, W_{xc}x_{t}, W_{xo}x_{t}` operations on the input :math:`x_{t}` are NOT included in this operator. Users can choose to use fully-connected layer before LSTMP layer. @@ -479,8 +479,8 @@ def dynamic_lstmp(input, - Hidden-hidden weight = {:math:`W_{ch}, W_{ih}, \ W_{fh}, W_{oh}`}. - - The shape of hidden-hidden weight is (P x 4D), - where P is the projection size and D the hidden + - The shape of hidden-hidden weight is (P x 4D), + where P is the projection size and D the hidden size. - Projection weight = {:math:`W_{rh}`}. - The shape of projection weight is (D x P). @@ -525,9 +525,9 @@ def dynamic_lstmp(input, hidden_dim, proj_dim = 512, 256 fc_out = fluid.layers.fc(input=input_seq, size=hidden_dim * 4, act=None, bias_attr=None) - proj_out, _ = fluid.layers.dynamic_lstmp(input=fc_out, - size=hidden_dim * 4, - proj_size=proj_dim, + proj_out, _ = fluid.layers.dynamic_lstmp(input=fc_out, + size=hidden_dim * 4, + proj_size=proj_dim, use_peepholes=False, is_reverse=True, cell_activation="tanh", @@ -2525,7 +2525,8 @@ def ctc_greedy_decoder(input, blank, name=None): interval [0, num_classes + 1). Returns: - Variable: CTC greedy decode result. + Variable: CTC greedy decode result. If all the sequences in result were + empty, the result LoDTensor will be [-1] with LoD [[0]] and dims [1]. Examples: .. code-block:: python -- GitLab From d5686f5831adea6bc9b0ceb94b81cd3f79270800 Mon Sep 17 00:00:00 2001 From: Kexin Zhao Date: Mon, 5 Feb 2018 18:00:07 -0800 Subject: [PATCH 048/138] clean code --- paddle/framework/block_desc.cc | 5 +---- paddle/framework/op_desc.cc | 15 ++------------- paddle/framework/program_desc.cc | 9 --------- paddle/framework/prune.cc | 6 ------ python/paddle/v2/fluid/io.py | 6 ------ .../fluid/tests/book/test_rnn_encoder_decoder.py | 10 ---------- 6 files changed, 3 insertions(+), 48 deletions(-) diff --git a/paddle/framework/block_desc.cc b/paddle/framework/block_desc.cc index ca3d03e5541..3e344ea3790 100644 --- a/paddle/framework/block_desc.cc +++ b/paddle/framework/block_desc.cc @@ -155,8 +155,6 @@ BlockDesc::BlockDesc(ProgramDesc *prog, proto::BlockDesc *desc) for (const proto::OpDesc &op_desc : desc_->ops()) { ops_.emplace_back(new OpDesc(op_desc, prog, this)); } - std::cout << "Constructed block idx " << desc->idx() << " from protobuf str" - << std::endl; } BlockDesc::BlockDesc(const BlockDesc &other, proto::BlockDesc *desc, @@ -164,9 +162,8 @@ BlockDesc::BlockDesc(const BlockDesc &other, proto::BlockDesc *desc, : prog_(prog), desc_(desc) { need_update_ = true; for (auto &op : other.ops_) { - ops_.emplace_back(new OpDesc(*op, this)); + ops_.emplace_back(new OpDesc(*op->Proto(), prog, this)); } - for (auto &it : other.vars_) { auto *var = new VarDesc(*it.second); vars_[it.first].reset(var); diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index 7859c391fa3..46c50d9250f 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -124,21 +124,10 @@ OpDesc::OpDesc(const proto::OpDesc &desc, ProgramDesc *prog, BlockDesc *block) // restore attrs_ for (const proto::OpDesc::Attr &attr : desc_.attrs()) { std::string attr_name = attr.name(); - // we use a trick to handle attr.type() is BLOCK here, because at this - // moment the sub_block hasn't beed added to ProgramDesc's vector - // so we cast the block_idx to a dummy BlockDesc pointer + // The sub_block referred to by the BLOCK attr hasn't be added + // to ProgramDesc class yet, we skip setting BLOCK attr here. if (attr.type() != proto::AttrType::BLOCK) { attrs_[attr_name] = GetAttrValue(attr); - } else { - size_t blk_idx = attr.block_idx(); - if (blk_idx < prog->Size()) { - attrs_[attr_name] = prog->MutableBlock(blk_idx); - std::cout << "In OpDesc: set up attr block idx " << blk_idx - << std::endl; - } else { - std::cout << "In OpDesc: We don't have this block idx " << blk_idx - << std::endl; - } } } this->block_ = block; diff --git a/paddle/framework/program_desc.cc b/paddle/framework/program_desc.cc index ba461b09339..0e937dda4e1 100644 --- a/paddle/framework/program_desc.cc +++ b/paddle/framework/program_desc.cc @@ -43,7 +43,6 @@ ProgramDesc::ProgramDesc() { ProgramDesc::ProgramDesc(const ProgramDesc &o) { desc_ = o.desc_; - for (int i = 0; i < desc_.blocks_size(); ++i) { auto *block = desc_.mutable_blocks(i); blocks_.emplace_back(new BlockDesc(*o.blocks_[i], block, this)); @@ -54,8 +53,6 @@ ProgramDesc::ProgramDesc(const ProgramDesc &o) { if (attr.type() == proto::AttrType::BLOCK) { size_t blk_idx = attr.block_idx(); op->SetBlockAttr(attr.name(), *this->MutableBlock(blk_idx)); - std::cout << "In ProgramDesc 1: set block attr idx " << blk_idx - << std::endl; } } } @@ -64,11 +61,8 @@ ProgramDesc::ProgramDesc(const ProgramDesc &o) { ProgramDesc::ProgramDesc(const proto::ProgramDesc &desc) { desc_ = desc; - std::cout << std::endl << "starting in ProgDesc constructor" << std::endl; for (auto &block_desc : *desc_.mutable_blocks()) { blocks_.emplace_back(new BlockDesc(this, &block_desc)); - std::cout << "Done constructing block idx " << block_desc.idx() - << " parent idx " << block_desc.parent_idx() << std::endl; } for (auto &block : blocks_) { for (auto *op : block->AllOps()) { @@ -76,13 +70,10 @@ ProgramDesc::ProgramDesc(const proto::ProgramDesc &desc) { if (attr.type() == proto::AttrType::BLOCK) { size_t blk_idx = attr.block_idx(); op->SetBlockAttr(attr.name(), *this->MutableBlock(blk_idx)); - std::cout << "In ProgramDesc 2: set block attr idx " << blk_idx - << std::endl; } } } } - std::cout << "Done ProgDesc construction" << std::endl << std::endl; } ProgramDesc::ProgramDesc(const std::string &binary_str) { diff --git a/paddle/framework/prune.cc b/paddle/framework/prune.cc index 3c3ec875851..00fe551e55e 100644 --- a/paddle/framework/prune.cc +++ b/paddle/framework/prune.cc @@ -137,8 +137,6 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, sub_block_dependent_vars.insert(argu); } } - std::cout << "pruning the next block, the current output_block_id is " - << output_block_id << std::endl; // GetSubBlockIndex(*op) is the idx of the sub_block in the input desc // output_block_id is the idx of the current block in the output desc prune_impl(input, output, GetSubBlockIndex(*op), output_block_id, @@ -147,8 +145,6 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, } } - std::cout << "Starting to remove unreferenced variables" - << " for block idx " << output_block_id << std::endl; // remove the VarDescs in BlockDesc that are not referenced in // the pruned OpDescs std::unordered_map var_map; @@ -186,9 +182,7 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, // TODO(fengjiayi): Prune() could be inplaced to avoid unnecessary copies void Prune(const proto::ProgramDesc& input, proto::ProgramDesc* output) { std::set dependent_vars; - std::cout << std::endl << "Start C++ framework::prune" << std::endl; prune_impl(input, output, 0, -1, dependent_vars); - std::cout << "Finished C++ framework::prune" << std::endl << std::endl; } void inference_optimize_impl(const proto::ProgramDesc& input, diff --git a/python/paddle/v2/fluid/io.py b/python/paddle/v2/fluid/io.py index e410549f8a5..613dc20b6ea 100644 --- a/python/paddle/v2/fluid/io.py +++ b/python/paddle/v2/fluid/io.py @@ -342,12 +342,6 @@ def save_inference_model(dirname, prepend_feed_ops(inference_program, feeded_var_names) append_fetch_ops(inference_program, fetch_var_names) - # save for checking - curstr = inference_program.to_string(True) - f = open("save_inf_prog_after_feed_fetch.txt", 'w') - f.write(curstr) - f.close() - model_file_name = dirname + "/__model__" with open(model_file_name, "wb") as f: f.write(inference_program.desc.serialize_to_string()) diff --git a/python/paddle/v2/fluid/tests/book/test_rnn_encoder_decoder.py b/python/paddle/v2/fluid/tests/book/test_rnn_encoder_decoder.py index 15f00f95d40..2211637b5b0 100644 --- a/python/paddle/v2/fluid/tests/book/test_rnn_encoder_decoder.py +++ b/python/paddle/v2/fluid/tests/book/test_rnn_encoder_decoder.py @@ -225,18 +225,8 @@ def infer(save_dirname=None): # Construct feed as a dictionary of {feed_target_name: feed_target_data} # and results will contain a list of data corresponding to fetch_targets. - print("Print feed fetch target names as follows") - print(feed_target_names) assert feed_target_names[0] == 'source_sequence' assert feed_target_names[1] == 'target_sequence' - print([var.name for var in fetch_targets]) - - # save for checking - curstr = inference_program.to_string(True) - f = open("loaded_infer_prog.txt", 'w') - f.write(curstr) - f.close() - results = exe.run(inference_program, feed={ feed_target_names[0]: word_data, -- GitLab From 9a1fa890a0c510ca1863eea358423bc89fd4fdef Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 6 Feb 2018 11:10:34 +0800 Subject: [PATCH 049/138] remove unnecessary comments --- python/paddle/v2/fluid/tests/test_protobuf_descs.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/python/paddle/v2/fluid/tests/test_protobuf_descs.py b/python/paddle/v2/fluid/tests/test_protobuf_descs.py index ac6de68b5fe..8f335d13db3 100644 --- a/python/paddle/v2/fluid/tests/test_protobuf_descs.py +++ b/python/paddle/v2/fluid/tests/test_protobuf_descs.py @@ -123,8 +123,6 @@ class TestVarDesc(unittest.TestCase): var.set_tensor_num(3) src_shapes = [[2, 3, 3], [4, 5], [6, 7, 8, 9]] var.set_shapes(src_shapes) - #import pdb - # pdb.set_trace() res_shapes = var.shapes() self.assertEqual(src_shapes, res_shapes) self.assertEqual(core.VarDesc.VarType.READER, var.type()) -- GitLab From 450c39a41301800f9b71e499d539ff7bbbd7414f Mon Sep 17 00:00:00 2001 From: Kexin Zhao Date: Mon, 5 Feb 2018 20:39:38 -0800 Subject: [PATCH 050/138] fix bug --- paddle/framework/prune.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/paddle/framework/prune.cc b/paddle/framework/prune.cc index 00fe551e55e..ddd6b993d40 100644 --- a/paddle/framework/prune.cc +++ b/paddle/framework/prune.cc @@ -182,6 +182,7 @@ void prune_impl(const proto::ProgramDesc& input, proto::ProgramDesc* output, // TODO(fengjiayi): Prune() could be inplaced to avoid unnecessary copies void Prune(const proto::ProgramDesc& input, proto::ProgramDesc* output) { std::set dependent_vars; + output->clear_blocks(); prune_impl(input, output, 0, -1, dependent_vars); } -- GitLab From 1010e39bdf738029fcb78b0d388a91dfdebdda2f Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 6 Feb 2018 12:39:51 +0800 Subject: [PATCH 051/138] Add ReadOp --- paddle/framework/framework.proto | 4 +- paddle/framework/op_desc.cc | 29 +++++++-- paddle/framework/operator.cc | 26 ++++++-- paddle/framework/reader.cc | 40 ++++++------ paddle/framework/reader.h | 32 +++++----- paddle/framework/shape_inference.cc | 14 +++++ paddle/framework/shape_inference.h | 3 +- paddle/operators/read_op.cc | 94 +++++++++++++++++++++++++++++ 8 files changed, 193 insertions(+), 49 deletions(-) create mode 100644 paddle/operators/read_op.cc diff --git a/paddle/framework/framework.proto b/paddle/framework/framework.proto index f65ccae6e6a..d7be1a7352d 100644 --- a/paddle/framework/framework.proto +++ b/paddle/framework/framework.proto @@ -116,7 +116,7 @@ message LoDTensorArrayDesc { optional int32 lod_level = 2 [ default = 0 ]; } -message Reader { repeated LoDTensorDesc lod_tensor = 1; } +message ReaderDesc { repeated LoDTensorDesc lod_tensor = 1; } message VarDesc { enum VarType { @@ -136,7 +136,7 @@ message VarDesc { optional LoDTensorDesc lod_tensor = 4; optional TensorDesc selected_rows = 5; optional LoDTensorArrayDesc tensor_array = 6; - optional Reader reader = 7; + optional ReaderDesc reader = 7; } message BlockDesc { diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index ad361852ec9..772ec26895e 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -72,6 +72,8 @@ class CompileTimeInferShapeContext : public InferShapeContext { void SetDim(const std::string &name, const DDim &dim) override; + std::vector GetRepeatedDim(const std::string &name) const override; + const OpDesc &op_; const BlockDesc &block_; }; @@ -457,22 +459,37 @@ const std::vector &CompileTimeInferShapeContext::Outputs( DDim CompileTimeInferShapeContext::GetDim(const std::string &name) const { auto var = block_.FindVarRecursive(name); PADDLE_ENFORCE(var != nullptr, "Cannot find variable %s", name); + DDim res; try { auto shape = var->GetShape(); - if (shape.empty()) { - return framework::make_ddim({0UL}); - } else { - return framework::make_ddim(var->GetShape()); - } + res = shape.empty() ? make_ddim({0UL}) : make_ddim(shape); } catch (...) { VLOG(5) << "GetDim of variable " << name << " error"; std::rethrow_exception(std::current_exception()); } + return res; +} + +std::vector CompileTimeInferShapeContext::GetRepeatedDim( + const std::string &name) const { + auto var = block_.FindVarRecursive(name); + PADDLE_ENFORCE(var != nullptr, "Cannot find variable %s", name); + std::vector res; + try { + auto shapes = var->GetShapes(); + for (const auto &s : shapes) { + res.push_back(s.empty() ? make_ddim({0UL}) : make_ddim(s)); + } + } catch (...) { + VLOG(5) << "GetRepeatedDim of variable " << name << " error."; + std::rethrow_exception(std::current_exception()); + } + return res; } void CompileTimeInferShapeContext::SetDim(const std::string &name, const DDim &dim) { - block_.FindVarRecursive(name)->SetShape(framework::vectorize(dim)); + block_.FindVarRecursive(name)->SetShape(vectorize(dim)); } bool CompileTimeInferShapeContext::IsRuntime() const { return false; } diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index 81fa8cf4774..1aa111dc76d 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -320,8 +320,8 @@ class RuntimeInferShapeContext : public InferShapeContext { if (length == 0) { return false; } - PADDLE_ENFORCE_EQ(length, 1UL, "Input %s should have more than one inputs", - name); + PADDLE_ENFORCE_EQ(length, 1UL, + "Input %s should not have more than one inputs", name); auto ipt = ins[0]; auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt); return var != nullptr; @@ -333,8 +333,8 @@ class RuntimeInferShapeContext : public InferShapeContext { if (length == 0) { return false; } - PADDLE_ENFORCE_EQ(length, 1UL, "Output %s should have more than one inputs", - name); + PADDLE_ENFORCE_EQ(length, 1UL, + "Output %s should not have more than one inputs", name); auto ipt = outs[0]; auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt); return var != nullptr; @@ -421,8 +421,22 @@ class RuntimeInferShapeContext : public InferShapeContext { } else if (var->IsType()) { return var->Get().GetCompleteDims(); } else { - PADDLE_THROW("Variable %s type_id %s, expect LoDTensor/SelectedRows.", - name, var->Type().name()); + PADDLE_THROW( + "Only LoDTensor/SelectedRows support 'GetDim', but Variable %s's " + "type_id is %s.", + name, var->Type().name()); + } + } + + std::vector GetRepeatedDim(const std::string& name) const override { + Variable* var = scope_.FindVar(name); + if (var->IsType()) { + return var->Get().shapes(); + } else { + PADDLE_THROW( + "Only ReaderHolder support 'GetRepeatedDim', but Variable %s's " + "type_id is %s.", + name, var->Type().name()); } } diff --git a/paddle/framework/reader.cc b/paddle/framework/reader.cc index a05bef42ffa..76cbc827ba5 100644 --- a/paddle/framework/reader.cc +++ b/paddle/framework/reader.cc @@ -25,13 +25,15 @@ DDim FileReader::shape(size_t idx) const { return shapes_[idx]; } -std::vector ShuffleReader::ReadNext() { +void ShuffleReader::ReadNext(std::vector* out) { if (iteration_pos_ >= buffer_.size()) { // Reload buffer with new data buffer_.clear(); + buffer_.reverse(buffer_size_); for (int i = 0; i < buffer_size_; ++i) { if (reader_->HasNext()) { - buffer_.push_back(reader_->ReadNext()); + buffer.push_back(std::vector()); + reader_->ReadNext(&buffer.back()); } else { break; } @@ -39,29 +41,32 @@ std::vector ShuffleReader::ReadNext() { std::random_shuffle(buffer_.begin(), buffer_.end()); iteration_pos_ = 0; } - if (buffer_.empty()) { - std::vector empty_res; - return empty_res; + out->clear(); + if (!buffer_.empty()) { + std::swap(*out, buffer_[iteration_pos_++]); } - return buffer_[iteration_pos_++]; + // if buffer_ is empty, the 'out' will return as an empty vector. } -std::vector BatchReader::ReadNext() { +void BatchReader::ReadNext(std::vector* out) { buffer_.clear(); + buffer_.reserve(batch_size_); for (int i = 0; i < batch_size_; ++i) { if (reader_->HasNext()) { - buffer_.push_back(reader_->ReadNext()); + buffer_.push_back(std::vector()); + reader_->ReadNext(&buffer_.back()); } else { break; } } // Concat instances - std::vector res; + out.clear(); if (buffer_.empty()) { - return res; + // if buffer_ is empty, the 'out' will return as an empty vector. + return; } int out_num = buffer_[0].size(); - res.reserve(out_num); + out->reserve(out_num); for (int j = 0; j < out_num; ++j) { // Merge shape and check date type std::type_index batch_type = buffer_[0][j].type(); @@ -76,9 +81,9 @@ std::vector BatchReader::ReadNext() { batch_shape[0] += ins_shape[0]; } - LoDTensor out; - out.Resize(batch_shape); - out.mutable_data(platform::CPUPlace(), batch_type); + LoDTensor out_tensor; + out_tensor.Resize(batch_shape); + out_tensor.mutable_data(platform::CPUPlace(), batch_type); int64_t dst_offset = 0; // Merge lod and data @@ -102,15 +107,14 @@ std::vector BatchReader::ReadNext() { top_level_lod.back() + (ins_lod.empty() ? ins_shape[0] : (ins_lod[0].size() - 1))); - Tensor dst = out.Slice(dst_offset, dst_offset + ins_shape[0]); + Tensor dst = out_tensor.Slice(dst_offset, dst_offset + ins_shape[0]); Copy(buffer_[i][j], platform::CPUPlace(), &dst); dst_offset += ins_shape[0]; } batch_lod.insert(batch_lod.begin(), top_level_lod); - out.set_lod(batch_lod); - res.push_back(out); + out_tensor.set_lod(batch_lod); + out->push_back(out_tensor); } - return res; } } // namespace framework } // namespace paddle diff --git a/paddle/framework/reader.h b/paddle/framework/reader.h index f450e67689a..523ff28c990 100644 --- a/paddle/framework/reader.h +++ b/paddle/framework/reader.h @@ -15,14 +15,14 @@ #pragma once #include "paddle/framework/ddim.h" -#include "paddle/framework/lod_tensor.h" +#include "paddle/framework/lod_tensor_array.h" namespace paddle { namespace framework { class ReaderBase { public: - virtual std::vector ReadNext() = 0; + virtual void ReadNext(std::vector* out) = 0; virtual bool HasNext() const = 0; virtual DDim shape(size_t idx) const = 0; @@ -73,24 +73,24 @@ class RandomReader : public FileReader { dist_ = std::uniform_real_distribution(min_, max_); } - std::vector ReadNext() override { - std::vector res; - res.reserve(shapes_.size()); + void ReadNext(std::vector* out) override { + out.clear(); + out.reserve(shapes_.size()); for (const DDim& shape : shapes_) { PADDLE_ENFORCE_GE( shape.size(), 2, - "The rank of input data should be 2 at least.(Now it's %d)", + "The rank of reader's output data should be 2 at least.(Now it's %d)", shape.size()); - LoDTensor out; - out.Resize(shape); - T* data = out.mutable_data(platform::CPUPlace()); + LoDTensor out_tensor; + out_tensor.Resize(shape); + T* data = out_tensor.mutable_data(platform::CPUPlace()); int64_t numel = product(shape); for (int64_t i = 0; i < numel; ++i) { data[i] = dist_(engine_); } - res.push_back(out); + out.push_back(out_tensor); } - return res; + return out; } bool HasNext() const override { return true; } @@ -111,11 +111,11 @@ class ShuffleReader : public DecoratedReader { buffer_.reserve(buffer_size); } - std::vector ReadNext() override; + void ReadNext(std::vector* out) override; private: int buffer_size_; - std::vector> buffer_; + std::vector> buffer_; size_t iteration_pos_; }; @@ -126,11 +126,11 @@ class BatchReader : public DecoratedReader { buffer_.reserve(batch_size_); } - std::vector ReadNext() override; + void ReadNext(std::vector* out) override; private: int batch_size_; - std::vector> buffer_; + std::vector> buffer_; }; // The ReaderHolder is used as readers' unified wrapper, @@ -141,7 +141,7 @@ class ReaderHolder { ReaderBase* Get() const { return reader_.get(); } - std::vector ReadNext() { return reader_->ReadNext(); } + void ReadNext(std::vector* out) { reader_->ReadNext(out); } bool HasNext() const { return reader_->HasNext(); } DDim shape(size_t idx) const { return reader_->shape(idx); } diff --git a/paddle/framework/shape_inference.cc b/paddle/framework/shape_inference.cc index a0fa467291b..4a8acfb87ff 100644 --- a/paddle/framework/shape_inference.cc +++ b/paddle/framework/shape_inference.cc @@ -32,6 +32,16 @@ std::vector InferShapeContext::GetInputsDim( return GetDims(arg_names); } +std::vector InferShapeContext::GetReaderDims( + const std::string &name) const { + const std::vector &arg_names = Inputs(name); + PADDLE_ENFORCE_EQ( + arg_names.size(), 1UL, + "Reader input '%s' should hold one element, but now it holds %d", name, + arg_names.size()); + return this->GetRepeatedDims(arg_names[0]); +} + DDim InferShapeContext::GetInputsElementDim(const std::string &name, int idx) const { const std::vector &names = Inputs(name); @@ -61,6 +71,7 @@ std::vector InferShapeContext::GetDims( [this](const std::string &name) { return this->GetDim(name); }); return ret; } + void InferShapeContext::SetDims(const std::vector &names, const std::vector &dims) { size_t length = names.size(); @@ -72,14 +83,17 @@ void InferShapeContext::SetDims(const std::vector &names, SetDim(names[i], dims[i]); } } + std::vector InferShapeContext::GetInputsVarType( const std::string &name) const { return GetVarTypes(Inputs(name)); } + std::vector InferShapeContext::GetOutputsVarType( const std::string &name) const { return GetVarTypes(Outputs(name)); } + std::vector InferShapeContext::GetVarTypes( const std::vector &names) const { std::vector retv; diff --git a/paddle/framework/shape_inference.h b/paddle/framework/shape_inference.h index 830f199ed14..f1a64e9024b 100644 --- a/paddle/framework/shape_inference.h +++ b/paddle/framework/shape_inference.h @@ -36,8 +36,8 @@ class InferShapeContext { virtual bool HasOutputs(const std::string &name) const = 0; DDim GetInputDim(const std::string &name) const; - std::vector GetInputsDim(const std::string &name) const; + std::vector GetReaderDims(const std::string &name) const DDim; DDim GetInputsElementDim(const std::string &name, int idx) const; void SetOutputDim(const std::string &name, const DDim &dim); @@ -61,6 +61,7 @@ class InferShapeContext { protected: virtual DDim GetDim(const std::string &name) const = 0; virtual void SetDim(const std::string &name, const DDim &dim) = 0; + std::vector GetRepeatedDim(const std::string &name) const = 0; std::vector GetDims(const std::vector &names) const; std::vector GetVarTypes( diff --git a/paddle/operators/read_op.cc b/paddle/operators/read_op.cc new file mode 100644 index 00000000000..c6ff4ba8fee --- /dev/null +++ b/paddle/operators/read_op.cc @@ -0,0 +1,94 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/framework/op_registry.h" +#include "paddle/framework/reader.h" + +namespace paddle { +namespace operators { + +class ReadInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Reader"), + "The ReadOp must take a reader as input."); + PADDLE_ENFORCE(ctx->HasOutputs("Out"), + "The ReadOp should be assigned with output."); + std::vector reader_dims = ctx->GetReaderDims("Reader"); + std::vector out_names = ctx->Outputs("Out"); + PADDLE_ENFORCE_EQ( + reader_dims.size(), out_names.size(), + "The reader's dim number doesn't match the output number."); + ctx->SetOutputsDim("Out", reader_dims); + } +}; + +class ReadInferVarType : public framework::VarTypeInference { + public: + void operator()(const framework::OpDesc& op_desc, + framework::BlockDesc* block) const override { + std::string reader_name = op_desc.Input("Reader")[0]; + std::vector out_names = op_desc.Output("Out"); + framework::VarDesc reader = block.FindVarRecursive(reader_name); + auto dtypes = reader.GetDataTypes(); + PADDLE_ENFORCE_EQ(dtypes.size(), out_names.size()); + for (size_t i = 0; i < dtypes.size(); ++i) { + faremwork::VarDesc& out = block->FindRecursiveOrCreateVar(out_names[i]); + out.SetType(framework::proto::DataType::LOD_TENSOR); + out.SetDataType(dtypes[i]); + } + } +}; + +class ReadOp : public framework::OperatorBase { + public: + using framework::OperatorBase::OperatorBase; + void Run(const framework::Scope& scope, + const platform::Place& dev_place) const override { + const framework::ReaderHolder& reader = + scope.FindVar(Input("Reader"))->Get(); + if (!reader.HasNext()) { + // what shall we do??? + return; + } + std::vector out_arg_names = Outputs("Out"); + std::vector ins; + reader.ReadNext(&ins); + PADDLE_ENFORCE_EQ(ins.size(), out_arg_names.size()); + for (size_t i = 0; i < ins.size(); ++i) { + auto* out = + scope.FindVar(out_arg_names[i])->GetMutable(); + PADDLE_ENFORCE_EQ(ins[i].dims(), out->dims()); + out->ShareDataWith(ins[i]); + out->set_lod(ins[i].lod()); + } + } +}; + +class ReadOpMaker : public framework::OpProtoAndCheckerMaker { + public: + ReadOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(op_proto, op_checker) { + AddInput("Reader", "(ReaderHolder) The executed reader."); + AddOutput("Out", "(LoDTensor) The output data.").AsDuplicable(); + AddComment(R"DOC( + Read Operator + + Execute a given reader once and output data. + )DOC") + } +}; + +} // namespace operators +} // namespace paddle \ No newline at end of file -- GitLab From 70324911e701829eb1a5ef484ec26920b6578d96 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Tue, 6 Feb 2018 12:52:10 +0800 Subject: [PATCH 052/138] refine buffer receive --- paddle/framework/details/buffered_channel.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/framework/details/buffered_channel.h b/paddle/framework/details/buffered_channel.h index 7ac234b8d42..b9761eab9b5 100644 --- a/paddle/framework/details/buffered_channel.h +++ b/paddle/framework/details/buffered_channel.h @@ -71,7 +71,7 @@ bool Buffered::Receive(T* item) { std::unique_lock lock(mu_); empty_cond_var_.wait(lock, [this]() { return !channel_.empty() || closed_; }); bool ret = false; - if (!closed_) { + if (!channel_.empty()) { *item = std::move(channel_.front()); channel_.pop_front(); full_cond_var_.notify_one(); -- GitLab From c966c2813022e145f75263dab780d8cb9273a2f1 Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Tue, 6 Feb 2018 13:45:36 +0800 Subject: [PATCH 053/138] Add RunAndGetException in threadpool Change the behaviour of thread pool. Thread pool will ignore the thrown exception implicitly. It is hard to debug. Corrently, ThreadPool::Run will invoke `LOG(FATAL)` if an exception thrown. --- paddle/framework/threadpool.h | 44 +++++++++++++++++++++++++++++++---- 1 file changed, 40 insertions(+), 4 deletions(-) diff --git a/paddle/framework/threadpool.h b/paddle/framework/threadpool.h index 4e9b58679d9..77d31a1176d 100644 --- a/paddle/framework/threadpool.h +++ b/paddle/framework/threadpool.h @@ -21,7 +21,8 @@ limitations under the License. */ #include #include #include - +#include "glog/logging.h" +#include "paddle/platform/enforce.h" #include "paddle/platform/macros.h" // for DISABLE_COPY_AND_ASSIGN namespace paddle { @@ -31,7 +32,7 @@ namespace framework { // number of threads. class ThreadPool { public: - typedef std::packaged_task Task; + using Task = std::packaged_task()>; // Returns the singleton of ThreadPool. static ThreadPool* GetInstance(); @@ -52,9 +53,28 @@ class ThreadPool { // std::future::wait(). template std::future Run(Callback fn) { + auto f = this->RunAndGetException(fn); + return std::async(std::launch::deferred, ExceptionHandler(std::move(f))); + } + + template + std::future> RunAndGetException( + Callback fn) { std::unique_lock lock(mutex_); - Task task(std::bind(fn)); - std::future f = task.get_future(); + Task task([fn]() -> std::unique_ptr { + try { + fn(); + return nullptr; + } catch (platform::EnforceNotMet ex) { + return std::unique_ptr( + new platform::EnforceNotMet(ex)); + } catch (...) { + LOG(FATAL) + << "Unexpected exception is catched in thread pool. All " + "throwable exception in Fluid should be an EnforceNotMet."; + } + }); + std::future> f = task.get_future(); tasks_.push(std::move(task)); lock.unlock(); scheduled_.notify_one(); @@ -65,6 +85,22 @@ class ThreadPool { void Wait(); private: + struct ExceptionHandler { + mutable std::future> future_; + explicit ExceptionHandler( + std::future>&& f) + : future_(std::move(f)) {} + void operator()() const { + auto ex = this->future_.get(); + if (ex != nullptr) { + LOG(FATAL) << "The exception is thrown inside the thread pool. You " + "should use RunAndGetException to handle the exception.\n" + "The default exception handler is LOG(FATAL)." + << ex->what(); + } + } + }; + DISABLE_COPY_AND_ASSIGN(ThreadPool); explicit ThreadPool(int num_threads); -- GitLab From 59e4dd579770df7e0fb7208a11517784a7b02b4e Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Tue, 6 Feb 2018 14:17:35 +0800 Subject: [PATCH 054/138] add independent inference_lib.cmake --- CMakeLists.txt | 1 + cmake/external/eigen.cmake | 8 ---- cmake/external/gflags.cmake | 7 ---- cmake/external/glog.cmake | 7 ---- cmake/external/protobuf.cmake | 7 ---- cmake/inference_lib.cmake | 74 +++++++++++++++++++++++++++++++++ paddle/framework/CMakeLists.txt | 8 ---- paddle/inference/CMakeLists.txt | 11 ----- paddle/memory/CMakeLists.txt | 7 ---- paddle/platform/CMakeLists.txt | 8 ---- paddle/string/CMakeLists.txt | 7 ---- 11 files changed, 75 insertions(+), 70 deletions(-) create mode 100644 cmake/inference_lib.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 49334279f6d..3a21574b855 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -156,6 +156,7 @@ include(rdma) # set rdma libraries include(flags) # set paddle compile flags include(version) # set PADDLE_VERSION include(coveralls) # set code coverage +include(inference_lib) # add paddle fluid inference libraries include_directories("${PADDLE_SOURCE_DIR}") diff --git a/cmake/external/eigen.cmake b/cmake/external/eigen.cmake index eb6c0cef574..6a701e076c9 100644 --- a/cmake/external/eigen.cmake +++ b/cmake/external/eigen.cmake @@ -28,11 +28,3 @@ endif() add_dependencies(eigen3 extern_eigen3) LIST(APPEND external_project_dependencies eigen3) - -set(lib_dir "${CMAKE_INSTALL_PREFIX}/third_party/eigen3") -add_custom_target(eigen3_lib - COMMAND mkdir -p "${lib_dir}/Eigen" "${lib_dir}/unsupported" - COMMAND cp "${EIGEN_INCLUDE_DIR}/Eigen/Core" "${lib_dir}/Eigen" - COMMAND cp -r "${EIGEN_INCLUDE_DIR}/Eigen/src" "${lib_dir}/Eigen" - COMMAND cp -r "${EIGEN_INCLUDE_DIR}/unsupported/Eigen" "${lib_dir}/unsupported" -) diff --git a/cmake/external/gflags.cmake b/cmake/external/gflags.cmake index 9cbc376ba0e..d4f252bb9f6 100644 --- a/cmake/external/gflags.cmake +++ b/cmake/external/gflags.cmake @@ -60,10 +60,3 @@ IF(WITH_C_API) INSTALL(FILES ${GFLAGS_LIBRARIES} DESTINATION third_party/gflags/lib) ENDIF() ENDIF() - -set(lib_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/gflags") -add_custom_target(gflags_lib - COMMAND mkdir -p "${lib_dir}/lib" - COMMAND cp -r "${GFLAGS_INCLUDE_DIR}" "${lib_dir}" - COMMAND cp "${GFLAGS_LIBRARIES}" "${lib_dir}/lib" -) diff --git a/cmake/external/glog.cmake b/cmake/external/glog.cmake index 0031225a6cb..0c6b3aafcb4 100644 --- a/cmake/external/glog.cmake +++ b/cmake/external/glog.cmake @@ -76,10 +76,3 @@ IF(WITH_C_API) INSTALL(FILES ${GLOG_LIBRARIES} DESTINATION third_party/glog/lib) ENDIF() ENDIF() - -set(lib_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/glog") -add_custom_target(glog_lib - COMMAND mkdir -p "${lib_dir}/lib" - COMMAND cp -r "${GLOG_INCLUDE_DIR}" "${lib_dir}" - COMMAND cp "${GLOG_LIBRARIES}" "${lib_dir}/lib" -) diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake index ff3d38a691a..ff5855052da 100644 --- a/cmake/external/protobuf.cmake +++ b/cmake/external/protobuf.cmake @@ -259,13 +259,6 @@ IF(NOT PROTOBUF_FOUND) ENDIF() ENDIF() - set(lib_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/protobuf") - add_custom_target(protobuf_lib - COMMAND mkdir -p "${lib_dir}/lib" - COMMAND cp -r "${PROTOBUF_INCLUDE_DIR}" "${lib_dir}" - COMMAND cp "${PROTOBUF_LITE_LIBRARY}" "${lib_dir}/lib" - ) - IF(CMAKE_CROSSCOMPILING) PROMPT_PROTOBUF_LIB(protobuf_host extern_protobuf) ELSE() diff --git a/cmake/inference_lib.cmake b/cmake/inference_lib.cmake new file mode 100644 index 00000000000..d71fbce3820 --- /dev/null +++ b/cmake/inference_lib.cmake @@ -0,0 +1,74 @@ +# make package for paddle fluid shared and static library +# third party +set(lib_dir "${CMAKE_INSTALL_PREFIX}/third_party/eigen3") +add_custom_target(eigen3_lib + COMMAND mkdir -p "${lib_dir}/Eigen" "${lib_dir}/unsupported" + COMMAND cp "${EIGEN_INCLUDE_DIR}/Eigen/Core" "${lib_dir}/Eigen" + COMMAND cp -r "${EIGEN_INCLUDE_DIR}/Eigen/src" "${lib_dir}/Eigen" + COMMAND cp -r "${EIGEN_INCLUDE_DIR}/unsupported/Eigen" "${lib_dir}/unsupported" +) + +set(lib_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/gflags") +add_custom_target(gflags_lib + COMMAND mkdir -p "${lib_dir}/lib" + COMMAND cp -r "${GFLAGS_INCLUDE_DIR}" "${lib_dir}" + COMMAND cp "${GFLAGS_LIBRARIES}" "${lib_dir}/lib" +) + +set(lib_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/glog") +add_custom_target(glog_lib + COMMAND mkdir -p "${lib_dir}/lib" + COMMAND cp -r "${GLOG_INCLUDE_DIR}" "${lib_dir}" + COMMAND cp "${GLOG_LIBRARIES}" "${lib_dir}/lib" +) + +IF(NOT PROTOBUF_FOUND) + set(lib_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/protobuf") + add_custom_target(protobuf_lib + COMMAND mkdir -p "${lib_dir}/lib" + COMMAND cp -r "${PROTOBUF_INCLUDE_DIR}" "${lib_dir}" + COMMAND cp "${PROTOBUF_LITE_LIBRARY}" "${lib_dir}/lib" + ) +ENDIF(NOT PROTOBUF_FOUND) + +# paddle fluid module +set(lib_dir "${CMAKE_INSTALL_PREFIX}/paddle/framework") +add_custom_target(framework_lib DEPENDS framework_py_proto + COMMAND mkdir -p "${lib_dir}/details" + COMMAND cp "${PADDLE_SOURCE_DIR}/paddle/framework/*.h" "${lib_dir}" + COMMAND cp "${PADDLE_SOURCE_DIR}/paddle/framework/details/*.h" "${lib_dir}/details" + COMMAND cp "${PADDLE_BINARY_DIR}/paddle/framework/framework.pb.h" "${lib_dir}" +) + +set(lib_dir "${CMAKE_INSTALL_PREFIX}/paddle/memory") +add_custom_target(memory_lib + COMMAND mkdir -p "${lib_dir}/detail" + COMMAND cp "${PADDLE_SOURCE_DIR}/paddle/memory/*.h" "${lib_dir}" + COMMAND cp "${PADDLE_SOURCE_DIR}/paddle/memory/detail/*.h" "${lib_dir}/detail" +) + +set(lib_dir "${CMAKE_INSTALL_PREFIX}/paddle/inference") +add_custom_target(inference_lib DEPENDS paddle_fluid_shared + COMMAND mkdir -p "${lib_dir}" + COMMAND cp "${PADDLE_SOURCE_DIR}/paddle/inference/*.h" "${lib_dir}" + COMMAND cp "${PADDLE_BINARY_DIR}/paddle/inference/libpaddle_fluid.so" "${lib_dir}" +) + +set(lib_dir "${CMAKE_INSTALL_PREFIX}/paddle/platform") +add_custom_target(platform_lib + COMMAND mkdir -p "${lib_dir}/dynload" "${lib_dir}/details" + COMMAND cp "${PADDLE_SOURCE_DIR}/paddle/platform/*.h" "${lib_dir}" + COMMAND cp "${PADDLE_SOURCE_DIR}/paddle/platform/dynload/*.h" "${lib_dir}/dynload" + COMMAND cp "${PADDLE_SOURCE_DIR}/paddle/platform/details/*.h" "${lib_dir}/details" +) + +set(lib_dir "${CMAKE_INSTALL_PREFIX}/paddle/string") +add_custom_target(string_lib + COMMAND mkdir -p "${lib_dir}/tinyformat" + COMMAND cp "${PADDLE_SOURCE_DIR}/paddle/string/*.h" "${lib_dir}" + COMMAND cp "${PADDLE_SOURCE_DIR}/paddle/string/tinyformat/*.h" "${lib_dir}/tinyformat" +) + +add_custom_target(inference_lib_dist DEPENDS + inference_lib framework_lib memory_lib platform_lib string_lib + gflags_lib glog_lib protobuf_lib eigen3_lib) diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt index a2a0be08d94..8b3768b2319 100644 --- a/paddle/framework/CMakeLists.txt +++ b/paddle/framework/CMakeLists.txt @@ -92,12 +92,4 @@ cc_test(init_test SRCS init_test.cc DEPS init) cc_test(op_kernel_type_test SRCS op_kernel_type_test.cc DEPS place device_context framework_proto) cc_test(cow_ptr_tests SRCS details/cow_ptr_test.cc) -set(lib_dir "${CMAKE_INSTALL_PREFIX}/paddle/framework") -add_custom_target(framework_lib DEPENDS framework_py_proto - COMMAND mkdir -p "${lib_dir}/details" - COMMAND cp "${CMAKE_CURRENT_SOURCE_DIR}/*.h" "${lib_dir}" - COMMAND cp "${CMAKE_CURRENT_SOURCE_DIR}/details/*.h" "${lib_dir}/details" - COMMAND cp "${CMAKE_CURRENT_BINARY_DIR}/framework.pb.h" "${lib_dir}" -) - cc_test(channel_test SRCS channel_test.cc) diff --git a/paddle/inference/CMakeLists.txt b/paddle/inference/CMakeLists.txt index e8e0ee21071..654a6119bdc 100644 --- a/paddle/inference/CMakeLists.txt +++ b/paddle/inference/CMakeLists.txt @@ -18,17 +18,6 @@ target_circle_link_libraries(paddle_fluid_shared SET_TARGET_PROPERTIES(paddle_fluid_shared PROPERTIES OUTPUT_NAME paddle_fluid) -# install library & headers -set(lib_dir "${CMAKE_INSTALL_PREFIX}/paddle/inference") -add_custom_target(inference_lib DEPENDS paddle_fluid_shared - COMMAND mkdir -p "${lib_dir}" - COMMAND cp "${CMAKE_CURRENT_SOURCE_DIR}/*.h" "${lib_dir}" - COMMAND cp "${CMAKE_CURRENT_BINARY_DIR}/libpaddle_fluid.so" "${lib_dir}" -) -add_custom_target(inference_lib_dist DEPENDS - inference_lib framework_lib memory_lib platform_lib string_lib - gflags_lib glog_lib protobuf_lib eigen3_lib) - if(WITH_TESTING) add_subdirectory(tests/book) endif() diff --git a/paddle/memory/CMakeLists.txt b/paddle/memory/CMakeLists.txt index fad49346f23..1a61c484823 100644 --- a/paddle/memory/CMakeLists.txt +++ b/paddle/memory/CMakeLists.txt @@ -14,10 +14,3 @@ cc_library(paddle_memory system_allocator) cc_test(memory_test SRCS memory_test.cc DEPS place paddle_memory) - -set(lib_dir "${CMAKE_INSTALL_PREFIX}/paddle/memory") -add_custom_target(memory_lib - COMMAND mkdir -p "${lib_dir}/detail" - COMMAND cp "${CMAKE_CURRENT_SOURCE_DIR}/*.h" "${lib_dir}" - COMMAND cp "${CMAKE_CURRENT_SOURCE_DIR}/detail/*.h" "${lib_dir}/detail" -) diff --git a/paddle/platform/CMakeLists.txt b/paddle/platform/CMakeLists.txt index d70530aadb8..5ce4b3de39d 100644 --- a/paddle/platform/CMakeLists.txt +++ b/paddle/platform/CMakeLists.txt @@ -39,11 +39,3 @@ nv_test(nccl_test SRCS nccl_test.cu DEPS dynload_cuda gpu_info device_context) cc_library(profiler SRCS profiler.cc DEPS device_context) cc_test(profiler_test SRCS profiler_test.cc DEPS profiler) - -set(lib_dir "${CMAKE_INSTALL_PREFIX}/paddle/platform") -add_custom_target(platform_lib - COMMAND mkdir -p "${lib_dir}/dynload" "${lib_dir}/details" - COMMAND cp "${CMAKE_CURRENT_SOURCE_DIR}/*.h" "${lib_dir}" - COMMAND cp "${CMAKE_CURRENT_SOURCE_DIR}/dynload/*.h" "${lib_dir}/dynload" - COMMAND cp "${CMAKE_CURRENT_SOURCE_DIR}/details/*.h" "${lib_dir}/details" -) diff --git a/paddle/string/CMakeLists.txt b/paddle/string/CMakeLists.txt index 234a9a6d036..1fe7f42ca1c 100644 --- a/paddle/string/CMakeLists.txt +++ b/paddle/string/CMakeLists.txt @@ -2,10 +2,3 @@ cc_library(stringpiece SRCS piece.cc) cc_test(stringpiece_test SRCS piece_test.cc DEPS stringpiece glog gflags) cc_test(stringprintf_test SRCS printf_test.cc DEPS glog gflags) cc_test(to_string_test SRCS to_string_test.cc) - -set(lib_dir "${CMAKE_INSTALL_PREFIX}/paddle/string") -add_custom_target(string_lib - COMMAND mkdir -p "${lib_dir}/tinyformat" - COMMAND cp "${CMAKE_CURRENT_SOURCE_DIR}/*.h" "${lib_dir}" - COMMAND cp "${CMAKE_CURRENT_SOURCE_DIR}/tinyformat/*.h" "${lib_dir}/tinyformat" -) -- GitLab From 17b1c369b1f2dadff102ec283b847ea064593dec Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 5 Feb 2018 23:08:12 -0800 Subject: [PATCH 055/138] "fix ci" --- paddle/framework/mixed_vector.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/paddle/framework/mixed_vector.h b/paddle/framework/mixed_vector.h index aade7d83918..1fc7622e9b2 100644 --- a/paddle/framework/mixed_vector.h +++ b/paddle/framework/mixed_vector.h @@ -116,6 +116,8 @@ inline T *Vector::mutable_data(platform::Place place) { this->size() * sizeof(T), ctx->stream()); ctx->Wait(); return static_cast(cuda_ptr_.get()); +#else + return nullptr; #endif } else { PADDLE_THROW("Unsupport Place."); -- GitLab From 709c157a2ff4d51846c373b465d021be93033363 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Mon, 5 Feb 2018 23:59:41 -0800 Subject: [PATCH 056/138] "fix ci" --- paddle/framework/lod_tensor.h | 8 +------- paddle/framework/selected_rows.h | 8 +------- paddle/operators/parallel_do_op.cc | 11 ++++++++--- 3 files changed, 10 insertions(+), 17 deletions(-) diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h index 3465e02c826..a773c1eb32d 100644 --- a/paddle/framework/lod_tensor.h +++ b/paddle/framework/lod_tensor.h @@ -129,13 +129,7 @@ class LoDTensor : public Tensor { explicit LoDTensor(const LoD& lod) : lod_(lod) {} - void set_lod(const LoD& lod) { - lod_ = lod; - if (holder_ != nullptr && - !platform::is_same_place(holder_->place(), lod.place())) { - lod_.CopyToPeer(holder_->place()); - } - } + void set_lod(const LoD& lod) { lod_ = lod; } const LoD& lod() const { return lod_; } diff --git a/paddle/framework/selected_rows.h b/paddle/framework/selected_rows.h index 11323442446..30d3dfc1e89 100644 --- a/paddle/framework/selected_rows.h +++ b/paddle/framework/selected_rows.h @@ -42,13 +42,7 @@ class SelectedRows { Vector* mutable_rows() { return &rows_; } - void set_rows(const Vector& rows) { - rows_ = rows; - if (value_ != nullptr && - !platform::is_same_place(value_->place(), rows.place())) { - rows_.mutable_data(value_->place()); - } - } + void set_rows(const Vector& rows) { rows_ = rows; } DDim GetCompleteDims() const { std::vector dims = vectorize(value_->dims()); diff --git a/paddle/operators/parallel_do_op.cc b/paddle/operators/parallel_do_op.cc index 87678decde1..0db2fb6238a 100644 --- a/paddle/operators/parallel_do_op.cc +++ b/paddle/operators/parallel_do_op.cc @@ -76,21 +76,26 @@ inline void CopyOrShare(const framework::Variable &src, if (src.IsType()) { if (src.Get().place() == dst_place) { dst->GetMutable()->ShareDataWith(src.Get()); + dst->GetMutable()->set_lod(src.Get().lod()); } else { Copy(src.Get(), dst_place, dst->GetMutable()); + LoD lod(src.Get().lod()); + lod.CopyToPeer(dst_place); + dst->GetMutable()->set_lod(lod); } - dst->GetMutable()->set_lod(src.Get().lod()); } else if (src.IsType()) { auto &src_sr = src.Get(); auto *dst_sr = dst->GetMutable(); - dst_sr->set_rows(src_sr.rows()); dst_sr->set_height(src_sr.height()); if (src_sr.value().place() == dst_place) { dst_sr->mutable_value()->ShareDataWith(src_sr.value()); + dst_sr->set_rows(src_sr.rows()); } else { Copy(src_sr.value(), dst_place, dst_sr->mutable_value()); + LoD lod(src.Get().lod()); + lod.CopyToPeer(dst_place); + dst_sr->set_rows(lod); } - dst_sr->set_rows(src_sr.rows()); } else { PADDLE_THROW("Expect LoDTensor/SelectedRows, get %s", src.Type().name()); } -- GitLab From 179b78934a81c7935b3a3d6fa22f9596170a31dc Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Tue, 6 Feb 2018 00:24:13 -0800 Subject: [PATCH 057/138] "fix CopyToPeer" --- paddle/framework/lod_tensor.h | 2 +- paddle/framework/mixed_vector.h | 25 +++++++++++++++++++++++-- paddle/operators/parallel_do_op.cc | 4 ++-- 3 files changed, 26 insertions(+), 5 deletions(-) diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h index a773c1eb32d..be2b3016196 100644 --- a/paddle/framework/lod_tensor.h +++ b/paddle/framework/lod_tensor.h @@ -65,7 +65,7 @@ struct LoD : public std::vector> { void CopyToPeer(platform::Place place) { for (auto it = this->begin(); it != this->end(); ++it) { - it->mutable_data(place); + it->CopyToPeer(place); } } }; diff --git a/paddle/framework/mixed_vector.h b/paddle/framework/mixed_vector.h index 1fc7622e9b2..cdb968e3cb7 100644 --- a/paddle/framework/mixed_vector.h +++ b/paddle/framework/mixed_vector.h @@ -82,7 +82,7 @@ inline const T *Vector::data(platform::Place place) const { if (cuda_ptr_ == nullptr) { return nullptr; } - if (platform::is_same_place(place, place_)) { + if (boost::get(place) == place_) { return static_cast(cuda_ptr_.get()); } else { PADDLE_THROW( @@ -99,7 +99,7 @@ inline T *Vector::mutable_data(platform::Place place) { if (platform::is_cpu_place(place)) { return std::vector::data(); } else if (platform::is_gpu_place(place)) { - if (!platform::is_same_place(place, place_)) { + if (boost::get(place) != place_) { place_ = boost::get(place); } #ifdef PADDLE_WITH_CUDA @@ -159,5 +159,26 @@ void Vector::CopyFromCUDA() { #endif } +template +void Vector::CopyToPeer(platform::Place place) { +#ifdef PADDLE_WITH_CUDA + if (boost::get(place) != place_) { + place_ = boost::get(place); + } + if (cuda_size_ < this->size() || cuda_ptr_ == nullptr) { + cuda_ptr_.reset( + memory::Alloc(place_, this->size() * sizeof(T)), + memory::PlainDeleter(place_)); + } + cuda_size_ = this->size(); + platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); + auto *ctx = pool.GetByPlace(place_); + memory::Copy(place_, cuda_ptr_.get(), platform::CPUPlace(), + static_cast(this->data()), + this->size() * sizeof(T), ctx->stream()); + ctx->Wait(); +#endif +} + } // namespace framework } // namespace paddle diff --git a/paddle/operators/parallel_do_op.cc b/paddle/operators/parallel_do_op.cc index 0db2fb6238a..eb6308d306a 100644 --- a/paddle/operators/parallel_do_op.cc +++ b/paddle/operators/parallel_do_op.cc @@ -79,7 +79,7 @@ inline void CopyOrShare(const framework::Variable &src, dst->GetMutable()->set_lod(src.Get().lod()); } else { Copy(src.Get(), dst_place, dst->GetMutable()); - LoD lod(src.Get().lod()); + framework::LoD lod(src.Get().lod()); lod.CopyToPeer(dst_place); dst->GetMutable()->set_lod(lod); } @@ -92,7 +92,7 @@ inline void CopyOrShare(const framework::Variable &src, dst_sr->set_rows(src_sr.rows()); } else { Copy(src_sr.value(), dst_place, dst_sr->mutable_value()); - LoD lod(src.Get().lod()); + framework::Vector lod(src_sr.rows()); lod.CopyToPeer(dst_place); dst_sr->set_rows(lod); } -- GitLab From 3aae78159b6b9cd12f2a60b071c7e86abf45e7ee Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Tue, 6 Feb 2018 16:36:31 +0800 Subject: [PATCH 058/138] Change the dims of empty result to [1, 1] --- paddle/operators/ctc_align_op.cu | 2 +- paddle/operators/ctc_align_op.h | 2 +- python/paddle/v2/fluid/layers/nn.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/paddle/operators/ctc_align_op.cu b/paddle/operators/ctc_align_op.cu index 918df83effb..cea595d7c5d 100644 --- a/paddle/operators/ctc_align_op.cu +++ b/paddle/operators/ctc_align_op.cu @@ -82,7 +82,7 @@ class CTCAlignOpCUDAKernel : public framework::OpKernel { output->Resize({static_cast(host_out_lod0.back()), 1}); if (host_out_lod0.back() == 0) { - output->Resize({1}); + output->Resize({1, 1}); output->mutable_data(ctx.GetPlace()); math::SetConstant set_constant; set_constant(ctx.template device_context(), diff --git a/paddle/operators/ctc_align_op.h b/paddle/operators/ctc_align_op.h index 7a063870f3c..54ad1d6f5cc 100644 --- a/paddle/operators/ctc_align_op.h +++ b/paddle/operators/ctc_align_op.h @@ -71,7 +71,7 @@ class CTCAlignKernel : public framework::OpKernel { output->Resize({static_cast(output_lod0.back()), 1}); // for empty sequence if (output_lod0.back() == 0) { - output->Resize({1}); + output->Resize({1, 1}); output_data = output->mutable_data(ctx.GetPlace()); output_data[0] = -1; } diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index 2209625344e..0b3b56bc22a 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -2526,7 +2526,7 @@ def ctc_greedy_decoder(input, blank, name=None): Returns: Variable: CTC greedy decode result. If all the sequences in result were - empty, the result LoDTensor will be [-1] with LoD [[0]] and dims [1]. + empty, the result LoDTensor will be [-1] with LoD [[0]] and dims [1, 1]. Examples: .. code-block:: python -- GitLab From 78949c073e534f798573e94488aa27a79ce5a063 Mon Sep 17 00:00:00 2001 From: Siddharth Goyal Date: Tue, 6 Feb 2018 01:25:49 -0800 Subject: [PATCH 059/138] Inference example for image_classification and unit_test for "inference" (#8020) * First basic implementation * Add infer example for image_classification * Address review comments: round 1 --- paddle/inference/tests/book/CMakeLists.txt | 12 ++ .../test_inference_image_classification.cc | 113 ++++++++++++++++++ .../book/test_image_classification_train.py | 91 +++++++++++--- 3 files changed, 197 insertions(+), 19 deletions(-) create mode 100644 paddle/inference/tests/book/test_inference_image_classification.cc diff --git a/paddle/inference/tests/book/CMakeLists.txt b/paddle/inference/tests/book/CMakeLists.txt index 0e987eb0240..4c71517dc98 100644 --- a/paddle/inference/tests/book/CMakeLists.txt +++ b/paddle/inference/tests/book/CMakeLists.txt @@ -3,5 +3,17 @@ cc_test(test_inference_recognize_digits_mlp SRCS test_inference_recognize_digits.cc DEPS ARCHIVE_START paddle_fluid ARCHIVE_END ARGS --dirname=${PYTHON_TESTS_DIR}/book/recognize_digits_mlp.inference.model) +cc_test(test_inference_image_classification_vgg + SRCS test_inference_image_classification.cc + DEPS ARCHIVE_START paddle_fluid ARCHIVE_END + ARGS --dirname=${PYTHON_TESTS_DIR}/book/image_classification_vgg.inference.model) +cc_test(test_inference_image_classification_resnet + SRCS test_inference_image_classification.cc + DEPS ARCHIVE_START paddle_fluid ARCHIVE_END + ARGS --dirname=${PYTHON_TESTS_DIR}/book/image_classification_resnet.inference.model) set_tests_properties(test_inference_recognize_digits_mlp PROPERTIES DEPENDS test_recognize_digits) +set_tests_properties(test_inference_image_classification_vgg + PROPERTIES DEPENDS test_image_classification_train) +set_tests_properties(test_inference_image_classification_resnet + PROPERTIES DEPENDS test_image_classification_train) diff --git a/paddle/inference/tests/book/test_inference_image_classification.cc b/paddle/inference/tests/book/test_inference_image_classification.cc new file mode 100644 index 00000000000..e01f5b312a0 --- /dev/null +++ b/paddle/inference/tests/book/test_inference_image_classification.cc @@ -0,0 +1,113 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include +#include +#include "gflags/gflags.h" +#include "paddle/framework/lod_tensor.h" +#include "paddle/inference/io.h" + +DEFINE_string(dirname, "", "Directory of the inference model."); + +template +void TestInference(const std::string& dirname, + const std::vector& cpu_feeds, + std::vector& cpu_fetchs) { + // 1. Define place, executor and scope + auto place = Place(); + auto executor = paddle::framework::Executor(place); + auto* scope = new paddle::framework::Scope(); + + // 2. Initialize the inference_program and load all parameters from file + auto inference_program = paddle::inference::Load(executor, *scope, dirname); + + // 3. Get the feed_target_names and fetch_target_names + const std::vector& feed_target_names = + inference_program->GetFeedTargetNames(); + const std::vector& fetch_target_names = + inference_program->GetFetchTargetNames(); + + // 4. Prepare inputs: set up maps for feed targets + std::map feed_targets; + for (size_t i = 0; i < feed_target_names.size(); ++i) { + // Please make sure that cpu_feeds[i] is right for feed_target_names[i] + feed_targets[feed_target_names[i]] = cpu_feeds[i]; + } + + // 5. Define Tensor to get the outputs: set up maps for fetch targets + std::map fetch_targets; + for (size_t i = 0; i < fetch_target_names.size(); ++i) { + fetch_targets[fetch_target_names[i]] = cpu_fetchs[i]; + } + + // 6. Run the inference program + executor.Run(*inference_program, scope, feed_targets, fetch_targets); + + delete scope; +} + +TEST(inference, image_classification) { + if (FLAGS_dirname.empty()) { + LOG(FATAL) << "Usage: ./example --dirname=path/to/your/model"; + } + + LOG(INFO) << "FLAGS_dirname: " << FLAGS_dirname << std::endl; + std::string dirname = FLAGS_dirname; + + // 0. Call `paddle::framework::InitDevices()` initialize all the devices + // In unittests, this is done in paddle/testing/paddle_gtest_main.cc + + paddle::framework::LoDTensor input; + srand(time(0)); + float* input_ptr = + input.mutable_data({1, 3, 32, 32}, paddle::platform::CPUPlace()); + for (int i = 0; i < 3072; ++i) { + input_ptr[i] = rand() / (static_cast(RAND_MAX)); + } + std::vector cpu_feeds; + cpu_feeds.push_back(&input); + + paddle::framework::LoDTensor output1; + std::vector cpu_fetchs1; + cpu_fetchs1.push_back(&output1); + + // Run inference on CPU + TestInference( + dirname, cpu_feeds, cpu_fetchs1); + LOG(INFO) << output1.dims(); + +#ifdef PADDLE_WITH_CUDA + paddle::framework::LoDTensor output2; + std::vector cpu_fetchs2; + cpu_fetchs2.push_back(&output2); + + // Run inference on CUDA GPU + TestInference( + dirname, cpu_feeds, cpu_fetchs2); + LOG(INFO) << output2.dims(); + + EXPECT_EQ(output1.dims(), output2.dims()); + EXPECT_EQ(output1.numel(), output2.numel()); + + float err = 1E-3; + int count = 0; + for (int64_t i = 0; i < output1.numel(); ++i) { + if (fabs(output1.data()[i] - output2.data()[i]) > err) { + count++; + } + } + EXPECT_EQ(count, 0) << "There are " << count << " different elements."; +#endif +} diff --git a/python/paddle/v2/fluid/tests/book/test_image_classification_train.py b/python/paddle/v2/fluid/tests/book/test_image_classification_train.py index a4168d16db0..03b009ebb07 100644 --- a/python/paddle/v2/fluid/tests/book/test_image_classification_train.py +++ b/python/paddle/v2/fluid/tests/book/test_image_classification_train.py @@ -16,8 +16,9 @@ from __future__ import print_function import paddle.v2 as paddle import paddle.v2.fluid as fluid -import unittest import contextlib +import numpy +import unittest def resnet_cifar10(input, depth=32): @@ -89,10 +90,7 @@ def vgg16_bn_drop(input): return fc2 -def main(net_type, use_cuda): - if use_cuda and not fluid.core.is_compiled_with_cuda(): - return - +def train(net_type, use_cuda, save_dirname): classdim = 10 data_shape = [3, 32, 32] @@ -111,12 +109,14 @@ def main(net_type, use_cuda): predict = fluid.layers.fc(input=net, size=classdim, act='softmax') cost = fluid.layers.cross_entropy(input=predict, label=label) avg_cost = fluid.layers.mean(x=cost) + acc = fluid.layers.accuracy(input=predict, label=label) + + # Test program + test_program = fluid.default_main_program().clone() optimizer = fluid.optimizer.Adam(learning_rate=0.001) optimizer.minimize(avg_cost) - accuracy = fluid.evaluator.Accuracy(input=predict, label=label) - BATCH_SIZE = 128 PASS_NUM = 1 @@ -125,6 +125,9 @@ def main(net_type, use_cuda): paddle.dataset.cifar.train10(), buf_size=128 * 10), batch_size=BATCH_SIZE) + test_reader = paddle.batch( + paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE) + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) feeder = fluid.DataFeeder(place=place, feed_list=[images, label]) @@ -132,18 +135,68 @@ def main(net_type, use_cuda): loss = 0.0 for pass_id in range(PASS_NUM): - accuracy.reset(exe) - for data in train_reader(): - loss, acc = exe.run(fluid.default_main_program(), - feed=feeder.feed(data), - fetch_list=[avg_cost] + accuracy.metrics) - pass_acc = accuracy.eval(exe) - print("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" + str( - pass_acc)) - return - - raise AssertionError( - "Image classification loss is too large, {0:2.2}".format(loss)) + for batch_id, data in enumerate(train_reader()): + exe.run(feed=feeder.feed(data)) + + if (batch_id % 10) == 0: + acc_list = [] + avg_loss_list = [] + for tid, test_data in enumerate(test_reader()): + loss_t, acc_t = exe.run(program=test_program, + feed=feeder.feed(test_data), + fetch_list=[avg_cost, acc]) + acc_list.append(float(acc_t)) + avg_loss_list.append(float(loss_t)) + break # Use 1 segment for speeding up CI + + acc_value = numpy.array(acc_list).mean() + avg_loss_value = numpy.array(avg_loss_list).mean() + + print( + 'PassID {0:1}, BatchID {1:04}, Test Loss {2:2.2}, Acc {3:2.2}'. + format(pass_id, batch_id + 1, + float(avg_loss_value), float(acc_value))) + + if acc_value > 0.01: # Low threshold for speeding up CI + fluid.io.save_inference_model(save_dirname, ["pixel"], + [predict], exe) + return + + +def infer(use_cuda, save_dirname=None): + if save_dirname is None: + return + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + + # The input's dimension of conv should be 4-D or 5-D. + tensor_img = numpy.random.rand(1, 3, 32, 32).astype("float32") + + # Construct feed as a dictionary of {feed_target_name: feed_target_data} + # and results will contain a list of data corresponding to fetch_targets. + results = exe.run(inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) + print("infer results: ", results[0]) + + +def main(net_type, use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + + # Directory for saving the trained model + save_dirname = "image_classification_" + net_type + ".inference.model" + + train(net_type, use_cuda, save_dirname) + infer(use_cuda, save_dirname) class TestImageClassification(unittest.TestCase): -- GitLab From 4793e86b9247d1c5a7d1b1534a7d7d971a73fd79 Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Tue, 6 Feb 2018 19:38:22 +0800 Subject: [PATCH 060/138] Add target_assign_op for SSD detection. --- paddle/framework/mixed_vector.h | 8 + paddle/operators/target_assign_op.cc | 172 ++++++++++++++++++ paddle/operators/target_assign_op.cu | 61 +++++++ paddle/operators/target_assign_op.h | 155 ++++++++++++++++ paddle/platform/assert.h | 26 +-- .../v2/fluid/tests/test_target_assign_op.py | 126 +++++++++++++ 6 files changed, 535 insertions(+), 13 deletions(-) create mode 100644 paddle/operators/target_assign_op.cc create mode 100644 paddle/operators/target_assign_op.cu create mode 100644 paddle/operators/target_assign_op.h create mode 100755 python/paddle/v2/fluid/tests/test_target_assign_op.py diff --git a/paddle/framework/mixed_vector.h b/paddle/framework/mixed_vector.h index 85caac8dcd9..422fbbac488 100644 --- a/paddle/framework/mixed_vector.h +++ b/paddle/framework/mixed_vector.h @@ -60,6 +60,14 @@ class Vector : public std::vector { T *data() { return std::vector::data(); } const T *data() const { return std::vector::data(); } + T *data(const platform::Place &place) { + if (platform::is_cpu_place(place)) { + return data(); + } else { + return cuda_data(); + } + } + /* Synchronize host vector to device vector */ void CopyToCUDA(); /* Synchronize device vector to host vector */ diff --git a/paddle/operators/target_assign_op.cc b/paddle/operators/target_assign_op.cc new file mode 100644 index 00000000000..9c7d625136b --- /dev/null +++ b/paddle/operators/target_assign_op.cc @@ -0,0 +1,172 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/target_assign_op.h" + +namespace paddle { +namespace operators { + +class TargetAssignOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + // checkout inputs + PADDLE_ENFORCE(ctx->HasInput("EncodedGTBBox"), + "Input(EncodedGTBBox) of TargetAssignOp should not be null"); + PADDLE_ENFORCE(ctx->HasInput("GTScoreLabel"), + "Input(GTScoreLabel) of TargetAssignOp should not be null"); + PADDLE_ENFORCE(ctx->HasInput("MatchIndices"), + "Input(MatchIndices) of TargetAssignOp should not be null"); + PADDLE_ENFORCE(ctx->HasInput("NegIndices"), + "Input(NegIndices) of TargetAssignOp should not be null"); + + // checkout outputs + PADDLE_ENFORCE( + ctx->HasOutput("PredBBoxLabel"), + "Output(PredBBoxLabel) of TargetAssignOp should not be null."); + PADDLE_ENFORCE( + ctx->HasOutput("PredBBoxWeight"), + "Output(PredBBoxWeight) of TargetAssignOp should not be null."); + PADDLE_ENFORCE( + ctx->HasOutput("PredScoreLabel"), + "Output(PredScoreLabel) of TargetAssignOp should not be null."); + PADDLE_ENFORCE( + ctx->HasOutput("PredScoreWeight"), + "Output(PredScoreWeight) of TargetAssignOp should not be null."); + + auto blabel_dims = ctx->GetInputDim("EncodedGTBBox"); + auto slabel_dims = ctx->GetInputDim("GTScoreLabel"); + auto mi_dims = ctx->GetInputDim("MatchIndices"); + auto neg_dims = ctx->GetInputDim("NegIndices"); + + PADDLE_ENFORCE_EQ(blabel_dims.size(), 3UL, + "The rank of Input(EncodedGTBBox) must be 3."); + PADDLE_ENFORCE_EQ(slabel_dims.size(), 2UL, + "The rank of Input(GTScoreLabel) must be 2."); + PADDLE_ENFORCE_EQ(mi_dims.size(), 2UL, + "The rank of Input(MatchIndices) must be 2."); + PADDLE_ENFORCE_EQ(neg_dims.size(), 2UL, + "The rank of Input(NegIndices) must be 2."); + + PADDLE_ENFORCE_EQ(blabel_dims[0], slabel_dims[0], + "The 1st dimension of Input(EncodedGTBBox) and " + "Input(GTScoreLabel) must be the same."); + PADDLE_ENFORCE_EQ(blabel_dims[1], mi_dims[1], + "The 2nd dimension of Input(EncodedGTBBox) and " + "Input(MatchIndices) must be the same."); + PADDLE_ENFORCE_EQ(blabel_dims[2], 4, + "The 3rd dimension of Input(EncodedGTBBox) must be 4."); + + auto n = mi_dims[0]; + auto np = mi_dims[1]; + ctx->SetOutputDim("PredBBoxLabel", {n, np, 4}); + ctx->SetOutputDim("PredBBoxWeight", {n, np, 1}); + ctx->SetOutputDim("PredScoreLabel", {n, np, 1}); + ctx->SetOutputDim("PredScoreWeight", {n, np, 1}); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType( + framework::ToDataType( + ctx.Input("EncodedGTBBox")->type()), + ctx.device_context()); + } +}; + +class TargetAssignOpMaker : public framework::OpProtoAndCheckerMaker { + public: + TargetAssignOpMaker(OpProto* proto, OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("EncodedGTBBox", + "(LoDTensor), The encoded ground-truth bounding boxes with shape " + "[Ng, Np, 4], where Ng is the total number of ground-truth boxes " + "in this mini-batch, Np the number of predictions, 4 is the " + "number of coordinate in [xmin, ymin, xmax, ymax] layout."); + AddInput("GTScoreLabel", + "(LoDTensor, default LoDTensor), The input ground-truth " + "labels with shape [Ng, 1], where the Ng is the same as it in " + "the input of EncodedGTBBox."); + AddInput("MatchIndices", + "(Tensor, default LoDTensor), The input matched indices " + "with shape [N, Np], where N is the batch size, Np is the same " + "as it in the input of EncodedGTBBox. If MatchIndices[i][j] " + "is -1, the j-th prior box is not matched to any ground-truh " + "box in i-th instance."); + AddInput("NegIndices", + "(LoDTensor, default LoDTensor), The input negative example " + "indics with shape [Neg, 1], where is the total number of " + "negative example indices."); + AddAttr("background_label", + "(int, default 0), Label id for background class.") + .SetDefault(0); + AddOutput("PredBBoxLabel", + "(Tensor), The output encoded ground-truth labels " + "with shape [N, Np, 4], N is the batch size and Np, 4 is the " + "same as they in input of EncodedGTBBox. If MatchIndices[i][j] " + "is -1, the PredBBoxLabel[i][j][:] is the encoded ground-truth " + "box for background_label_id in i-th instance."); + AddOutput("PredBBoxWeight", + "(Tensor), The weight for PredBBoxLabel with the shape " + "of [N, Np, 1]"); + AddOutput("PredScoreLabel", + "(Tensor, default Tensor), The output score labels for " + "each predictions with shape [N, Np, 1]. If MatchIndices[i][j] " + "is -1, PredScoreLabel[i][j] = background_label_id."); + AddOutput("PredScoreWeight", + "(Tensor), The weight for PredScoreLabel with the shape " + "of [N, Np, 1]"); + AddComment(R"DOC( +This operator is, for given the encoded boxes between prior boxes and +ground-truth boxes and ground-truth class labels, to assign classification +and regression targets to each prior box as well as weights to each +prior box. The weights is used to specify which prior box would not contribute +to training loss. + +TODO(dang qingqing) add an example. + + )DOC"); + } +}; + +template +struct UpdateTargetLabelFunctor { + void operator()(const platform::CPUDeviceContext& ctx, const int* neg_indices, + const size_t* lod, const int num, const int num_prior_box, + const int background_label, int* out_label, T* out_label_wt) { + for (int i = 0; i < num; ++i) { + for (int j = lod[i]; j < lod[i + 1]; ++j) { + int id = neg_indices[j]; + out_label[i * num_prior_box + id] = background_label; + out_label_wt[i * num_prior_box + id] = static_cast(1.0); + } + } + } +}; + +template struct UpdateTargetLabelFunctor; +template struct UpdateTargetLabelFunctor; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_WITHOUT_GRADIENT(target_assign, ops::TargetAssignOp, + ops::TargetAssignOpMaker); +REGISTER_OP_CPU_KERNEL( + target_assign, + ops::TargetAssignKernel, + ops::TargetAssignKernel); diff --git a/paddle/operators/target_assign_op.cu b/paddle/operators/target_assign_op.cu new file mode 100644 index 00000000000..c04de86ec58 --- /dev/null +++ b/paddle/operators/target_assign_op.cu @@ -0,0 +1,61 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/target_assign_op.h" + +namespace paddle { +namespace operators { + +template +__global__ void UpdateTargetLabelKernel(const int* neg_indices, + const size_t* lod, const int num, + const int num_prior_box, + const int background_label, + int* out_label, T* out_label_wt) { + int bidx = blockIdx.x; + int st = lod[bidx]; + int ed = lod[bidx + 1]; + + for (int i = st + threadIdx.x; i < ed; i += blockDim.x) { + int id = neg_indices[i]; + out_label[bidx * num_prior_box + id] = background_label; + out_label_wt[bidx * num_prior_box + id] = 1.; + } +} + +template +struct UpdateTargetLabelFunctor { + void operator()(const platform::CUDADeviceContext& ctx, + const int* neg_indices, const size_t* lod, const int num, + const int num_prior_box, const int background_label, + int* out_label, T* out_label_wt) { + const int block_size = 256; + const int grid_size = num; + UpdateTargetLabelKernel<<>>( + neg_indices, lod, num, num_prior_box, background_label, out_label, + out_label_wt); + } +}; + +template struct UpdateTargetLabelFunctor; +template struct UpdateTargetLabelFunctor; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL( + target_assign, + ops::TargetAssignKernel, + ops::TargetAssignKernel); diff --git a/paddle/operators/target_assign_op.h b/paddle/operators/target_assign_op.h new file mode 100644 index 00000000000..267bdbf1eff --- /dev/null +++ b/paddle/operators/target_assign_op.h @@ -0,0 +1,155 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/framework/op_registry.h" +#include "paddle/platform/assert.h" +#include "paddle/platform/for_range.h" + +namespace paddle { +namespace operators { + +template +struct TargetAssignFunctor { + const T* gt_box_; + const int* gt_label_; + const int* match_indices_; + const size_t* lod_; + const int background_label_; + const int64_t num_; + const int64_t num_prior_box_; + + T* out_box_; + T* out_box_wt_; + int* out_label_; + T* out_label_wt_; + + TargetAssignFunctor(const T* gt_box, const int* gt_label, + const int* match_indices, const size_t* lod, + const int background_label, const int64_t num, + const int64_t np, T* out_box, T* out_box_wt, + int* out_label, T* out_label_wt) + : gt_box_(gt_box), + gt_label_(gt_label), + match_indices_(match_indices), + lod_(lod), + background_label_(background_label), + num_(num), + num_prior_box_(np), + out_box_(out_box), + out_box_wt_(out_box_wt), + out_label_(out_label), + out_label_wt_(out_label_wt) {} + + HOSTDEVICE void operator()(size_t i) const { + int row = i / num_prior_box_; + int col = i - row * num_prior_box_; + + size_t off = lod_[row]; + + int id = match_indices_[row * num_prior_box_ + col]; + T* obox = out_box_ + (row * num_prior_box_ + col) * 4; + int* olabel = out_label_ + row * num_prior_box_ + col; + T* obox_wt = out_box_wt_ + row * num_prior_box_ + col; + T* olabel_wt = out_label_wt_ + row * num_prior_box_ + col; + + if (id > -1) { + const T* gtbox = gt_box_ + ((off + id) * num_prior_box_ + col) * 4; + + obox[0] = gtbox[0]; + obox[1] = gtbox[1]; + obox[2] = gtbox[2]; + obox[3] = gtbox[3]; + + olabel[0] = gt_label_[off + id]; + obox_wt[0] = 1.; + olabel_wt[0] = 1.; + } else { + obox[0] = 0.; + obox[1] = 0.; + obox[2] = 0.; + obox[3] = 0.; + + olabel[0] = background_label_; + obox_wt[0] = 0.; + olabel_wt[0] = 0.; + } + } +}; + +template +struct UpdateTargetLabelFunctor { + void operator()(const platform::DeviceContext& ctx, const int* neg_indices, + const size_t* lod, const int num, const int num_prior_box, + const int background_label, int* out_label, + T* out_label_wt) const; +}; + +template +class TargetAssignKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* enc_gt_box = ctx.Input("EncodedGTBBox"); + auto* gt_label = ctx.Input("GTScoreLabel"); + auto* match_indices = ctx.Input("MatchIndices"); + auto* neg_indices = ctx.Input("NegIndices"); + + auto* out_box = ctx.Output("PredBBoxLabel"); + auto* out_box_wt = ctx.Output("PredBBoxWeight"); + auto* out_label = ctx.Output("PredScoreLabel"); + auto* out_label_wt = ctx.Output("PredScoreWeight"); + + PADDLE_ENFORCE_EQ(enc_gt_box->lod().size(), 1UL); + PADDLE_ENFORCE_EQ(gt_label->lod().size(), 1UL); + PADDLE_ENFORCE_EQ(neg_indices->lod().size(), 1UL); + + int background_label = ctx.Attr("background_label"); + + const T* box_data = enc_gt_box->data(); + const int* label_data = gt_label->data(); + const int* match_idx_data = match_indices->data(); + const int* neg_idx_data = neg_indices->data(); + + T* obox_data = out_box->mutable_data(ctx.GetPlace()); + T* obox_wt_data = out_box_wt->mutable_data(ctx.GetPlace()); + int* olabel_data = out_label->mutable_data(ctx.GetPlace()); + T* olabel_wt_data = out_label_wt->mutable_data(ctx.GetPlace()); + + int64_t num = match_indices->dims()[0]; + int64_t num_prior_box = match_indices->dims()[1]; + + auto gt_lod = enc_gt_box->lod().back(); + auto neg_lod = neg_indices->lod().back(); + + size_t* gt_lod_data = gt_lod.data(ctx.GetPlace()); + size_t* neg_lod_data = neg_lod.data(ctx.GetPlace()); + + TargetAssignFunctor functor(box_data, label_data, match_idx_data, + gt_lod_data, background_label, num, + num_prior_box, obox_data, obox_wt_data, + olabel_data, olabel_wt_data); + + auto& device_ctx = ctx.template device_context(); + platform::ForRange for_range(device_ctx, + num * num_prior_box); + for_range(functor); + + UpdateTargetLabelFunctor update_functor; + update_functor(device_ctx, neg_idx_data, neg_lod_data, num, num_prior_box, + background_label, olabel_data, olabel_wt_data); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/platform/assert.h b/paddle/platform/assert.h index d813b9529ba..1f5a8f6a195 100644 --- a/paddle/platform/assert.h +++ b/paddle/platform/assert.h @@ -1,16 +1,16 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ #pragma once diff --git a/python/paddle/v2/fluid/tests/test_target_assign_op.py b/python/paddle/v2/fluid/tests/test_target_assign_op.py new file mode 100755 index 00000000000..49edff5c7fd --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_target_assign_op.py @@ -0,0 +1,126 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +import math +import sys +import random +from op_test import OpTest + + +def gen_match_and_neg_indices(num_prior, gt_lod, neg_lod): + if len(gt_lod) != len(neg_lod): + raise AssertionError("The input arguments are illegal.") + + batch_size = len(gt_lod) - 1 + + match_indices = -1 * np.ones((batch_size, num_prior)).astype('int32') + neg_indices = np.zeros((neg_lod[-1], 1)).astype('int32') + + for n in range(batch_size): + gt_num = gt_lod[n + 1] - gt_lod[n] + ids = random.sample([i for i in range(num_prior)], gt_num) + match_indices[n, ids] = [i for i in range(gt_num)] + + ret_ids = set([i for i in range(num_prior)]) - set(ids) + s = neg_lod[n] + e = neg_lod[n + 1] + l = e - s + neg_ids = random.sample(ret_ids, l) + neg_indices[s:e, :] = np.array(neg_ids).astype('int32').reshape(l, 1) + + return match_indices, neg_indices + + +def target_assign(encoded_box, gt_label, match_indices, neg_indices, gt_lod, + neg_lod, background_label): + batch_size, num_prior = match_indices.shape + + # init target bbox + trg_box = np.zeros((batch_size, num_prior, 4)).astype('float32') + # init weight for target bbox + trg_box_wt = np.zeros((batch_size, num_prior, 1)).astype('float32') + # init target label + trg_label = np.ones((batch_size, num_prior, 1)).astype('int32') + trg_label = trg_label * background_label + # init weight for target label + trg_label_wt = np.zeros((batch_size, num_prior, 1)).astype('float32') + + for i in range(batch_size): + cur_indices = match_indices[i] + col_ids = np.where(cur_indices > -1) + col_val = cur_indices[col_ids] + + gt_start = gt_lod[i] + # target bbox + for v, c in zip(col_val + gt_start, col_ids[0].tolist()): + trg_box[i][c][:] = encoded_box[v][c][:] + + # weight for target bbox + trg_box_wt[i][col_ids] = 1.0 + + trg_label[i][col_ids] = gt_label[col_val + gt_start] + + trg_label_wt[i][col_ids] = 1.0 + # set target label weight to 1.0 for the negative samples + neg_ids = neg_indices[neg_lod[i]:neg_lod[i + 1]] + trg_label_wt[i][neg_ids] = 1.0 + + return trg_box, trg_box_wt, trg_label, trg_label_wt + + +class TestTargetAssginOp(OpTest): + def setUp(self): + self.op_type = "target_assign" + + num_prior = 120 + num_class = 21 + gt_lod = [0, 5, 11, 23] + neg_lod = [0, 4, 7, 13] + #gt_lod = [0, 2, 5] + #neg_lod = [0, 2, 4] + batch_size = len(gt_lod) - 1 + num_gt = gt_lod[-1] + background_label = 0 + + encoded_box = np.random.random((num_gt, num_prior, 4)).astype('float32') + gt_label = np.random.randint( + num_class, size=(num_gt, 1)).astype('int32') + match_indices, neg_indices = gen_match_and_neg_indices(num_prior, + gt_lod, neg_lod) + trg_box, trg_box_wt, trg_label, trg_label_wt = target_assign( + encoded_box, gt_label, match_indices, neg_indices, gt_lod, neg_lod, + background_label) + + self.inputs = { + 'EncodedGTBBox': (encoded_box, [gt_lod]), + 'GTScoreLabel': (gt_label, [gt_lod]), + 'MatchIndices': (match_indices), + 'NegIndices': (neg_indices, [neg_lod]), + } + self.attrs = {'background_label': background_label} + self.outputs = { + 'PredBBoxLabel': (trg_box), + 'PredBBoxWeight': (trg_box_wt), + 'PredScoreLabel': (trg_label), + 'PredScoreWeight': (trg_label_wt), + } + + def test_check_output(self): + self.check_output() + + +if __name__ == '__main__': + unittest.main() -- GitLab From de7fa8bc197a88f325c3d7ee2e4a4f5d66d44fc0 Mon Sep 17 00:00:00 2001 From: chengduo Date: Tue, 6 Feb 2018 21:36:32 +0800 Subject: [PATCH 061/138] refine CSP doc (#8182) --- doc/design/csp.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/design/csp.md b/doc/design/csp.md index ae2e3e1b998..10d936860fa 100644 --- a/doc/design/csp.md +++ b/doc/design/csp.md @@ -144,8 +144,9 @@ ch = fluid.make_channel(dtype=INT, buffer_size) # Now write three elements to the channel with fluid.while(steps=buffer_size): fluid.send(ch, step) - fluid.close_channel(ch) - + +fluid.close_channel(ch) + with fluid.while(steps=buffer_size): fluid.print(fluid.recv(ch)) ``` -- GitLab From 6024a170f321e3ed572260e68cad39820f15ff67 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Tue, 6 Feb 2018 05:36:50 -0800 Subject: [PATCH 062/138] Receive from closed channel (#8175) * Add test case to return zero on a closed channel * Rename method * Fix test * ReceiveFromBufferedChannelReturnResidualValuesTest * Adding the variable and case for unbuffered channel * Fix review comments * Fix format * Remove a zero-value comparison --- paddle/framework/channel_test.cc | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/paddle/framework/channel_test.cc b/paddle/framework/channel_test.cc index 444d68498c9..6416c04f36e 100644 --- a/paddle/framework/channel_test.cc +++ b/paddle/framework/channel_test.cc @@ -60,6 +60,38 @@ TEST(Channel, SufficientBufferSizeDoesntBlock) { delete ch; } +TEST(Channel, ReceiveFromBufferedChannelReturnResidualValuesTest) { + const size_t buffer_size = 10; + auto ch = MakeChannel(buffer_size); + + for (size_t i = 0; i < buffer_size; ++i) { + EXPECT_EQ(ch->Send(&i), true); // sending should not block + } + + size_t out; + for (size_t i = 0; i < buffer_size / 2; ++i) { + EXPECT_EQ(ch->Receive(&out), true); // receiving should not block + EXPECT_EQ(out, i); + } + + CloseChannel(ch); + + for (size_t i = buffer_size / 2; i < buffer_size; ++i) { + EXPECT_EQ(ch->Receive(&out), + true); // receving should return residual values. + EXPECT_EQ(out, i); + } + + for (size_t i = 0; i < buffer_size; ++i) { + EXPECT_EQ(ch->Receive(&out), + false); // after receiving residual values, return zeros. + // Note: we cannot check EXPECT_EQ(out, 0), because C++ doesn't + // define zero values like Go does. + } + + delete ch; +} + TEST(Channel, ConcurrentSendNonConcurrentReceiveWithSufficientBufferSize) { const size_t buffer_size = 10; auto ch = MakeChannel(buffer_size); -- GitLab From b90244921b30dea85ccdf552e9e7d7925636050c Mon Sep 17 00:00:00 2001 From: kavyasrinet Date: Tue, 6 Feb 2018 05:38:15 -0800 Subject: [PATCH 063/138] Fixing the gradient check writeup (#8057) --- doc/design/auto_gradient_check.md | 98 ++++++++++++++++--------------- 1 file changed, 51 insertions(+), 47 deletions(-) diff --git a/doc/design/auto_gradient_check.md b/doc/design/auto_gradient_check.md index f9991541bc5..773b7b6a767 100644 --- a/doc/design/auto_gradient_check.md +++ b/doc/design/auto_gradient_check.md @@ -1,23 +1,23 @@ -## Auto Gradient Checker Design +## Auto Gradient Check Design -## Backgraound: -- Generally, it is easy to check whether the forward computation of an Operator is correct or not. However, backpropagation is a notoriously difficult algorithm to debug and get right: - 1. you should get the right backpropagation formula according to the forward computation. - 2. you should implement it right in CPP. - 3. it's difficult to prepare test data. +## Background: +- Generally, it is easy to check whether the forward computation of an Operator is correct or not. However, backpropagation is a notoriously difficult algorithm to debug and get right because of the following challenges: + 1. The formula for backpropagation formula should be correct according to the forward computation. + 2. The Implementation of the above shoule be correct in CPP. + 3. It is difficult to prepare an unbiased test data. -- Auto gradient checking gets a numerical gradient by forward Operator and use it as a reference of the backward Operator's result. It has several advantages: - 1. numerical gradient checker only need forward operator. - 2. user only need to prepare the input data for forward Operator. +- Auto gradient checking gets a numerical gradient using forward Operator and uses it as a reference for the backward Operator's result. It has several advantages: + 1. Numerical gradient checker only needs the forward operator. + 2. The user only needs to prepare the input data for forward Operator and not worry about the backward Operator. ## Mathematical Theory -The following two document from Stanford has a detailed explanation of how to get numerical gradient and why it's useful. +The following documents from Stanford have a detailed explanation of how to compute the numerical gradient and why it is useful. - [Gradient checking and advanced optimization(en)](http://deeplearning.stanford.edu/wiki/index.php/Gradient_checking_and_advanced_optimization) - [Gradient checking and advanced optimization(cn)](http://ufldl.stanford.edu/wiki/index.php/%E6%A2%AF%E5%BA%A6%E6%A3%80%E9%AA%8C%E4%B8%8E%E9%AB%98%E7%BA%A7%E4%BC%98%E5%8C%96) -## Numeric Gradient Implementation +## Numerical Gradient Implementation ### Python Interface ```python def get_numerical_gradient(op, @@ -27,73 +27,76 @@ def get_numerical_gradient(op, delta=0.005, local_scope=None): """ - Get Numeric Gradient for an operator's input. + Get Numerical Gradient for the input of an operator. - :param op: C++ operator instance, could be an network + :param op: C++ operator instance, could be an network. :param input_values: The input variables. Should be an dictionary, whose key is - variable name, and value is numpy array. + variable name, and value is a numpy array. :param output_name: The final output variable name. - :param input_to_check: The input variable with respect to which to compute the gradient. - :param delta: The perturbation value for numeric gradient method. The - smaller delta is, the more accurate result will get. But if that delta is - too small, it will suffer from numerical stability problem. + :param input_to_check: The input variable with respect to which the gradient has to be computed. + :param delta: The perturbation value for numerical gradient method. The + smaller the delta, the more accurate the result. But if the delta is too + small, it will suffer from the numerical stability problem. :param local_scope: The local scope used for get_numeric_gradient. :return: The gradient array in numpy format. """ ``` -### Explaination: +### Explanation: -- Why need `output_name` - - An Operator may have multiple Output, one can get independent gradient from each Output. So caller should specify the name of the output variable. +- Why do we need an `output_name` + - An Operator may have multiple Outputs, one can compute an independent gradient from each Output. So the caller should specify the name of the output variable. -- Why need `input_to_check` - - One operator may have multiple inputs. Gradient Op can calculate the gradient of these inputs at the same time. But Numeric Gradient needs to calculate them one by one. So `get_numeric_gradient` is designed to calculate the gradient for one input. If you need to compute multiple inputs, you can call `get_numeric_gradient` multiple times. +- Why do we need `input_to_check` + - One operator can have multiple inputs. Gradient Op can calculate the gradient of these inputs at the same time. But Numerical Gradient needs to calculate them one by one. So `get_numeric_gradient` is designed to calculate the gradient for one input. If you need to compute multiple inputs, you can call `get_numeric_gradient` multiple times each with a different input. ### Core Algorithm Implementation ```python - # we only compute gradient of one element a time. + # we only compute the gradient of one element a time. # we use a for loop to compute the gradient of each element. for i in xrange(tensor_size): - # get one input element by its index i. - origin = tensor_to_check.get_float_element(i) + # get one input element using the index i. + original = tensor_to_check.get_float_element(i) - # add delta to it, run op and then get the new value of the result tensor. - x_pos = origin + delta + # add delta to it, run the forward op and then + # get the new value of the result tensor. + x_pos = original + delta tensor_to_check.set_float_element(i, x_pos) y_pos = get_output() - # plus delta to this element, run op and get the new value of the result tensor. - x_neg = origin - delta + # Subtract delta from this element, run the op again + # and get the new value of the result tensor. + x_neg = original - delta tensor_to_check.set_float_element(i, x_neg) y_neg = get_output() # restore old value - tensor_to_check.set_float_element(i, origin) + tensor_to_check.set_float_element(i, original) - # compute the gradient of this element and store it into a numpy array. + # compute the gradient of this element and store + # it into a numpy array. gradient_flat[i] = (y_pos - y_neg) / delta / 2 # reshape the gradient result to the shape of the source tensor. return gradient_flat.reshape(tensor_to_check.get_dims()) ``` -## Auto Graident Checker Framework +## Auto Gradient Check Framework Each Operator Kernel has three kinds of Gradient: 1. Numerical gradient 2. CPU kernel gradient -3. GPU kernel gradient (if supported) +3. GPU kernel gradient (if supported by the device) -The numerical gradient only relies on forward Operator. So we use the numerical gradient as the reference value. And the gradient checking is performed in the following three steps: +The numerical gradient only relies on the forward Operator, so we use the numerical gradient as the reference value. The gradient checking is performed in the following three steps: -1. calculate the numerical gradient -2. calculate CPU kernel gradient with the backward Operator and compare it with the numerical gradient -3. calculate GPU kernel gradient with the backward Operator and compare it with the numeric gradient (if supported) +1. Calculate the numerical gradient +2. Calculate CPU kernel gradient with the backward Operator and compare it with the numerical gradient. +3. Calculate GPU kernel gradient with the backward Operator and compare it with the numeric gradient. (if supported) #### Python Interface @@ -109,26 +112,27 @@ The numerical gradient only relies on forward Operator. So we use the numerical """ :param forward_op: used to create backward_op :param input_vars: numpy value of input variable. The following - computation will use these variables. - :param inputs_to_check: the input variable with respect to which to compute the gradient. + computation will use these variables. + :param inputs_to_check: the input variable with respect to which the + gradient will be computed. :param output_name: The final output variable name. :param max_relative_error: The relative tolerance parameter. - :param no_grad_set: used when create backward ops + :param no_grad_set: used to create backward ops :param only_cpu: only compute and check gradient on cpu kernel. :return: """ ``` -### How to check if two numpy array is close enough? -if `abs_numerical_grad` is nearly zero, then use abs error for numerical_grad +### How to check if two numpy arrays are close enough? +if `abs_numerical_grad` is nearly zero, then use absolute error for numerical_grad. ```python numerical_grad = ... operator_grad = numpy.array(scope.find_var(grad_var_name(name)).get_tensor()) abs_numerical_grad = numpy.abs(numerical_grad) -# if abs_numerical_grad is nearly zero, then use abs error for numeric_grad, not relative -# error. +# if abs_numerical_grad is nearly zero, then use abs error for +# numeric_grad, instead of relative error. abs_numerical_grad[abs_numerical_grad < 1e-3] = 1 diff_mat = numpy.abs(abs_numerical_grad - operator_grad) / abs_numerical_grad @@ -137,10 +141,10 @@ max_diff = numpy.max(diff_mat) #### Notes: -The Input data for auto gradient checker should be reasonable to avoid numerical stability problem. +The Input data for auto gradient checker should be reasonable to avoid numerical stability problem. -#### Refs: +#### References: - [Gradient checking and advanced optimization(en)](http://deeplearning.stanford.edu/wiki/index.php/Gradient_checking_and_advanced_optimization) - [Gradient checking and advanced optimization(cn)](http://ufldl.stanford.edu/wiki/index.php/%E6%A2%AF%E5%BA%A6%E6%A3%80%E9%AA%8C%E4%B8%8E%E9%AB%98%E7%BA%A7%E4%BC%98%E5%8C%96) -- GitLab From f28dc9a68d9db8f711410a60fc57030cfc68ad6e Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Tue, 6 Feb 2018 22:13:27 +0800 Subject: [PATCH 064/138] refine inference_lib.cmake --- cmake/inference_lib.cmake | 114 ++++++++++++++++++++++---------------- 1 file changed, 65 insertions(+), 49 deletions(-) diff --git a/cmake/inference_lib.cmake b/cmake/inference_lib.cmake index d71fbce3820..7d535543584 100644 --- a/cmake/inference_lib.cmake +++ b/cmake/inference_lib.cmake @@ -1,72 +1,88 @@ # make package for paddle fluid shared and static library +function(copy TARGET) + set(options "") + set(oneValueArgs "") + set(multiValueArgs SRCS DSTS DEPS) + cmake_parse_arguments(copy_lib "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + + list(LENGTH copy_lib_SRCS copy_lib_SRCS_len) + list(LENGTH copy_lib_DSTS copy_lib_DSTS_len) + if(NOT ${copy_lib_SRCS_len} EQUAL ${copy_lib_DSTS_len}) + message(FATAL_ERROR "${TARGET} source numbers are not equal to destination numbers") + endif() + math(EXPR len "${copy_lib_SRCS_len} - 1") + + add_custom_target(${TARGET} DEPENDS ${copy_lib_DEPS}) + foreach(index RANGE ${len}) + list(GET copy_lib_SRCS ${index} src) + list(GET copy_lib_DSTS ${index} dst) + add_custom_command(TARGET ${TARGET} PRE_BUILD COMMAND mkdir -p "${dst}") + if(IS_DIRECTORY ${src}) + add_custom_command(TARGET ${TARGET} PRE_BUILD COMMAND cp -r "${src}" "${dst}") + else() + add_custom_command(TARGET ${TARGET} PRE_BUILD COMMAND cp "${src}" "${dst}") + endif() + endforeach() +endfunction() + # third party -set(lib_dir "${CMAKE_INSTALL_PREFIX}/third_party/eigen3") -add_custom_target(eigen3_lib - COMMAND mkdir -p "${lib_dir}/Eigen" "${lib_dir}/unsupported" - COMMAND cp "${EIGEN_INCLUDE_DIR}/Eigen/Core" "${lib_dir}/Eigen" - COMMAND cp -r "${EIGEN_INCLUDE_DIR}/Eigen/src" "${lib_dir}/Eigen" - COMMAND cp -r "${EIGEN_INCLUDE_DIR}/unsupported/Eigen" "${lib_dir}/unsupported" +set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/eigen3") +copy(eigen3_lib + SRCS ${EIGEN_INCLUDE_DIR}/Eigen/Core ${EIGEN_INCLUDE_DIR}/Eigen/src ${EIGEN_INCLUDE_DIR}/unsupported/Eigen + DSTS ${dst_dir}/Eigen ${dst_dir}/Eigen ${dst_dir}/unsupported ) -set(lib_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/gflags") -add_custom_target(gflags_lib - COMMAND mkdir -p "${lib_dir}/lib" - COMMAND cp -r "${GFLAGS_INCLUDE_DIR}" "${lib_dir}" - COMMAND cp "${GFLAGS_LIBRARIES}" "${lib_dir}/lib" +set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/gflags") +copy(gflags_lib + SRCS ${GFLAGS_INCLUDE_DIR} ${GFLAGS_LIBRARIES} + DSTS ${dst_dir} ${dst_dir}/lib ) -set(lib_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/glog") -add_custom_target(glog_lib - COMMAND mkdir -p "${lib_dir}/lib" - COMMAND cp -r "${GLOG_INCLUDE_DIR}" "${lib_dir}" - COMMAND cp "${GLOG_LIBRARIES}" "${lib_dir}/lib" +set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/glog") +copy(glog_lib + SRCS ${GLOG_INCLUDE_DIR} ${GLOG_LIBRARIES} + DSTS ${dst_dir} ${dst_dir}/lib ) IF(NOT PROTOBUF_FOUND) - set(lib_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/protobuf") - add_custom_target(protobuf_lib - COMMAND mkdir -p "${lib_dir}/lib" - COMMAND cp -r "${PROTOBUF_INCLUDE_DIR}" "${lib_dir}" - COMMAND cp "${PROTOBUF_LITE_LIBRARY}" "${lib_dir}/lib" + set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/protobuf") + copy(protobuf_lib + SRCS ${PROTOBUF_INCLUDE_DIR} ${PROTOBUF_LITE_LIBRARY} + DSTS ${dst_dir} ${dst_dir}/lib ) ENDIF(NOT PROTOBUF_FOUND) # paddle fluid module -set(lib_dir "${CMAKE_INSTALL_PREFIX}/paddle/framework") -add_custom_target(framework_lib DEPENDS framework_py_proto - COMMAND mkdir -p "${lib_dir}/details" - COMMAND cp "${PADDLE_SOURCE_DIR}/paddle/framework/*.h" "${lib_dir}" - COMMAND cp "${PADDLE_SOURCE_DIR}/paddle/framework/details/*.h" "${lib_dir}/details" - COMMAND cp "${PADDLE_BINARY_DIR}/paddle/framework/framework.pb.h" "${lib_dir}" +set(src_dir "${PADDLE_SOURCE_DIR}/paddle") +set(dst_dir "${CMAKE_INSTALL_PREFIX}/paddle") +set(module "framework") +copy(framework_lib DEPS framework_py_proto + SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/details/*.h ${PADDLE_BINARY_DIR}/paddle/framework/framework.pb.h + DSTS ${dst_dir}/${module} ${dst_dir}/${module}/details ${dst_dir}/${module} ) -set(lib_dir "${CMAKE_INSTALL_PREFIX}/paddle/memory") -add_custom_target(memory_lib - COMMAND mkdir -p "${lib_dir}/detail" - COMMAND cp "${PADDLE_SOURCE_DIR}/paddle/memory/*.h" "${lib_dir}" - COMMAND cp "${PADDLE_SOURCE_DIR}/paddle/memory/detail/*.h" "${lib_dir}/detail" +set(module "memory") +copy(memory_lib + SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/detail/*.h + DSTS ${dst_dir}/${module} ${dst_dir}/${module}/detail ) -set(lib_dir "${CMAKE_INSTALL_PREFIX}/paddle/inference") -add_custom_target(inference_lib DEPENDS paddle_fluid_shared - COMMAND mkdir -p "${lib_dir}" - COMMAND cp "${PADDLE_SOURCE_DIR}/paddle/inference/*.h" "${lib_dir}" - COMMAND cp "${PADDLE_BINARY_DIR}/paddle/inference/libpaddle_fluid.so" "${lib_dir}" +set(module "inference") +copy(inference_lib DEPENDS paddle_fluid_shared + SRCS ${src_dir}/${module}/*.h ${PADDLE_BINARY_DIR}/paddle/inference/libpaddle_fluid.so + DSTS ${dst_dir}/${module} ${dst_dir}/${module} ) -set(lib_dir "${CMAKE_INSTALL_PREFIX}/paddle/platform") -add_custom_target(platform_lib - COMMAND mkdir -p "${lib_dir}/dynload" "${lib_dir}/details" - COMMAND cp "${PADDLE_SOURCE_DIR}/paddle/platform/*.h" "${lib_dir}" - COMMAND cp "${PADDLE_SOURCE_DIR}/paddle/platform/dynload/*.h" "${lib_dir}/dynload" - COMMAND cp "${PADDLE_SOURCE_DIR}/paddle/platform/details/*.h" "${lib_dir}/details" -) +set(module "platform") +copy(platform_lib + SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/dynload/*.h ${src_dir}/${module}/details/*.h + DSTS ${dst_dir}/${module} ${dst_dir}/${module}/dynload ${dst_dir}/${module}/details +) -set(lib_dir "${CMAKE_INSTALL_PREFIX}/paddle/string") -add_custom_target(string_lib - COMMAND mkdir -p "${lib_dir}/tinyformat" - COMMAND cp "${PADDLE_SOURCE_DIR}/paddle/string/*.h" "${lib_dir}" - COMMAND cp "${PADDLE_SOURCE_DIR}/paddle/string/tinyformat/*.h" "${lib_dir}/tinyformat" +set(module "string") +copy(string_lib + SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/tinyformat/*.h + DSTS ${dst_dir}/${module} ${dst_dir}/${module}/tinyformat ) add_custom_target(inference_lib_dist DEPENDS -- GitLab From 0bb9c80ef960d777c5937f8fed8ddf75f2ac6a18 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Tue, 6 Feb 2018 23:46:18 +0800 Subject: [PATCH 065/138] refine code and add unit tests --- paddle/framework/executor.cc | 7 +- paddle/framework/op_desc.cc | 17 ++++- paddle/framework/operator.cc | 17 ++++- paddle/framework/reader.cc | 16 ++--- paddle/framework/reader.h | 51 +++++++------ paddle/framework/shape_inference.cc | 10 +++ paddle/framework/shape_inference.h | 7 +- paddle/framework/var_desc.cc | 35 +++++---- paddle/framework/var_type.h | 8 ++- paddle/operators/create_reader_op.cc | 61 +++++++++++----- paddle/operators/read_op.cc | 28 ++++---- paddle/pybind/protobuf.cc | 2 - python/paddle/v2/fluid/executor.py | 3 +- .../paddle/v2/fluid/tests/test_cpp_reader.py | 71 +++++++++++++++++++ 14 files changed, 244 insertions(+), 89 deletions(-) create mode 100644 python/paddle/v2/fluid/tests/test_cpp_reader.py diff --git a/paddle/framework/executor.cc b/paddle/framework/executor.cc index 9a232b08434..2a88e5a9298 100644 --- a/paddle/framework/executor.cc +++ b/paddle/framework/executor.cc @@ -22,6 +22,7 @@ limitations under the License. */ #include "paddle/framework/lod_rank_table.h" #include "paddle/framework/lod_tensor_array.h" #include "paddle/framework/op_registry.h" +#include "paddle/framework/reader.h" #include "paddle/platform/place.h" #include "paddle/platform/profiler.h" @@ -52,11 +53,13 @@ static void CreateTensor(Variable* var, proto::VarDesc::VarType var_type) { var->GetMutable(); } else if (var_type == proto::VarDesc::PLACE_LIST) { var->GetMutable(); + } else if (var_type == proto::VarDesc::READER) { + var->GetMutable(); } else { PADDLE_THROW( "Variable type %d is not in " - "[LoDTensor, SelectedRows, FEED_MINIBATCH, FETCH_LIST, LOD_RANK_TABLE," - " PLACE_LIST]", + "[LOD_TENSOR, SELECTED_ROWS, FEED_MINIBATCH, FETCH_LIST, " + "LOD_RANK_TABLE, PLACE_LIST, READER]", var_type); } } diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index 772ec26895e..ea402875024 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -72,7 +72,10 @@ class CompileTimeInferShapeContext : public InferShapeContext { void SetDim(const std::string &name, const DDim &dim) override; - std::vector GetRepeatedDim(const std::string &name) const override; + std::vector GetRepeatedDims(const std::string &name) const override; + + void SetRepeatedDims(const std::string &name, + const std::vector &dims) override; const OpDesc &op_; const BlockDesc &block_; @@ -470,7 +473,7 @@ DDim CompileTimeInferShapeContext::GetDim(const std::string &name) const { return res; } -std::vector CompileTimeInferShapeContext::GetRepeatedDim( +std::vector CompileTimeInferShapeContext::GetRepeatedDims( const std::string &name) const { auto var = block_.FindVarRecursive(name); PADDLE_ENFORCE(var != nullptr, "Cannot find variable %s", name); @@ -491,6 +494,16 @@ void CompileTimeInferShapeContext::SetDim(const std::string &name, const DDim &dim) { block_.FindVarRecursive(name)->SetShape(vectorize(dim)); } + +void CompileTimeInferShapeContext::SetRepeatedDims( + const std::string &name, const std::vector &dims) { + auto var = block_.FindVarRecursive(name); + PADDLE_ENFORCE(var != nullptr, "Cannot find variable %s", name); + std::vector> dim_vec(dims.size()); + std::transform(dims.begin(), dims.end(), dim_vec.begin(), vectorize); + var->SetShapes(dim_vec); +} + bool CompileTimeInferShapeContext::IsRuntime() const { return false; } proto::VarDesc::VarType CompileTimeInferShapeContext::GetVarType( diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index 1aa111dc76d..52387aabd9d 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -428,13 +428,13 @@ class RuntimeInferShapeContext : public InferShapeContext { } } - std::vector GetRepeatedDim(const std::string& name) const override { + std::vector GetRepeatedDims(const std::string& name) const override { Variable* var = scope_.FindVar(name); if (var->IsType()) { return var->Get().shapes(); } else { PADDLE_THROW( - "Only ReaderHolder support 'GetRepeatedDim', but Variable %s's " + "Only ReaderHolder support 'GetRepeatedDims', but Variable %s's " "type_id is %s.", name, var->Type().name()); } @@ -452,6 +452,19 @@ class RuntimeInferShapeContext : public InferShapeContext { } } + void SetRepeatedDims(const std::string& name, + const std::vector& dims) override { + Variable* var = scope_.FindVar(name); + if (var->IsType()) { + var->GetMutable()->set_shapes(dims); + } else { + PADDLE_THROW( + "Only ReaderHolder support 'SetRepeatedDims', but Variable %s's " + "type_id is %s.", + name, var->Type().name()); + } + } + proto::VarDesc::VarType GetVarType(const std::string& name) const override { auto* var = scope_.FindVar(name); return ToVarType(var->Type()); diff --git a/paddle/framework/reader.cc b/paddle/framework/reader.cc index 76cbc827ba5..86220cd0bba 100644 --- a/paddle/framework/reader.cc +++ b/paddle/framework/reader.cc @@ -17,7 +17,7 @@ namespace paddle { namespace framework { -DDim FileReader::shape(size_t idx) const { +DDim ReaderBase::shape(size_t idx) const { PADDLE_ENFORCE_LT( idx, shapes_.size(), "Cannot get the %d'th shape, 'shapes_' only has %d elements.", idx, @@ -25,15 +25,15 @@ DDim FileReader::shape(size_t idx) const { return shapes_[idx]; } -void ShuffleReader::ReadNext(std::vector* out) { +void ShuffleReader::ReadNext(std::vector* out) { if (iteration_pos_ >= buffer_.size()) { // Reload buffer with new data buffer_.clear(); - buffer_.reverse(buffer_size_); + buffer_.reserve(buffer_size_); for (int i = 0; i < buffer_size_; ++i) { if (reader_->HasNext()) { - buffer.push_back(std::vector()); - reader_->ReadNext(&buffer.back()); + buffer_.push_back(std::vector()); + reader_->ReadNext(&buffer_.back()); } else { break; } @@ -48,19 +48,19 @@ void ShuffleReader::ReadNext(std::vector* out) { // if buffer_ is empty, the 'out' will return as an empty vector. } -void BatchReader::ReadNext(std::vector* out) { +void BatchReader::ReadNext(std::vector* out) { buffer_.clear(); buffer_.reserve(batch_size_); for (int i = 0; i < batch_size_; ++i) { if (reader_->HasNext()) { - buffer_.push_back(std::vector()); + buffer_.push_back(std::vector()); reader_->ReadNext(&buffer_.back()); } else { break; } } // Concat instances - out.clear(); + out->clear(); if (buffer_.empty()) { // if buffer_ is empty, the 'out' will return as an empty vector. return; diff --git a/paddle/framework/reader.h b/paddle/framework/reader.h index 523ff28c990..ff7153bc7bf 100644 --- a/paddle/framework/reader.h +++ b/paddle/framework/reader.h @@ -22,39 +22,36 @@ namespace framework { class ReaderBase { public: - virtual void ReadNext(std::vector* out) = 0; + explicit ReaderBase(const std::vector& shapes) : shapes_(shapes) { + PADDLE_ENFORCE(!shapes_.empty()); + } + virtual void ReadNext(std::vector* out) = 0; virtual bool HasNext() const = 0; - virtual DDim shape(size_t idx) const = 0; - virtual std::vector shapes() const = 0; + DDim shape(size_t idx) const; + std::vector shapes() const { return shapes_; } + void set_shapes(const std::vector& shapes) { shapes_ = shapes; } virtual ~ReaderBase() {} + + protected: + std::vector shapes_; }; class FileReader : public ReaderBase { public: - explicit FileReader(const std::vector& shapes) : shapes_(shapes) { - PADDLE_ENFORCE(!shapes_.empty()); - } - - DDim shape(size_t idx) const override; - std::vector shapes() const override { return shapes_; } - - protected: - std::vector shapes_; + explicit FileReader(const std::vector& shapes) : ReaderBase(shapes) {} }; class DecoratedReader : public ReaderBase { public: - explicit DecoratedReader(ReaderBase* reader) : reader_(reader) { + explicit DecoratedReader(ReaderBase* reader) + : ReaderBase(reader->shapes()), reader_(reader) { PADDLE_ENFORCE_NOT_NULL(reader_); } bool HasNext() const override { return reader_->HasNext(); } - DDim shape(size_t idx) const override { return reader_->shape(idx); } - std::vector shapes() const override { return reader_->shapes(); } - protected: ReaderBase* reader_; }; @@ -73,9 +70,9 @@ class RandomReader : public FileReader { dist_ = std::uniform_real_distribution(min_, max_); } - void ReadNext(std::vector* out) override { - out.clear(); - out.reserve(shapes_.size()); + void ReadNext(std::vector* out) override { + out->clear(); + out->reserve(shapes_.size()); for (const DDim& shape : shapes_) { PADDLE_ENFORCE_GE( shape.size(), 2, @@ -88,9 +85,8 @@ class RandomReader : public FileReader { for (int64_t i = 0; i < numel; ++i) { data[i] = dist_(engine_); } - out.push_back(out_tensor); + out->push_back(out_tensor); } - return out; } bool HasNext() const override { return true; } @@ -111,11 +107,11 @@ class ShuffleReader : public DecoratedReader { buffer_.reserve(buffer_size); } - void ReadNext(std::vector* out) override; + void ReadNext(std::vector* out) override; private: int buffer_size_; - std::vector> buffer_; + std::vector> buffer_; size_t iteration_pos_; }; @@ -126,11 +122,11 @@ class BatchReader : public DecoratedReader { buffer_.reserve(batch_size_); } - void ReadNext(std::vector* out) override; + void ReadNext(std::vector* out) override; private: int batch_size_; - std::vector> buffer_; + std::vector> buffer_; }; // The ReaderHolder is used as readers' unified wrapper, @@ -141,11 +137,14 @@ class ReaderHolder { ReaderBase* Get() const { return reader_.get(); } - void ReadNext(std::vector* out) { reader_->ReadNext(out); } + void ReadNext(std::vector* out) { reader_->ReadNext(out); } bool HasNext() const { return reader_->HasNext(); } DDim shape(size_t idx) const { return reader_->shape(idx); } std::vector shapes() const { return reader_->shapes(); } + void set_shapes(const std::vector& shapes) { + reader_->set_shapes(shapes); + } private: std::unique_ptr reader_; diff --git a/paddle/framework/shape_inference.cc b/paddle/framework/shape_inference.cc index 4a8acfb87ff..2f4d4505771 100644 --- a/paddle/framework/shape_inference.cc +++ b/paddle/framework/shape_inference.cc @@ -62,6 +62,16 @@ void InferShapeContext::SetOutputsDim(const std::string &name, SetDims(names, dims); } +void InferShapeContext::SetReaderDims(const std::string &name, + const std::vector &dims) { + const std::vector &arg_names = Outputs(name); + PADDLE_ENFORCE_EQ( + arg_names.size(), 1UL, + "Reader output '%s' should hold one element, but now it holds %d", name, + arg_names.size()); + return this->SetRepeatedDims(arg_names[0], dims); +} + std::vector InferShapeContext::GetDims( const std::vector &names) const { std::vector ret; diff --git a/paddle/framework/shape_inference.h b/paddle/framework/shape_inference.h index f1a64e9024b..7bee8698523 100644 --- a/paddle/framework/shape_inference.h +++ b/paddle/framework/shape_inference.h @@ -37,11 +37,12 @@ class InferShapeContext { DDim GetInputDim(const std::string &name) const; std::vector GetInputsDim(const std::string &name) const; - std::vector GetReaderDims(const std::string &name) const DDim; + std::vector GetReaderDims(const std::string &name) const; DDim GetInputsElementDim(const std::string &name, int idx) const; void SetOutputDim(const std::string &name, const DDim &dim); void SetOutputsDim(const std::string &name, const std::vector &dims); + void SetReaderDims(const std::string &name, const std::vector &dims); virtual AttrReader Attrs() const = 0; virtual const std::vector &Inputs( @@ -61,7 +62,9 @@ class InferShapeContext { protected: virtual DDim GetDim(const std::string &name) const = 0; virtual void SetDim(const std::string &name, const DDim &dim) = 0; - std::vector GetRepeatedDim(const std::string &name) const = 0; + virtual std::vector GetRepeatedDims(const std::string &name) const = 0; + virtual void SetRepeatedDims(const std::string &name, + const std::vector &dims) = 0; std::vector GetDims(const std::vector &names) const; std::vector GetVarTypes( diff --git a/paddle/framework/var_desc.cc b/paddle/framework/var_desc.cc index 6d83e2e4112..11a4daf2c99 100644 --- a/paddle/framework/var_desc.cc +++ b/paddle/framework/var_desc.cc @@ -57,10 +57,13 @@ size_t VarDesc::GetTensorDescNum() const { void VarDesc::SetShapes( const std::vector> &multiple_dims) { - PADDLE_ENFORCE_EQ(multiple_dims.size(), GetTensorDescNum(), - "The number of given shapes(%d) doesn't equal to the " - "number of sub tensor.", - multiple_dims.size(), GetTensorDescNum()); + if (multiple_dims.size() != GetTensorDescNum()) { + VLOG(3) << "WARNING: The number of given shapes(" << multiple_dims.size() + << ") doesn't match the existing tensor number(" + << GetTensorDescNum() + << "). The Reader is going to be reinitialized."; + SetTensorDescNum(multiple_dims.size()); + } std::vector tensors = mutable_tensor_descs(); for (size_t i = 0; i < multiple_dims.size(); ++i) { VectorToRepeated(multiple_dims[i], tensors[i]->mutable_dims()); @@ -87,10 +90,14 @@ void VarDesc::SetDataType(proto::DataType data_type) { void VarDesc::SetDataTypes( const std::vector &multiple_data_type) { - PADDLE_ENFORCE_EQ(multiple_data_type.size(), GetTensorDescNum(), - "The number of given data types(%d) doesn't equal to the " - "number of sub tensor.", - multiple_data_type.size(), GetTensorDescNum()); + if (multiple_data_type.size() != GetTensorDescNum()) { + VLOG(3) << "WARNING: The number of given data types(" + << multiple_data_type.size() + << ") doesn't match the existing tensor number(" + << GetTensorDescNum() + << "). The Reader is going to be reinitialized."; + SetTensorDescNum(multiple_data_type.size()); + } std::vector tensor_descs = mutable_tensor_descs(); for (size_t i = 0; i < multiple_data_type.size(); ++i) { tensor_descs[i]->set_data_type(multiple_data_type[i]); @@ -127,10 +134,14 @@ void VarDesc::SetLoDLevel(int32_t lod_level) { } void VarDesc::SetLoDLevels(const std::vector &multiple_lod_level) { - PADDLE_ENFORCE_EQ(multiple_lod_level.size(), GetTensorDescNum(), - "The number of given data types(%d) doesn't equal to the " - "number of sub tensor.", - multiple_lod_level.size(), GetTensorDescNum()); + if (multiple_lod_level.size() != GetTensorDescNum()) { + VLOG(3) << "WARNING: The number of given lod_levels(" + << multiple_lod_level.size() + << ") doesn't match the existing tensor number(" + << GetTensorDescNum() + << "). The Reader is going to be reinitialized."; + SetTensorDescNum(multiple_lod_level.size()); + } switch (desc_.type()) { case proto::VarDesc::READER: { size_t i = 0; diff --git a/paddle/framework/var_type.h b/paddle/framework/var_type.h index 5b7a08a0873..599d4514902 100644 --- a/paddle/framework/var_type.h +++ b/paddle/framework/var_type.h @@ -17,6 +17,7 @@ limitations under the License. */ #include "paddle/framework/lod_rank_table.h" #include "paddle/framework/lod_tensor.h" #include "paddle/framework/lod_tensor_array.h" +#include "paddle/framework/reader.h" #include "paddle/framework/selected_rows.h" #include "paddle/framework/variable.h" @@ -31,6 +32,8 @@ inline proto::VarDesc::VarType ToVarType(std::type_index type) { return proto::VarDesc_VarType_LOD_TENSOR_ARRAY; } else if (type.hash_code() == typeid(SelectedRows).hash_code()) { return proto::VarDesc_VarType_SELECTED_ROWS; + } else if (type.hash_code() == typeid(ReaderHolder).hash_code()) { + return proto::VarDesc_VarType_READER; } else { PADDLE_THROW("ToVarType:Unsupported type %s", type.name()); } @@ -40,7 +43,7 @@ template inline void VisitVarType(const framework::Variable& var, Visitor visitor) { switch (ToVarType(var.Type())) { case proto::VarDesc_VarType_LOD_TENSOR: - visitor(var.Get()); + visitor(var.Get()); return; case proto::VarDesc_VarType_LOD_RANK_TABLE: visitor(var.Get()); @@ -51,6 +54,9 @@ inline void VisitVarType(const framework::Variable& var, Visitor visitor) { case proto::VarDesc_VarType_SELECTED_ROWS: visitor(var.Get()); return; + case proto::VarDesc_VarType_READER: + visitor(var.Get()); + return; default: PADDLE_THROW("Not supported visit type, %d", ToVarType(var.Type())); } diff --git a/paddle/operators/create_reader_op.cc b/paddle/operators/create_reader_op.cc index 9cf27bbfc69..11c77a06032 100644 --- a/paddle/operators/create_reader_op.cc +++ b/paddle/operators/create_reader_op.cc @@ -18,12 +18,30 @@ namespace paddle { namespace operators { +std::vector RestoreShapes(const std::vector& shape_concat, + const std::vector& ranks) { + std::vector res; + int offset = 0; + for (int len : ranks) { + auto start_it = shape_concat.begin() + offset; + auto end_it = start_it + len; + res.push_back(framework::make_ddim(std::vector(start_it, end_it))); + offset += len; + } + return res; +} + // general infershape for file readers class CreateFileReaderInferShape : public framework::InferShapeBase { public: void operator()(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasOutput("Out"), "The output file reader should not be null."); + const auto shape_concat = + ctx->Attrs().Get>("shape_concat"); + const auto ranks = ctx->Attrs().Get>("ranks"); + std::vector shapes = RestoreShapes(shape_concat, ranks); + ctx->SetReaderDims("Out", shapes); } }; @@ -31,10 +49,22 @@ class CreateFileReaderInferShape : public framework::InferShapeBase { class CreateDecoratedReaderInferShape : public framework::InferShapeBase { public: void operator()(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Underlying_reader"), - "Input(Underlying_reader) should not be null."); + PADDLE_ENFORCE(ctx->HasInput("UnderlyingReader"), + "Input(UnderlyingReader) should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), "The output decorated reader should not be null."); + ctx->SetReaderDims("Out", ctx->GetReaderDims("UnderlyingReader")); + } +}; + +// general var type inference for all readers +class CreateReaderInferVarType : public framework::VarTypeInference { + public: + void operator()(const framework::OpDesc& op_desc, + framework::BlockDesc* block) const override { + std::string reader_name = op_desc.Output("Out")[0]; + framework::VarDesc* reader = block->FindVarRecursive(reader_name); + reader->SetType(framework::proto::VarDesc::READER); } }; @@ -51,15 +81,7 @@ class CreateRandomReaderOp : public framework::OperatorBase { int(shape_concat.size()), "The accumulate of all ranks should be equal to the " "shape concat's length."); - std::vector shapes; - int offset = 0; - for (int len : ranks) { - auto start_it = shape_concat.begin() + offset; - auto end_it = start_it + len; - shapes.push_back( - framework::make_ddim(std::vector(start_it, end_it))); - offset += len; - } + std::vector shapes = RestoreShapes(shape_concat, ranks); auto* out = scope.FindVar(Output("Out")) ->template GetMutable(); out->Reset(new framework::RandomReader(shapes, Attr("min"), @@ -99,7 +121,7 @@ class CreateShuffleReaderOp : public framework::OperatorBase { using framework::OperatorBase::OperatorBase; void Run(const framework::Scope& scope, const platform::Place& dev_place) const override { - const auto& underlying_reader = scope.FindVar(Input("Underlying_reader")) + const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) ->Get(); auto* out = scope.FindVar(Output("Out")) ->template GetMutable(); @@ -113,7 +135,7 @@ class CreateShuffleReaderOpMaker : public framework::OpProtoAndCheckerMaker { CreateShuffleReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(op_proto, op_checker) { AddInput( - "Underlying_reader", + "UnderlyingReader", "(ReaderHolder) The underlying reader for creating a shuffle reader."); AddOutput("Out", "(ReaderHolder) The created shuffle reader."); AddAttr("buffer_size", "The shuffle buffer size.").GreaterThan(0); @@ -131,7 +153,7 @@ class CreateBatchReaderOp : public framework::OperatorBase { using framework::OperatorBase::OperatorBase; void Run(const framework::Scope& scope, const platform::Place& dev_place) const override { - const auto& underlying_reader = scope.FindVar(Input("Underlying_reader")) + const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) ->Get(); auto* out = scope.FindVar(Output("Out")) ->template GetMutable(); @@ -145,7 +167,7 @@ class CreateBatchReaderOpMaker : public framework::OpProtoAndCheckerMaker { CreateBatchReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(op_proto, op_checker) { AddInput( - "Underlying_reader", + "UnderlyingReader", "(ReaderHolder) The underlying reader for creating a batch reader."); AddOutput("Out", "(ReaderHolder) The created batch reader."); AddAttr("batch_size", @@ -167,12 +189,15 @@ namespace ops = paddle::operators; REGISTER_OPERATOR(create_random_reader, ops::CreateRandomReaderOp, ops::CreateFileReaderInferShape, ops::CreateRandomReaderOpMaker, - paddle::framework::EmptyGradOpMaker); + paddle::framework::EmptyGradOpMaker, + ops::CreateReaderInferVarType); REGISTER_OPERATOR(create_shuffle_reader, ops::CreateShuffleReaderOp, ops::CreateDecoratedReaderInferShape, ops::CreateShuffleReaderOpMaker, - paddle::framework::EmptyGradOpMaker); + paddle::framework::EmptyGradOpMaker, + ops::CreateReaderInferVarType); REGISTER_OPERATOR(create_batch_reader, ops::CreateBatchReaderOp, ops::CreateDecoratedReaderInferShape, ops::CreateBatchReaderOpMaker, - paddle::framework::EmptyGradOpMaker); + paddle::framework::EmptyGradOpMaker, + ops::CreateReaderInferVarType); diff --git a/paddle/operators/read_op.cc b/paddle/operators/read_op.cc index c6ff4ba8fee..3d17b26c998 100644 --- a/paddle/operators/read_op.cc +++ b/paddle/operators/read_op.cc @@ -25,7 +25,7 @@ class ReadInferShape : public framework::InferShapeBase { "The ReadOp must take a reader as input."); PADDLE_ENFORCE(ctx->HasOutputs("Out"), "The ReadOp should be assigned with output."); - std::vector reader_dims = ctx->GetReaderDims("Reader"); + std::vector reader_dims = ctx->GetReaderDims("Reader"); std::vector out_names = ctx->Outputs("Out"); PADDLE_ENFORCE_EQ( reader_dims.size(), out_names.size(), @@ -40,12 +40,12 @@ class ReadInferVarType : public framework::VarTypeInference { framework::BlockDesc* block) const override { std::string reader_name = op_desc.Input("Reader")[0]; std::vector out_names = op_desc.Output("Out"); - framework::VarDesc reader = block.FindVarRecursive(reader_name); - auto dtypes = reader.GetDataTypes(); + framework::VarDesc* reader = block->FindVarRecursive(reader_name); + auto dtypes = reader->GetDataTypes(); PADDLE_ENFORCE_EQ(dtypes.size(), out_names.size()); for (size_t i = 0; i < dtypes.size(); ++i) { - faremwork::VarDesc& out = block->FindRecursiveOrCreateVar(out_names[i]); - out.SetType(framework::proto::DataType::LOD_TENSOR); + framework::VarDesc& out = block->FindRecursiveOrCreateVar(out_names[i]); + out.SetType(framework::proto::VarDesc::LOD_TENSOR); out.SetDataType(dtypes[i]); } } @@ -56,20 +56,18 @@ class ReadOp : public framework::OperatorBase { using framework::OperatorBase::OperatorBase; void Run(const framework::Scope& scope, const platform::Place& dev_place) const override { - const framework::ReaderHolder& reader = - scope.FindVar(Input("Reader"))->Get(); - if (!reader.HasNext()) { - // what shall we do??? + framework::ReaderHolder* reader = + scope.FindVar(Input("Reader"))->GetMutable(); + if (!reader->HasNext()) { return; } std::vector out_arg_names = Outputs("Out"); std::vector ins; - reader.ReadNext(&ins); + reader->ReadNext(&ins); PADDLE_ENFORCE_EQ(ins.size(), out_arg_names.size()); for (size_t i = 0; i < ins.size(); ++i) { auto* out = scope.FindVar(out_arg_names[i])->GetMutable(); - PADDLE_ENFORCE_EQ(ins[i].dims(), out->dims()); out->ShareDataWith(ins[i]); out->set_lod(ins[i].lod()); } @@ -86,9 +84,13 @@ class ReadOpMaker : public framework::OpProtoAndCheckerMaker { Read Operator Execute a given reader once and output data. - )DOC") + )DOC"); } }; } // namespace operators -} // namespace paddle \ No newline at end of file +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(read, ops::ReadOp, ops::ReadInferShape, ops::ReadOpMaker, + paddle::framework::EmptyGradOpMaker, ops::ReadInferVarType); diff --git a/paddle/pybind/protobuf.cc b/paddle/pybind/protobuf.cc index 0f1953abe08..0a92e10927c 100644 --- a/paddle/pybind/protobuf.cc +++ b/paddle/pybind/protobuf.cc @@ -217,8 +217,6 @@ void BindVarDsec(py::module &m) { .def("set_shapes", &VarDesc::SetShapes) .def("set_dtype", &VarDesc::SetDataType) .def("set_dtypes", &VarDesc::SetDataTypes) - .def("set_tensor_num", &VarDesc::SetTensorDescNum) - .def("tensor_num", &VarDesc::GetTensorDescNum) .def("shape", &VarDesc::GetShape, py::return_value_policy::reference) .def("shapes", &VarDesc::GetShapes, py::return_value_policy::reference) .def("dtype", &VarDesc::GetDataType, py::return_value_policy::reference) diff --git a/python/paddle/v2/fluid/executor.py b/python/paddle/v2/fluid/executor.py index 0eddcc3a5ab..1bc3423f10c 100644 --- a/python/paddle/v2/fluid/executor.py +++ b/python/paddle/v2/fluid/executor.py @@ -51,7 +51,8 @@ def as_numpy(tensor): if len(lod) == 0: ans = tensor_data else: - raise RuntimeError("LoD Calculate lacks unit tests and buggy") + #raise RuntimeError("LoD Calculate lacks unit tests and buggy") + ans = tensor_data # elif len(lod) == 1: # ans = [] # idx = 0 diff --git a/python/paddle/v2/fluid/tests/test_cpp_reader.py b/python/paddle/v2/fluid/tests/test_cpp_reader.py new file mode 100644 index 00000000000..cd5fff9425c --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_cpp_reader.py @@ -0,0 +1,71 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle.v2 as paddle +import paddle.v2.fluid as fluid +import numpy as np + +prog = fluid.framework.Program() +block = prog.current_block() + +random_reader = block.create_var( + type=fluid.core.VarDesc.VarType.READER, name="RandomReader") +random_reader.desc.set_lod_levels([0, 0]) + +create_random_reader_op = block.append_op( + type="create_random_reader", + outputs={"Out": random_reader}, + attrs={ + "shape_concat": [1, 2, 1, 1], + "ranks": [2, 2], + "min": 0.0, + "max": 1.0 + }) + +batch_reader = block.create_var( + type=fluid.core.VarDesc.VarType.READER, name=("BatchReader")) +batch_reader.desc.set_lod_levels([0, 0]) + +create_batch_reader_op = block.append_op( + type="create_batch_reader", + inputs={"UnderlyingReader": random_reader}, + outputs={"Out": batch_reader}, + attrs={"batch_size": 10}) + +out1 = block.create_var( + type=fluid.core.VarDesc.VarType.LOD_TENSOR, + name="Out1", + shape=[10, 2], + dtype="float32", + lod_level=1) +out2 = block.create_var( + type=fluid.core.VarDesc.VarType.LOD_TENSOR, + name="Out2", + shape=[10, 1], + dtype="float32", + lod_level=1) + +read_op = block.append_op( + type="read", inputs={"Reader": batch_reader}, + outputs={"Out": [out1, out2]}) + +place = fluid.CPUPlace() +exe = fluid.Executor(place) + +[res1, res2] = exe.run(prog, fetch_list=[out1, out2]) + +if len(res1) == 0 or len(res2) == 0: + exit(1) + +exit(0) -- GitLab From f21540021219a50fb392e59343d6af5ce3e4b6da Mon Sep 17 00:00:00 2001 From: kavyasrinet Date: Tue, 6 Feb 2018 11:28:00 -0800 Subject: [PATCH 066/138] Adding panic logic and test case (#8171) * Adding panic logic and test case * Change panic behavior to boolean instead of exception * Adding atomic * Switch to boolean * Fix spacing * Add to close method --- paddle/framework/channel_test.cc | 11 ++++++++++- paddle/framework/details/buffered_channel.h | 11 +++++++++-- paddle/framework/details/unbuffered_channel.h | 8 +++++++- 3 files changed, 26 insertions(+), 4 deletions(-) diff --git a/paddle/framework/channel_test.cc b/paddle/framework/channel_test.cc index 6416c04f36e..df9e15e22b8 100644 --- a/paddle/framework/channel_test.cc +++ b/paddle/framework/channel_test.cc @@ -60,6 +60,16 @@ TEST(Channel, SufficientBufferSizeDoesntBlock) { delete ch; } +TEST(Channel, SendOnClosedChannelPanics) { + const size_t buffer_size = 10; + auto ch = MakeChannel(buffer_size); + size_t i = 5; + EXPECT_EQ(ch->Send(&i), true); // should not block or panic + CloseChannel(ch); + EXPECT_EQ(ch->Send(&i), false); // should panic + delete ch; +} + TEST(Channel, ReceiveFromBufferedChannelReturnResidualValuesTest) { const size_t buffer_size = 10; auto ch = MakeChannel(buffer_size); @@ -88,7 +98,6 @@ TEST(Channel, ReceiveFromBufferedChannelReturnResidualValuesTest) { // Note: we cannot check EXPECT_EQ(out, 0), because C++ doesn't // define zero values like Go does. } - delete ch; } diff --git a/paddle/framework/details/buffered_channel.h b/paddle/framework/details/buffered_channel.h index b9761eab9b5..00b63da4da7 100644 --- a/paddle/framework/details/buffered_channel.h +++ b/paddle/framework/details/buffered_channel.h @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include #include #include #include @@ -42,7 +43,7 @@ class Buffered : public paddle::framework::Channel { std::condition_variable empty_cond_var_; std::condition_variable full_cond_var_; std::deque channel_; - bool closed_; + std::atomic closed_{false}; Buffered(size_t cap) : cap_(cap), closed_(false) { PADDLE_ENFORCE_GT(cap, 0); @@ -53,10 +54,13 @@ class Buffered : public paddle::framework::Channel { template bool Buffered::Send(T* item) { + bool ret = false; + if (closed_) { + return ret; + } std::unique_lock lock(mu_); full_cond_var_.wait(lock, [this]() { return channel_.size() < cap_ || closed_; }); - bool ret = false; if (!closed_) { channel_.push_back(std::move(*item)); lock.unlock(); @@ -82,6 +86,9 @@ bool Buffered::Receive(T* item) { template void Buffered::Close() { + if (closed_) { + return; + } std::unique_lock lock(mu_); closed_ = true; NotifyAllParticipants(&lock); diff --git a/paddle/framework/details/unbuffered_channel.h b/paddle/framework/details/unbuffered_channel.h index f86a894bb4a..815cebad2d8 100644 --- a/paddle/framework/details/unbuffered_channel.h +++ b/paddle/framework/details/unbuffered_channel.h @@ -58,6 +58,10 @@ class UnBuffered : public paddle::framework::Channel { // be sent from a writer to a reader. template bool UnBuffered::Send(T* data) { + bool ret = false; + if (closed_) { + return ret; + } // Prevent other writers from entering std::unique_lock writer_lock(mu_write_); writer_found_ = true; @@ -66,7 +70,6 @@ bool UnBuffered::Send(T* data) { cv_writer_.wait(cv_lock, [this]() { return reader_found_ == true || closed_; }); cv_reader_.notify_one(); - bool ret = false; if (!closed_) { std::unique_lock channel_lock(mu_ch_); item = data; @@ -114,6 +117,9 @@ bool UnBuffered::Receive(T* data) { // that take place once the channel is closed. template void UnBuffered::Close() { + if (closed_) { + return; + } std::unique_lock lock(mu_ch_); item = nullptr; closed_ = true; -- GitLab From 2668b4d67e15bd4ca729a2f837fdb39f8b69dc51 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Wed, 7 Feb 2018 01:58:36 +0000 Subject: [PATCH 067/138] disable nccl test --- paddle/operators/nccl_op_test.cu.cc | 3 +++ paddle/platform/nccl_test.cu | 3 +++ 2 files changed, 6 insertions(+) diff --git a/paddle/operators/nccl_op_test.cu.cc b/paddle/operators/nccl_op_test.cu.cc index 072e4eb2eff..827a6253477 100644 --- a/paddle/operators/nccl_op_test.cu.cc +++ b/paddle/operators/nccl_op_test.cu.cc @@ -287,6 +287,9 @@ TEST_F(NCCLTester, ncclBcastOp) { } int main(int argc, char **argv) { + // FIXME(tonyyang-svail): + // Due to the driver issue on our CI, disable for now + return 0; const int dev_count = p::GetCUDADeviceCount(); if (dev_count <= 1) { LOG(WARNING) diff --git a/paddle/platform/nccl_test.cu b/paddle/platform/nccl_test.cu index ef6d8458747..84f5ac28be3 100644 --- a/paddle/platform/nccl_test.cu +++ b/paddle/platform/nccl_test.cu @@ -127,6 +127,9 @@ TEST(NCCL, all_reduce) { } // namespace paddle int main(int argc, char** argv) { + // FIXME(tonyyang-svail): + // Due to the driver issue on our CI, disable for now + return 0; dev_count = paddle::platform::GetCUDADeviceCount(); if (dev_count <= 1) { LOG(WARNING) -- GitLab From 542bdef7a5142bbfebafc327ff393a8c1aa62214 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 7 Feb 2018 10:17:31 +0800 Subject: [PATCH 068/138] fix a unit test --- python/paddle/v2/fluid/tests/test_protobuf_descs.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/python/paddle/v2/fluid/tests/test_protobuf_descs.py b/python/paddle/v2/fluid/tests/test_protobuf_descs.py index 8f335d13db3..c590bf1c657 100644 --- a/python/paddle/v2/fluid/tests/test_protobuf_descs.py +++ b/python/paddle/v2/fluid/tests/test_protobuf_descs.py @@ -120,7 +120,6 @@ class TestVarDesc(unittest.TestCase): block = program_desc.block(0) var = block.var('my_reader') var.set_type(core.VarDesc.VarType.READER) - var.set_tensor_num(3) src_shapes = [[2, 3, 3], [4, 5], [6, 7, 8, 9]] var.set_shapes(src_shapes) res_shapes = var.shapes() @@ -141,7 +140,6 @@ class TestVarDesc(unittest.TestCase): block = program_desc.block(0) var = block.var('my_reader') var.set_type(core.VarDesc.VarType.READER) - var.set_tensor_num(3) src_types = [ core.DataType.INT32, core.DataType.FP64, core.DataType.FP32 ] @@ -154,7 +152,6 @@ class TestVarDesc(unittest.TestCase): block = program_desc.block(0) var = block.var('my_reader') var.set_type(core.VarDesc.VarType.READER) - var.set_tensor_num(3) src_types = [3, 1, 2] var.set_lod_levels(src_types) self.assertEqual(src_types, var.lod_levels()) -- GitLab From b00cae60abdea7402baf70798885f9634b8eb0b0 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 7 Feb 2018 10:59:21 +0800 Subject: [PATCH 069/138] refine code --- python/paddle/v2/fluid/executor.py | 3 +-- python/paddle/v2/fluid/tests/test_cpp_reader.py | 13 ++----------- 2 files changed, 3 insertions(+), 13 deletions(-) diff --git a/python/paddle/v2/fluid/executor.py b/python/paddle/v2/fluid/executor.py index 1bc3423f10c..0eddcc3a5ab 100644 --- a/python/paddle/v2/fluid/executor.py +++ b/python/paddle/v2/fluid/executor.py @@ -51,8 +51,7 @@ def as_numpy(tensor): if len(lod) == 0: ans = tensor_data else: - #raise RuntimeError("LoD Calculate lacks unit tests and buggy") - ans = tensor_data + raise RuntimeError("LoD Calculate lacks unit tests and buggy") # elif len(lod) == 1: # ans = [] # idx = 0 diff --git a/python/paddle/v2/fluid/tests/test_cpp_reader.py b/python/paddle/v2/fluid/tests/test_cpp_reader.py index cd5fff9425c..7efcb0c46d2 100644 --- a/python/paddle/v2/fluid/tests/test_cpp_reader.py +++ b/python/paddle/v2/fluid/tests/test_cpp_reader.py @@ -33,16 +33,6 @@ create_random_reader_op = block.append_op( "max": 1.0 }) -batch_reader = block.create_var( - type=fluid.core.VarDesc.VarType.READER, name=("BatchReader")) -batch_reader.desc.set_lod_levels([0, 0]) - -create_batch_reader_op = block.append_op( - type="create_batch_reader", - inputs={"UnderlyingReader": random_reader}, - outputs={"Out": batch_reader}, - attrs={"batch_size": 10}) - out1 = block.create_var( type=fluid.core.VarDesc.VarType.LOD_TENSOR, name="Out1", @@ -57,7 +47,8 @@ out2 = block.create_var( lod_level=1) read_op = block.append_op( - type="read", inputs={"Reader": batch_reader}, + type="read", + inputs={"Reader": random_reader}, outputs={"Out": [out1, out2]}) place = fluid.CPUPlace() -- GitLab From 1eb3d6cdb261bb41eff6b44b301e3da881b2fa26 Mon Sep 17 00:00:00 2001 From: dzhwinter Date: Tue, 6 Feb 2018 21:24:02 -0800 Subject: [PATCH 070/138] "rerun ci" --- paddle/operators/parallel_do_op.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/paddle/operators/parallel_do_op.cc b/paddle/operators/parallel_do_op.cc index eb6308d306a..6c85ca6cde7 100644 --- a/paddle/operators/parallel_do_op.cc +++ b/paddle/operators/parallel_do_op.cc @@ -152,7 +152,9 @@ class ParallelDoOp : public framework::OperatorBase { auto *sub_scope = sub_scopes[i]; auto *dst = sub_scope->Var(param)->GetMutable(); framework::Copy(src, place, dst); - dst->set_lod(src.lod()); + framework::LoD lod(src.lod()); + lod.CopyToPeer(place); + dst->set_lod(lod); } } WaitOnPlaces(places); -- GitLab From 2f41aaa492aa952fc5429ffb408f4de044f6229f Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Wed, 7 Feb 2018 13:35:11 +0800 Subject: [PATCH 071/138] adjust the structure of documentation --- .../dev => build_and_install}/build_cn.md | 0 .../dev => build_and_install}/build_en.md | 0 .../build_from_source_cn.rst | 0 .../build_from_source_en.rst | 0 .../build_and_install/docker_install_cn.rst | 0 .../build_and_install/docker_install_en.rst | 0 .../build_and_install/index_cn.rst | 2 +- .../build_and_install/index_en.rst | 2 +- .../build_and_install/paddleci.png | Bin .../build_and_install/pip_install_cn.rst | 0 .../build_and_install/pip_install_en.rst | 0 doc/{howto => }/dev/FullyConnected.jpg | Bin .../dev/contribute_to_paddle_cn.md | 0 doc/dev/contribute_to_paddle_en.md | 1 + doc/dev/index_cn.rst | 8 +++++ doc/dev/index_en.rst | 9 +++++ doc/{howto => }/dev/new_layer_cn.rst | 0 doc/{howto => }/dev/new_layer_en.rst | 0 doc/{howto => }/dev/new_op_cn.md | 0 doc/{howto => }/dev/new_op_en.md | 0 doc/{howto => }/dev/new_op_kernel_en.md | 0 doc/{howto => }/dev/use_eigen_cn.md | 0 doc/{howto => }/dev/use_eigen_en.md | 0 doc/{howto => }/dev/write_docs_cn.rst | 6 ++-- doc/{howto => }/dev/write_docs_en.rst | 4 +-- .../{usage => }/capi/compile_paddle_lib_cn.md | 2 +- doc/howto/{usage => }/capi/images/csr.png | Bin .../{usage => }/capi/images/sequence_data.png | Bin .../capi/images/workflow_of_CAPI.png | Bin doc/howto/{usage => }/capi/index_cn.rst | 2 +- .../capi/organization_of_the_inputs_cn.md | 0 .../{usage => }/capi/workflow_of_capi_cn.md | 2 +- .../{usage => }/cluster/cluster_train_cn.md | 0 .../{usage => }/cluster/cluster_train_en.md | 0 doc/howto/{usage => }/cluster/fabric_cn.md | 0 doc/howto/{usage => }/cluster/fabric_en.md | 0 .../cluster/fluid_cluster_train_en.md | 0 doc/howto/{usage => }/cluster/k8s_aws_cn.md | 0 doc/howto/{usage => }/cluster/k8s_aws_en.md | 0 doc/howto/{usage => }/cluster/k8s_cn.md | 0 .../{usage => }/cluster/k8s_distributed_cn.md | 0 doc/howto/{usage => }/cluster/k8s_en.md | 0 doc/howto/{usage => }/cluster/openmpi_cn.md | 0 doc/howto/{usage => }/cluster/openmpi_en.md | 0 doc/howto/{usage => }/cluster/src/Dockerfile | 0 .../cluster/src/add_security_group.png | Bin .../{usage => }/cluster/src/create_efs.png | Bin .../{usage => }/cluster/src/efs_mount.png | Bin .../cluster/src/k8s-paddle-arch.png | Bin .../cluster/src/k8s_data/Dockerfile | 0 .../cluster/src/k8s_data/README.md | 0 .../cluster/src/k8s_data/get_data.sh | 0 .../cluster/src/k8s_train/Dockerfile | 0 .../cluster/src/k8s_train/README.md | 0 .../cluster/src/k8s_train/start.sh | 0 .../cluster/src/k8s_train/start_paddle.py | 0 .../cluster/src/managed_policy.png | Bin .../cluster/src/pserver_and_trainer.png | Bin .../cluster/src/route53_create_recordset.png | Bin .../cluster/src/route53_create_zone.png | Bin doc/howto/{usage => }/cluster/src/trainer.png | Bin .../{usage => }/cluster/src/trainer_cn.png | Bin .../cluster/src/word2vec/api_train_v2.py | 0 .../src/word2vec/api_train_v2_cluster.py | 0 .../cluster/src/word2vec/prepare.py | 0 .../cluster/src/worker_security_group.png | Bin .../{usage => }/cmd_parameter/arguments_cn.md | 0 .../{usage => }/cmd_parameter/arguments_en.md | 0 .../cmd_parameter/detail_introduction_cn.md | 0 .../cmd_parameter/detail_introduction_en.md | 0 .../{usage => }/cmd_parameter/index_cn.rst | 2 +- .../{usage => }/cmd_parameter/index_en.rst | 0 .../{usage => }/cmd_parameter/use_case_cn.md | 0 .../{usage => }/cmd_parameter/use_case_en.md | 0 doc/howto/dev/contribute_to_paddle_en.md | 1 - doc/howto/index_cn.rst | 34 +++--------------- doc/howto/index_en.rst | 33 ++--------------- .../{cpu_profiling.md => cpu_profiling_en.md} | 0 doc/howto/optimization/gpu_profiling_cn.rst | 6 ++-- .../rnn/hierarchical_layer_cn.rst | 0 .../rnn/hrnn_rnn_api_compare_cn.rst | 0 doc/howto/{deep_model => }/rnn/index_cn.rst | 0 doc/howto/{deep_model => }/rnn/index_en.rst | 0 .../rnn/recurrent_group_cn.md | 0 .../{deep_model => }/rnn/rnn_config_cn.rst | 0 .../{deep_model => }/rnn/rnn_config_en.rst | 0 .../{deep_model => }/rnn/src/bi_lstm.jpg | Bin .../src/encoder-decoder-attention-model.png | Bin .../{deep_model => }/rnn/src/glossary_rnn.dot | 0 .../rnn/src/glossary_rnn_with_memory.dot | 0 .../simple_full_hierarchical_recurrent.dot | 0 .../rnn/src/simple_full_recurrent.dot | 0 doc/index_cn.rst | 2 ++ doc/index_en.rst | 2 ++ 94 files changed, 43 insertions(+), 75 deletions(-) rename doc/{howto/dev => build_and_install}/build_cn.md (100%) rename doc/{howto/dev => build_and_install}/build_en.md (100%) rename doc/{getstarted => }/build_and_install/build_from_source_cn.rst (100%) rename doc/{getstarted => }/build_and_install/build_from_source_en.rst (100%) rename doc/{getstarted => }/build_and_install/docker_install_cn.rst (100%) rename doc/{getstarted => }/build_and_install/docker_install_en.rst (100%) rename doc/{getstarted => }/build_and_install/index_cn.rst (94%) rename doc/{getstarted => }/build_and_install/index_en.rst (95%) rename doc/{getstarted => }/build_and_install/paddleci.png (100%) rename doc/{getstarted => }/build_and_install/pip_install_cn.rst (100%) rename doc/{getstarted => }/build_and_install/pip_install_en.rst (100%) rename doc/{howto => }/dev/FullyConnected.jpg (100%) rename doc/{howto => }/dev/contribute_to_paddle_cn.md (100%) create mode 120000 doc/dev/contribute_to_paddle_en.md create mode 100644 doc/dev/index_cn.rst create mode 100644 doc/dev/index_en.rst rename doc/{howto => }/dev/new_layer_cn.rst (100%) rename doc/{howto => }/dev/new_layer_en.rst (100%) rename doc/{howto => }/dev/new_op_cn.md (100%) rename doc/{howto => }/dev/new_op_en.md (100%) rename doc/{howto => }/dev/new_op_kernel_en.md (100%) rename doc/{howto => }/dev/use_eigen_cn.md (100%) rename doc/{howto => }/dev/use_eigen_en.md (100%) rename doc/{howto => }/dev/write_docs_cn.rst (98%) rename doc/{howto => }/dev/write_docs_en.rst (98%) rename doc/howto/{usage => }/capi/compile_paddle_lib_cn.md (99%) rename doc/howto/{usage => }/capi/images/csr.png (100%) rename doc/howto/{usage => }/capi/images/sequence_data.png (100%) rename doc/howto/{usage => }/capi/images/workflow_of_CAPI.png (100%) rename doc/howto/{usage => }/capi/index_cn.rst (87%) rename doc/howto/{usage => }/capi/organization_of_the_inputs_cn.md (100%) rename doc/howto/{usage => }/capi/workflow_of_capi_cn.md (99%) rename doc/howto/{usage => }/cluster/cluster_train_cn.md (100%) rename doc/howto/{usage => }/cluster/cluster_train_en.md (100%) rename doc/howto/{usage => }/cluster/fabric_cn.md (100%) rename doc/howto/{usage => }/cluster/fabric_en.md (100%) rename doc/howto/{usage => }/cluster/fluid_cluster_train_en.md (100%) rename doc/howto/{usage => }/cluster/k8s_aws_cn.md (100%) rename doc/howto/{usage => }/cluster/k8s_aws_en.md (100%) rename doc/howto/{usage => }/cluster/k8s_cn.md (100%) rename doc/howto/{usage => }/cluster/k8s_distributed_cn.md (100%) rename doc/howto/{usage => }/cluster/k8s_en.md (100%) rename doc/howto/{usage => }/cluster/openmpi_cn.md (100%) rename doc/howto/{usage => }/cluster/openmpi_en.md (100%) rename doc/howto/{usage => }/cluster/src/Dockerfile (100%) rename doc/howto/{usage => }/cluster/src/add_security_group.png (100%) rename doc/howto/{usage => }/cluster/src/create_efs.png (100%) rename doc/howto/{usage => }/cluster/src/efs_mount.png (100%) rename doc/howto/{usage => }/cluster/src/k8s-paddle-arch.png (100%) rename doc/howto/{usage => }/cluster/src/k8s_data/Dockerfile (100%) rename doc/howto/{usage => }/cluster/src/k8s_data/README.md (100%) rename doc/howto/{usage => }/cluster/src/k8s_data/get_data.sh (100%) rename doc/howto/{usage => }/cluster/src/k8s_train/Dockerfile (100%) rename doc/howto/{usage => }/cluster/src/k8s_train/README.md (100%) rename doc/howto/{usage => }/cluster/src/k8s_train/start.sh (100%) rename doc/howto/{usage => }/cluster/src/k8s_train/start_paddle.py (100%) rename doc/howto/{usage => }/cluster/src/managed_policy.png (100%) rename doc/howto/{usage => }/cluster/src/pserver_and_trainer.png (100%) rename doc/howto/{usage => }/cluster/src/route53_create_recordset.png (100%) rename doc/howto/{usage => }/cluster/src/route53_create_zone.png (100%) rename doc/howto/{usage => }/cluster/src/trainer.png (100%) rename doc/howto/{usage => }/cluster/src/trainer_cn.png (100%) rename doc/howto/{usage => }/cluster/src/word2vec/api_train_v2.py (100%) rename doc/howto/{usage => }/cluster/src/word2vec/api_train_v2_cluster.py (100%) rename doc/howto/{usage => }/cluster/src/word2vec/prepare.py (100%) rename doc/howto/{usage => }/cluster/src/worker_security_group.png (100%) rename doc/howto/{usage => }/cmd_parameter/arguments_cn.md (100%) rename doc/howto/{usage => }/cmd_parameter/arguments_en.md (100%) rename doc/howto/{usage => }/cmd_parameter/detail_introduction_cn.md (100%) rename doc/howto/{usage => }/cmd_parameter/detail_introduction_en.md (100%) rename doc/howto/{usage => }/cmd_parameter/index_cn.rst (85%) rename doc/howto/{usage => }/cmd_parameter/index_en.rst (100%) rename doc/howto/{usage => }/cmd_parameter/use_case_cn.md (100%) rename doc/howto/{usage => }/cmd_parameter/use_case_en.md (100%) delete mode 120000 doc/howto/dev/contribute_to_paddle_en.md rename doc/howto/optimization/{cpu_profiling.md => cpu_profiling_en.md} (100%) rename doc/howto/{deep_model => }/rnn/hierarchical_layer_cn.rst (100%) rename doc/howto/{deep_model => }/rnn/hrnn_rnn_api_compare_cn.rst (100%) rename doc/howto/{deep_model => }/rnn/index_cn.rst (100%) rename doc/howto/{deep_model => }/rnn/index_en.rst (100%) rename doc/howto/{deep_model => }/rnn/recurrent_group_cn.md (100%) rename doc/howto/{deep_model => }/rnn/rnn_config_cn.rst (100%) rename doc/howto/{deep_model => }/rnn/rnn_config_en.rst (100%) rename doc/howto/{deep_model => }/rnn/src/bi_lstm.jpg (100%) rename doc/howto/{deep_model => }/rnn/src/encoder-decoder-attention-model.png (100%) rename doc/howto/{deep_model => }/rnn/src/glossary_rnn.dot (100%) rename doc/howto/{deep_model => }/rnn/src/glossary_rnn_with_memory.dot (100%) rename doc/howto/{deep_model => }/rnn/src/simple_full_hierarchical_recurrent.dot (100%) rename doc/howto/{deep_model => }/rnn/src/simple_full_recurrent.dot (100%) diff --git a/doc/howto/dev/build_cn.md b/doc/build_and_install/build_cn.md similarity index 100% rename from doc/howto/dev/build_cn.md rename to doc/build_and_install/build_cn.md diff --git a/doc/howto/dev/build_en.md b/doc/build_and_install/build_en.md similarity index 100% rename from doc/howto/dev/build_en.md rename to doc/build_and_install/build_en.md diff --git a/doc/getstarted/build_and_install/build_from_source_cn.rst b/doc/build_and_install/build_from_source_cn.rst similarity index 100% rename from doc/getstarted/build_and_install/build_from_source_cn.rst rename to doc/build_and_install/build_from_source_cn.rst diff --git a/doc/getstarted/build_and_install/build_from_source_en.rst b/doc/build_and_install/build_from_source_en.rst similarity index 100% rename from doc/getstarted/build_and_install/build_from_source_en.rst rename to doc/build_and_install/build_from_source_en.rst diff --git a/doc/getstarted/build_and_install/docker_install_cn.rst b/doc/build_and_install/docker_install_cn.rst similarity index 100% rename from doc/getstarted/build_and_install/docker_install_cn.rst rename to doc/build_and_install/docker_install_cn.rst diff --git a/doc/getstarted/build_and_install/docker_install_en.rst b/doc/build_and_install/docker_install_en.rst similarity index 100% rename from doc/getstarted/build_and_install/docker_install_en.rst rename to doc/build_and_install/docker_install_en.rst diff --git a/doc/getstarted/build_and_install/index_cn.rst b/doc/build_and_install/index_cn.rst similarity index 94% rename from doc/getstarted/build_and_install/index_cn.rst rename to doc/build_and_install/index_cn.rst index c9ba84c842b..4220ff22793 100644 --- a/doc/getstarted/build_and_install/index_cn.rst +++ b/doc/build_and_install/index_cn.rst @@ -13,7 +13,7 @@ PaddlePaddle提供pip和Docker的安装方式: pip_install_cn.rst docker_install_cn.rst - ../../howto/dev/build_cn.md + build_cn.md 编译流程 ++++++++ diff --git a/doc/getstarted/build_and_install/index_en.rst b/doc/build_and_install/index_en.rst similarity index 95% rename from doc/getstarted/build_and_install/index_en.rst rename to doc/build_and_install/index_en.rst index 32d66d63dd5..db6b5be742b 100644 --- a/doc/getstarted/build_and_install/index_en.rst +++ b/doc/build_and_install/index_en.rst @@ -13,7 +13,7 @@ You can choose either pip or Docker to complete your install: pip_install_en.rst docker_install_en.rst - ../../howto/dev/build_en.md + build_en.md Build from Source diff --git a/doc/getstarted/build_and_install/paddleci.png b/doc/build_and_install/paddleci.png similarity index 100% rename from doc/getstarted/build_and_install/paddleci.png rename to doc/build_and_install/paddleci.png diff --git a/doc/getstarted/build_and_install/pip_install_cn.rst b/doc/build_and_install/pip_install_cn.rst similarity index 100% rename from doc/getstarted/build_and_install/pip_install_cn.rst rename to doc/build_and_install/pip_install_cn.rst diff --git a/doc/getstarted/build_and_install/pip_install_en.rst b/doc/build_and_install/pip_install_en.rst similarity index 100% rename from doc/getstarted/build_and_install/pip_install_en.rst rename to doc/build_and_install/pip_install_en.rst diff --git a/doc/howto/dev/FullyConnected.jpg b/doc/dev/FullyConnected.jpg similarity index 100% rename from doc/howto/dev/FullyConnected.jpg rename to doc/dev/FullyConnected.jpg diff --git a/doc/howto/dev/contribute_to_paddle_cn.md b/doc/dev/contribute_to_paddle_cn.md similarity index 100% rename from doc/howto/dev/contribute_to_paddle_cn.md rename to doc/dev/contribute_to_paddle_cn.md diff --git a/doc/dev/contribute_to_paddle_en.md b/doc/dev/contribute_to_paddle_en.md new file mode 120000 index 00000000000..f939e75f21a --- /dev/null +++ b/doc/dev/contribute_to_paddle_en.md @@ -0,0 +1 @@ +../../CONTRIBUTING.md \ No newline at end of file diff --git a/doc/dev/index_cn.rst b/doc/dev/index_cn.rst new file mode 100644 index 00000000000..487db868bb2 --- /dev/null +++ b/doc/dev/index_cn.rst @@ -0,0 +1,8 @@ +开发标准 +======== + +.. toctree:: + :maxdepth: 1 + + contribute_to_paddle_cn.md + write_docs_cn.rst diff --git a/doc/dev/index_en.rst b/doc/dev/index_en.rst new file mode 100644 index 00000000000..5dd12d2233c --- /dev/null +++ b/doc/dev/index_en.rst @@ -0,0 +1,9 @@ +Development +------------ + +.. toctree:: + :maxdepth: 1 + + new_layer_en.rst + contribute_to_paddle_en.md + write_docs_en.rst diff --git a/doc/howto/dev/new_layer_cn.rst b/doc/dev/new_layer_cn.rst similarity index 100% rename from doc/howto/dev/new_layer_cn.rst rename to doc/dev/new_layer_cn.rst diff --git a/doc/howto/dev/new_layer_en.rst b/doc/dev/new_layer_en.rst similarity index 100% rename from doc/howto/dev/new_layer_en.rst rename to doc/dev/new_layer_en.rst diff --git a/doc/howto/dev/new_op_cn.md b/doc/dev/new_op_cn.md similarity index 100% rename from doc/howto/dev/new_op_cn.md rename to doc/dev/new_op_cn.md diff --git a/doc/howto/dev/new_op_en.md b/doc/dev/new_op_en.md similarity index 100% rename from doc/howto/dev/new_op_en.md rename to doc/dev/new_op_en.md diff --git a/doc/howto/dev/new_op_kernel_en.md b/doc/dev/new_op_kernel_en.md similarity index 100% rename from doc/howto/dev/new_op_kernel_en.md rename to doc/dev/new_op_kernel_en.md diff --git a/doc/howto/dev/use_eigen_cn.md b/doc/dev/use_eigen_cn.md similarity index 100% rename from doc/howto/dev/use_eigen_cn.md rename to doc/dev/use_eigen_cn.md diff --git a/doc/howto/dev/use_eigen_en.md b/doc/dev/use_eigen_en.md similarity index 100% rename from doc/howto/dev/use_eigen_en.md rename to doc/dev/use_eigen_en.md diff --git a/doc/howto/dev/write_docs_cn.rst b/doc/dev/write_docs_cn.rst similarity index 98% rename from doc/howto/dev/write_docs_cn.rst rename to doc/dev/write_docs_cn.rst index 1bc947c260d..f79769b810b 100644 --- a/doc/howto/dev/write_docs_cn.rst +++ b/doc/dev/write_docs_cn.rst @@ -1,6 +1,6 @@ -################## -如何贡献/修改文档 -################## +############# +如何贡献文档 +############# PaddlePaddle的文档包括英文文档 ``doc`` 和中文文档 ``doc_cn`` 两个部分。文档都是通过 `cmake`_ 驱动 `sphinx`_ 编译生成,生成后的文档分别存储在编译目录的 ``doc`` 和 ``doc_cn`` 两个子目录下。 也可以利用PaddlePaddle 工具来编译文档,这个情况下所有的文件会存在整理过的的文件目录 .ppo_workspace/content 下 diff --git a/doc/howto/dev/write_docs_en.rst b/doc/dev/write_docs_en.rst similarity index 98% rename from doc/howto/dev/write_docs_en.rst rename to doc/dev/write_docs_en.rst index b3ef07eb1d0..f3408a84269 100644 --- a/doc/howto/dev/write_docs_en.rst +++ b/doc/dev/write_docs_en.rst @@ -1,6 +1,6 @@ -################## +######################## Contribute Documentation -################## +######################## PaddlePaddle supports English documentation ``doc`` and Chinese documentation ``doc_cn``. Both are compiled by `cmake`_ and `sphinx`_ , the compiled documentations will be stored under ``doc`` and ``doc_cn`` directories. diff --git a/doc/howto/usage/capi/compile_paddle_lib_cn.md b/doc/howto/capi/compile_paddle_lib_cn.md similarity index 99% rename from doc/howto/usage/capi/compile_paddle_lib_cn.md rename to doc/howto/capi/compile_paddle_lib_cn.md index ac5ecffe2ea..fd8dec81645 100644 --- a/doc/howto/usage/capi/compile_paddle_lib_cn.md +++ b/doc/howto/capi/compile_paddle_lib_cn.md @@ -1,4 +1,4 @@ -## 编译 PaddlePaddle 预测库 +## 安装与编译C-API预测库 ### 概述 diff --git a/doc/howto/usage/capi/images/csr.png b/doc/howto/capi/images/csr.png similarity index 100% rename from doc/howto/usage/capi/images/csr.png rename to doc/howto/capi/images/csr.png diff --git a/doc/howto/usage/capi/images/sequence_data.png b/doc/howto/capi/images/sequence_data.png similarity index 100% rename from doc/howto/usage/capi/images/sequence_data.png rename to doc/howto/capi/images/sequence_data.png diff --git a/doc/howto/usage/capi/images/workflow_of_CAPI.png b/doc/howto/capi/images/workflow_of_CAPI.png similarity index 100% rename from doc/howto/usage/capi/images/workflow_of_CAPI.png rename to doc/howto/capi/images/workflow_of_CAPI.png diff --git a/doc/howto/usage/capi/index_cn.rst b/doc/howto/capi/index_cn.rst similarity index 87% rename from doc/howto/usage/capi/index_cn.rst rename to doc/howto/capi/index_cn.rst index fd774fbc742..e589a6d346a 100644 --- a/doc/howto/usage/capi/index_cn.rst +++ b/doc/howto/capi/index_cn.rst @@ -1,4 +1,4 @@ -PaddlePaddle C-API +C-API预测库 ================== .. toctree:: diff --git a/doc/howto/usage/capi/organization_of_the_inputs_cn.md b/doc/howto/capi/organization_of_the_inputs_cn.md similarity index 100% rename from doc/howto/usage/capi/organization_of_the_inputs_cn.md rename to doc/howto/capi/organization_of_the_inputs_cn.md diff --git a/doc/howto/usage/capi/workflow_of_capi_cn.md b/doc/howto/capi/workflow_of_capi_cn.md similarity index 99% rename from doc/howto/usage/capi/workflow_of_capi_cn.md rename to doc/howto/capi/workflow_of_capi_cn.md index e0a42fff12c..a61d2267bfd 100644 --- a/doc/howto/usage/capi/workflow_of_capi_cn.md +++ b/doc/howto/capi/workflow_of_capi_cn.md @@ -1,4 +1,4 @@ -## C-API 使用流程 +## C-API使用流程 这篇文档介绍 PaddlePaddle C-API 整体使用流程。 diff --git a/doc/howto/usage/cluster/cluster_train_cn.md b/doc/howto/cluster/cluster_train_cn.md similarity index 100% rename from doc/howto/usage/cluster/cluster_train_cn.md rename to doc/howto/cluster/cluster_train_cn.md diff --git a/doc/howto/usage/cluster/cluster_train_en.md b/doc/howto/cluster/cluster_train_en.md similarity index 100% rename from doc/howto/usage/cluster/cluster_train_en.md rename to doc/howto/cluster/cluster_train_en.md diff --git a/doc/howto/usage/cluster/fabric_cn.md b/doc/howto/cluster/fabric_cn.md similarity index 100% rename from doc/howto/usage/cluster/fabric_cn.md rename to doc/howto/cluster/fabric_cn.md diff --git a/doc/howto/usage/cluster/fabric_en.md b/doc/howto/cluster/fabric_en.md similarity index 100% rename from doc/howto/usage/cluster/fabric_en.md rename to doc/howto/cluster/fabric_en.md diff --git a/doc/howto/usage/cluster/fluid_cluster_train_en.md b/doc/howto/cluster/fluid_cluster_train_en.md similarity index 100% rename from doc/howto/usage/cluster/fluid_cluster_train_en.md rename to doc/howto/cluster/fluid_cluster_train_en.md diff --git a/doc/howto/usage/cluster/k8s_aws_cn.md b/doc/howto/cluster/k8s_aws_cn.md similarity index 100% rename from doc/howto/usage/cluster/k8s_aws_cn.md rename to doc/howto/cluster/k8s_aws_cn.md diff --git a/doc/howto/usage/cluster/k8s_aws_en.md b/doc/howto/cluster/k8s_aws_en.md similarity index 100% rename from doc/howto/usage/cluster/k8s_aws_en.md rename to doc/howto/cluster/k8s_aws_en.md diff --git a/doc/howto/usage/cluster/k8s_cn.md b/doc/howto/cluster/k8s_cn.md similarity index 100% rename from doc/howto/usage/cluster/k8s_cn.md rename to doc/howto/cluster/k8s_cn.md diff --git a/doc/howto/usage/cluster/k8s_distributed_cn.md b/doc/howto/cluster/k8s_distributed_cn.md similarity index 100% rename from doc/howto/usage/cluster/k8s_distributed_cn.md rename to doc/howto/cluster/k8s_distributed_cn.md diff --git a/doc/howto/usage/cluster/k8s_en.md b/doc/howto/cluster/k8s_en.md similarity index 100% rename from doc/howto/usage/cluster/k8s_en.md rename to doc/howto/cluster/k8s_en.md diff --git a/doc/howto/usage/cluster/openmpi_cn.md b/doc/howto/cluster/openmpi_cn.md similarity index 100% rename from doc/howto/usage/cluster/openmpi_cn.md rename to doc/howto/cluster/openmpi_cn.md diff --git a/doc/howto/usage/cluster/openmpi_en.md b/doc/howto/cluster/openmpi_en.md similarity index 100% rename from doc/howto/usage/cluster/openmpi_en.md rename to doc/howto/cluster/openmpi_en.md diff --git a/doc/howto/usage/cluster/src/Dockerfile b/doc/howto/cluster/src/Dockerfile similarity index 100% rename from doc/howto/usage/cluster/src/Dockerfile rename to doc/howto/cluster/src/Dockerfile diff --git a/doc/howto/usage/cluster/src/add_security_group.png b/doc/howto/cluster/src/add_security_group.png similarity index 100% rename from doc/howto/usage/cluster/src/add_security_group.png rename to doc/howto/cluster/src/add_security_group.png diff --git a/doc/howto/usage/cluster/src/create_efs.png b/doc/howto/cluster/src/create_efs.png similarity index 100% rename from doc/howto/usage/cluster/src/create_efs.png rename to doc/howto/cluster/src/create_efs.png diff --git a/doc/howto/usage/cluster/src/efs_mount.png b/doc/howto/cluster/src/efs_mount.png similarity index 100% rename from doc/howto/usage/cluster/src/efs_mount.png rename to doc/howto/cluster/src/efs_mount.png diff --git a/doc/howto/usage/cluster/src/k8s-paddle-arch.png b/doc/howto/cluster/src/k8s-paddle-arch.png similarity index 100% rename from doc/howto/usage/cluster/src/k8s-paddle-arch.png rename to doc/howto/cluster/src/k8s-paddle-arch.png diff --git a/doc/howto/usage/cluster/src/k8s_data/Dockerfile b/doc/howto/cluster/src/k8s_data/Dockerfile similarity index 100% rename from doc/howto/usage/cluster/src/k8s_data/Dockerfile rename to doc/howto/cluster/src/k8s_data/Dockerfile diff --git a/doc/howto/usage/cluster/src/k8s_data/README.md b/doc/howto/cluster/src/k8s_data/README.md similarity index 100% rename from doc/howto/usage/cluster/src/k8s_data/README.md rename to doc/howto/cluster/src/k8s_data/README.md diff --git a/doc/howto/usage/cluster/src/k8s_data/get_data.sh b/doc/howto/cluster/src/k8s_data/get_data.sh similarity index 100% rename from doc/howto/usage/cluster/src/k8s_data/get_data.sh rename to doc/howto/cluster/src/k8s_data/get_data.sh diff --git a/doc/howto/usage/cluster/src/k8s_train/Dockerfile b/doc/howto/cluster/src/k8s_train/Dockerfile similarity index 100% rename from doc/howto/usage/cluster/src/k8s_train/Dockerfile rename to doc/howto/cluster/src/k8s_train/Dockerfile diff --git a/doc/howto/usage/cluster/src/k8s_train/README.md b/doc/howto/cluster/src/k8s_train/README.md similarity index 100% rename from doc/howto/usage/cluster/src/k8s_train/README.md rename to doc/howto/cluster/src/k8s_train/README.md diff --git a/doc/howto/usage/cluster/src/k8s_train/start.sh b/doc/howto/cluster/src/k8s_train/start.sh similarity index 100% rename from doc/howto/usage/cluster/src/k8s_train/start.sh rename to doc/howto/cluster/src/k8s_train/start.sh diff --git a/doc/howto/usage/cluster/src/k8s_train/start_paddle.py b/doc/howto/cluster/src/k8s_train/start_paddle.py similarity index 100% rename from doc/howto/usage/cluster/src/k8s_train/start_paddle.py rename to doc/howto/cluster/src/k8s_train/start_paddle.py diff --git a/doc/howto/usage/cluster/src/managed_policy.png b/doc/howto/cluster/src/managed_policy.png similarity index 100% rename from doc/howto/usage/cluster/src/managed_policy.png rename to doc/howto/cluster/src/managed_policy.png diff --git a/doc/howto/usage/cluster/src/pserver_and_trainer.png b/doc/howto/cluster/src/pserver_and_trainer.png similarity index 100% rename from doc/howto/usage/cluster/src/pserver_and_trainer.png rename to doc/howto/cluster/src/pserver_and_trainer.png diff --git a/doc/howto/usage/cluster/src/route53_create_recordset.png b/doc/howto/cluster/src/route53_create_recordset.png similarity index 100% rename from doc/howto/usage/cluster/src/route53_create_recordset.png rename to doc/howto/cluster/src/route53_create_recordset.png diff --git a/doc/howto/usage/cluster/src/route53_create_zone.png b/doc/howto/cluster/src/route53_create_zone.png similarity index 100% rename from doc/howto/usage/cluster/src/route53_create_zone.png rename to doc/howto/cluster/src/route53_create_zone.png diff --git a/doc/howto/usage/cluster/src/trainer.png b/doc/howto/cluster/src/trainer.png similarity index 100% rename from doc/howto/usage/cluster/src/trainer.png rename to doc/howto/cluster/src/trainer.png diff --git a/doc/howto/usage/cluster/src/trainer_cn.png b/doc/howto/cluster/src/trainer_cn.png similarity index 100% rename from doc/howto/usage/cluster/src/trainer_cn.png rename to doc/howto/cluster/src/trainer_cn.png diff --git a/doc/howto/usage/cluster/src/word2vec/api_train_v2.py b/doc/howto/cluster/src/word2vec/api_train_v2.py similarity index 100% rename from doc/howto/usage/cluster/src/word2vec/api_train_v2.py rename to doc/howto/cluster/src/word2vec/api_train_v2.py diff --git a/doc/howto/usage/cluster/src/word2vec/api_train_v2_cluster.py b/doc/howto/cluster/src/word2vec/api_train_v2_cluster.py similarity index 100% rename from doc/howto/usage/cluster/src/word2vec/api_train_v2_cluster.py rename to doc/howto/cluster/src/word2vec/api_train_v2_cluster.py diff --git a/doc/howto/usage/cluster/src/word2vec/prepare.py b/doc/howto/cluster/src/word2vec/prepare.py similarity index 100% rename from doc/howto/usage/cluster/src/word2vec/prepare.py rename to doc/howto/cluster/src/word2vec/prepare.py diff --git a/doc/howto/usage/cluster/src/worker_security_group.png b/doc/howto/cluster/src/worker_security_group.png similarity index 100% rename from doc/howto/usage/cluster/src/worker_security_group.png rename to doc/howto/cluster/src/worker_security_group.png diff --git a/doc/howto/usage/cmd_parameter/arguments_cn.md b/doc/howto/cmd_parameter/arguments_cn.md similarity index 100% rename from doc/howto/usage/cmd_parameter/arguments_cn.md rename to doc/howto/cmd_parameter/arguments_cn.md diff --git a/doc/howto/usage/cmd_parameter/arguments_en.md b/doc/howto/cmd_parameter/arguments_en.md similarity index 100% rename from doc/howto/usage/cmd_parameter/arguments_en.md rename to doc/howto/cmd_parameter/arguments_en.md diff --git a/doc/howto/usage/cmd_parameter/detail_introduction_cn.md b/doc/howto/cmd_parameter/detail_introduction_cn.md similarity index 100% rename from doc/howto/usage/cmd_parameter/detail_introduction_cn.md rename to doc/howto/cmd_parameter/detail_introduction_cn.md diff --git a/doc/howto/usage/cmd_parameter/detail_introduction_en.md b/doc/howto/cmd_parameter/detail_introduction_en.md similarity index 100% rename from doc/howto/usage/cmd_parameter/detail_introduction_en.md rename to doc/howto/cmd_parameter/detail_introduction_en.md diff --git a/doc/howto/usage/cmd_parameter/index_cn.rst b/doc/howto/cmd_parameter/index_cn.rst similarity index 85% rename from doc/howto/usage/cmd_parameter/index_cn.rst rename to doc/howto/cmd_parameter/index_cn.rst index 4c872982111..17b379f6295 100644 --- a/doc/howto/usage/cmd_parameter/index_cn.rst +++ b/doc/howto/cmd_parameter/index_cn.rst @@ -1,6 +1,6 @@ .. _cmd_line_index: -设置命令行参数 +命令行参数设置 =============== .. toctree:: diff --git a/doc/howto/usage/cmd_parameter/index_en.rst b/doc/howto/cmd_parameter/index_en.rst similarity index 100% rename from doc/howto/usage/cmd_parameter/index_en.rst rename to doc/howto/cmd_parameter/index_en.rst diff --git a/doc/howto/usage/cmd_parameter/use_case_cn.md b/doc/howto/cmd_parameter/use_case_cn.md similarity index 100% rename from doc/howto/usage/cmd_parameter/use_case_cn.md rename to doc/howto/cmd_parameter/use_case_cn.md diff --git a/doc/howto/usage/cmd_parameter/use_case_en.md b/doc/howto/cmd_parameter/use_case_en.md similarity index 100% rename from doc/howto/usage/cmd_parameter/use_case_en.md rename to doc/howto/cmd_parameter/use_case_en.md diff --git a/doc/howto/dev/contribute_to_paddle_en.md b/doc/howto/dev/contribute_to_paddle_en.md deleted file mode 120000 index c97564d93a7..00000000000 --- a/doc/howto/dev/contribute_to_paddle_en.md +++ /dev/null @@ -1 +0,0 @@ -../../../CONTRIBUTING.md \ No newline at end of file diff --git a/doc/howto/index_cn.rst b/doc/howto/index_cn.rst index e0c69f7a6a4..37a34c113f3 100644 --- a/doc/howto/index_cn.rst +++ b/doc/howto/index_cn.rst @@ -1,37 +1,11 @@ 进阶指南 ======== -使用说明 --------- - -.. toctree:: - :maxdepth: 1 - - usage/cmd_parameter/index_cn.rst - usage/cluster/cluster_train_cn.md - usage/capi/index_cn.rst - -开发标准 --------- - -.. toctree:: - :maxdepth: 1 - - dev/contribute_to_paddle_cn.md - dev/write_docs_cn.rst - -模型配置 --------- - -.. toctree:: - :maxdepth: 1 - - deep_model/rnn/index_cn.rst - -性能优化 --------- - .. toctree:: :maxdepth: 1 + cmd_parameter/index_cn.rst + cluster/cluster_train_cn.md + capi/index_cn.rst + rnn/index_cn.rst optimization/gpu_profiling_cn.rst diff --git a/doc/howto/index_en.rst b/doc/howto/index_en.rst index 6d1bf7dfc00..3ba76d6aad1 100644 --- a/doc/howto/index_en.rst +++ b/doc/howto/index_en.rst @@ -1,37 +1,10 @@ HOW TO ======= -Usage -------- - -.. toctree:: - :maxdepth: 1 - - usage/cmd_parameter/index_en.rst - usage/cluster/cluster_train_en.md - -Development ------------- - -.. toctree:: - :maxdepth: 1 - - dev/new_layer_en.rst - dev/contribute_to_paddle_en.md - dev/write_docs_en.rst - -Configuration -------------- - -.. toctree:: - :maxdepth: 1 - - deep_model/rnn/index_en.rst - -Optimization -------------- - .. toctree:: :maxdepth: 1 + cmd_parameter/index_en.rst + cluster/cluster_train_en.md + rnn/index_en.rst optimization/gpu_profiling_en.rst diff --git a/doc/howto/optimization/cpu_profiling.md b/doc/howto/optimization/cpu_profiling_en.md similarity index 100% rename from doc/howto/optimization/cpu_profiling.md rename to doc/howto/optimization/cpu_profiling_en.md diff --git a/doc/howto/optimization/gpu_profiling_cn.rst b/doc/howto/optimization/gpu_profiling_cn.rst index e2b0b0396e0..0239eef4f11 100644 --- a/doc/howto/optimization/gpu_profiling_cn.rst +++ b/doc/howto/optimization/gpu_profiling_cn.rst @@ -1,6 +1,6 @@ -================== -GPU性能分析与调优 -================== +============ +GPU性能调优 +============ .. contents:: diff --git a/doc/howto/deep_model/rnn/hierarchical_layer_cn.rst b/doc/howto/rnn/hierarchical_layer_cn.rst similarity index 100% rename from doc/howto/deep_model/rnn/hierarchical_layer_cn.rst rename to doc/howto/rnn/hierarchical_layer_cn.rst diff --git a/doc/howto/deep_model/rnn/hrnn_rnn_api_compare_cn.rst b/doc/howto/rnn/hrnn_rnn_api_compare_cn.rst similarity index 100% rename from doc/howto/deep_model/rnn/hrnn_rnn_api_compare_cn.rst rename to doc/howto/rnn/hrnn_rnn_api_compare_cn.rst diff --git a/doc/howto/deep_model/rnn/index_cn.rst b/doc/howto/rnn/index_cn.rst similarity index 100% rename from doc/howto/deep_model/rnn/index_cn.rst rename to doc/howto/rnn/index_cn.rst diff --git a/doc/howto/deep_model/rnn/index_en.rst b/doc/howto/rnn/index_en.rst similarity index 100% rename from doc/howto/deep_model/rnn/index_en.rst rename to doc/howto/rnn/index_en.rst diff --git a/doc/howto/deep_model/rnn/recurrent_group_cn.md b/doc/howto/rnn/recurrent_group_cn.md similarity index 100% rename from doc/howto/deep_model/rnn/recurrent_group_cn.md rename to doc/howto/rnn/recurrent_group_cn.md diff --git a/doc/howto/deep_model/rnn/rnn_config_cn.rst b/doc/howto/rnn/rnn_config_cn.rst similarity index 100% rename from doc/howto/deep_model/rnn/rnn_config_cn.rst rename to doc/howto/rnn/rnn_config_cn.rst diff --git a/doc/howto/deep_model/rnn/rnn_config_en.rst b/doc/howto/rnn/rnn_config_en.rst similarity index 100% rename from doc/howto/deep_model/rnn/rnn_config_en.rst rename to doc/howto/rnn/rnn_config_en.rst diff --git a/doc/howto/deep_model/rnn/src/bi_lstm.jpg b/doc/howto/rnn/src/bi_lstm.jpg similarity index 100% rename from doc/howto/deep_model/rnn/src/bi_lstm.jpg rename to doc/howto/rnn/src/bi_lstm.jpg diff --git a/doc/howto/deep_model/rnn/src/encoder-decoder-attention-model.png b/doc/howto/rnn/src/encoder-decoder-attention-model.png similarity index 100% rename from doc/howto/deep_model/rnn/src/encoder-decoder-attention-model.png rename to doc/howto/rnn/src/encoder-decoder-attention-model.png diff --git a/doc/howto/deep_model/rnn/src/glossary_rnn.dot b/doc/howto/rnn/src/glossary_rnn.dot similarity index 100% rename from doc/howto/deep_model/rnn/src/glossary_rnn.dot rename to doc/howto/rnn/src/glossary_rnn.dot diff --git a/doc/howto/deep_model/rnn/src/glossary_rnn_with_memory.dot b/doc/howto/rnn/src/glossary_rnn_with_memory.dot similarity index 100% rename from doc/howto/deep_model/rnn/src/glossary_rnn_with_memory.dot rename to doc/howto/rnn/src/glossary_rnn_with_memory.dot diff --git a/doc/howto/deep_model/rnn/src/simple_full_hierarchical_recurrent.dot b/doc/howto/rnn/src/simple_full_hierarchical_recurrent.dot similarity index 100% rename from doc/howto/deep_model/rnn/src/simple_full_hierarchical_recurrent.dot rename to doc/howto/rnn/src/simple_full_hierarchical_recurrent.dot diff --git a/doc/howto/deep_model/rnn/src/simple_full_recurrent.dot b/doc/howto/rnn/src/simple_full_recurrent.dot similarity index 100% rename from doc/howto/deep_model/rnn/src/simple_full_recurrent.dot rename to doc/howto/rnn/src/simple_full_recurrent.dot diff --git a/doc/index_cn.rst b/doc/index_cn.rst index 9279bac7f4b..63a78428583 100644 --- a/doc/index_cn.rst +++ b/doc/index_cn.rst @@ -5,6 +5,8 @@ PaddlePaddle 文档 :maxdepth: 1 getstarted/index_cn.rst + build_and_install/index_cn.rst howto/index_cn.rst + dev/index_cn.rst api/index_cn.rst faq/index_cn.rst diff --git a/doc/index_en.rst b/doc/index_en.rst index 64684b8b9b2..5631381be08 100644 --- a/doc/index_en.rst +++ b/doc/index_en.rst @@ -5,5 +5,7 @@ PaddlePaddle Documentation :maxdepth: 1 getstarted/index_en.rst + build_and_install/index_en.rst howto/index_en.rst + dev/index_en.rst api/index_en.rst -- GitLab From e5832019a8906728ecf8e3f51552c738acd26e22 Mon Sep 17 00:00:00 2001 From: kexinzhao Date: Tue, 6 Feb 2018 21:54:49 -0800 Subject: [PATCH 072/138] Inference example and unit test for label_semantic_roles (#8058) * set up python code * fix bug * add cc file * fix cmake * add inference test for label semantic role * fix * address comments * address comments * address comments * address comments * add use_cuda --- paddle/inference/tests/book/CMakeLists.txt | 6 + paddle/inference/tests/book/test_helper.h | 104 ++++++++++++++++ .../test_inference_label_semantic_roles.cc | 81 +++++++++++++ .../book/test_inference_recognize_digits.cc | 81 +------------ .../tests/book/test_label_semantic_roles.py | 114 ++++++++++++++++-- 5 files changed, 299 insertions(+), 87 deletions(-) create mode 100644 paddle/inference/tests/book/test_helper.h create mode 100644 paddle/inference/tests/book/test_inference_label_semantic_roles.cc diff --git a/paddle/inference/tests/book/CMakeLists.txt b/paddle/inference/tests/book/CMakeLists.txt index 4c71517dc98..8f48b2f0e02 100644 --- a/paddle/inference/tests/book/CMakeLists.txt +++ b/paddle/inference/tests/book/CMakeLists.txt @@ -11,9 +11,15 @@ cc_test(test_inference_image_classification_resnet SRCS test_inference_image_classification.cc DEPS ARCHIVE_START paddle_fluid ARCHIVE_END ARGS --dirname=${PYTHON_TESTS_DIR}/book/image_classification_resnet.inference.model) +cc_test(test_inference_label_semantic_roles + SRCS test_inference_label_semantic_roles.cc + DEPS ARCHIVE_START paddle_fluid ARCHIVE_END + ARGS --dirname=${PYTHON_TESTS_DIR}/book/label_semantic_roles.inference.model) set_tests_properties(test_inference_recognize_digits_mlp PROPERTIES DEPENDS test_recognize_digits) set_tests_properties(test_inference_image_classification_vgg PROPERTIES DEPENDS test_image_classification_train) set_tests_properties(test_inference_image_classification_resnet PROPERTIES DEPENDS test_image_classification_train) +set_tests_properties(test_inference_label_semantic_roles + PROPERTIES DEPENDS test_label_semantic_roles) diff --git a/paddle/inference/tests/book/test_helper.h b/paddle/inference/tests/book/test_helper.h new file mode 100644 index 00000000000..17c3d58de6a --- /dev/null +++ b/paddle/inference/tests/book/test_helper.h @@ -0,0 +1,104 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/framework/lod_tensor.h" +#include "paddle/inference/io.h" + +template +void SetupTensor(paddle::framework::LoDTensor& input, + paddle::framework::DDim dims, + T lower, + T upper) { + srand(time(0)); + T* input_ptr = input.mutable_data(dims, paddle::platform::CPUPlace()); + for (int i = 0; i < input.numel(); ++i) { + input_ptr[i] = + (static_cast(rand()) / static_cast(RAND_MAX)) * (upper - lower) + + lower; + } +} + +template +void SetupLoDTensor(paddle::framework::LoDTensor& input, + paddle::framework::LoD& lod, + T lower, + T upper) { + input.set_lod(lod); + int dim = lod[0][lod[0].size() - 1]; + SetupTensor(input, {dim, 1}, lower, upper); +} + +template +void CheckError(paddle::framework::LoDTensor& output1, + paddle::framework::LoDTensor& output2) { + // Check lod information + EXPECT_EQ(output1.lod(), output2.lod()); + + EXPECT_EQ(output1.dims(), output2.dims()); + EXPECT_EQ(output1.numel(), output2.numel()); + + T err = static_cast(0); + if (typeid(T) == typeid(float)) { + err = 1E-3; + } else if (typeid(T) == typeid(double)) { + err = 1E-6; + } else { + err = 0; + } + + size_t count = 0; + for (int64_t i = 0; i < output1.numel(); ++i) { + if (fabs(output1.data()[i] - output2.data()[i]) > err) { + count++; + } + } + EXPECT_EQ(count, 0) << "There are " << count << " different elements."; +} + +template +void TestInference(const std::string& dirname, + const std::vector& cpu_feeds, + std::vector& cpu_fetchs) { + // 1. Define place, executor and scope + auto place = Place(); + auto executor = paddle::framework::Executor(place); + auto* scope = new paddle::framework::Scope(); + + // 2. Initialize the inference_program and load all parameters from file + auto inference_program = paddle::inference::Load(executor, *scope, dirname); + + // 3. Get the feed_target_names and fetch_target_names + const std::vector& feed_target_names = + inference_program->GetFeedTargetNames(); + const std::vector& fetch_target_names = + inference_program->GetFetchTargetNames(); + + // 4. Prepare inputs: set up maps for feed targets + std::map feed_targets; + for (size_t i = 0; i < feed_target_names.size(); ++i) { + // Please make sure that cpu_feeds[i] is right for feed_target_names[i] + feed_targets[feed_target_names[i]] = cpu_feeds[i]; + } + + // 5. Define Tensor to get the outputs: set up maps for fetch targets + std::map fetch_targets; + for (size_t i = 0; i < fetch_target_names.size(); ++i) { + fetch_targets[fetch_target_names[i]] = cpu_fetchs[i]; + } + + // 6. Run the inference program + executor.Run(*inference_program, scope, feed_targets, fetch_targets); + + delete scope; +} diff --git a/paddle/inference/tests/book/test_inference_label_semantic_roles.cc b/paddle/inference/tests/book/test_inference_label_semantic_roles.cc new file mode 100644 index 00000000000..c5646db2a77 --- /dev/null +++ b/paddle/inference/tests/book/test_inference_label_semantic_roles.cc @@ -0,0 +1,81 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include +#include +#include "gflags/gflags.h" +#include "test_helper.h" + +DEFINE_string(dirname, "", "Directory of the inference model."); + +TEST(inference, label_semantic_roles) { + if (FLAGS_dirname.empty()) { + LOG(FATAL) << "Usage: ./example --dirname=path/to/your/model"; + } + + LOG(INFO) << "FLAGS_dirname: " << FLAGS_dirname << std::endl; + std::string dirname = FLAGS_dirname; + + // 0. Call `paddle::framework::InitDevices()` initialize all the devices + // In unittests, this is done in paddle/testing/paddle_gtest_main.cc + + paddle::framework::LoDTensor word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, + ctx_p2, mark; + paddle::framework::LoD lod{{0, 4, 10}}; + + SetupLoDTensor(word, lod, static_cast(0), static_cast(1)); + SetupLoDTensor( + predicate, lod, static_cast(0), static_cast(1)); + SetupLoDTensor(ctx_n2, lod, static_cast(0), static_cast(1)); + SetupLoDTensor(ctx_n1, lod, static_cast(0), static_cast(1)); + SetupLoDTensor(ctx_0, lod, static_cast(0), static_cast(1)); + SetupLoDTensor(ctx_p1, lod, static_cast(0), static_cast(1)); + SetupLoDTensor(ctx_p2, lod, static_cast(0), static_cast(1)); + SetupLoDTensor(mark, lod, static_cast(0), static_cast(1)); + + std::vector cpu_feeds; + cpu_feeds.push_back(&word); + cpu_feeds.push_back(&predicate); + cpu_feeds.push_back(&ctx_n2); + cpu_feeds.push_back(&ctx_n1); + cpu_feeds.push_back(&ctx_0); + cpu_feeds.push_back(&ctx_p1); + cpu_feeds.push_back(&ctx_p2); + cpu_feeds.push_back(&mark); + + paddle::framework::LoDTensor output1; + std::vector cpu_fetchs1; + cpu_fetchs1.push_back(&output1); + + // Run inference on CPU + TestInference( + dirname, cpu_feeds, cpu_fetchs1); + LOG(INFO) << output1.lod(); + LOG(INFO) << output1.dims(); + +#ifdef PADDLE_WITH_CUDA + paddle::framework::LoDTensor output2; + std::vector cpu_fetchs2; + cpu_fetchs2.push_back(&output2); + + // Run inference on CUDA GPU + TestInference( + dirname, cpu_feeds, cpu_fetchs2); + LOG(INFO) << output2.lod(); + LOG(INFO) << output2.dims(); + + CheckError(output1, output2); +#endif +} diff --git a/paddle/inference/tests/book/test_inference_recognize_digits.cc b/paddle/inference/tests/book/test_inference_recognize_digits.cc index ce8772587f3..2c0cf941001 100644 --- a/paddle/inference/tests/book/test_inference_recognize_digits.cc +++ b/paddle/inference/tests/book/test_inference_recognize_digits.cc @@ -16,89 +16,10 @@ limitations under the License. */ #include #include #include "gflags/gflags.h" -#include "paddle/framework/lod_tensor.h" -#include "paddle/inference/io.h" +#include "test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); -template -void TestInference(const std::string& dirname, - const std::vector& cpu_feeds, - std::vector& cpu_fetchs) { - // 1. Define place, executor and scope - auto place = Place(); - auto executor = paddle::framework::Executor(place); - auto* scope = new paddle::framework::Scope(); - - // 2. Initialize the inference_program and load all parameters from file - auto inference_program = paddle::inference::Load(executor, *scope, dirname); - - // 3. Get the feed_target_names and fetch_target_names - const std::vector& feed_target_names = - inference_program->GetFeedTargetNames(); - const std::vector& fetch_target_names = - inference_program->GetFetchTargetNames(); - - // 4. Prepare inputs: set up maps for feed targets - std::map feed_targets; - for (size_t i = 0; i < feed_target_names.size(); ++i) { - // Please make sure that cpu_feeds[i] is right for feed_target_names[i] - feed_targets[feed_target_names[i]] = cpu_feeds[i]; - } - - // 5. Define Tensor to get the outputs: set up maps for fetch targets - std::map fetch_targets; - for (size_t i = 0; i < fetch_target_names.size(); ++i) { - fetch_targets[fetch_target_names[i]] = cpu_fetchs[i]; - } - - // 6. Run the inference program - executor.Run(*inference_program, scope, feed_targets, fetch_targets); - - delete scope; -} - -template -void SetupTensor(paddle::framework::LoDTensor& input, - paddle::framework::DDim dims, - T lower, - T upper) { - srand(time(0)); - float* input_ptr = input.mutable_data(dims, paddle::platform::CPUPlace()); - for (int i = 0; i < input.numel(); ++i) { - input_ptr[i] = - (static_cast(rand()) / static_cast(RAND_MAX)) * (upper - lower) + - lower; - } -} - -template -void CheckError(paddle::framework::LoDTensor& output1, - paddle::framework::LoDTensor& output2) { - // Check lod information - EXPECT_EQ(output1.lod(), output2.lod()); - - EXPECT_EQ(output1.dims(), output2.dims()); - EXPECT_EQ(output1.numel(), output2.numel()); - - T err = static_cast(0); - if (typeid(T) == typeid(float)) { - err = 1E-3; - } else if (typeid(T) == typeid(double)) { - err = 1E-6; - } else { - err = 0; - } - - size_t count = 0; - for (int64_t i = 0; i < output1.numel(); ++i) { - if (fabs(output1.data()[i] - output2.data()[i]) > err) { - count++; - } - } - EXPECT_EQ(count, 0) << "There are " << count << " different elements."; -} - TEST(inference, recognize_digits) { if (FLAGS_dirname.empty()) { LOG(FATAL) << "Usage: ./example --dirname=path/to/your/model"; diff --git a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py index f85768de99a..1491f7a8d54 100644 --- a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py @@ -18,7 +18,9 @@ import numpy as np import paddle.v2 as paddle import paddle.v2.dataset.conll05 as conll05 import paddle.v2.fluid as fluid +import contextlib import time +import unittest word_dict, verb_dict, label_dict = conll05.get_dict() word_dict_len = len(word_dict) @@ -127,7 +129,15 @@ def to_lodtensor(data, place): return res -def main(): +def create_random_lodtensor(lod, place, low, high): + data = np.random.random_integers(low, high, [lod[-1], 1]).astype("int64") + res = fluid.LoDTensor() + res.set(data, place) + res.set_lod([lod]) + return res + + +def train(use_cuda, save_dirname=None): # define network topology word = fluid.layers.data( name='word_data', shape=[1], dtype='int64', lod_level=1) @@ -175,8 +185,8 @@ def main(): paddle.reader.shuffle( paddle.dataset.conll05.test(), buf_size=8192), batch_size=BATCH_SIZE) - # place = fluid.CPUPlace() - place = fluid.CUDAPlace(0) + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() feeder = fluid.DataFeeder( feed_list=[ word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate, mark, target @@ -211,12 +221,102 @@ def main(): if batch_id != 0: print("second per batch: " + str((time.time() - start_time) / batch_id)) - - # exit early for CI - exit(0) + # Set the threshold low to speed up the CI test + if float(pass_precision) > 0.05: + if save_dirname is not None: + fluid.io.save_inference_model(save_dirname, [ + 'word_data', 'verb_data', 'ctx_n2_data', + 'ctx_n1_data', 'ctx_0_data', 'ctx_p1_data', + 'ctx_p2_data', 'mark_data' + ], [feature_out], exe) + return batch_id = batch_id + 1 +def infer(use_cuda, save_dirname=None): + if save_dirname is None: + return + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + + lod = [0, 4, 10] + ts_word = create_random_lodtensor(lod, place, low=0, high=1) + ts_pred = create_random_lodtensor(lod, place, low=0, high=1) + ts_ctx_n2 = create_random_lodtensor(lod, place, low=0, high=1) + ts_ctx_n1 = create_random_lodtensor(lod, place, low=0, high=1) + ts_ctx_0 = create_random_lodtensor(lod, place, low=0, high=1) + ts_ctx_p1 = create_random_lodtensor(lod, place, low=0, high=1) + ts_ctx_p2 = create_random_lodtensor(lod, place, low=0, high=1) + ts_mark = create_random_lodtensor(lod, place, low=0, high=1) + + # Construct feed as a dictionary of {feed_target_name: feed_target_data} + # and results will contain a list of data corresponding to fetch_targets. + assert feed_target_names[0] == 'word_data' + assert feed_target_names[1] == 'verb_data' + assert feed_target_names[2] == 'ctx_n2_data' + assert feed_target_names[3] == 'ctx_n1_data' + assert feed_target_names[4] == 'ctx_0_data' + assert feed_target_names[5] == 'ctx_p1_data' + assert feed_target_names[6] == 'ctx_p2_data' + assert feed_target_names[7] == 'mark_data' + + results = exe.run(inference_program, + feed={ + feed_target_names[0]: ts_word, + feed_target_names[1]: ts_pred, + feed_target_names[2]: ts_ctx_n2, + feed_target_names[3]: ts_ctx_n1, + feed_target_names[4]: ts_ctx_0, + feed_target_names[5]: ts_ctx_p1, + feed_target_names[6]: ts_ctx_p2, + feed_target_names[7]: ts_mark + }, + fetch_list=fetch_targets, + return_numpy=False) + print(results[0].lod()) + np_data = np.array(results[0]) + print("Inference Shape: ", np_data.shape) + print("Inference results: ", np_data) + + +def main(use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + + # Directory for saving the trained model + save_dirname = "label_semantic_roles.inference.model" + + train(use_cuda, save_dirname) + infer(use_cuda, save_dirname) + + +class TestLabelSemanticRoles(unittest.TestCase): + def test_cuda(self): + with self.scope_prog_guard(): + main(use_cuda=True) + + def test_cpu(self): + with self.scope_prog_guard(): + main(use_cuda=False) + + @contextlib.contextmanager + def scope_prog_guard(self): + prog = fluid.Program() + startup_prog = fluid.Program() + scope = fluid.core.Scope() + with fluid.scope_guard(scope): + with fluid.program_guard(prog, startup_prog): + yield + + if __name__ == '__main__': - main() + unittest.main() -- GitLab From c1349d98aa48060b449c4eea4dfc95a2989ad203 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 7 Feb 2018 14:43:14 +0800 Subject: [PATCH 073/138] fix compile errors --- paddle/framework/reader.cc | 2 ++ paddle/framework/reader.h | 11 ++++++++-- paddle/operators/CMakeLists.txt | 2 +- paddle/operators/create_reader_op.cc | 22 ++++++++++--------- paddle/operators/read_op.cc | 5 ++++- .../paddle/v2/fluid/tests/test_cpp_reader.py | 6 ++--- 6 files changed, 31 insertions(+), 17 deletions(-) diff --git a/paddle/framework/reader.cc b/paddle/framework/reader.cc index 86220cd0bba..928b661aaad 100644 --- a/paddle/framework/reader.cc +++ b/paddle/framework/reader.cc @@ -38,6 +38,8 @@ void ShuffleReader::ReadNext(std::vector* out) { break; } } + // TODO(fengjiayi): 'std::random_shuffle' can be very slow. It needs to be + // optimize. std::random_shuffle(buffer_.begin(), buffer_.end()); iteration_pos_ = 0; } diff --git a/paddle/framework/reader.h b/paddle/framework/reader.h index ff7153bc7bf..534894cfbd6 100644 --- a/paddle/framework/reader.h +++ b/paddle/framework/reader.h @@ -28,6 +28,8 @@ class ReaderBase { virtual void ReadNext(std::vector* out) = 0; virtual bool HasNext() const = 0; + virtual void ReInit() = 0; + DDim shape(size_t idx) const; std::vector shapes() const { return shapes_; } void set_shapes(const std::vector& shapes) { shapes_ = shapes; } @@ -52,6 +54,8 @@ class DecoratedReader : public ReaderBase { bool HasNext() const override { return reader_->HasNext(); } + void ReInit() override { reader_->ReInit(); } + protected: ReaderBase* reader_; }; @@ -59,9 +63,9 @@ class DecoratedReader : public ReaderBase { // file readers template -class RandomReader : public FileReader { +class RandomDataGenerator : public FileReader { public: - RandomReader(const std::vector& shapes, float min, float max) + RandomDataGenerator(const std::vector& shapes, float min, float max) : FileReader(shapes), min_(min), max_(max) { PADDLE_ENFORCE_LE( min, max, "'min' shouldn't be greater than 'max'.(%f vs %f)", min, max); @@ -91,6 +95,8 @@ class RandomReader : public FileReader { bool HasNext() const override { return true; } + void ReInit() override { return; } + private: float min_; float max_; @@ -139,6 +145,7 @@ class ReaderHolder { void ReadNext(std::vector* out) { reader_->ReadNext(out); } bool HasNext() const { return reader_->HasNext(); } + void ReInit() { reader_->ReInit(); } DDim shape(size_t idx) const { return reader_->shape(idx); } std::vector shapes() const { return reader_->shapes(); } diff --git a/paddle/operators/CMakeLists.txt b/paddle/operators/CMakeLists.txt index e1dba8bb3f9..25bb7187d36 100644 --- a/paddle/operators/CMakeLists.txt +++ b/paddle/operators/CMakeLists.txt @@ -186,7 +186,7 @@ list(REMOVE_ITEM GENERAL_OPS ${DEPS_OPS}) foreach(src ${GENERAL_OPS}) op_library(${src}) endforeach() -file(APPEND ${pybind_file} "USE_OP(less_than);\nUSE_OP(logical_and);\nUSE_NO_KERNEL_OP(read_from_array);\nUSE_NO_KERNEL_OP(create_random_reader);\n") +file(APPEND ${pybind_file} "USE_OP(less_than);\nUSE_OP(logical_and);\nUSE_NO_KERNEL_OP(read_from_array);\nUSE_NO_KERNEL_OP(create_random_data_generator);\n") set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library") diff --git a/paddle/operators/create_reader_op.cc b/paddle/operators/create_reader_op.cc index 11c77a06032..5ba2a25ab4c 100644 --- a/paddle/operators/create_reader_op.cc +++ b/paddle/operators/create_reader_op.cc @@ -18,8 +18,8 @@ namespace paddle { namespace operators { -std::vector RestoreShapes(const std::vector& shape_concat, - const std::vector& ranks) { +static std::vector RestoreShapes( + const std::vector& shape_concat, const std::vector& ranks) { std::vector res; int offset = 0; for (int len : ranks) { @@ -69,7 +69,7 @@ class CreateReaderInferVarType : public framework::VarTypeInference { }; template -class CreateRandomReaderOp : public framework::OperatorBase { +class CreateRandomDataGeneratorOp : public framework::OperatorBase { public: using framework::OperatorBase::OperatorBase; void Run(const framework::Scope& scope, @@ -84,14 +84,15 @@ class CreateRandomReaderOp : public framework::OperatorBase { std::vector shapes = RestoreShapes(shape_concat, ranks); auto* out = scope.FindVar(Output("Out")) ->template GetMutable(); - out->Reset(new framework::RandomReader(shapes, Attr("min"), - Attr("max"))); + out->Reset(new framework::RandomDataGenerator(shapes, Attr("min"), + Attr("max"))); } }; -class CreateRandomReaderOpMaker : public framework::OpProtoAndCheckerMaker { +class CreateRandomDataGeneratorOpMaker + : public framework::OpProtoAndCheckerMaker { public: - CreateRandomReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) + CreateRandomDataGeneratorOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) : OpProtoAndCheckerMaker(op_proto, op_checker) { AddOutput("Out", "(ReaderHolder) The created random reader."); AddAttr>("shape_concat", @@ -107,7 +108,7 @@ class CreateRandomReaderOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("min", "The lower bound of reader's uniform distribution."); AddAttr("max", "The upper bound of reader's uniform distribution."); AddComment(R"DOC( - CreateRandomReader Operator + CreateRandomDataGenerator Operator This Op creates a random reader. The reader generates random data instead of really reading from files. @@ -186,9 +187,10 @@ class CreateBatchReaderOpMaker : public framework::OpProtoAndCheckerMaker { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OPERATOR(create_random_reader, ops::CreateRandomReaderOp, +REGISTER_OPERATOR(create_random_data_generator, + ops::CreateRandomDataGeneratorOp, ops::CreateFileReaderInferShape, - ops::CreateRandomReaderOpMaker, + ops::CreateRandomDataGeneratorOpMaker, paddle::framework::EmptyGradOpMaker, ops::CreateReaderInferVarType); REGISTER_OPERATOR(create_shuffle_reader, ops::CreateShuffleReaderOp, diff --git a/paddle/operators/read_op.cc b/paddle/operators/read_op.cc index 3d17b26c998..3ae454101f5 100644 --- a/paddle/operators/read_op.cc +++ b/paddle/operators/read_op.cc @@ -59,7 +59,10 @@ class ReadOp : public framework::OperatorBase { framework::ReaderHolder* reader = scope.FindVar(Input("Reader"))->GetMutable(); if (!reader->HasNext()) { - return; + reader->ReInit(); + PADDLE_ENFORCE( + reader->HasNext(), + "Reader can not read the next data even it has been re-initialized."); } std::vector out_arg_names = Outputs("Out"); std::vector ins; diff --git a/python/paddle/v2/fluid/tests/test_cpp_reader.py b/python/paddle/v2/fluid/tests/test_cpp_reader.py index 7efcb0c46d2..e71c3a290c9 100644 --- a/python/paddle/v2/fluid/tests/test_cpp_reader.py +++ b/python/paddle/v2/fluid/tests/test_cpp_reader.py @@ -20,11 +20,11 @@ prog = fluid.framework.Program() block = prog.current_block() random_reader = block.create_var( - type=fluid.core.VarDesc.VarType.READER, name="RandomReader") + type=fluid.core.VarDesc.VarType.READER, name="RandomDataGenerator") random_reader.desc.set_lod_levels([0, 0]) -create_random_reader_op = block.append_op( - type="create_random_reader", +create_random_data_generator_op = block.append_op( + type="create_random_data_generator", outputs={"Out": random_reader}, attrs={ "shape_concat": [1, 2, 1, 1], -- GitLab From 20c4a4cb4f716d433ba435dac3632a2d1459b055 Mon Sep 17 00:00:00 2001 From: Qiao Longfei Date: Wed, 7 Feb 2018 14:47:42 +0800 Subject: [PATCH 074/138] Impl scalar switch case op with condition op (#8184) Impl scalar switch case op with condition op --- doc/design/switch.md | 3 +- paddle/operators/conditional_block_op.cc | 44 +++++++++++-- python/paddle/v2/fluid/layers/control_flow.py | 66 ++++++++++++++++++- python/paddle/v2/fluid/layers/ops.py | 4 ++ python/paddle/v2/fluid/tests/test_switch.py | 64 ++++++++++++++++++ 5 files changed, 171 insertions(+), 10 deletions(-) create mode 100644 python/paddle/v2/fluid/tests/test_switch.py diff --git a/doc/design/switch.md b/doc/design/switch.md index 9db1b2782a5..827d0601c62 100644 --- a/doc/design/switch.md +++ b/doc/design/switch.md @@ -10,8 +10,7 @@ The following example shows the usage of `fluid.switch`. a = fluid.Var(10) b = fluid.Var(0) -switch = fluid.switch() -with switch.block(): +with switch() as switch: with switch.case(fluid.less_equal(a, 10)): fluid.print("Case 1") with switch.case(fluid.larger(a, 0)): diff --git a/paddle/operators/conditional_block_op.cc b/paddle/operators/conditional_block_op.cc index 3cae61a4384..bdcdb85be7a 100644 --- a/paddle/operators/conditional_block_op.cc +++ b/paddle/operators/conditional_block_op.cc @@ -41,6 +41,21 @@ class ConditionalOp : public framework::OperatorBase { }); return retv; } + + bool ScalarCondition( + const std::vector &ips) const { + if (!(ips.size() == 1UL && ips[0]->IsInitialized())) { + PADDLE_THROW("should have one initialized input as condition"); + } + if (!(ips[0]->type().hash_code() == typeid(bool).hash_code() && + ips[0]->numel() == 1)) { + PADDLE_THROW( + "condition input's data type should be bool, " + "numel should be 1, actual numel is %d", + ips[0]->numel()); + } + return ips[0]->data()[0]; + } }; class ConditionalBlockOp : public ConditionalOp { @@ -53,9 +68,15 @@ class ConditionalBlockOp : public ConditionalOp { void Run(const framework::Scope &scope, const platform::Place &dev_place) const override { auto xs = InputTensors(scope); - bool need_run = std::all_of( - xs.begin(), xs.end(), - [](const framework::LoDTensor *t) { return t->numel() != 0; }); + + bool need_run; + if (Attr("is_scalar_condition")) { + need_run = ScalarCondition(xs); + } else { + need_run = std::all_of( + xs.begin(), xs.end(), + [](const framework::LoDTensor *t) { return t->numel() != 0; }); + } if (need_run) { auto *scope_var = scope.FindVar(Output("Scope")); @@ -88,6 +109,10 @@ class ConditionalBlockOpProtoMaker : public framework::OpProtoAndCheckerMaker { "scope is std::vector"); AddAttr( "sub_block", "The step block of conditional block operator"); + AddAttr("is_scalar_condition", + "the input X is used as scalar " + "condition") + .SetDefault(false); AddComment(R"DOC(Conditional block operator Run the sub-block if X is not empty. Params is the other inputs and Out is the @@ -106,9 +131,15 @@ class ConditionalBlockGradOp : public ConditionalOp { void Run(const framework::Scope &scope, const platform::Place &dev_place) const override { auto xs = this->InputTensors(scope); - bool need_run = std::all_of( - xs.begin(), xs.end(), - [](const framework::LoDTensor *t) { return t->numel() != 0; }); + + bool need_run; + if (Attr("is_scalar_condition")) { + need_run = ScalarCondition(xs); + } else { + need_run = std::all_of( + xs.begin(), xs.end(), + [](const framework::LoDTensor *t) { return t->numel() != 0; }); + } if (need_run) { auto *scope_var = scope.FindVar(Input("Scope")); @@ -182,6 +213,7 @@ class ConditionalBlockGradMaker : public framework::SingleGradOpDescMaker { grad_op->SetOutput(framework::GradVarName("Params"), InputGrad("Params", false)); grad_op->SetBlockAttr("sub_block", *this->grad_block_[0]); + grad_op->SetAttr("is_scalar_condition", GetAttr("is_scalar_condition")); return std::unique_ptr(grad_op); } }; diff --git a/python/paddle/v2/fluid/layers/control_flow.py b/python/paddle/v2/fluid/layers/control_flow.py index 0fcbfe0e2f2..e71f3858b0a 100644 --- a/python/paddle/v2/fluid/layers/control_flow.py +++ b/python/paddle/v2/fluid/layers/control_flow.py @@ -18,6 +18,7 @@ from tensor import assign, fill_constant from .. import core from ..framework import Program, Variable, Operator from ..layer_helper import LayerHelper, unique_name +from ops import logical_and, logical_not, logical_or __all__ = [ 'split_lod_tensor', @@ -27,6 +28,7 @@ __all__ = [ 'StaticRNNMemoryLink', 'WhileGuard', 'While', + 'Switch', 'lod_rank_table', 'max_sequence_len', 'topk', @@ -1063,11 +1065,12 @@ class ConditionalBlockGuard(BlockGuard): class ConditionalBlock(object): - def __init__(self, inputs, name=None): + def __init__(self, inputs, is_scalar_condition=False, name=None): for each_input in inputs: if not isinstance(each_input, Variable): raise TypeError("Each input should be variable") self.inputs = inputs + self.is_scalar_condition = is_scalar_condition self.helper = LayerHelper('conditional_block', name=name) def block(self): @@ -1112,7 +1115,66 @@ class ConditionalBlock(object): }, outputs={'Out': out_list, 'Scope': [step_scope]}, - attrs={'sub_block': inside_block}) + attrs={ + 'sub_block': inside_block, + 'is_scalar_condition': self.is_scalar_condition + }) + + +class Switch(object): + def __init__(self, name=None): + self.helper = LayerHelper('switch', name=name) + self.inside_scope = False + self.pre_not_conditions = [] + + def case(self, condition): + """create a new block for this condition + """ + if not self.inside_scope: + raise ValueError("case should be called inside with") + + if len(self.pre_not_conditions) == 0: + cond_block = ConditionalBlock([condition], is_scalar_condition=True) + not_cond = logical_not(x=condition) + self.pre_not_conditions.append(not_cond) + else: + pre_cond_num = len(self.pre_not_conditions) + pre_not_cond = self.pre_not_conditions[pre_cond_num - 1] + new_not_cond = logical_and( + x=pre_not_cond, y=logical_not(x=condition)) + self.pre_not_conditions.append(new_not_cond) + cond_block = ConditionalBlock( + [logical_and( + x=pre_not_cond, y=condition)], + is_scalar_condition=True) + + return ConditionalBlockGuard(cond_block) + + def default(self): + """create a default case for this switch + """ + pre_cond_num = len(self.pre_not_conditions) + if pre_cond_num == 0: + raise ValueError("there should be at least one condition") + cond_block = ConditionalBlock( + [self.pre_not_conditions[pre_cond_num - 1]], + is_scalar_condition=True) + return ConditionalBlockGuard(cond_block) + + def __enter__(self): + """ + set flag that now is inside switch.block {} + :return: + """ + self.inside_scope = True + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.inside_scope = False + if exc_type is not None: + return False # re-raise exception + + return True class IfElseBlockGuard(object): diff --git a/python/paddle/v2/fluid/layers/ops.py b/python/paddle/v2/fluid/layers/ops.py index c701e79ad26..38dea2892fc 100644 --- a/python/paddle/v2/fluid/layers/ops.py +++ b/python/paddle/v2/fluid/layers/ops.py @@ -61,6 +61,10 @@ __all__ = [ 'clip_by_norm', 'softmax', 'sequence_softmax', + 'logical_and', + 'logical_or', + 'logical_xor', + 'logical_not', ] + __activations__ for _OP in set(__all__): diff --git a/python/paddle/v2/fluid/tests/test_switch.py b/python/paddle/v2/fluid/tests/test_switch.py new file mode 100644 index 00000000000..52ebf773ec7 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_switch.py @@ -0,0 +1,64 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import paddle.v2.fluid.core as core +import paddle.v2.fluid.layers as layers +import paddle.v2.fluid.framework as framework +from paddle.v2.fluid.executor import Executor +from paddle.v2.fluid.framework import default_startup_program + + +class TestSwitch(unittest.TestCase): + def check_switch(self, value): + x = layers.fill_constant(shape=[1], dtype='float32', value=value) + + zero_var = layers.fill_constant(shape=[1], dtype='float32', value=0.0) + one_var = layers.fill_constant(shape=[1], dtype='float32', value=1.0) + two_var = layers.fill_constant(shape=[1], dtype='float32', value=2.0) + three_var = layers.fill_constant(shape=[1], dtype='float32', value=3.0) + + result = layers.create_global_var( + shape=[1], value=-1.0, dtype='float32', persistable=True) + + with layers.Switch() as switch: + with switch.case(layers.less_than(x, zero_var)): + layers.assign(zero_var, result) + with switch.case(layers.less_than(x, one_var)): + layers.assign(one_var, result) + with switch.case(layers.less_than(x, two_var)): + layers.assign(two_var, result) + with switch.default(): + layers.assign(three_var, result) + + cpu = core.CPUPlace() + exe = Executor(cpu) + exe.run(default_startup_program()) + + out = exe.run(feed={}, fetch_list=[result])[0][0] + return out + + def test_switch(self): + test_data = {(-0.1, 0), (0.1, 1), (1.1, 2), (2.1, 3)} + for x, expected_result in test_data: + main_program = framework.Program() + startup_program = framework.Program() + with framework.program_guard(main_program, startup_program): + result = self.check_switch(x) + self.assertEqual(result, expected_result) + + +if __name__ == '__main__': + unittest.main() -- GitLab From d1d8257fdfab57499dbdd2ad4967052efb43df00 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Wed, 7 Feb 2018 15:45:54 +0800 Subject: [PATCH 075/138] adjust the structure of getstarted and cluster --- doc/getstarted/index_cn.rst | 55 +----------------- doc/getstarted/index_en.rst | 56 +----------------- doc/getstarted/quickstart_cn.rst | 41 +++++++++++++ doc/getstarted/quickstart_en.rst | 45 +++++++++++++++ ...cluster_train_cn.md => cmd_argument_cn.md} | 57 +------------------ ...cluster_train_en.md => cmd_argument_en.md} | 55 +----------------- doc/howto/cluster/index_cn.rst | 10 ++++ doc/howto/cluster/index_en.rst | 10 ++++ doc/howto/cluster/introduction_cn.md | 13 +++++ doc/howto/cluster/introduction_en.md | 13 +++++ .../cluster/{ => multi_cluster}/fabric_cn.md | 0 .../cluster/{ => multi_cluster}/fabric_en.md | 0 doc/howto/cluster/multi_cluster/index_cn.rst | 20 +++++++ doc/howto/cluster/multi_cluster/index_en.rst | 19 +++++++ .../cluster/{ => multi_cluster}/k8s_aws_cn.md | 0 .../cluster/{ => multi_cluster}/k8s_aws_en.md | 0 .../cluster/{ => multi_cluster}/k8s_cn.md | 0 .../{ => multi_cluster}/k8s_distributed_cn.md | 0 .../cluster/{ => multi_cluster}/k8s_en.md | 0 .../cluster/{ => multi_cluster}/openmpi_cn.md | 0 .../cluster/{ => multi_cluster}/openmpi_en.md | 0 doc/howto/cluster/preparations_cn.md | 16 ++++++ doc/howto/cluster/preparations_en.md | 17 ++++++ doc/howto/index_cn.rst | 2 +- doc/howto/index_en.rst | 2 +- 25 files changed, 212 insertions(+), 219 deletions(-) create mode 100644 doc/getstarted/quickstart_cn.rst create mode 100644 doc/getstarted/quickstart_en.rst rename doc/howto/cluster/{cluster_train_cn.md => cmd_argument_cn.md} (56%) rename doc/howto/cluster/{cluster_train_en.md => cmd_argument_en.md} (58%) create mode 100644 doc/howto/cluster/index_cn.rst create mode 100644 doc/howto/cluster/index_en.rst create mode 100644 doc/howto/cluster/introduction_cn.md create mode 100644 doc/howto/cluster/introduction_en.md rename doc/howto/cluster/{ => multi_cluster}/fabric_cn.md (100%) rename doc/howto/cluster/{ => multi_cluster}/fabric_en.md (100%) create mode 100644 doc/howto/cluster/multi_cluster/index_cn.rst create mode 100644 doc/howto/cluster/multi_cluster/index_en.rst rename doc/howto/cluster/{ => multi_cluster}/k8s_aws_cn.md (100%) rename doc/howto/cluster/{ => multi_cluster}/k8s_aws_en.md (100%) rename doc/howto/cluster/{ => multi_cluster}/k8s_cn.md (100%) rename doc/howto/cluster/{ => multi_cluster}/k8s_distributed_cn.md (100%) rename doc/howto/cluster/{ => multi_cluster}/k8s_en.md (100%) rename doc/howto/cluster/{ => multi_cluster}/openmpi_cn.md (100%) rename doc/howto/cluster/{ => multi_cluster}/openmpi_en.md (100%) create mode 100644 doc/howto/cluster/preparations_cn.md create mode 100644 doc/howto/cluster/preparations_en.md diff --git a/doc/getstarted/index_cn.rst b/doc/getstarted/index_cn.rst index 9f6ee25987d..1dc141396b9 100644 --- a/doc/getstarted/index_cn.rst +++ b/doc/getstarted/index_cn.rst @@ -1,61 +1,8 @@ 新手入门 ============ -.. _quick_install: - -快速安装 -++++++++ - -PaddlePaddle支持使用pip快速安装,目前支持CentOS 6以上, Ubuntu 14.04以及MacOS 10.12,并安装有Python2.7。 -执行下面的命令完成快速安装,版本为cpu_avx_openblas: - - .. code-block:: bash - - pip install paddlepaddle - -如果需要安装支持GPU的版本(cuda7.5_cudnn5_avx_openblas),需要执行: - - .. code-block:: bash - - pip install paddlepaddle-gpu - -更详细的安装和编译方法参考: - -.. toctree:: - :maxdepth: 1 - - build_and_install/index_cn.rst - -.. _quick_start: - -快速开始 -++++++++ - -创建一个 housing.py 并粘贴此Python代码: - - .. code-block:: python - - import paddle.v2 as paddle - - # Initialize PaddlePaddle. - paddle.init(use_gpu=False, trainer_count=1) - - # Configure the neural network. - x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(13)) - y_predict = paddle.layer.fc(input=x, size=1, act=paddle.activation.Linear()) - - # Infer using provided test data. - probs = paddle.infer( - output_layer=y_predict, - parameters=paddle.dataset.uci_housing.model(), - input=[item for item in paddle.dataset.uci_housing.test()()]) - - for i in xrange(len(probs)): - print 'Predicted price: ${:,.2f}'.format(probs[i][0] * 1000) - -执行 :code:`python housing.py` 瞧! 它应该打印出预测住房数据的清单。 - .. toctree:: :maxdepth: 1 + quickstart_cn.rst concepts/use_concepts_cn.rst diff --git a/doc/getstarted/index_en.rst b/doc/getstarted/index_en.rst index 063d9d880c8..c680e190375 100644 --- a/doc/getstarted/index_en.rst +++ b/doc/getstarted/index_en.rst @@ -1,61 +1,7 @@ GET STARTED ============ -.. _quick_install: - -Quick Install ----------------------- - -You can use pip to install PaddlePaddle with a single command, supports -CentOS 6 above, Ubuntu 14.04 above or MacOS 10.12, with Python 2.7 installed. -Simply run the following command to install, the version is cpu_avx_openblas: - - .. code-block:: bash - - pip install paddlepaddle - -If you need to install GPU version (cuda7.5_cudnn5_avx_openblas), run: - - .. code-block:: bash - - pip install paddlepaddle-gpu - -For more details about installation and build: - .. toctree:: :maxdepth: 1 - build_and_install/index_en.rst - - -.. _quick_start: - -Quick Start -++++++++ - -Create a new file called housing.py, and paste this Python -code: - - - .. code-block:: python - - import paddle.v2 as paddle - - # Initialize PaddlePaddle. - paddle.init(use_gpu=False, trainer_count=1) - - # Configure the neural network. - x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(13)) - y_predict = paddle.layer.fc(input=x, size=1, act=paddle.activation.Linear()) - - # Infer using provided test data. - probs = paddle.infer( - output_layer=y_predict, - parameters=paddle.dataset.uci_housing.model(), - input=[item for item in paddle.dataset.uci_housing.test()()]) - - for i in xrange(len(probs)): - print 'Predicted price: ${:,.2f}'.format(probs[i][0] * 1000) - -Run :code:`python housing.py` and voila! It should print out a list of predictions -for the test housing data. + quickstart_en.rst diff --git a/doc/getstarted/quickstart_cn.rst b/doc/getstarted/quickstart_cn.rst new file mode 100644 index 00000000000..51dd00f1e80 --- /dev/null +++ b/doc/getstarted/quickstart_cn.rst @@ -0,0 +1,41 @@ +快速开始 +======== + +PaddlePaddle支持使用pip快速安装,目前支持CentOS 6以上, Ubuntu 14.04以及MacOS 10.12,并安装有Python2.7。 +执行下面的命令完成快速安装,版本为cpu_avx_openblas: + + .. code-block:: bash + + pip install paddlepaddle + +如果需要安装支持GPU的版本(cuda7.5_cudnn5_avx_openblas),需要执行: + + .. code-block:: bash + + pip install paddlepaddle-gpu + +更详细的安装和编译方法参考::ref:`install_steps` 。 + +创建一个 housing.py 并粘贴此Python代码: + + .. code-block:: python + + import paddle.v2 as paddle + + # Initialize PaddlePaddle. + paddle.init(use_gpu=False, trainer_count=1) + + # Configure the neural network. + x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(13)) + y_predict = paddle.layer.fc(input=x, size=1, act=paddle.activation.Linear()) + + # Infer using provided test data. + probs = paddle.infer( + output_layer=y_predict, + parameters=paddle.dataset.uci_housing.model(), + input=[item for item in paddle.dataset.uci_housing.test()()]) + + for i in xrange(len(probs)): + print 'Predicted price: ${:,.2f}'.format(probs[i][0] * 1000) + +执行 :code:`python housing.py` 瞧! 它应该打印出预测住房数据的清单。 diff --git a/doc/getstarted/quickstart_en.rst b/doc/getstarted/quickstart_en.rst new file mode 100644 index 00000000000..d1bcf82ea07 --- /dev/null +++ b/doc/getstarted/quickstart_en.rst @@ -0,0 +1,45 @@ +Quick Start +============ + +You can use pip to install PaddlePaddle with a single command, supports +CentOS 6 above, Ubuntu 14.04 above or MacOS 10.12, with Python 2.7 installed. +Simply run the following command to install, the version is cpu_avx_openblas: + + .. code-block:: bash + + pip install paddlepaddle + +If you need to install GPU version (cuda7.5_cudnn5_avx_openblas), run: + + .. code-block:: bash + + pip install paddlepaddle-gpu + +For more details about installation and build: :ref:`install_steps` . + +Create a new file called housing.py, and paste this Python +code: + + + .. code-block:: python + + import paddle.v2 as paddle + + # Initialize PaddlePaddle. + paddle.init(use_gpu=False, trainer_count=1) + + # Configure the neural network. + x = paddle.layer.data(name='x', type=paddle.data_type.dense_vector(13)) + y_predict = paddle.layer.fc(input=x, size=1, act=paddle.activation.Linear()) + + # Infer using provided test data. + probs = paddle.infer( + output_layer=y_predict, + parameters=paddle.dataset.uci_housing.model(), + input=[item for item in paddle.dataset.uci_housing.test()()]) + + for i in xrange(len(probs)): + print 'Predicted price: ${:,.2f}'.format(probs[i][0] * 1000) + +Run :code:`python housing.py` and voila! It should print out a list of predictions +for the test housing data. diff --git a/doc/howto/cluster/cluster_train_cn.md b/doc/howto/cluster/cmd_argument_cn.md similarity index 56% rename from doc/howto/cluster/cluster_train_cn.md rename to doc/howto/cluster/cmd_argument_cn.md index 0f3db59607f..5c575dd5b53 100644 --- a/doc/howto/cluster/cluster_train_cn.md +++ b/doc/howto/cluster/cmd_argument_cn.md @@ -1,41 +1,7 @@ -# 分布式训练 - - -## 概述 - -本文将介绍如何使用PaddlePaddle在不同的集群框架下完成分布式训练。分布式训练架构如下图所示: - - - -- 数据分片(Data shard): 用于训练神经网络的数据,被切分成多个部分,每个部分分别给每个trainer使用。 -- 计算节点(Trainer): 每个trainer启动后读取切分好的一部分数据,开始神经网络的“前馈”和“后馈”计算,并和参数服务器通信。在完成一定量数据的训练后,上传计算得出的梯度(gradients),然后下载优化更新后的神经网络参数(parameters)。 -- 参数服务器(Parameter server):每个参数服务器只保存整个神经网络所有参数的一部分。参数服务器接收从计算节点上传的梯度,并完成参数优化更新,再将更新后的参数下发到每个计算节点。 - -这样,通过计算节点和参数服务器的分布式协作,可以完成神经网络的SGD方法的训练。PaddlePaddle可以同时支持同步随机梯度下降(SGD)和异步随机梯度下降。 - -在使用同步SGD训练神经网络时,PaddlePaddle使用同步屏障(barrier),使梯度的提交和参数的更新按照顺序方式执行。在异步SGD中,则并不会等待所有trainer提交梯度才更新参数,这样极大地提高了计算的并行性:参数服务器之间不相互依赖,并行地接收梯度和更新参数,参数服务器也不会等待计算节点全部都提交梯度之后才开始下一步,计算节点之间也不会相互依赖,并行地执行模型的训练。可以看出,虽然异步SGD方式会提高参数更新并行度, 但是并不能保证参数同步更新,在任意时间某一台参数服务器上保存的参数可能比另一台要更新,与同步SGD相比,梯度会有噪声。 - - -## 环境准备 - -1. 准备您的计算集群。计算集群通常由一组(几台到几千台规模)的Linux服务器组成。服务器之间可以通过局域网(LAN)联通,每台服务器具有集群中唯一的IP地址(或者可被DNS解析的主机名)。集群中的每台计算机通常被成为一个“节点”。 -1. 我们需要在集群的所有节点上安装 PaddlePaddle。 如果要启用GPU,还需要在节点上安装对应的GPU驱动以及CUDA。PaddlePaddle的安装可以参考[build_and_install](http://www.paddlepaddle.org/docs/develop/documentation/zh/getstarted/build_and_install/index_cn.html)的多种安装方式。我们推荐使用[Docker](http://www.paddlepaddle.org/docs/develop/documentation/zh/getstarted/build_and_install/docker_install_cn.html)安装方式来快速安装PaddlePaddle。 - -安装完成之后,执行下面的命令可以查看已经安装的版本(docker安装方式可以进入docker容器执行:`docker run -it paddlepaddle/paddle:[tag] /bin/bash`): -```bash -$ paddle version -PaddlePaddle 0.10.0, compiled with - with_avx: ON - with_gpu: OFF - with_double: OFF - with_python: ON - with_rdma: OFF - with_timer: OFF -``` +## 启动参数说明 -下面以`doc/howto/usage/cluster/src/word2vec`中的代码作为实例,介绍使用PaddlePaddle v2 API完成分布式训练。 +下面以`doc/howto/cluster/src/word2vec`中的代码作为实例,介绍使用PaddlePaddle v2 API完成分布式训练。 -## 启动参数说明 ### 启动参数服务器 执行以下的命令启动一个参数服务器并等待和计算节点的数据交互 ```bash @@ -167,22 +133,3 @@ test.txt-00002 - `train_data_dir`:包含训练数据的目录,可以是从分布式存储挂载过来的,也可以是在任务启动前下载到本地的。 - `test_data_dir`:包含测试数据集的目录。 - -## 使用分布式计算平台或工具 - -PaddlePaddle可以使用多种分布式计算平台构建分布式计算任务,包括: -- [Kubernetes](http://kubernetes.io) Google开源的容器集群的调度框架,支持大规模集群生产环境的完整集群方案。 -- [OpenMPI](https://www.open-mpi.org) 成熟的高性能并行计算框架。 -- [Fabric](http://www.fabfile.org) 集群管理工具。可以使用`Fabric`编写集群任务提交和管理脚本。 - -对于不同的集群平台,会分别介绍集群作业的启动和停止方法。这些例子都可以在[cluster_train_v2](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/scripts/cluster_train_v2)找到。 - -在使用分布式计算平台进行训练时,任务被调度在集群中时,分布式计算平台通常会通过API或者环境变量提供任务运行需要的参数,比如节点的ID、IP和任务节点个数等。 - -## 在不同集群中运行 - - - [fabric集群](fabric_cn.md) - - [openmpi集群](openmpi_cn.md) - - [kubernetes单机](k8s_cn.md) - - [kubernetes distributed分布式](k8s_distributed_cn.md) - - [AWS上运行kubernetes集群训练](k8s_aws_cn.md) diff --git a/doc/howto/cluster/cluster_train_en.md b/doc/howto/cluster/cmd_argument_en.md similarity index 58% rename from doc/howto/cluster/cluster_train_en.md rename to doc/howto/cluster/cmd_argument_en.md index f9424f8f1a2..06fd5717564 100644 --- a/doc/howto/cluster/cluster_train_en.md +++ b/doc/howto/cluster/cmd_argument_en.md @@ -1,40 +1,7 @@ -# Distributed Training - -## Introduction - -In this article, we'll explain how to run distributed training jobs with PaddlePaddle on different types of clusters. The diagram below shows the main architecture of a distributed trainning job: - - - -- Data shard: training data will be split into multiple partitions, trainers use the partitions of the whole dataset to do the training job. -- Trainer: each trainer reads the data shard, and train the neural network. Then the trainer will upload calculated "gradients" to parameter servers, and wait for parameters to be optimized on the parameter server side. When that finishes, the trainer download optimized parameters and continues its training. -- Parameter server: every parameter server stores part of the whole neural network model data. They will do optimization calculations when gradients are uploaded from trainers, and then send updated parameters to trainers. - -PaddlePaddle can support both synchronize stochastic gradient descent (SGD) and asynchronous SGD. - -When training with synchronize SGD, PaddlePaddle uses an internal "synchronize barrier" which makes gradients update and parameter download in strict order. On the other hand, asynchronous SGD won't wait for all trainers to finish upload at a single step, this will increase the parallelism of distributed training: parameter servers do not depend on each other, they'll do parameter optimization concurrently. Parameter servers will not wait for trainers, so trainers will also do their work concurrently. But asynchronous SGD will introduce more randomness and noises in the gradient. - -## Preparations -1. Prepare your computer cluster. It's normally a bunch of Linux servers connected by LAN. Each server will be assigned a unique IP address. The computers in the cluster can be called "nodes". -2. Install PaddlePaddle on every node. If you are going to take advantage of GPU cards, you'll also need to install proper driver and CUDA libraries. To install PaddlePaddle please read [this build and install](http://www.paddlepaddle.org/docs/develop/documentation/en/getstarted/build_and_install/index_en.html) document. We strongly recommend using [Docker installation](http://www.paddlepaddle.org/docs/develop/documentation/en/getstarted/build_and_install/docker_install_en.html). - -After installation, you can check the version by typing the below command (run a docker container if using docker: `docker run -it paddlepaddle/paddle:[tag] /bin/bash`): - -```bash -$ paddle version -PaddlePaddle 0.10.0rc, compiled with - with_avx: ON - with_gpu: OFF - with_double: OFF - with_python: ON - with_rdma: OFF - with_timer: OFF -``` - -We'll take `doc/howto/usage/cluster/src/word2vec` as an example to introduce distributed training using PaddlePaddle v2 API. - ## Command-line arguments +We'll take `doc/howto/cluster/src/word2vec` as an example to introduce distributed training using PaddlePaddle v2 API. + ### Starting parameter server Type the below command to start a parameter server which will wait for trainers to connect: @@ -171,21 +138,3 @@ Your workspace may looks like: - `train_data_dir`: containing training data. Mount from storage service or copy trainning data to here. - `test_data_dir`: containing testing data. - -## Use cluster platforms or cluster management tools - -PaddlePaddle supports running jobs on several platforms including: -- [Kubernetes](http://kubernetes.io) open-source system for automating deployment, scaling, and management of containerized applications from Google. -- [OpenMPI](https://www.open-mpi.org) Mature high performance parallel computing framework. -- [Fabric](http://www.fabfile.org) A cluster management tool. Write scripts to submit jobs or manage the cluster. - -We'll introduce cluster job management on these platforms. The examples can be found under [cluster_train_v2](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/scripts/cluster_train_v2). - -These cluster platforms provide API or environment variables for training processes, when the job is dispatched to different nodes. Like node ID, IP or total number of nodes etc. - -## Use different clusters - - - [fabric](fabric_en.md) - - [openmpi](openmpi_en.md) - - [kubernetes](k8s_en.md) - - [kubernetes on AWS](k8s_aws_en.md) diff --git a/doc/howto/cluster/index_cn.rst b/doc/howto/cluster/index_cn.rst new file mode 100644 index 00000000000..c68b2655b65 --- /dev/null +++ b/doc/howto/cluster/index_cn.rst @@ -0,0 +1,10 @@ +分布式训练 +========== + +.. toctree:: + :maxdepth: 1 + + introduction_cn.md + preparations_cn.md + cmd_argument_cn.md + multi_cluster/index_cn.rst diff --git a/doc/howto/cluster/index_en.rst b/doc/howto/cluster/index_en.rst new file mode 100644 index 00000000000..af957e06cd7 --- /dev/null +++ b/doc/howto/cluster/index_en.rst @@ -0,0 +1,10 @@ +Distributed Training +==================== + +.. toctree:: + :maxdepth: 1 + + introduction_en.md + preparations_en.md + cmd_argument_en.md + multi_cluster/index_en.rst diff --git a/doc/howto/cluster/introduction_cn.md b/doc/howto/cluster/introduction_cn.md new file mode 100644 index 00000000000..562008a8984 --- /dev/null +++ b/doc/howto/cluster/introduction_cn.md @@ -0,0 +1,13 @@ +## 概述 + +本节将介绍如何使用PaddlePaddle在不同的集群框架下完成分布式训练。分布式训练架构如下图所示: + + + +- 数据分片(Data shard): 用于训练神经网络的数据,被切分成多个部分,每个部分分别给每个trainer使用。 +- 计算节点(Trainer): 每个trainer启动后读取切分好的一部分数据,开始神经网络的“前馈”和“后馈”计算,并和参数服务器通信。在完成一定量数据的训练后,上传计算得出的梯度(gradients),然后下载优化更新后的神经网络参数(parameters)。 +- 参数服务器(Parameter server):每个参数服务器只保存整个神经网络所有参数的一部分。参数服务器接收从计算节点上传的梯度,并完成参数优化更新,再将更新后的参数下发到每个计算节点。 + +这样,通过计算节点和参数服务器的分布式协作,可以完成神经网络的SGD方法的训练。PaddlePaddle可以同时支持同步随机梯度下降(SGD)和异步随机梯度下降。 + +在使用同步SGD训练神经网络时,PaddlePaddle使用同步屏障(barrier),使梯度的提交和参数的更新按照顺序方式执行。在异步SGD中,则并不会等待所有trainer提交梯度才更新参数,这样极大地提高了计算的并行性:参数服务器之间不相互依赖,并行地接收梯度和更新参数,参数服务器也不会等待计算节点全部都提交梯度之后才开始下一步,计算节点之间也不会相互依赖,并行地执行模型的训练。可以看出,虽然异步SGD方式会提高参数更新并行度, 但是并不能保证参数同步更新,在任意时间某一台参数服务器上保存的参数可能比另一台要更新,与同步SGD相比,梯度会有噪声。 diff --git a/doc/howto/cluster/introduction_en.md b/doc/howto/cluster/introduction_en.md new file mode 100644 index 00000000000..eb70d7cf35a --- /dev/null +++ b/doc/howto/cluster/introduction_en.md @@ -0,0 +1,13 @@ +## Introduction + +In this section, we'll explain how to run distributed training jobs with PaddlePaddle on different types of clusters. The diagram below shows the main architecture of a distributed trainning job: + + + +- Data shard: training data will be split into multiple partitions, trainers use the partitions of the whole dataset to do the training job. +- Trainer: each trainer reads the data shard, and train the neural network. Then the trainer will upload calculated "gradients" to parameter servers, and wait for parameters to be optimized on the parameter server side. When that finishes, the trainer download optimized parameters and continues its training. +- Parameter server: every parameter server stores part of the whole neural network model data. They will do optimization calculations when gradients are uploaded from trainers, and then send updated parameters to trainers. + +PaddlePaddle can support both synchronize stochastic gradient descent (SGD) and asynchronous SGD. + +When training with synchronize SGD, PaddlePaddle uses an internal "synchronize barrier" which makes gradients update and parameter download in strict order. On the other hand, asynchronous SGD won't wait for all trainers to finish upload at a single step, this will increase the parallelism of distributed training: parameter servers do not depend on each other, they'll do parameter optimization concurrently. Parameter servers will not wait for trainers, so trainers will also do their work concurrently. But asynchronous SGD will introduce more randomness and noises in the gradient. diff --git a/doc/howto/cluster/fabric_cn.md b/doc/howto/cluster/multi_cluster/fabric_cn.md similarity index 100% rename from doc/howto/cluster/fabric_cn.md rename to doc/howto/cluster/multi_cluster/fabric_cn.md diff --git a/doc/howto/cluster/fabric_en.md b/doc/howto/cluster/multi_cluster/fabric_en.md similarity index 100% rename from doc/howto/cluster/fabric_en.md rename to doc/howto/cluster/multi_cluster/fabric_en.md diff --git a/doc/howto/cluster/multi_cluster/index_cn.rst b/doc/howto/cluster/multi_cluster/index_cn.rst new file mode 100644 index 00000000000..ef56b6ddb38 --- /dev/null +++ b/doc/howto/cluster/multi_cluster/index_cn.rst @@ -0,0 +1,20 @@ +在不同集群中运行 +================ + +PaddlePaddle可以使用多种分布式计算平台构建分布式计算任务,包括: +- `Kubernetes `_ Google开源的容器集群的调度框架,支持大规模集群生产环境的完整集群方案。 +- `OpenMPI `_ 成熟的高性能并行计算框架。 +- `Fabric `_ 集群管理工具。可以使用`Fabric`编写集群任务提交和管理脚本。 + +对于不同的集群平台,会分别介绍集群作业的启动和停止方法。这些例子都可以在 `cluster_train_v2 `_ 找到。 + +在使用分布式计算平台进行训练时,任务被调度在集群中时,分布式计算平台通常会通过API或者环境变量提供任务运行需要的参数,比如节点的ID、IP和任务节点个数等。 + +.. toctree:: + :maxdepth: 1 + + fabric_cn.md + openmpi_cn.md + k8s_cn.md + k8s_distributed_cn.md + k8s_aws_cn.md diff --git a/doc/howto/cluster/multi_cluster/index_en.rst b/doc/howto/cluster/multi_cluster/index_en.rst new file mode 100644 index 00000000000..dac7aaef085 --- /dev/null +++ b/doc/howto/cluster/multi_cluster/index_en.rst @@ -0,0 +1,19 @@ +Use different clusters +====================== + +PaddlePaddle supports running jobs on several platforms including: +- `Kubernetes `_ open-source system for automating deployment, scaling, and management of containerized applications from Google. +- `OpenMPI `_ Mature high performance parallel computing framework. +- `Fabric `_ A cluster management tool. Write scripts to submit jobs or manage the cluster. + +We'll introduce cluster job management on these platforms. The examples can be found under `cluster_train_v2 `_ . + +These cluster platforms provide API or environment variables for training processes, when the job is dispatched to different nodes. Like node ID, IP or total number of nodes etc. + +.. toctree:: + :maxdepth: 1 + + fabric_en.md + openmpi_en.md + k8s_en.md + k8s_aws_en.md diff --git a/doc/howto/cluster/k8s_aws_cn.md b/doc/howto/cluster/multi_cluster/k8s_aws_cn.md similarity index 100% rename from doc/howto/cluster/k8s_aws_cn.md rename to doc/howto/cluster/multi_cluster/k8s_aws_cn.md diff --git a/doc/howto/cluster/k8s_aws_en.md b/doc/howto/cluster/multi_cluster/k8s_aws_en.md similarity index 100% rename from doc/howto/cluster/k8s_aws_en.md rename to doc/howto/cluster/multi_cluster/k8s_aws_en.md diff --git a/doc/howto/cluster/k8s_cn.md b/doc/howto/cluster/multi_cluster/k8s_cn.md similarity index 100% rename from doc/howto/cluster/k8s_cn.md rename to doc/howto/cluster/multi_cluster/k8s_cn.md diff --git a/doc/howto/cluster/k8s_distributed_cn.md b/doc/howto/cluster/multi_cluster/k8s_distributed_cn.md similarity index 100% rename from doc/howto/cluster/k8s_distributed_cn.md rename to doc/howto/cluster/multi_cluster/k8s_distributed_cn.md diff --git a/doc/howto/cluster/k8s_en.md b/doc/howto/cluster/multi_cluster/k8s_en.md similarity index 100% rename from doc/howto/cluster/k8s_en.md rename to doc/howto/cluster/multi_cluster/k8s_en.md diff --git a/doc/howto/cluster/openmpi_cn.md b/doc/howto/cluster/multi_cluster/openmpi_cn.md similarity index 100% rename from doc/howto/cluster/openmpi_cn.md rename to doc/howto/cluster/multi_cluster/openmpi_cn.md diff --git a/doc/howto/cluster/openmpi_en.md b/doc/howto/cluster/multi_cluster/openmpi_en.md similarity index 100% rename from doc/howto/cluster/openmpi_en.md rename to doc/howto/cluster/multi_cluster/openmpi_en.md diff --git a/doc/howto/cluster/preparations_cn.md b/doc/howto/cluster/preparations_cn.md new file mode 100644 index 00000000000..ce40697e703 --- /dev/null +++ b/doc/howto/cluster/preparations_cn.md @@ -0,0 +1,16 @@ +## 环境准备 + +1. 准备您的计算集群。计算集群通常由一组(几台到几千台规模)的Linux服务器组成。服务器之间可以通过局域网(LAN)联通,每台服务器具有集群中唯一的IP地址(或者可被DNS解析的主机名)。集群中的每台计算机通常被成为一个“节点”。 +1. 我们需要在集群的所有节点上安装 PaddlePaddle。 如果要启用GPU,还需要在节点上安装对应的GPU驱动以及CUDA。PaddlePaddle的安装可以参考[build_and_install](http://www.paddlepaddle.org/docs/develop/documentation/zh/getstarted/build_and_install/index_cn.html)的多种安装方式。我们推荐使用[Docker](http://www.paddlepaddle.org/docs/develop/documentation/zh/getstarted/build_and_install/docker_install_cn.html)安装方式来快速安装PaddlePaddle。 + +安装完成之后,执行下面的命令可以查看已经安装的版本(docker安装方式可以进入docker容器执行:`docker run -it paddlepaddle/paddle:[tag] /bin/bash`): +```bash +$ paddle version +PaddlePaddle 0.10.0, compiled with + with_avx: ON + with_gpu: OFF + with_double: OFF + with_python: ON + with_rdma: OFF + with_timer: OFF +``` diff --git a/doc/howto/cluster/preparations_en.md b/doc/howto/cluster/preparations_en.md new file mode 100644 index 00000000000..4b77b293907 --- /dev/null +++ b/doc/howto/cluster/preparations_en.md @@ -0,0 +1,17 @@ +## Preparations + +1. Prepare your computer cluster. It's normally a bunch of Linux servers connected by LAN. Each server will be assigned a unique IP address. The computers in the cluster can be called "nodes". +2. Install PaddlePaddle on every node. If you are going to take advantage of GPU cards, you'll also need to install proper driver and CUDA libraries. To install PaddlePaddle please read [this build and install](http://www.paddlepaddle.org/docs/develop/documentation/en/getstarted/build_and_install/index_en.html) document. We strongly recommend using [Docker installation](http://www.paddlepaddle.org/docs/develop/documentation/en/getstarted/build_and_install/docker_install_en.html). + +After installation, you can check the version by typing the below command (run a docker container if using docker: `docker run -it paddlepaddle/paddle:[tag] /bin/bash`): + +```bash +$ paddle version +PaddlePaddle 0.10.0rc, compiled with + with_avx: ON + with_gpu: OFF + with_double: OFF + with_python: ON + with_rdma: OFF + with_timer: OFF +``` diff --git a/doc/howto/index_cn.rst b/doc/howto/index_cn.rst index 37a34c113f3..dd39ef9e79d 100644 --- a/doc/howto/index_cn.rst +++ b/doc/howto/index_cn.rst @@ -5,7 +5,7 @@ :maxdepth: 1 cmd_parameter/index_cn.rst - cluster/cluster_train_cn.md + cluster/index_cn.rst capi/index_cn.rst rnn/index_cn.rst optimization/gpu_profiling_cn.rst diff --git a/doc/howto/index_en.rst b/doc/howto/index_en.rst index 3ba76d6aad1..ae8b86f75b5 100644 --- a/doc/howto/index_en.rst +++ b/doc/howto/index_en.rst @@ -5,6 +5,6 @@ HOW TO :maxdepth: 1 cmd_parameter/index_en.rst - cluster/cluster_train_en.md + cluster/index_en.rst rnn/index_en.rst optimization/gpu_profiling_en.rst -- GitLab From d2a31fb8aa653485c9364f896ba0f3c9c9253737 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Wed, 7 Feb 2018 15:20:50 +0800 Subject: [PATCH 076/138] refine unit test --- paddle/framework/channel_test.cc | 147 +++++++------------- paddle/framework/details/buffered_channel.h | 7 + 2 files changed, 56 insertions(+), 98 deletions(-) diff --git a/paddle/framework/channel_test.cc b/paddle/framework/channel_test.cc index df9e15e22b8..3b8150b4276 100644 --- a/paddle/framework/channel_test.cc +++ b/paddle/framework/channel_test.cc @@ -115,7 +115,7 @@ TEST(Channel, ConcurrentSendNonConcurrentReceiveWithSufficientBufferSize) { sum += i; } }); - std::this_thread::sleep_for(std::chrono::milliseconds(100)); // wait 0.5 sec + std::this_thread::sleep_for(std::chrono::milliseconds(100)); // wait 0.1 sec EXPECT_EQ(sum, 45U); CloseChannel(ch); @@ -144,38 +144,34 @@ TEST(Channel, SimpleUnbufferedChannelTest) { delete ch; } -// This tests that closing a buffered channel also unblocks -// any receivers waiting on the channel -TEST(Channel, BufferedChannelCloseUnblocksReceiversTest) { - auto ch = MakeChannel(1); +void ChannelCloseUnblocksReceiversTest(Channel *ch) { size_t num_threads = 5; std::thread t[num_threads]; bool thread_ended[num_threads]; - // Launches threads that try to read and are blocked because of no writers + // Launches threads that try to read and are blocked becausew of no writers for (size_t i = 0; i < num_threads; i++) { thread_ended[i] = false; t[i] = std::thread( [&](bool *p) { int data; - // All reads should return false EXPECT_EQ(ch->Receive(&data), false); *p = true; }, &thread_ended[i]); } - std::this_thread::sleep_for(std::chrono::milliseconds(100)); // wait + std::this_thread::sleep_for(std::chrono::milliseconds(100)); // wait 0.1 sec - // Verify that all threads are blocked + // Verify that all the threads are blocked for (size_t i = 0; i < num_threads; i++) { EXPECT_EQ(thread_ended[i], false); } - // Explicitly close the channel + // Explicitly close the thread // This should unblock all receivers CloseChannel(ch); - std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait + std::this_thread::sleep_for(std::chrono::milliseconds(100)); // wait 0.1 sec // Verify that all threads got unblocked for (size_t i = 0; i < num_threads; i++) { @@ -183,13 +179,12 @@ TEST(Channel, BufferedChannelCloseUnblocksReceiversTest) { } for (size_t i = 0; i < num_threads; i++) t[i].join(); - delete ch; } -// This tests that closing a buffered channel also unblocks -// any senders waiting for channel to have write space -TEST(Channel, BufferedChannelCloseUnblocksSendersTest) { - auto ch = MakeChannel(1); +void ChannelCloseUnblocksSendersTest(Channel *ch) { + using paddle::framework::details::Buffered; + using paddle::framework::details::UnBuffered; + size_t num_threads = 5; std::thread t[num_threads]; bool thread_ended[num_threads]; @@ -209,34 +204,56 @@ TEST(Channel, BufferedChannelCloseUnblocksSendersTest) { } std::this_thread::sleep_for(std::chrono::milliseconds(100)); // wait - // Verify that atleast 4 threads are blocked - int ct = 0; - for (size_t i = 0; i < num_threads; i++) { - if (thread_ended[i] == false) ct++; + if (dynamic_cast *>(ch)) { + // If ch is Buffered, atleast 4 threads must be blocked. + int ct = 0; + for (size_t i = 0; i < num_threads; i++) { + if (!thread_ended[i]) ct++; + } + EXPECT_GE(ct, 4); + } else { + // If ch is UnBuffered, all the threads should be blocked. + for (size_t i = 0; i < num_threads; i++) { + EXPECT_EQ(thread_ended[i], false); + } } - // Atleast 4 threads must be blocked - EXPECT_GE(ct, 4); - // Explicitly close the thread // This should unblock all senders CloseChannel(ch); - std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait + std::this_thread::sleep_for(std::chrono::milliseconds(100)); // wait // Verify that all threads got unblocked for (size_t i = 0; i < num_threads; i++) { EXPECT_EQ(thread_ended[i], true); } - // Verify that only 1 send was successful - ct = 0; - for (size_t i = 0; i < num_threads; i++) { - if (send_success[i]) ct++; + if (dynamic_cast *>(ch)) { + // Verify that only 1 send was successful + int ct = 0; + for (size_t i = 0; i < num_threads; i++) { + if (send_success[i]) ct++; + } + // Only 1 send must be successful + EXPECT_EQ(ct, 1); } - // Only 1 send must be successful - EXPECT_EQ(ct, 1); for (size_t i = 0; i < num_threads; i++) t[i].join(); +} + +// This tests that closing a buffered channel also unblocks +// any receivers waiting on the channel +TEST(Channel, BufferedChannelCloseUnblocksReceiversTest) { + auto ch = MakeChannel(1); + ChannelCloseUnblocksReceiversTest(ch); + delete ch; +} + +// This tests that closing a buffered channel also unblocks +// any senders waiting for channel to have write space +TEST(Channel, BufferedChannelCloseUnblocksSendersTest) { + auto ch = MakeChannel(1); + ChannelCloseUnblocksSendersTest(ch); delete ch; } @@ -244,40 +261,7 @@ TEST(Channel, BufferedChannelCloseUnblocksSendersTest) { // unblocks any receivers waiting for senders TEST(Channel, UnbufferedChannelCloseUnblocksReceiversTest) { auto ch = MakeChannel(0); - size_t num_threads = 5; - std::thread t[num_threads]; - bool thread_ended[num_threads]; - - // Launches threads that try to read and are blocked becausew of no writers - for (size_t i = 0; i < num_threads; i++) { - thread_ended[i] = false; - t[i] = std::thread( - [&](bool *p) { - int data; - EXPECT_EQ(ch->Receive(&data), false); - *p = true; - }, - &thread_ended[i]); - } - std::this_thread::sleep_for(std::chrono::milliseconds(500)); // wait 0.5 sec - - // Verify that all the threads are blocked - for (size_t i = 0; i < num_threads; i++) { - EXPECT_EQ(thread_ended[i], false); - } - - // Explicitly close the thread - // This should unblock all receivers - CloseChannel(ch); - - std::this_thread::sleep_for(std::chrono::milliseconds(500)); // wait 0.5 sec - - // Verify that all threads got unblocked - for (size_t i = 0; i < num_threads; i++) { - EXPECT_EQ(thread_ended[i], true); - } - - for (size_t i = 0; i < num_threads; i++) t[i].join(); + ChannelCloseUnblocksReceiversTest(ch); delete ch; } @@ -285,40 +269,7 @@ TEST(Channel, UnbufferedChannelCloseUnblocksReceiversTest) { // unblocks any senders waiting for senders TEST(Channel, UnbufferedChannelCloseUnblocksSendersTest) { auto ch = MakeChannel(0); - size_t num_threads = 5; - std::thread t[num_threads]; - bool thread_ended[num_threads]; - - // Launches threads that try to read and are blocked becausew of no writers - for (size_t i = 0; i < num_threads; i++) { - thread_ended[i] = false; - t[i] = std::thread( - [&](bool *p) { - int data = 10; - EXPECT_EQ(ch->Send(&data), false); - *p = true; - }, - &thread_ended[i]); - } - std::this_thread::sleep_for(std::chrono::milliseconds(500)); // wait 0.5 sec - - // Verify that all the threads are blocked - for (size_t i = 0; i < num_threads; i++) { - EXPECT_EQ(thread_ended[i], false); - } - - // Explicitly close the thread - // This should unblock all receivers - CloseChannel(ch); - - std::this_thread::sleep_for(std::chrono::milliseconds(500)); // wait 0.5 sec - - // Verify that all threads got unblocked - for (size_t i = 0; i < num_threads; i++) { - EXPECT_EQ(thread_ended[i], true); - } - - for (size_t i = 0; i < num_threads; i++) t[i].join(); + ChannelCloseUnblocksReceiversTest(ch); delete ch; } diff --git a/paddle/framework/details/buffered_channel.h b/paddle/framework/details/buffered_channel.h index 00b63da4da7..44bf84eb309 100644 --- a/paddle/framework/details/buffered_channel.h +++ b/paddle/framework/details/buffered_channel.h @@ -25,6 +25,13 @@ namespace paddle { namespace framework { namespace details { +// Four of the properties of Buffered Channel: +// - A send to a full channel blocks temporarily until a receive from the +// channel or the channel is closed +// - A receive from an empty channel blocks temporarily until a send to the +// channel or the channel is closed +// - A send to a closed channel returns false immediately +// - A receive from a closed channel returns false immediately template class Buffered : public paddle::framework::Channel { friend Channel* paddle::framework::MakeChannel(size_t); -- GitLab From dff5a8e6da5038e549c7ab66e1dbb94f619646d9 Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Wed, 7 Feb 2018 15:48:50 +0800 Subject: [PATCH 077/138] add the properties of buffered channel and unbuffered channel --- paddle/framework/details/buffered_channel.h | 9 +++++---- paddle/framework/details/unbuffered_channel.h | 7 +++++++ 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/paddle/framework/details/buffered_channel.h b/paddle/framework/details/buffered_channel.h index 44bf84eb309..4275f919bad 100644 --- a/paddle/framework/details/buffered_channel.h +++ b/paddle/framework/details/buffered_channel.h @@ -27,11 +27,12 @@ namespace details { // Four of the properties of Buffered Channel: // - A send to a full channel blocks temporarily until a receive from the -// channel or the channel is closed +// channel or the channel is closed. // - A receive from an empty channel blocks temporarily until a send to the -// channel or the channel is closed -// - A send to a closed channel returns false immediately -// - A receive from a closed channel returns false immediately +// channel or the channel is closed. +// - A send to a closed channel returns false immediately. +// - A receive from a closed channel returns false immediately. + template class Buffered : public paddle::framework::Channel { friend Channel* paddle::framework::MakeChannel(size_t); diff --git a/paddle/framework/details/unbuffered_channel.h b/paddle/framework/details/unbuffered_channel.h index 815cebad2d8..bc4233af738 100644 --- a/paddle/framework/details/unbuffered_channel.h +++ b/paddle/framework/details/unbuffered_channel.h @@ -23,6 +23,13 @@ namespace paddle { namespace framework { namespace details { +// Four of the properties of UnBuffered Channel: +// - A send to a channel blocks temporarily until a receive from the +// channel or the channel is closed. +// - A receive from a channel blocks temporarily until a send to the +// channel or the channel is closed. +// - A send to a closed channel returns false immediately. +// - A receive from a closed channel returns false immediately. template class UnBuffered : public paddle::framework::Channel { friend Channel* paddle::framework::MakeChannel(size_t); -- GitLab From ee7d8421907affb362b9ed9baa0150f734d2c33c Mon Sep 17 00:00:00 2001 From: dangqingqing Date: Wed, 7 Feb 2018 15:01:23 +0800 Subject: [PATCH 078/138] Update doc and follow comments. --- paddle/operators/target_assign_op.cc | 58 ++++++++++++++----- paddle/operators/target_assign_op.cu | 26 ++++----- paddle/operators/target_assign_op.h | 47 ++++++++------- .../v2/fluid/tests/test_target_assign_op.py | 4 -- 4 files changed, 83 insertions(+), 52 deletions(-) diff --git a/paddle/operators/target_assign_op.cc b/paddle/operators/target_assign_op.cc index 9c7d625136b..615ca857ceb 100644 --- a/paddle/operators/target_assign_op.cc +++ b/paddle/operators/target_assign_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -61,10 +61,12 @@ class TargetAssignOp : public framework::OperatorWithKernel { "The rank of Input(NegIndices) must be 2."); PADDLE_ENFORCE_EQ(blabel_dims[0], slabel_dims[0], - "The 1st dimension of Input(EncodedGTBBox) and " - "Input(GTScoreLabel) must be the same."); + "The 1st dimension (means the total number of " + "ground-truth bounding boxes) of Input(EncodedGTBBox) " + "and Input(GTScoreLabel) must be the same."); PADDLE_ENFORCE_EQ(blabel_dims[1], mi_dims[1], - "The 2nd dimension of Input(EncodedGTBBox) and " + "The 2nd dimension (means the number of priod boxes) " + "of Input(EncodedGTBBox) and " "Input(MatchIndices) must be the same."); PADDLE_ENFORCE_EQ(blabel_dims[2], 4, "The 3rd dimension of Input(EncodedGTBBox) must be 4."); @@ -101,31 +103,31 @@ class TargetAssignOpMaker : public framework::OpProtoAndCheckerMaker { "labels with shape [Ng, 1], where the Ng is the same as it in " "the input of EncodedGTBBox."); AddInput("MatchIndices", - "(Tensor, default LoDTensor), The input matched indices " + "(Tensor, default Tensor), The input matched indices " "with shape [N, Np], where N is the batch size, Np is the same " "as it in the input of EncodedGTBBox. If MatchIndices[i][j] " "is -1, the j-th prior box is not matched to any ground-truh " "box in i-th instance."); AddInput("NegIndices", "(LoDTensor, default LoDTensor), The input negative example " - "indics with shape [Neg, 1], where is the total number of " + "indices with shape [Neg, 1], where is the total number of " "negative example indices."); AddAttr("background_label", - "(int, default 0), Label id for background class.") + "(int, default 0), Label index of background class.") .SetDefault(0); AddOutput("PredBBoxLabel", "(Tensor), The output encoded ground-truth labels " "with shape [N, Np, 4], N is the batch size and Np, 4 is the " "same as they in input of EncodedGTBBox. If MatchIndices[i][j] " "is -1, the PredBBoxLabel[i][j][:] is the encoded ground-truth " - "box for background_label_id in i-th instance."); + "box for background_label in i-th instance."); AddOutput("PredBBoxWeight", "(Tensor), The weight for PredBBoxLabel with the shape " "of [N, Np, 1]"); AddOutput("PredScoreLabel", "(Tensor, default Tensor), The output score labels for " "each predictions with shape [N, Np, 1]. If MatchIndices[i][j] " - "is -1, PredScoreLabel[i][j] = background_label_id."); + "is -1, PredScoreLabel[i][j] = background_label."); AddOutput("PredScoreWeight", "(Tensor), The weight for PredScoreLabel with the shape " "of [N, Np, 1]"); @@ -136,19 +138,47 @@ and regression targets to each prior box as well as weights to each prior box. The weights is used to specify which prior box would not contribute to training loss. -TODO(dang qingqing) add an example. +For each instance, the output `PredBBoxLabel`, `PredBBoxWeight`, +`PredScoreLabel` and `PredScoreWeight` are assigned based on `MatchIndices`. +Assumed that the row offset for each instance in `EncodedGTBBox` is called lod, +this operato assigns classification/regression targets by performing the +following steps: + +1. Assigning all outpts based on `MatchIndices`: + +If id = MatchIndices[i][j] > 0, + + PredBBoxLabel[i][j] = EncodedGTBBox[lod[i] + id][j] + PredBBoxWeight[i][j] = 1. + PredScoreLabel[i][j] = GTScoreLabel[lod[i] + id] + PredScoreWeight[i][j] = 1. + +Otherwise, + + PredBBoxLabel[j][j] = [0., 0., 0., 0.] + PredBBoxWeight[i][j] = 0. + PredScoreLabel[i][j] = background_label + PredScoreWeight[i][j] = 0. + +2. Assigning PredScoreWeight based on `NegIndices`: + +Assumed that the row offset for each instance in `NegIndices` is caleed neg_lod, +for i-th instance and all ids of NegIndices in this instance: + + PredScoreLabel[i][id] = background_label + PredScoreWeight[i][id] = 1.0 )DOC"); } }; template -struct UpdateTargetLabelFunctor { +struct NegTargetAssignFunctor { void operator()(const platform::CPUDeviceContext& ctx, const int* neg_indices, const size_t* lod, const int num, const int num_prior_box, const int background_label, int* out_label, T* out_label_wt) { for (int i = 0; i < num; ++i) { - for (int j = lod[i]; j < lod[i + 1]; ++j) { + for (size_t j = lod[i]; j < lod[i + 1]; ++j) { int id = neg_indices[j]; out_label[i * num_prior_box + id] = background_label; out_label_wt[i * num_prior_box + id] = static_cast(1.0); @@ -157,8 +187,8 @@ struct UpdateTargetLabelFunctor { } }; -template struct UpdateTargetLabelFunctor; -template struct UpdateTargetLabelFunctor; +template struct NegTargetAssignFunctor; +template struct NegTargetAssignFunctor; } // namespace operators } // namespace paddle diff --git a/paddle/operators/target_assign_op.cu b/paddle/operators/target_assign_op.cu index c04de86ec58..fc0a1000a42 100644 --- a/paddle/operators/target_assign_op.cu +++ b/paddle/operators/target_assign_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,38 +18,38 @@ namespace paddle { namespace operators { template -__global__ void UpdateTargetLabelKernel(const int* neg_indices, - const size_t* lod, const int num, - const int num_prior_box, - const int background_label, - int* out_label, T* out_label_wt) { +__global__ void NegTargetAssignKernel(const int* neg_indices, const size_t* lod, + const int num, const int num_prior_box, + const int background_label, + int* out_label, T* out_label_wt) { int bidx = blockIdx.x; int st = lod[bidx]; int ed = lod[bidx + 1]; + int row_start = bidx * num_prior_box; for (int i = st + threadIdx.x; i < ed; i += blockDim.x) { - int id = neg_indices[i]; - out_label[bidx * num_prior_box + id] = background_label; - out_label_wt[bidx * num_prior_box + id] = 1.; + int id = row_start + neg_indices[i]; + out_label[id] = background_label; + out_label_wt[id] = 1.; } } template -struct UpdateTargetLabelFunctor { +struct NegTargetAssignFunctor { void operator()(const platform::CUDADeviceContext& ctx, const int* neg_indices, const size_t* lod, const int num, const int num_prior_box, const int background_label, int* out_label, T* out_label_wt) { const int block_size = 256; const int grid_size = num; - UpdateTargetLabelKernel<<>>( + NegTargetAssignKernel<<>>( neg_indices, lod, num, num_prior_box, background_label, out_label, out_label_wt); } }; -template struct UpdateTargetLabelFunctor; -template struct UpdateTargetLabelFunctor; +template struct NegTargetAssignFunctor; +template struct NegTargetAssignFunctor; } // namespace operators } // namespace paddle diff --git a/paddle/operators/target_assign_op.h b/paddle/operators/target_assign_op.h index 267bdbf1eff..82fca5724c0 100644 --- a/paddle/operators/target_assign_op.h +++ b/paddle/operators/target_assign_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -56,40 +56,41 @@ struct TargetAssignFunctor { int row = i / num_prior_box_; int col = i - row * num_prior_box_; - size_t off = lod_[row]; + size_t row_off = lod_[row]; + int offset = row * num_prior_box_ + col; - int id = match_indices_[row * num_prior_box_ + col]; - T* obox = out_box_ + (row * num_prior_box_ + col) * 4; - int* olabel = out_label_ + row * num_prior_box_ + col; - T* obox_wt = out_box_wt_ + row * num_prior_box_ + col; - T* olabel_wt = out_label_wt_ + row * num_prior_box_ + col; + int id = match_indices_[offset]; + T* obox = out_box_ + offset * 4; + int* olabel = out_label_ + offset; + T* obox_wt = out_box_wt_ + offset; + T* olabel_wt = out_label_wt_ + offset; if (id > -1) { - const T* gtbox = gt_box_ + ((off + id) * num_prior_box_ + col) * 4; + const T* gtbox = gt_box_ + ((row_off + id) * num_prior_box_ + col) * 4; obox[0] = gtbox[0]; obox[1] = gtbox[1]; obox[2] = gtbox[2]; obox[3] = gtbox[3]; - olabel[0] = gt_label_[off + id]; - obox_wt[0] = 1.; - olabel_wt[0] = 1.; + olabel[0] = gt_label_[row_off + id]; + obox_wt[0] = static_cast(1.); + olabel_wt[0] = static_cast(1.); } else { - obox[0] = 0.; - obox[1] = 0.; - obox[2] = 0.; - obox[3] = 0.; + obox[0] = static_cast(0.); + obox[1] = static_cast(0.); + obox[2] = static_cast(0.); + obox[3] = static_cast(0.); olabel[0] = background_label_; - obox_wt[0] = 0.; - olabel_wt[0] = 0.; + obox_wt[0] = static_cast(0.); + olabel_wt[0] = static_cast(0.); } } }; template -struct UpdateTargetLabelFunctor { +struct NegTargetAssignFunctor { void operator()(const platform::DeviceContext& ctx, const int* neg_indices, const size_t* lod, const int num, const int num_prior_box, const int background_label, int* out_label, @@ -130,7 +131,11 @@ class TargetAssignKernel : public framework::OpKernel { int64_t num_prior_box = match_indices->dims()[1]; auto gt_lod = enc_gt_box->lod().back(); + auto gt_label_lod = gt_label->lod().back(); auto neg_lod = neg_indices->lod().back(); + for (size_t i = 0; i < gt_lod.size(); ++i) { + PADDLE_ENFORCE_EQ(gt_lod.data()[i], gt_label_lod.data()[i]); + } size_t* gt_lod_data = gt_lod.data(ctx.GetPlace()); size_t* neg_lod_data = neg_lod.data(ctx.GetPlace()); @@ -145,9 +150,9 @@ class TargetAssignKernel : public framework::OpKernel { num * num_prior_box); for_range(functor); - UpdateTargetLabelFunctor update_functor; - update_functor(device_ctx, neg_idx_data, neg_lod_data, num, num_prior_box, - background_label, olabel_data, olabel_wt_data); + NegTargetAssignFunctor neg_trg_functor; + neg_trg_functor(device_ctx, neg_idx_data, neg_lod_data, num, num_prior_box, + background_label, olabel_data, olabel_wt_data); } }; diff --git a/python/paddle/v2/fluid/tests/test_target_assign_op.py b/python/paddle/v2/fluid/tests/test_target_assign_op.py index 49edff5c7fd..8a1155c6217 100755 --- a/python/paddle/v2/fluid/tests/test_target_assign_op.py +++ b/python/paddle/v2/fluid/tests/test_target_assign_op.py @@ -14,8 +14,6 @@ import unittest import numpy as np -import math -import sys import random from op_test import OpTest @@ -89,8 +87,6 @@ class TestTargetAssginOp(OpTest): num_class = 21 gt_lod = [0, 5, 11, 23] neg_lod = [0, 4, 7, 13] - #gt_lod = [0, 2, 5] - #neg_lod = [0, 2, 4] batch_size = len(gt_lod) - 1 num_gt = gt_lod[-1] background_label = 0 -- GitLab From b41205d9a6b71f26694c2cdb979555c261548629 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Wed, 7 Feb 2018 02:57:13 -0500 Subject: [PATCH 079/138] Disable BUILD_TESTS for warpctc (#8210) * It will sightly faster compile and make warpctc compile well on CUDA 9 and GCC 5.5 --- cmake/external/warpctc.cmake | 1 + 1 file changed, 1 insertion(+) diff --git a/cmake/external/warpctc.cmake b/cmake/external/warpctc.cmake index 7cb4efa7bff..5fa60df7b3f 100644 --- a/cmake/external/warpctc.cmake +++ b/cmake/external/warpctc.cmake @@ -52,6 +52,7 @@ ExternalProject_Add( -DWITH_TORCH=OFF -DCMAKE_DISABLE_FIND_PACKAGE_Torch=ON -DBUILD_SHARED=ON + -DBUILD_TESTS=OFF -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE} ${EXTERNAL_OPTIONAL_ARGS} -- GitLab From 56ebb76c000fd56ba46baf64781a8c2df18955cd Mon Sep 17 00:00:00 2001 From: chengduoZH Date: Wed, 7 Feb 2018 15:48:50 +0800 Subject: [PATCH 080/138] add the properties of buffered channel and unbuffered channel --- paddle/framework/channel_test.cc | 4 ++-- paddle/framework/details/buffered_channel.h | 9 +++++---- paddle/framework/details/unbuffered_channel.h | 7 +++++++ 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/paddle/framework/channel_test.cc b/paddle/framework/channel_test.cc index 3b8150b4276..95360d7b776 100644 --- a/paddle/framework/channel_test.cc +++ b/paddle/framework/channel_test.cc @@ -149,7 +149,7 @@ void ChannelCloseUnblocksReceiversTest(Channel *ch) { std::thread t[num_threads]; bool thread_ended[num_threads]; - // Launches threads that try to read and are blocked becausew of no writers + // Launches threads that try to read and are blocked because of no writers for (size_t i = 0; i < num_threads; i++) { thread_ended[i] = false; t[i] = std::thread( @@ -167,7 +167,7 @@ void ChannelCloseUnblocksReceiversTest(Channel *ch) { EXPECT_EQ(thread_ended[i], false); } - // Explicitly close the thread + // Explicitly close the channel // This should unblock all receivers CloseChannel(ch); diff --git a/paddle/framework/details/buffered_channel.h b/paddle/framework/details/buffered_channel.h index 44bf84eb309..4275f919bad 100644 --- a/paddle/framework/details/buffered_channel.h +++ b/paddle/framework/details/buffered_channel.h @@ -27,11 +27,12 @@ namespace details { // Four of the properties of Buffered Channel: // - A send to a full channel blocks temporarily until a receive from the -// channel or the channel is closed +// channel or the channel is closed. // - A receive from an empty channel blocks temporarily until a send to the -// channel or the channel is closed -// - A send to a closed channel returns false immediately -// - A receive from a closed channel returns false immediately +// channel or the channel is closed. +// - A send to a closed channel returns false immediately. +// - A receive from a closed channel returns false immediately. + template class Buffered : public paddle::framework::Channel { friend Channel* paddle::framework::MakeChannel(size_t); diff --git a/paddle/framework/details/unbuffered_channel.h b/paddle/framework/details/unbuffered_channel.h index 815cebad2d8..bc4233af738 100644 --- a/paddle/framework/details/unbuffered_channel.h +++ b/paddle/framework/details/unbuffered_channel.h @@ -23,6 +23,13 @@ namespace paddle { namespace framework { namespace details { +// Four of the properties of UnBuffered Channel: +// - A send to a channel blocks temporarily until a receive from the +// channel or the channel is closed. +// - A receive from a channel blocks temporarily until a send to the +// channel or the channel is closed. +// - A send to a closed channel returns false immediately. +// - A receive from a closed channel returns false immediately. template class UnBuffered : public paddle::framework::Channel { friend Channel* paddle::framework::MakeChannel(size_t); -- GitLab From 5210ff015870ecd002b1048a1aa45c94116e6bbc Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Wed, 7 Feb 2018 16:04:12 +0800 Subject: [PATCH 081/138] add cpp_data_feeding.md --- doc/design/cpp_data_feeding.md | 79 ++++++++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100644 doc/design/cpp_data_feeding.md diff --git a/doc/design/cpp_data_feeding.md b/doc/design/cpp_data_feeding.md new file mode 100644 index 00000000000..40205350f99 --- /dev/null +++ b/doc/design/cpp_data_feeding.md @@ -0,0 +1,79 @@ +# C++ Data Feeding + +In training with Paddle V2 API, data feeding wholly dependents on Python code. To get rid of the Python environment and achieve the goal of "wrapping the whole training by a while loop op" in Paddle Fluid, a C++ data feeding mechanism is required. + +In this document we show the fundamental design of C++ data feeding process, which includes the data reading, shuffling and batching. + +## Reader + +A new concept named 'Reader' is introduced. `Reader` is a series of inherited classes which can be hold by our `Variable` and they are used to read or process file data. + + +### `ReaderBase` + +`ReaderBase` is the abstract base class of all readers. It defines the all readers' interfaces. + +```cpp +class ReaderBase { + public: + explicit ReaderBase(const std::vector& shapes) : shapes_(shapes) { + PADDLE_ENFORCE(!shapes_.empty()); + } + // Read the next batch of data. (A 'batch' can be only one instance) + virtual void ReadNext(std::vector* out) = 0; + // Show whether the next bacth exists. + virtual bool HasNext() const = 0; + + // Reinitialize the reader and read the file from the begin. + virtual void ReInit() = 0; + + // Get a certain read in data's shape. + DDim shape(size_t idx) const; + // Get shapes of all read in data. + std::vector shapes() const { return shapes_; } + // Set shapes of read in data. + void set_shapes(const std::vector& shapes) { shapes_ = shapes; } + + virtual ~ReaderBase() {} + + protected: + std::vector shapes_; +}; +``` + +### `FileReader` and `DecoratedReader` + +These two classes are derived from the `ReaderBase` and will further be derived by respective specific readers. That is to say, in our design, there are two kinds of readers: file readers and decorated readers. A file reader reads from a file of some specific format, and yield only one instance of data at a time. e.g. RecordIO reader, jpg reader, .... A decorated reader takes another reader(both file reader and decorated reader are OK) as its 'underlying reader'. It gets data from its underlying reader, does some process on them(shuffling, or batching), then yields processed data. The output data of a decorated reader can be a single instance or a batch. `ShuffleReader` and `BatchReader` are both decorated readers. + +All the readers share exactly the same interfaces defined in `ReaderBase`. So they can be decorated for more than one time: We can **shuffle** a reader's outputs and then **batch** the shuffle outputs. The interface consistency also allows related ops use readers without knowing what they are exactly. + + +### `ReaderHolder` + +Different readers belong to different class types. It leads to a problem: How can we drop them into `Variable`s and fetch them out by a unified method? For example, if a Variable holds a `BatchReader`, we can not get it by the following code: + +```cpp +var->Get("batch_reader"); +``` + +we have to write: + +```cpp +var->Get("batch_reader"); +``` + +This requires each time getting a reader from a variable we must know the reader's type exactly. It is nearly impossible. + +To solve this problem, we introduce `ReaderHolder` as a wrapper. It acts as an empty decorator of `ReaderBase`, which erases reader's type. With `ReaderHolder` we are able to fetch all types of readers by `var->Get("...")` and regard the obtained object as a reader. + +## Related Operators + +To create and invoke readers, some now ops are introduced: + +### `CreateReaderOp` + +Each reader has its creating op. File readers' creating ops have no input and yield the created file reader as its output. Decorated readers' creating ops take the underlying readers as inputs and then yield new decorated readers. + +### `ReadOp` + +A reader is only a Variable. It cannot trigger the reading process by itself. So we add the `ReadOp` to execute it. A `ReadOp` takes a reader Variable as its input. Each time it runs, it invokes the reader‘s `ReadNext()` function and gets a new batch of data(or only one instance of data, if we use file reader directly). The output data of a reader are in the form of `std::vector`, so the `ReadOp` also needs to split the vector and move LoDTensors to their respective output Variables. -- GitLab From 16e005e917e321bd8094e24482fb36047401b626 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Wed, 7 Feb 2018 17:11:03 +0800 Subject: [PATCH 082/138] fix dead links after adjustation --- doc/getstarted/concepts/use_concepts_cn.rst | 2 +- .../{ => multi_cluster}/src/add_security_group.png | Bin .../cluster/{ => multi_cluster}/src/create_efs.png | Bin .../{ => multi_cluster}/src/k8s-paddle-arch.png | Bin .../{ => multi_cluster}/src/k8s_data/Dockerfile | 0 .../{ => multi_cluster}/src/k8s_data/README.md | 0 .../{ => multi_cluster}/src/k8s_data/get_data.sh | 0 .../{ => multi_cluster}/src/k8s_train/Dockerfile | 0 .../{ => multi_cluster}/src/k8s_train/README.md | 0 .../{ => multi_cluster}/src/k8s_train/start.sh | 0 .../src/k8s_train/start_paddle.py | 0 .../{ => multi_cluster}/src/pserver_and_trainer.png | Bin .../src/route53_create_recordset.png | Bin .../{ => multi_cluster}/src/route53_create_zone.png | Bin .../src/worker_security_group.png | Bin doc/howto/index_cn.rst | 2 +- 16 files changed, 2 insertions(+), 2 deletions(-) rename doc/howto/cluster/{ => multi_cluster}/src/add_security_group.png (100%) rename doc/howto/cluster/{ => multi_cluster}/src/create_efs.png (100%) rename doc/howto/cluster/{ => multi_cluster}/src/k8s-paddle-arch.png (100%) rename doc/howto/cluster/{ => multi_cluster}/src/k8s_data/Dockerfile (100%) rename doc/howto/cluster/{ => multi_cluster}/src/k8s_data/README.md (100%) rename doc/howto/cluster/{ => multi_cluster}/src/k8s_data/get_data.sh (100%) rename doc/howto/cluster/{ => multi_cluster}/src/k8s_train/Dockerfile (100%) rename doc/howto/cluster/{ => multi_cluster}/src/k8s_train/README.md (100%) rename doc/howto/cluster/{ => multi_cluster}/src/k8s_train/start.sh (100%) rename doc/howto/cluster/{ => multi_cluster}/src/k8s_train/start_paddle.py (100%) rename doc/howto/cluster/{ => multi_cluster}/src/pserver_and_trainer.png (100%) rename doc/howto/cluster/{ => multi_cluster}/src/route53_create_recordset.png (100%) rename doc/howto/cluster/{ => multi_cluster}/src/route53_create_zone.png (100%) rename doc/howto/cluster/{ => multi_cluster}/src/worker_security_group.png (100%) diff --git a/doc/getstarted/concepts/use_concepts_cn.rst b/doc/getstarted/concepts/use_concepts_cn.rst index e695ff283e2..608f49f5a96 100644 --- a/doc/getstarted/concepts/use_concepts_cn.rst +++ b/doc/getstarted/concepts/use_concepts_cn.rst @@ -4,7 +4,7 @@ PaddlePaddle是源于百度的一个深度学习平台。PaddlePaddle为深度学习研究人员提供了丰富的API,可以轻松地完成神经网络配置,模型训练等任务。 这里将介绍PaddlePaddle的基本使用概念,并且展示了如何利用PaddlePaddle来解决一个经典的线性回归问题。 -在使用该文档之前,请参考 `安装文档 <../build_and_install/index_cn.html>`_ 完成PaddlePaddle的安装。 +在使用该文档之前,请参考 `安装文档 <../../build_and_install/index_cn.html>`_ 完成PaddlePaddle的安装。 配置网络 diff --git a/doc/howto/cluster/src/add_security_group.png b/doc/howto/cluster/multi_cluster/src/add_security_group.png similarity index 100% rename from doc/howto/cluster/src/add_security_group.png rename to doc/howto/cluster/multi_cluster/src/add_security_group.png diff --git a/doc/howto/cluster/src/create_efs.png b/doc/howto/cluster/multi_cluster/src/create_efs.png similarity index 100% rename from doc/howto/cluster/src/create_efs.png rename to doc/howto/cluster/multi_cluster/src/create_efs.png diff --git a/doc/howto/cluster/src/k8s-paddle-arch.png b/doc/howto/cluster/multi_cluster/src/k8s-paddle-arch.png similarity index 100% rename from doc/howto/cluster/src/k8s-paddle-arch.png rename to doc/howto/cluster/multi_cluster/src/k8s-paddle-arch.png diff --git a/doc/howto/cluster/src/k8s_data/Dockerfile b/doc/howto/cluster/multi_cluster/src/k8s_data/Dockerfile similarity index 100% rename from doc/howto/cluster/src/k8s_data/Dockerfile rename to doc/howto/cluster/multi_cluster/src/k8s_data/Dockerfile diff --git a/doc/howto/cluster/src/k8s_data/README.md b/doc/howto/cluster/multi_cluster/src/k8s_data/README.md similarity index 100% rename from doc/howto/cluster/src/k8s_data/README.md rename to doc/howto/cluster/multi_cluster/src/k8s_data/README.md diff --git a/doc/howto/cluster/src/k8s_data/get_data.sh b/doc/howto/cluster/multi_cluster/src/k8s_data/get_data.sh similarity index 100% rename from doc/howto/cluster/src/k8s_data/get_data.sh rename to doc/howto/cluster/multi_cluster/src/k8s_data/get_data.sh diff --git a/doc/howto/cluster/src/k8s_train/Dockerfile b/doc/howto/cluster/multi_cluster/src/k8s_train/Dockerfile similarity index 100% rename from doc/howto/cluster/src/k8s_train/Dockerfile rename to doc/howto/cluster/multi_cluster/src/k8s_train/Dockerfile diff --git a/doc/howto/cluster/src/k8s_train/README.md b/doc/howto/cluster/multi_cluster/src/k8s_train/README.md similarity index 100% rename from doc/howto/cluster/src/k8s_train/README.md rename to doc/howto/cluster/multi_cluster/src/k8s_train/README.md diff --git a/doc/howto/cluster/src/k8s_train/start.sh b/doc/howto/cluster/multi_cluster/src/k8s_train/start.sh similarity index 100% rename from doc/howto/cluster/src/k8s_train/start.sh rename to doc/howto/cluster/multi_cluster/src/k8s_train/start.sh diff --git a/doc/howto/cluster/src/k8s_train/start_paddle.py b/doc/howto/cluster/multi_cluster/src/k8s_train/start_paddle.py similarity index 100% rename from doc/howto/cluster/src/k8s_train/start_paddle.py rename to doc/howto/cluster/multi_cluster/src/k8s_train/start_paddle.py diff --git a/doc/howto/cluster/src/pserver_and_trainer.png b/doc/howto/cluster/multi_cluster/src/pserver_and_trainer.png similarity index 100% rename from doc/howto/cluster/src/pserver_and_trainer.png rename to doc/howto/cluster/multi_cluster/src/pserver_and_trainer.png diff --git a/doc/howto/cluster/src/route53_create_recordset.png b/doc/howto/cluster/multi_cluster/src/route53_create_recordset.png similarity index 100% rename from doc/howto/cluster/src/route53_create_recordset.png rename to doc/howto/cluster/multi_cluster/src/route53_create_recordset.png diff --git a/doc/howto/cluster/src/route53_create_zone.png b/doc/howto/cluster/multi_cluster/src/route53_create_zone.png similarity index 100% rename from doc/howto/cluster/src/route53_create_zone.png rename to doc/howto/cluster/multi_cluster/src/route53_create_zone.png diff --git a/doc/howto/cluster/src/worker_security_group.png b/doc/howto/cluster/multi_cluster/src/worker_security_group.png similarity index 100% rename from doc/howto/cluster/src/worker_security_group.png rename to doc/howto/cluster/multi_cluster/src/worker_security_group.png diff --git a/doc/howto/index_cn.rst b/doc/howto/index_cn.rst index dd39ef9e79d..0c534f107b6 100644 --- a/doc/howto/index_cn.rst +++ b/doc/howto/index_cn.rst @@ -1,4 +1,4 @@ -进阶指南 +进阶使用 ======== .. toctree:: -- GitLab From 83df277ff123d7b102f405cdb512457841f11a32 Mon Sep 17 00:00:00 2001 From: QI JUN Date: Wed, 7 Feb 2018 17:33:27 +0800 Subject: [PATCH 083/138] Refine get_cfgs method of memory optimization transpiler (#8080) * refine get cfgs method in memory optimization transpiler * clean code --- .../fluid/memory_optimization_transpiler.py | 21 +++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/python/paddle/v2/fluid/memory_optimization_transpiler.py b/python/paddle/v2/fluid/memory_optimization_transpiler.py index 2b00923f5e8..11e2cfb3cc5 100644 --- a/python/paddle/v2/fluid/memory_optimization_transpiler.py +++ b/python/paddle/v2/fluid/memory_optimization_transpiler.py @@ -145,7 +145,6 @@ class ControlFlowGraph(object): if op.type() == "while" or op.type() == "while_grad": continue block_desc = op.block() - self.current_block_desc = block_desc is_forward = i < self._forward_num if self.pool: defs_can_optimize = filter( @@ -208,17 +207,17 @@ def get_cfgs(input_program): while_sub_block_ids = [] while_grad_sub_block_ids = [] - while_op_output = set() while_block_id_pair = [] + while_op_dict = {} for i in range(op_size): op = block_desc.op(i) if op.type() == "while": while_sub_block_ids.append(op.attr("sub_block").id) - while_op_output.update(op.output_arg_names()) + while_op_dict[op.attr("sub_block").id] = op elif op.type() == "while_grad": while_grad_sub_block_ids.append(op.attr("sub_block").id) - while_op_output.update(op.output_arg_names()) + while_op_dict[op.attr("sub_block").id] = op # Find while/while_grad block pair for grad_id in while_grad_sub_block_ids: @@ -240,6 +239,10 @@ def get_cfgs(input_program): for i in range(while_grad_block_op_size): while_block_ops.append(while_grad_block.op(i)) + while_op_output = set() + while_op_output.update(while_op_dict[parent_id].output_arg_names()) + while_op_output.update(while_op_dict[grad_id].output_arg_names()) + ops_list.append((while_block_ops, while_block_op_size, while_op_output)) # Process rest while block ops @@ -250,9 +253,15 @@ def get_cfgs(input_program): for i in range(while_block_op_size): while_block_ops.append(while_block.op(i)) - ops_list.append((while_block_ops, while_block_op_size)) + while_op_output = set() + while_op_output.update(while_op_dict[parent_id].output_arg_names()) + + ops_list.append((while_block_ops, while_block_op_size, while_op_output)) - cfgs = [ControlFlowGraph(input_program, i, j, k) for i, j, k in ops_list] + cfgs = [ + ControlFlowGraph(input_program, ops, forward_num, skip_opt) + for ops, forward_num, skip_opt in ops_list + ] return cfgs -- GitLab From c74445017d35ad344c5fc2a19a35c47a72358f3c Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Wed, 7 Feb 2018 19:18:48 +0800 Subject: [PATCH 084/138] refine distribute transpiler --- .../paddle/v2/fluid/distribute_transpiler.py | 124 +++++++++++++----- 1 file changed, 89 insertions(+), 35 deletions(-) diff --git a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/v2/fluid/distribute_transpiler.py index 121b407cae4..4eb103cc6ba 100644 --- a/python/paddle/v2/fluid/distribute_transpiler.py +++ b/python/paddle/v2/fluid/distribute_transpiler.py @@ -300,6 +300,9 @@ class DistributeTranspiler: pass return orig_shape + def _op_input_var(self, op, varname): + pass + def _is_op_on_pserver(self, endpoint, all_ops, idx): """ Recursively check if the op need to run on current server. @@ -309,29 +312,35 @@ class DistributeTranspiler: p.name for p in self.param_grad_ep_mapping[endpoint]["params"] ] op = all_ops[idx] - if op.inputs.has_key("Param"): - if op.inputs["Param"].name in param_names: + input_names = set(op.input_names) + # TODO(typhoonzero): using Param and Grad input name to identify + # that the operator is an optimization operator, need a better way. + if "Param" in input_names: + if op.input("Param")[0] in param_names: return True else: for n in param_names: - if same_or_split_var(n, op.inputs[ - "Param"].name) and n != op.inputs["Param"].name: + if same_or_split_var(n, op.input("Param")[0]) \ + and n != op.input("Param")[0]: return True return False else: j = idx - 1 while j >= 0: prev_op = all_ops[j] - prev_output_names = [o.name for o in prev_op.outputs.values()] - prev_input_names = [o.name for o in prev_op.inputs.values()] + # prev_output_names = [o.name for o in prev_op.outputs.values()] + # prev_input_names = [o.name for o in prev_op.inputs.values()] + # NOTE(typhoonzero): consider list input/output + prev_output_names = prev_op.desc.output_arg_names() + prev_input_names = prev_op.desc.input_arg_names() found1 = False found2 = False - for _, v in op.inputs.iteritems(): - if v.name in prev_output_names: + for varname in op.desc.input_arg_names(): + if varname in prev_output_names: found1 = self._is_op_on_pserver(endpoint, all_ops, j) # later ops may produce output for prev op's next batch use. - for _, v in op.outputs.iteritems(): - if v.name in prev_input_names: + for varname in op.desc.output_arg_names(): + if varname in prev_input_names: found2 = self._is_op_on_pserver(endpoint, all_ops, j) if found1 or found2: return True @@ -342,11 +351,11 @@ class DistributeTranspiler: new_inputs = dict() # update param/grad shape first, then other inputs like # moment can use the updated shape - for key, var in opt_op.inputs.iteritems(): + for key in opt_op.input_names: if key == "Grad": grad_block = None for g in self.param_grad_ep_mapping[endpoint]["grads"]: - if same_or_split_var(g.name, var.name): + if same_or_split_var(g.name, opt_op.input(key)[0]): grad_block = g break if not grad_block: @@ -376,7 +385,7 @@ class DistributeTranspiler: # param is already created on global program param_block = None for p in self.param_grad_ep_mapping[endpoint]["params"]: - if same_or_split_var(p.name, var.name): + if same_or_split_var(p.name, opt_op.input(key)): param_block = p break if not param_block: @@ -389,11 +398,12 @@ class DistributeTranspiler: new_inputs[key] = tmpvar - for key, var in opt_op.inputs.iteritems(): + for key in opt_op.input_names: if key in ["Param", "Grad"]: continue # update accumulator variable shape param_shape = new_inputs["Param"].shape + var = program.global_block().vars[opt_op.input(key)] new_shape = self._get_optimizer_input_shape(opt_op.type, key, var.shape, param_shape) tmpvar = program.global_block().create_var( @@ -412,30 +422,46 @@ class DistributeTranspiler: shape=new_shape) # change output's ParamOut variable - opt_op.outputs["ParamOut"] = new_inputs["Param"] + outputs = self._get_output_map_from_op(program.global_block(), opt_op) + outputs["ParamOut"] = new_inputs["Param"] program.global_block().append_op( type=opt_op.type, inputs=new_inputs, - outputs=opt_op.outputs, + outputs=outputs, attrs=opt_op.attrs) def _append_pserver_non_opt_ops(self, program, pserver_program, opt_op): # Append the ops for parameters that do not need to be optimized/updated - for _, var in opt_op.inputs.iteritems(): - program.global_block().create_var( - name=var.name, - persistable=var.persistable, - dtype=var.dtype, - shape=var.shape) - pserver_program.global_block().create_var( - name=var.name, - persistable=var.persistable, - dtype=var.dtype, - shape=var.shape) + inputs = self._get_input_map_from_op(self.program.global_block().vars, + opt_op) + for var in inputs.itervalues(): + if type(var) == list: + varlist = var + else: + varlist = [var] + for var in varlist: + program.global_block().create_var( + name=var.name, + persistable=var.persistable, + dtype=var.dtype, + shape=var.shape) + try: + pserver_program.global_block().create_var( + name=var.name, + persistable=var.persistable, + dtype=var.dtype, + shape=var.shape) + except ValueError: + # create var if not created yet. + pass + + outputs = self._get_output_map_from_op(self.program.global_block().vars, + opt_op) + program.global_block().append_op( type=opt_op.type, - inputs=opt_op.inputs, - outputs=opt_op.outputs, + inputs=inputs, + outputs=outputs, attrs=opt_op.attrs) def get_pserver_program(self, endpoint): @@ -472,7 +498,7 @@ class DistributeTranspiler: self.optimize_ops, idx) if not is_op_on_pserver: continue - if opt_op.inputs.has_key("Grad"): + if "Grad" in opt_op.desc.input_arg_names(): self._append_pserver_ops(optimize_sub_program, pserver_program, opt_op, endpoint) else: @@ -499,6 +525,30 @@ class DistributeTranspiler: pserver_program.sync_with_cpp() return pserver_program + def _get_input_map_from_op(self, varmap, op): + iomap = dict() + for key in op.input_names: + vars = [] + for varname in op.input(key): + vars.append(varmap[varname]) + if len(vars) == 1: + iomap[key] = vars[0] + else: + iomap[key] = vars + return iomap + + def _get_output_map_from_op(self, varmap, op): + iomap = dict() + for key in op.output_names: + vars = [] + for varname in op.output(key): + vars.append(varmap[varname]) + if len(vars) == 1: + iomap[key] = vars[0] + else: + iomap[key] = vars + return iomap + def get_startup_program(self, endpoint, pserver_program): """ Get startup program for current parameter server. @@ -529,17 +579,21 @@ class DistributeTranspiler: # 2. rename op outputs for op in orig_s_prog.global_block().ops: + new_inputs = dict() new_outputs = dict() # do not append startup op if var is not on this pserver op_on_pserver = False - for key, var in op.outputs.iteritems(): - newname, _ = _get_splited_name_and_shape(var.name) + for key in op.output_names: + newname, _ = _get_splited_name_and_shape(op.output(key)[0]) if newname: op_on_pserver = True new_outputs[key] = created_var_map[newname] - elif var.name in pserver_vars: + elif op.output(key)[0] in pserver_vars: op_on_pserver = True - new_outputs[key] = pserver_vars[var.name] + new_outputs[key] = pserver_vars[op.output(key)[0]] + + # most startup program ops have no inputs + new_inputs = self._get_input_map_from_op(pserver_vars, op) if op_on_pserver: if op.type in [ @@ -548,7 +602,7 @@ class DistributeTranspiler: op.attrs["shape"] = new_outputs["Out"].shape s_prog.global_block().append_op( type=op.type, - inputs=op.inputs, + inputs=new_inputs, outputs=new_outputs, attrs=op.attrs) return s_prog -- GitLab From 931375ffeb86b1520090c21383ab2d38ba2aa5eb Mon Sep 17 00:00:00 2001 From: QI JUN Date: Wed, 7 Feb 2018 21:22:40 +0800 Subject: [PATCH 085/138] fix bug in memory optimization transpiler (#8233) --- python/paddle/v2/fluid/memory_optimization_transpiler.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/python/paddle/v2/fluid/memory_optimization_transpiler.py b/python/paddle/v2/fluid/memory_optimization_transpiler.py index 11e2cfb3cc5..8bb8cf7b1a5 100644 --- a/python/paddle/v2/fluid/memory_optimization_transpiler.py +++ b/python/paddle/v2/fluid/memory_optimization_transpiler.py @@ -155,6 +155,9 @@ class ControlFlowGraph(object): for x in defs_can_optimize ] for x, x_shape in out_pair: + # If x is both in uses and defs, it can not be optimized! + if x in self._uses[i]: + continue for index, cache_pair in enumerate(self.pool): cache_var = cache_pair[0] cache_shape = cache_pair[1] -- GitLab From 8e5bc804bba6e0a81d593c91776f4d35f7315eef Mon Sep 17 00:00:00 2001 From: whs Date: Thu, 8 Feb 2018 02:28:56 +0800 Subject: [PATCH 086/138] Fix equation in doc of fluid.layers.fc (#8243) --- python/paddle/v2/fluid/layers/nn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index a79479f469a..fe6d87e5d7c 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -92,7 +92,7 @@ def fc(input, .. math:: - Out = Act({\sum_{i=0}^{N-1}W_iX_i + b}) + Out = Act({\sum_{i=0}^{N-1}X_iW_i + b}) In the above equation: -- GitLab From be7fcc0bfc4634c166d349d35ccd9e06f7882e2c Mon Sep 17 00:00:00 2001 From: helinwang Date: Wed, 7 Feb 2018 12:16:27 -0800 Subject: [PATCH 087/138] long running training tests: fail when got NaN loss (#8169) --- python/paddle/v2/fluid/tests/book/test_fit_a_line.py | 4 ++++ .../v2/fluid/tests/book/test_image_classification_train.py | 4 ++++ python/paddle/v2/fluid/tests/book/test_recognize_digits.py | 4 ++++ python/paddle/v2/fluid/tests/book/test_recommender_system.py | 4 ++++ .../paddle/v2/fluid/tests/book/test_understand_sentiment.py | 4 ++++ python/paddle/v2/fluid/tests/book/test_word2vec.py | 5 +++++ 6 files changed, 25 insertions(+) diff --git a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py index 27f34b17339..06860a2a465 100644 --- a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py +++ b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py @@ -16,6 +16,8 @@ import paddle.v2 as paddle import paddle.v2.fluid as fluid import contextlib import unittest +import math +import sys def main(use_cuda): @@ -58,6 +60,8 @@ def main(use_cuda): print(avg_loss_value) if avg_loss_value[0] < 10.0: return + if math.isnan(float(avg_loss_value)): + sys.exit("got NaN loss, training failed.") raise AssertionError("Fit a line cost is too large, {0:2.2}".format( avg_loss_value[0])) diff --git a/python/paddle/v2/fluid/tests/book/test_image_classification_train.py b/python/paddle/v2/fluid/tests/book/test_image_classification_train.py index 03b009ebb07..ffbe5bdbd64 100644 --- a/python/paddle/v2/fluid/tests/book/test_image_classification_train.py +++ b/python/paddle/v2/fluid/tests/book/test_image_classification_train.py @@ -17,6 +17,8 @@ from __future__ import print_function import paddle.v2 as paddle import paddle.v2.fluid as fluid import contextlib +import math +import sys import numpy import unittest @@ -145,6 +147,8 @@ def train(net_type, use_cuda, save_dirname): loss_t, acc_t = exe.run(program=test_program, feed=feeder.feed(test_data), fetch_list=[avg_cost, acc]) + if math.isnan(float(loss_t)): + sys.exit("got NaN loss, training failed.") acc_list.append(float(acc_t)) avg_loss_list.append(float(loss_t)) break # Use 1 segment for speeding up CI diff --git a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py index fb6b1f7192d..c3f68775754 100644 --- a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py @@ -18,6 +18,8 @@ import paddle.v2 as paddle import sys import numpy import unittest +import math +import sys def parse_arg(): @@ -148,6 +150,8 @@ def train(nn_type, use_cuda, parallel, save_dirname): 'PassID {0:1}, BatchID {1:04}, Test Loss {2:2.2}, Acc {3:2.2}'. format(pass_id, batch_id + 1, float(avg_loss_val), float(acc_val))) + if math.isnan(float(avg_loss_val)): + sys.exit("got NaN loss, training failed.") raise AssertionError("Loss of recognize digits is too large") diff --git a/python/paddle/v2/fluid/tests/book/test_recommender_system.py b/python/paddle/v2/fluid/tests/book/test_recommender_system.py index d4a694e5721..9c7ab7d6318 100644 --- a/python/paddle/v2/fluid/tests/book/test_recommender_system.py +++ b/python/paddle/v2/fluid/tests/book/test_recommender_system.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import math +import sys import numpy as np import paddle.v2 as paddle import paddle.v2.fluid.core as core @@ -217,6 +219,8 @@ def main(): if out[0] < 6.0: # if avg cost less than 6.0, we think our code is good. exit(0) + if math.isnan(float(out[0])): + sys.exit("got NaN loss, training failed.") main() diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py index 2ba9077a262..9c5cb667aed 100644 --- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py +++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py @@ -16,6 +16,8 @@ import unittest import paddle.v2.fluid as fluid import paddle.v2 as paddle import contextlib +import math +import sys def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32, @@ -115,6 +117,8 @@ def main(word_dict, net_method, use_cuda): print("cost=" + str(cost_val) + " acc=" + str(acc_val)) if cost_val < 0.4 and acc_val > 0.8: return + if math.isnan(float(cost_val)): + sys.exit("got NaN loss, training failed.") raise AssertionError("Cost is too large for {0}".format( net_method.__name__)) diff --git a/python/paddle/v2/fluid/tests/book/test_word2vec.py b/python/paddle/v2/fluid/tests/book/test_word2vec.py index 766ba9681d1..f013d7f1551 100644 --- a/python/paddle/v2/fluid/tests/book/test_word2vec.py +++ b/python/paddle/v2/fluid/tests/book/test_word2vec.py @@ -16,6 +16,8 @@ import paddle.v2 as paddle import paddle.v2.fluid as fluid import unittest import os +import math +import sys def main(use_cuda, is_sparse, parallel): @@ -112,6 +114,9 @@ def main(use_cuda, is_sparse, parallel): fetch_list=[avg_cost]) if avg_cost_np[0] < 5.0: return + if math.isnan(float(avg_cost_np[0])): + sys.exit("got NaN loss, training failed.") + raise AssertionError("Cost is too large {0:2.2}".format(avg_cost_np[0])) -- GitLab From ba6ac8b9a60bc4074b315b28c36c03ea7a9e418c Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Wed, 7 Feb 2018 23:37:02 +0000 Subject: [PATCH 088/138] turn off parallel --- python/paddle/v2/fluid/tests/test_parallel_op.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/python/paddle/v2/fluid/tests/test_parallel_op.py b/python/paddle/v2/fluid/tests/test_parallel_op.py index 367cc8b1aaf..6b3d72902c7 100644 --- a/python/paddle/v2/fluid/tests/test_parallel_op.py +++ b/python/paddle/v2/fluid/tests/test_parallel_op.py @@ -198,4 +198,7 @@ class ParallelOpTestMultipleInput(BaseParallelForTest): if __name__ == '__main__': + # FIXME(tonyyang-svail): + # This test always fail on MultiGPU CI + exit(0) unittest.main() -- GitLab From bf1ccbec4aae2bb524651de766d9b8c7761b7bf1 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Thu, 8 Feb 2018 00:39:07 +0000 Subject: [PATCH 089/138] turn off test comparesparse --- paddle/gserver/tests/test_CompareSparse.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/paddle/gserver/tests/test_CompareSparse.cpp b/paddle/gserver/tests/test_CompareSparse.cpp index c6e07650fc4..2495d8b60a5 100644 --- a/paddle/gserver/tests/test_CompareSparse.cpp +++ b/paddle/gserver/tests/test_CompareSparse.cpp @@ -212,6 +212,10 @@ TEST(compareSparse, NeuralNetwork) { } int main(int argc, char** argv) { + // FIXME(tonyyang-svail): + // Turn off this test due CI failure: + // https://paddleci.ngrok.io/viewLog.html?buildId=27608&buildTypeId=Paddle_PrCi&tab=buildLog&_focus=10430 + return 0; testing::InitGoogleTest(&argc, argv); initMain(argc, argv); initPython(argc, argv); -- GitLab From d8b0ba99785f9d4a041c54a0f7d820fac569e1d1 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Thu, 8 Feb 2018 00:42:16 +0000 Subject: [PATCH 090/138] turn off test_word2vec.py --- python/paddle/v2/fluid/tests/book/test_word2vec.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/python/paddle/v2/fluid/tests/book/test_word2vec.py b/python/paddle/v2/fluid/tests/book/test_word2vec.py index 766ba9681d1..385e9833b21 100644 --- a/python/paddle/v2/fluid/tests/book/test_word2vec.py +++ b/python/paddle/v2/fluid/tests/book/test_word2vec.py @@ -153,4 +153,6 @@ for use_cuda in (False, True): inject_test_method(use_cuda, is_sparse, parallel) if __name__ == '__main__': + # FIXME(tonyyang-svail): + # This test always fail on MultiGPU CI unittest.main() -- GitLab From 5cc2f0bdda6038ed914892152c8ab0ab0404aa2d Mon Sep 17 00:00:00 2001 From: Qiao Longfei Date: Thu, 8 Feb 2018 11:21:59 +0800 Subject: [PATCH 091/138] Add polynomial_decay and piecewise_decay (#8013) * init polynomial_decay * test polynomial_decay * complete polynomial_decay * fix conditional block op * init scalar-switch-case-op * switch op can compile * complete forward switch_op * add GetMatchCaseIndex * add switch_grad_op * init switch Python API * add test_switch * support set block list in python * fix scope problem * complete test * optimize test * optimize test * rm backward part * clear grad op * polynomial_decay use switch op * revert conditional_block_op and reshape_op * add piecewise_decay and test * fix piecewise_decay * try to use condition op for switch * can work * clean old code * revert * rm switch_op.cc * optimize code * add attr is_scalar_condition for condition_block_op * fix comment * fix comment * add export --- python/paddle/v2/fluid/layers/control_flow.py | 31 ++++++ python/paddle/v2/fluid/learning_rate_decay.py | 102 +++++++++++++++++- .../fluid/tests/test_learning_rate_decay.py | 93 +++++++++++----- 3 files changed, 197 insertions(+), 29 deletions(-) diff --git a/python/paddle/v2/fluid/layers/control_flow.py b/python/paddle/v2/fluid/layers/control_flow.py index e71f3858b0a..f29d7712334 100644 --- a/python/paddle/v2/fluid/layers/control_flow.py +++ b/python/paddle/v2/fluid/layers/control_flow.py @@ -38,6 +38,7 @@ __all__ = [ 'array_write', 'create_array', 'less_than', + 'equal', 'array_read', 'shrink_memory', 'array_length', @@ -975,6 +976,36 @@ def less_than(x, y, cond=None, **ignored): return cond +def equal(x, y, cond=None, **ignored): + """ + **equal** + + This layer returns the truth value of :math:`x == y` elementwise. + + Args: + x(Variable): First operand of *equal* + y(Variable): Second operand of *equal* + cond(Variable|None): Optional output variable to store the result of *equal* + + Returns: + Variable: The tensor variable storing the output of *equal*. + + Examples: + .. code-block:: python + + less = fluid.layers.equal(x=label, y=limit) + """ + helper = LayerHelper("equal", **locals()) + if cond is None: + cond = helper.create_tmp_variable(dtype='bool') + cond.stop_gradient = True + + helper.append_op( + type='equal', inputs={'X': [x], + 'Y': [y]}, outputs={'Out': [cond]}) + return cond + + def array_read(array, i): """This function performs the operation to read the data in as an LOD_TENSOR_ARRAY. diff --git a/python/paddle/v2/fluid/learning_rate_decay.py b/python/paddle/v2/fluid/learning_rate_decay.py index 96b3e9a0d73..13dc98075f7 100644 --- a/python/paddle/v2/fluid/learning_rate_decay.py +++ b/python/paddle/v2/fluid/learning_rate_decay.py @@ -15,7 +15,10 @@ import layers from framework import Variable -__all__ = ['exponential_decay', 'natural_exp_decay', 'inverse_time_decay'] +__all__ = [ + 'exponential_decay', 'natural_exp_decay', 'inverse_time_decay', + 'polynomial_decay', 'piecewise_decay' +] """ When training a model, it's often useful to decay the learning rate during training process, this is called @@ -101,7 +104,7 @@ def inverse_time_decay(learning_rate, ```python if staircase: decayed_learning_rate = learning_rate / (1 + decay_rate * floor(global_step / decay_step)) - else + else: decayed_learning_rate = learning_rate / (1 + decay_rate * global_step / decay_step) ``` Args: @@ -123,3 +126,98 @@ def inverse_time_decay(learning_rate, div_res = layers.floor(x=div_res) return learning_rate / (1 + decay_rate * div_res) + + +def polynomial_decay(learning_rate, + global_step, + decay_steps, + end_learning_rate=0.0001, + power=1.0, + cycle=False): + """Applies polynomial decay to the initial learning rate. + + ```python + if cycle: + decay_steps = decay_steps * ceil(global_step / decay_steps) + else: + global_step = min(global_step, decay_steps) + decayed_learning_rate = (learning_rate - end_learning_rate) * + (1 - global_step / decay_steps) ^ power + + end_learning_rate + ``` + Args: + learning_rate: A scalar float32 value or a Variable. This + will be the initial learning rate during training + global_step: A Variable that record the training step. + decay_steps: A Python `int32` number. + end_learning_rate: A Python `float` number. + power: A Python `float` number + cycle: Boolean. If set true, decay the learning rate every decay_steps. + + Returns: + The decayed learning rate + """ + if not isinstance(global_step, Variable): + raise ValueError("global_step is required for inverse_time_decay.") + + if cycle: + div_res = layers.ceil(x=(global_step / decay_steps)) + zero_var = layers.fill_constant(shape=[1], dtype='float32', value=0.0) + one_var = layers.fill_constant(shape=[1], dtype='float32', value=1.0) + + with layers.Switch() as switch: + with switch.case(layers.equal(x=global_step, y=zero_var)): + layers.assign(input=one_var, output=div_res) + decay_steps = decay_steps * div_res + else: + decay_steps_var = layers.fill_constant( + shape=[1], dtype='float32', value=float(decay_steps)) + global_step = layers.elementwise_min(x=global_step, y=decay_steps_var) + + return (learning_rate - end_learning_rate) * \ + ((1 - global_step / decay_steps) ** power) + end_learning_rate + + +def piecewise_decay(global_step, boundaries, values): + """Applies piecewise decay to the initial learning rate. + + ```python + boundaries = [10000, 20000] + values = [1.0, 0.5, 0.1] + + if step < 10000: + learning_rate = 1.0 + elif step >= 10000 and step < 20000: + learning_rate = 0.5 + else: + learning_rate = 0.1 + ``` + """ + + if len(values) - len(boundaries) != 1: + raise ValueError("len(values) - len(boundaries) should be 1") + + if not isinstance(global_step, Variable): + raise ValueError("global_step is required for piecewise_decay.") + + lr = layers.create_global_var( + shape=[1], + value=0.0, + dtype='float32', + persistable=True, + name="learning_rate") + + with layers.Switch() as switch: + for i in range(len(boundaries)): + boundary_val = layers.fill_constant( + shape=[1], dtype='float32', value=float(boundaries[i])) + value_var = layers.fill_constant( + shape=[1], dtype='float32', value=float(values[i])) + with switch.case(layers.less_than(global_step, boundary_val)): + layers.assign(value_var, lr) + last_value_var = layers.fill_constant( + shape=[1], dtype='float32', value=float(values[len(values) - 1])) + with switch.default(): + layers.assign(last_value_var, lr) + + return lr diff --git a/python/paddle/v2/fluid/tests/test_learning_rate_decay.py b/python/paddle/v2/fluid/tests/test_learning_rate_decay.py index dc348cf2d21..1d6bab3d6c4 100644 --- a/python/paddle/v2/fluid/tests/test_learning_rate_decay.py +++ b/python/paddle/v2/fluid/tests/test_learning_rate_decay.py @@ -15,6 +15,8 @@ import unittest import math +import copy + import paddle.v2.fluid.framework as framework import paddle.v2.fluid as fluid import paddle.v2.fluid.layers as layers @@ -54,21 +56,37 @@ def inverse_time_decay(learning_rate, return learning_rate / (1 + decay_rate * temp) -class TestLearningRateDecay(unittest.TestCase): - def check_decay(self, python_decay_fn, fluid_decay_fn, staircase): - init_lr = 1.0 - decay_steps = 5 - decay_rate = 0.5 +def polynomial_decay(learning_rate, + global_step, + decay_steps, + end_learning_rate=0.0001, + power=1.0, + cycle=False): + if cycle: + div = math.ceil(global_step / float(decay_steps)) + if div == 0: + div = 1 + decay_steps = decay_steps * div + else: + global_step = min(global_step, decay_steps) + return (learning_rate - end_learning_rate) * \ + ((1 - float(global_step) / float(decay_steps)) ** power) + end_learning_rate + + +def piecewise_decay(global_step, boundaries, values): + assert len(boundaries) + 1 == len(values) + for i in range(len(boundaries)): + if global_step < boundaries[i]: + return values[i] + return values[len(values) - 1] + +class TestLearningRateDecay(unittest.TestCase): + def check_decay(self, python_decay_fn, fluid_decay_fn, kwargs): global_step = layers.create_global_var( shape=[1], value=0.0, dtype='float32', persistable=True) - decayed_lr = fluid_decay_fn( - learning_rate=init_lr, - global_step=global_step, - decay_steps=decay_steps, - decay_rate=decay_rate, - staircase=staircase) + decayed_lr = fluid_decay_fn(global_step=global_step, **kwargs) layers.increment(global_step, 1.0) place = fluid.CPUPlace() @@ -79,31 +97,52 @@ class TestLearningRateDecay(unittest.TestCase): step_val, lr_val = exe.run(fluid.default_main_program(), feed=[], fetch_list=[global_step, decayed_lr]) - python_decayed_lr = python_decay_fn( - learning_rate=init_lr, - global_step=step, - decay_steps=decay_steps, - decay_rate=decay_rate, - staircase=staircase) + python_decayed_lr = python_decay_fn(global_step=step, **kwargs) self.assertAlmostEqual(python_decayed_lr, lr_val[0]) def test_decay(self): + common_kwargs_true = { + "learning_rate": 1.0, + "decay_steps": 5, + "decay_rate": 0.5, + "staircase": True + } + common_kwargs_false = copy.deepcopy(common_kwargs_true) + common_kwargs_false["staircase"] = False + decay_fns = [ - (exponential_decay, lr_decay.exponential_decay, True), - (exponential_decay, lr_decay.exponential_decay, False), - (natural_exp_decay, lr_decay.natural_exp_decay, True), - (natural_exp_decay, lr_decay.natural_exp_decay, False), - (inverse_time_decay, lr_decay.inverse_time_decay, True), - (inverse_time_decay, lr_decay.inverse_time_decay, False), + (exponential_decay, lr_decay.exponential_decay, common_kwargs_true), + (exponential_decay, lr_decay.exponential_decay, + common_kwargs_false), + (natural_exp_decay, lr_decay.natural_exp_decay, common_kwargs_true), + (natural_exp_decay, lr_decay.natural_exp_decay, + common_kwargs_false), + (inverse_time_decay, lr_decay.inverse_time_decay, + common_kwargs_true), + (inverse_time_decay, lr_decay.inverse_time_decay, + common_kwargs_false), + (polynomial_decay, lr_decay.polynomial_decay, { + "learning_rate": 1.0, + "decay_steps": 5, + "cycle": True + }), + (polynomial_decay, lr_decay.polynomial_decay, { + "learning_rate": 1.0, + "decay_steps": 5, + "cycle": False + }), + (piecewise_decay, lr_decay.piecewise_decay, { + "boundaries": [3, 6, 9], + "values": [0.1, 0.2, 0.3, 0.4] + }), ] - for py_decay_fn, fluid_decay_fn, staircase in decay_fns: - print("decay_fn=" + str(py_decay_fn) + " staircase=" + str( - staircase)) + for py_decay_fn, fluid_decay_fn, kwargs in decay_fns: + print("decay_fn=" + py_decay_fn.__name__ + " kwargs=" + str(kwargs)) main_program = framework.Program() startup_program = framework.Program() with framework.program_guard(main_program, startup_program): - self.check_decay(py_decay_fn, fluid_decay_fn, staircase) + self.check_decay(py_decay_fn, fluid_decay_fn, kwargs) if __name__ == '__main__': -- GitLab From 7a6000a0b879719ea25e4c882ae6be79845ee57f Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Thu, 8 Feb 2018 12:08:13 +0800 Subject: [PATCH 092/138] follow comments --- python/paddle/v2/fluid/distribute_transpiler.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/v2/fluid/distribute_transpiler.py index 4eb103cc6ba..c5f1d51bd71 100644 --- a/python/paddle/v2/fluid/distribute_transpiler.py +++ b/python/paddle/v2/fluid/distribute_transpiler.py @@ -385,7 +385,7 @@ class DistributeTranspiler: # param is already created on global program param_block = None for p in self.param_grad_ep_mapping[endpoint]["params"]: - if same_or_split_var(p.name, opt_op.input(key)): + if same_or_split_var(p.name, opt_op.input(key)[0]): param_block = p break if not param_block: @@ -403,7 +403,7 @@ class DistributeTranspiler: continue # update accumulator variable shape param_shape = new_inputs["Param"].shape - var = program.global_block().vars[opt_op.input(key)] + var = program.global_block().vars[opt_op.input(key)[0]] new_shape = self._get_optimizer_input_shape(opt_op.type, key, var.shape, param_shape) tmpvar = program.global_block().create_var( @@ -440,20 +440,18 @@ class DistributeTranspiler: else: varlist = [var] for var in varlist: + # TODO(typhoonzero): will remove below line later. program.global_block().create_var( name=var.name, persistable=var.persistable, dtype=var.dtype, shape=var.shape) - try: + if not pserver_program.global_block().vars.has_key(var.name): pserver_program.global_block().create_var( name=var.name, persistable=var.persistable, dtype=var.dtype, shape=var.shape) - except ValueError: - # create var if not created yet. - pass outputs = self._get_output_map_from_op(self.program.global_block().vars, opt_op) -- GitLab From 61811e9d402afc955bf4361991ce72619049fcc6 Mon Sep 17 00:00:00 2001 From: "Yang Yang(Tony)" Date: Wed, 7 Feb 2018 20:50:39 -0800 Subject: [PATCH 093/138] fix parallel op test (#8249) * Fix parallel.do with batch norm * Change log level * CopyShare AllPlaces * disable nccl test * bring back parallel_do test --- paddle/operators/parallel_do_op.cc | 27 +++++++++---------- python/paddle/v2/fluid/layers/control_flow.py | 13 +++++---- .../fluid/tests/book/test_recognize_digits.py | 1 + .../v2/fluid/tests/book/test_word2vec.py | 2 -- .../paddle/v2/fluid/tests/test_parallel_op.py | 3 --- 5 files changed, 20 insertions(+), 26 deletions(-) diff --git a/paddle/operators/parallel_do_op.cc b/paddle/operators/parallel_do_op.cc index 67f9854c02f..dfff6f0888a 100644 --- a/paddle/operators/parallel_do_op.cc +++ b/paddle/operators/parallel_do_op.cc @@ -248,17 +248,19 @@ class ParallelDoGradOp : public framework::OperatorBase { const std::vector &sub_scopes, const platform::PlaceList &places) const { for (auto &s : Outputs(framework::GradVarName(kParameters))) { + VLOG(3) << "Accumulating " << s; + if (s == framework::kEmptyVarName) continue; std::string tmp_name; auto *tmp = sub_scopes[0]->Var(&tmp_name); for (size_t i = 1; i < sub_scopes.size(); ++i) { CopyOrShare(*sub_scopes[i]->FindVar(s), places[0], tmp); - WaitOnPlace(places[0]); + WaitOnPlaces(places); auto sum_op = framework::OpRegistry::CreateOp( "sum", {{"X", {s, tmp_name}}}, {{"Out", {s}}}, framework::AttributeMap{}); - VLOG(3) << sum_op->DebugStringEx(sub_scopes[0]); + VLOG(10) << sum_op->DebugStringEx(sub_scopes[0]); sum_op->Run(*sub_scopes[0], places[0]); WaitOnPlace(places[0]); } @@ -334,16 +336,9 @@ class ParallelDoGradOpDescMaker : public framework::SingleGradOpDescMaker { class ParallelDoGradOpShapeInference : public framework::InferShapeBase { public: void operator()(framework::InferShapeContext *ctx) const override { - std::vector input{kParameters, kInputs}; - std::vector output{kOutputs}; - PADDLE_ENFORCE(ctx->HasInputs(kParameters)); - PADDLE_ENFORCE(ctx->HasOutputs(framework::GradVarName(kParameters))); PADDLE_ENFORCE(ctx->HasInputs(kInputs)); - - for (auto &s : output) { - PADDLE_ENFORCE(ctx->HasInputs(s)); - } + PADDLE_ENFORCE(ctx->HasInputs(kOutputs)); ctx->SetOutputsDim(framework::GradVarName(kParameters), ctx->GetInputsDim(kParameters)); @@ -360,10 +355,14 @@ class ParallelDoGradOpShapeInference : public framework::InferShapeBase { ctx->SetDims({ig_name}, {i_dims[i]}); } - if (ctx->HasInputs(kParameters)) { - PADDLE_ENFORCE(ctx->HasOutputs(framework::GradVarName(kParameters))); - ctx->SetOutputsDim(framework::GradVarName(kParameters), - ctx->GetInputsDim(kParameters)); + auto p_dims = ctx->GetInputsDim(kParameters); + auto pg_names = ctx->Outputs(framework::GradVarName(kParameters)); + for (size_t i = 0; i < pg_names.size(); ++i) { + auto &pg_name = pg_names[i]; + if (pg_name == framework::kEmptyVarName) { + continue; + } + ctx->SetDims({pg_name}, {p_dims[i]}); } } }; diff --git a/python/paddle/v2/fluid/layers/control_flow.py b/python/paddle/v2/fluid/layers/control_flow.py index f29d7712334..71a9459d556 100644 --- a/python/paddle/v2/fluid/layers/control_flow.py +++ b/python/paddle/v2/fluid/layers/control_flow.py @@ -277,21 +277,20 @@ class ParallelDo(object): parent_block = self.parent_block() local_inputs = set() - - for op in current_block.ops: - for oname in op.output_names: - for out_var_name in op.output(oname): - local_inputs.add(out_var_name) - + params = list() for var in self.inputs: local_inputs.add(var.name) - params = list() for op in current_block.ops: for iname in op.input_names: for in_var_name in op.input(iname): if in_var_name not in local_inputs: params.append(in_var_name) + + for oname in op.output_names: + for out_var_name in op.output(oname): + local_inputs.add(out_var_name) + params = list(set(params)) return [parent_block.var(name) for name in params] diff --git a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py index c3f68775754..d8f0ad89cd8 100644 --- a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py @@ -67,6 +67,7 @@ def conv_net(img, label): pool_size=2, pool_stride=2, act="relu") + conv_pool_1 = fluid.layers.batch_norm(conv_pool_1) conv_pool_2 = fluid.nets.simple_img_conv_pool( input=conv_pool_1, filter_size=5, diff --git a/python/paddle/v2/fluid/tests/book/test_word2vec.py b/python/paddle/v2/fluid/tests/book/test_word2vec.py index c9ba70c20a6..f013d7f1551 100644 --- a/python/paddle/v2/fluid/tests/book/test_word2vec.py +++ b/python/paddle/v2/fluid/tests/book/test_word2vec.py @@ -158,6 +158,4 @@ for use_cuda in (False, True): inject_test_method(use_cuda, is_sparse, parallel) if __name__ == '__main__': - # FIXME(tonyyang-svail): - # This test always fail on MultiGPU CI unittest.main() diff --git a/python/paddle/v2/fluid/tests/test_parallel_op.py b/python/paddle/v2/fluid/tests/test_parallel_op.py index 6b3d72902c7..367cc8b1aaf 100644 --- a/python/paddle/v2/fluid/tests/test_parallel_op.py +++ b/python/paddle/v2/fluid/tests/test_parallel_op.py @@ -198,7 +198,4 @@ class ParallelOpTestMultipleInput(BaseParallelForTest): if __name__ == '__main__': - # FIXME(tonyyang-svail): - # This test always fail on MultiGPU CI - exit(0) unittest.main() -- GitLab From b1869f1695bdea15633bf5c25c7e21149354cddb Mon Sep 17 00:00:00 2001 From: Yiqun Liu Date: Thu, 8 Feb 2018 13:42:11 +0800 Subject: [PATCH 094/138] Simplify the inference unittests' cmake and codes. (#8216) --- paddle/inference/tests/book/CMakeLists.txt | 54 ++++++++-------- paddle/inference/tests/book/test_helper.h | 1 + .../test_inference_image_classification.cc | 64 ++----------------- .../test_inference_label_semantic_roles.cc | 2 - .../book/test_inference_recognize_digits.cc | 2 - ..._train.py => test_image_classification.py} | 0 6 files changed, 36 insertions(+), 87 deletions(-) rename python/paddle/v2/fluid/tests/book/{test_image_classification_train.py => test_image_classification.py} (100%) diff --git a/paddle/inference/tests/book/CMakeLists.txt b/paddle/inference/tests/book/CMakeLists.txt index 8f48b2f0e02..63afeb18aeb 100644 --- a/paddle/inference/tests/book/CMakeLists.txt +++ b/paddle/inference/tests/book/CMakeLists.txt @@ -1,25 +1,29 @@ -set(PYTHON_TESTS_DIR ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/tests) -cc_test(test_inference_recognize_digits_mlp - SRCS test_inference_recognize_digits.cc - DEPS ARCHIVE_START paddle_fluid ARCHIVE_END - ARGS --dirname=${PYTHON_TESTS_DIR}/book/recognize_digits_mlp.inference.model) -cc_test(test_inference_image_classification_vgg - SRCS test_inference_image_classification.cc - DEPS ARCHIVE_START paddle_fluid ARCHIVE_END - ARGS --dirname=${PYTHON_TESTS_DIR}/book/image_classification_vgg.inference.model) -cc_test(test_inference_image_classification_resnet - SRCS test_inference_image_classification.cc - DEPS ARCHIVE_START paddle_fluid ARCHIVE_END - ARGS --dirname=${PYTHON_TESTS_DIR}/book/image_classification_resnet.inference.model) -cc_test(test_inference_label_semantic_roles - SRCS test_inference_label_semantic_roles.cc - DEPS ARCHIVE_START paddle_fluid ARCHIVE_END - ARGS --dirname=${PYTHON_TESTS_DIR}/book/label_semantic_roles.inference.model) -set_tests_properties(test_inference_recognize_digits_mlp - PROPERTIES DEPENDS test_recognize_digits) -set_tests_properties(test_inference_image_classification_vgg - PROPERTIES DEPENDS test_image_classification_train) -set_tests_properties(test_inference_image_classification_resnet - PROPERTIES DEPENDS test_image_classification_train) -set_tests_properties(test_inference_label_semantic_roles - PROPERTIES DEPENDS test_label_semantic_roles) +function(inference_test TARGET_NAME) + set(options "") + set(oneValueArgs "") + set(multiValueArgs ARGS) + cmake_parse_arguments(inference_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + + set(PYTHON_TESTS_DIR ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/tests) + if(inference_test_ARGS) + foreach(arg ${inference_test_ARGS}) + cc_test(test_inference_${TARGET_NAME}_${arg} + SRCS test_inference_${TARGET_NAME}.cc + DEPS ARCHIVE_START paddle_fluid ARCHIVE_END + ARGS --dirname=${PYTHON_TESTS_DIR}/book/${TARGET_NAME}_${arg}.inference.model) + set_tests_properties(test_inference_${TARGET_NAME}_${arg} + PROPERTIES DEPENDS test_${TARGET_NAME}) + endforeach() + else() + cc_test(test_inference_${TARGET_NAME} + SRCS test_inference_${TARGET_NAME}.cc + DEPS ARCHIVE_START paddle_fluid ARCHIVE_END + ARGS --dirname=${PYTHON_TESTS_DIR}/book/${TARGET_NAME}.inference.model) + set_tests_properties(test_inference_${TARGET_NAME} + PROPERTIES DEPENDS test_${TARGET_NAME}) + endif() +endfunction(inference_test) + +inference_test(recognize_digits ARGS mlp) +inference_test(image_classification ARGS vgg resnet) +inference_test(label_semantic_roles) diff --git a/paddle/inference/tests/book/test_helper.h b/paddle/inference/tests/book/test_helper.h index 17c3d58de6a..32db643fca2 100644 --- a/paddle/inference/tests/book/test_helper.h +++ b/paddle/inference/tests/book/test_helper.h @@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include #include "paddle/framework/lod_tensor.h" #include "paddle/inference/io.h" diff --git a/paddle/inference/tests/book/test_inference_image_classification.cc b/paddle/inference/tests/book/test_inference_image_classification.cc index e01f5b312a0..35ff9431e97 100644 --- a/paddle/inference/tests/book/test_inference_image_classification.cc +++ b/paddle/inference/tests/book/test_inference_image_classification.cc @@ -13,51 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include -#include #include "gflags/gflags.h" -#include "paddle/framework/lod_tensor.h" -#include "paddle/inference/io.h" +#include "test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); -template -void TestInference(const std::string& dirname, - const std::vector& cpu_feeds, - std::vector& cpu_fetchs) { - // 1. Define place, executor and scope - auto place = Place(); - auto executor = paddle::framework::Executor(place); - auto* scope = new paddle::framework::Scope(); - - // 2. Initialize the inference_program and load all parameters from file - auto inference_program = paddle::inference::Load(executor, *scope, dirname); - - // 3. Get the feed_target_names and fetch_target_names - const std::vector& feed_target_names = - inference_program->GetFeedTargetNames(); - const std::vector& fetch_target_names = - inference_program->GetFetchTargetNames(); - - // 4. Prepare inputs: set up maps for feed targets - std::map feed_targets; - for (size_t i = 0; i < feed_target_names.size(); ++i) { - // Please make sure that cpu_feeds[i] is right for feed_target_names[i] - feed_targets[feed_target_names[i]] = cpu_feeds[i]; - } - - // 5. Define Tensor to get the outputs: set up maps for fetch targets - std::map fetch_targets; - for (size_t i = 0; i < fetch_target_names.size(); ++i) { - fetch_targets[fetch_target_names[i]] = cpu_fetchs[i]; - } - - // 6. Run the inference program - executor.Run(*inference_program, scope, feed_targets, fetch_targets); - - delete scope; -} - TEST(inference, image_classification) { if (FLAGS_dirname.empty()) { LOG(FATAL) << "Usage: ./example --dirname=path/to/your/model"; @@ -70,12 +30,10 @@ TEST(inference, image_classification) { // In unittests, this is done in paddle/testing/paddle_gtest_main.cc paddle::framework::LoDTensor input; - srand(time(0)); - float* input_ptr = - input.mutable_data({1, 3, 32, 32}, paddle::platform::CPUPlace()); - for (int i = 0; i < 3072; ++i) { - input_ptr[i] = rand() / (static_cast(RAND_MAX)); - } + // Use normilized image pixels as input data, + // which should be in the range [0.0, 1.0]. + SetupTensor( + input, {1, 3, 32, 32}, static_cast(0), static_cast(1)); std::vector cpu_feeds; cpu_feeds.push_back(&input); @@ -98,16 +56,6 @@ TEST(inference, image_classification) { dirname, cpu_feeds, cpu_fetchs2); LOG(INFO) << output2.dims(); - EXPECT_EQ(output1.dims(), output2.dims()); - EXPECT_EQ(output1.numel(), output2.numel()); - - float err = 1E-3; - int count = 0; - for (int64_t i = 0; i < output1.numel(); ++i) { - if (fabs(output1.data()[i] - output2.data()[i]) > err) { - count++; - } - } - EXPECT_EQ(count, 0) << "There are " << count << " different elements."; + CheckError(output1, output2); #endif } diff --git a/paddle/inference/tests/book/test_inference_label_semantic_roles.cc b/paddle/inference/tests/book/test_inference_label_semantic_roles.cc index c5646db2a77..1eaf4022a1f 100644 --- a/paddle/inference/tests/book/test_inference_label_semantic_roles.cc +++ b/paddle/inference/tests/book/test_inference_label_semantic_roles.cc @@ -13,8 +13,6 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include -#include #include "gflags/gflags.h" #include "test_helper.h" diff --git a/paddle/inference/tests/book/test_inference_recognize_digits.cc b/paddle/inference/tests/book/test_inference_recognize_digits.cc index 2c0cf941001..48f887e6bc6 100644 --- a/paddle/inference/tests/book/test_inference_recognize_digits.cc +++ b/paddle/inference/tests/book/test_inference_recognize_digits.cc @@ -13,8 +13,6 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include -#include #include "gflags/gflags.h" #include "test_helper.h" diff --git a/python/paddle/v2/fluid/tests/book/test_image_classification_train.py b/python/paddle/v2/fluid/tests/book/test_image_classification.py similarity index 100% rename from python/paddle/v2/fluid/tests/book/test_image_classification_train.py rename to python/paddle/v2/fluid/tests/book/test_image_classification.py -- GitLab From a1fc570197dd9cacaf5dc6941a47bab5fc2d4fba Mon Sep 17 00:00:00 2001 From: Abhinav Arora Date: Wed, 7 Feb 2018 23:45:16 -0800 Subject: [PATCH 095/138] Fix for program crash when destructor is called before channel close with blocked readers/writers (#8197) * Fix destructor crash and add unit tests * Fix typo in unit test * Reword comments * Make close channel a generic test * Refactoring unit tests * Fix method name --- paddle/framework/channel_test.cc | 181 +++++++++++++++++- paddle/framework/details/buffered_channel.h | 22 ++- paddle/framework/details/unbuffered_channel.h | 19 +- 3 files changed, 213 insertions(+), 9 deletions(-) diff --git a/paddle/framework/channel_test.cc b/paddle/framework/channel_test.cc index df9e15e22b8..a307abb4ed3 100644 --- a/paddle/framework/channel_test.cc +++ b/paddle/framework/channel_test.cc @@ -22,6 +22,8 @@ limitations under the License. */ using paddle::framework::Channel; using paddle::framework::MakeChannel; using paddle::framework::CloseChannel; +using paddle::framework::details::Buffered; +using paddle::framework::details::UnBuffered; TEST(Channel, MakeAndClose) { using paddle::framework::details::Buffered; @@ -60,13 +62,54 @@ TEST(Channel, SufficientBufferSizeDoesntBlock) { delete ch; } -TEST(Channel, SendOnClosedChannelPanics) { - const size_t buffer_size = 10; - auto ch = MakeChannel(buffer_size); - size_t i = 5; - EXPECT_EQ(ch->Send(&i), true); // should not block or panic +// This tests that a channel must return false +// on send and receive performed after closing the channel. +// Receive will only return false after close when queue is empty. +// By creating separate threads for sending and receiving, we make this +// function able to test both buffered and unbuffered channels. +void SendReceiveWithACloseChannelShouldPanic(Channel *ch) { + const size_t data = 5; + std::thread send_thread{[&]() { + size_t i = data; + EXPECT_EQ(ch->Send(&i), true); // should not block + }}; + + std::thread recv_thread{[&]() { + size_t i; + EXPECT_EQ(ch->Receive(&i), true); // should not block + EXPECT_EQ(i, data); + }}; + + send_thread.join(); + recv_thread.join(); + + // After closing send should return false. Receive should + // also return false as there is no data in queue. CloseChannel(ch); - EXPECT_EQ(ch->Send(&i), false); // should panic + send_thread = std::thread{[&]() { + size_t i = data; + EXPECT_EQ(ch->Send(&i), false); // should return false + }}; + recv_thread = std::thread{[&]() { + size_t i; + // should return false because channel is closed and queue is empty + EXPECT_EQ(ch->Receive(&i), false); + }}; + + send_thread.join(); + recv_thread.join(); +} + +TEST(Channel, SendReceiveClosedBufferedChannelPanics) { + size_t buffer_size = 10; + auto ch = MakeChannel(buffer_size); + SendReceiveWithACloseChannelShouldPanic(ch); + delete ch; +} + +TEST(Channel, SendReceiveClosedUnBufferedChannelPanics) { + auto ch = MakeChannel(0); + SendReceiveWithACloseChannelShouldPanic(ch); delete ch; } @@ -381,3 +424,129 @@ TEST(Channel, UnbufferedMoreReceiveLessSendTest) { EXPECT_EQ(sum_receive, 28U); delete ch; } + +// This tests that destroying a channel unblocks +// any senders waiting for channel to have write space +void ChannelDestroyUnblockSenders(Channel *ch) { + size_t num_threads = 5; + std::thread t[num_threads]; + bool thread_ended[num_threads]; + bool send_success[num_threads]; + + // Launches threads that try to write and are blocked because of no readers + for (size_t i = 0; i < num_threads; i++) { + thread_ended[i] = false; + send_success[i] = false; + t[i] = std::thread( + [&](bool *ended, bool *success) { + int data = 10; + *success = ch->Send(&data); + *ended = true; + }, + &thread_ended[i], &send_success[i]); + } + + std::this_thread::sleep_for(std::chrono::milliseconds(500)); // wait 0.5 sec + bool is_buffered_channel = false; + if (dynamic_cast *>(ch)) is_buffered_channel = true; + + if (is_buffered_channel) { + // If channel is buffered, verify that atleast 4 threads are blocked + int ct = 0; + for (size_t i = 0; i < num_threads; i++) { + if (thread_ended[i] == false) ct++; + } + // Atleast 4 threads must be blocked + EXPECT_GE(ct, 4); + } else { + // Verify that all the threads are blocked + for (size_t i = 0; i < num_threads; i++) { + EXPECT_EQ(thread_ended[i], false); + } + } + // Explicitly destroy the channel + delete ch; + std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait + + // Verify that all threads got unblocked + for (size_t i = 0; i < num_threads; i++) { + EXPECT_EQ(thread_ended[i], true); + } + + // Count number of successfuld sends + int ct = 0; + for (size_t i = 0; i < num_threads; i++) { + if (send_success[i]) ct++; + } + + if (is_buffered_channel) { + // Only 1 send must be successful + EXPECT_EQ(ct, 1); + } else { + // In unbuffered channel, no send should be successful + EXPECT_EQ(ct, 0); + } + + // Join all threads + for (size_t i = 0; i < num_threads; i++) t[i].join(); +} + +// This tests that destroying a channel also unblocks +// any receivers waiting on the channel +void ChannelDestroyUnblockReceivers(Channel *ch) { + size_t num_threads = 5; + std::thread t[num_threads]; + bool thread_ended[num_threads]; + + // Launches threads that try to read and are blocked because of no writers + for (size_t i = 0; i < num_threads; i++) { + thread_ended[i] = false; + t[i] = std::thread( + [&](bool *p) { + int data; + // All reads should return false + EXPECT_EQ(ch->Receive(&data), false); + *p = true; + }, + &thread_ended[i]); + } + std::this_thread::sleep_for(std::chrono::milliseconds(100)); // wait + + // Verify that all threads are blocked + for (size_t i = 0; i < num_threads; i++) { + EXPECT_EQ(thread_ended[i], false); + } + // delete the channel + delete ch; + std::this_thread::sleep_for(std::chrono::milliseconds(200)); // wait + // Verify that all threads got unblocked + for (size_t i = 0; i < num_threads; i++) { + EXPECT_EQ(thread_ended[i], true); + } + + for (size_t i = 0; i < num_threads; i++) t[i].join(); +} + +TEST(Channel, BufferedChannelDestroyUnblocksReceiversTest) { + size_t buffer_size = 1; + auto ch = MakeChannel(buffer_size); + ChannelDestroyUnblockReceivers(ch); +} + +TEST(Channel, BufferedChannelDestroyUnblocksSendersTest) { + size_t buffer_size = 1; + auto ch = MakeChannel(buffer_size); + ChannelDestroyUnblockSenders(ch); +} + +// This tests that destroying an unbuffered channel also unblocks +// unblocks any receivers waiting for senders +TEST(Channel, UnbufferedChannelDestroyUnblocksReceiversTest) { + auto ch = MakeChannel(0); + ChannelDestroyUnblockReceivers(ch); +} + +TEST(Channel, UnbufferedChannelDestroyUnblocksSendersTest) { + auto ch = MakeChannel(0); + ChannelDestroyUnblockSenders(ch); +} diff --git a/paddle/framework/details/buffered_channel.h b/paddle/framework/details/buffered_channel.h index 00b63da4da7..77eebc99249 100644 --- a/paddle/framework/details/buffered_channel.h +++ b/paddle/framework/details/buffered_channel.h @@ -42,8 +42,11 @@ class Buffered : public paddle::framework::Channel { std::mutex mu_; std::condition_variable empty_cond_var_; std::condition_variable full_cond_var_; + std::condition_variable destructor_cond_var_; std::deque channel_; std::atomic closed_{false}; + std::atomic send_ctr{0}; + std::atomic recv_ctr{0}; Buffered(size_t cap) : cap_(cap), closed_(false) { PADDLE_ENFORCE_GT(cap, 0); @@ -58,6 +61,7 @@ bool Buffered::Send(T* item) { if (closed_) { return ret; } + send_ctr++; std::unique_lock lock(mu_); full_cond_var_.wait(lock, [this]() { return channel_.size() < cap_ || closed_; }); @@ -67,20 +71,30 @@ bool Buffered::Send(T* item) { empty_cond_var_.notify_one(); ret = true; } + send_ctr--; + destructor_cond_var_.notify_one(); return ret; } template bool Buffered::Receive(T* item) { + bool ret = false; + // Once the channel has been closed and all data has been consumed, + // just return false. Don't even try acquiring the mutex. + if (closed_ && channel_.empty()) { + return false; + } + recv_ctr++; std::unique_lock lock(mu_); empty_cond_var_.wait(lock, [this]() { return !channel_.empty() || closed_; }); - bool ret = false; if (!channel_.empty()) { *item = std::move(channel_.front()); channel_.pop_front(); full_cond_var_.notify_one(); ret = true; } + recv_ctr--; + destructor_cond_var_.notify_one(); return ret; } @@ -100,6 +114,12 @@ Buffered::~Buffered() { closed_ = true; channel_.clear(); NotifyAllParticipants(&lock); + + // The destructor must wait for all readers and writers to complete their task + // The channel has been closed, so we will not accept new readers and writers + lock.lock(); + destructor_cond_var_.wait( + lock, [this]() { return send_ctr == 0 && recv_ctr == 0; }); } template diff --git a/paddle/framework/details/unbuffered_channel.h b/paddle/framework/details/unbuffered_channel.h index 815cebad2d8..92a16b4d22b 100644 --- a/paddle/framework/details/unbuffered_channel.h +++ b/paddle/framework/details/unbuffered_channel.h @@ -45,9 +45,11 @@ class UnBuffered : public paddle::framework::Channel { // A transaction occurs only when both are true std::atomic reader_found_{false}, writer_found_{false}; std::condition_variable cv_channel_; - std::condition_variable_any cv_reader_, cv_writer_; + std::condition_variable_any cv_reader_, cv_writer_, cv_destructor_; T* item{nullptr}; std::atomic closed_{false}; + std::atomic send_ctr{0}; + std::atomic recv_ctr{0}; UnBuffered() : closed_(false) {} @@ -62,6 +64,7 @@ bool UnBuffered::Send(T* data) { if (closed_) { return ret; } + send_ctr++; // Prevent other writers from entering std::unique_lock writer_lock(mu_write_); writer_found_ = true; @@ -81,6 +84,8 @@ bool UnBuffered::Send(T* data) { ret = true; } writer_found_ = false; + send_ctr--; + cv_destructor_.notify_one(); return ret; } @@ -88,6 +93,12 @@ bool UnBuffered::Send(T* data) { // data that was sent by a writer is read from a reader. template bool UnBuffered::Receive(T* data) { + bool ret = false; + // If channel is closed, we don't even want any reader to enter. + // Unlike a buffered channel, an unbuffered channel does not allow + // readers to read after closing because there is no buffer to be consumed. + if (closed_) return ret; + recv_ctr++; // Prevent other readers from entering std::unique_lock read_lock{mu_read_}; reader_found_ = true; @@ -96,7 +107,6 @@ bool UnBuffered::Receive(T* data) { cv_reader_.wait(cv_lock, [this]() { return writer_found_ == true || closed_; }); cv_writer_.notify_one(); - bool ret = false; if (!closed_) { std::unique_lock lock_ch{mu_ch_}; // Reader should wait for the writer to first write its data @@ -110,6 +120,8 @@ bool UnBuffered::Receive(T* data) { cv_channel_.notify_one(); } reader_found_ = false; + recv_ctr--; + cv_destructor_.notify_one(); return ret; } @@ -135,6 +147,9 @@ UnBuffered::~UnBuffered() { item = nullptr; closed_ = true; NotifyAllParticipants(&lock); + lock.lock(); + cv_destructor_.wait(lock, + [this]() { return send_ctr == 0 && recv_ctr == 0; }); } // This function notifies all the readers, writers and -- GitLab From 87e3bdac4eafb3709f146cff358bca9568c24aed Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Thu, 8 Feb 2018 15:49:14 +0800 Subject: [PATCH 096/138] refine structure of cluster and quick start --- doc/getstarted/quickstart_cn.rst | 6 ++++++ doc/getstarted/quickstart_en.rst | 6 ++++++ doc/howto/cluster/index_cn.rst | 14 +++++++++++++- doc/howto/cluster/index_en.rst | 14 +++++++++++++- doc/howto/cluster/introduction_cn.md | 13 ------------- doc/howto/cluster/introduction_en.md | 13 ------------- doc/howto/cluster/src/ps_cn.png | Bin 0 -> 33865 bytes doc/howto/cluster/src/ps_en.png | Bin 0 -> 145107 bytes 8 files changed, 38 insertions(+), 28 deletions(-) delete mode 100644 doc/howto/cluster/introduction_cn.md delete mode 100644 doc/howto/cluster/introduction_en.md create mode 100644 doc/howto/cluster/src/ps_cn.png create mode 100644 doc/howto/cluster/src/ps_en.png diff --git a/doc/getstarted/quickstart_cn.rst b/doc/getstarted/quickstart_cn.rst index 51dd00f1e80..d511cead262 100644 --- a/doc/getstarted/quickstart_cn.rst +++ b/doc/getstarted/quickstart_cn.rst @@ -1,6 +1,9 @@ 快速开始 ======== +快速安装 +-------- + PaddlePaddle支持使用pip快速安装,目前支持CentOS 6以上, Ubuntu 14.04以及MacOS 10.12,并安装有Python2.7。 执行下面的命令完成快速安装,版本为cpu_avx_openblas: @@ -16,6 +19,9 @@ PaddlePaddle支持使用pip快速安装,目前支持CentOS 6以上, Ubuntu 14. 更详细的安装和编译方法参考::ref:`install_steps` 。 +快速使用 +-------- + 创建一个 housing.py 并粘贴此Python代码: .. code-block:: python diff --git a/doc/getstarted/quickstart_en.rst b/doc/getstarted/quickstart_en.rst index d1bcf82ea07..70f7fe06460 100644 --- a/doc/getstarted/quickstart_en.rst +++ b/doc/getstarted/quickstart_en.rst @@ -1,6 +1,9 @@ Quick Start ============ +Quick Install +------------- + You can use pip to install PaddlePaddle with a single command, supports CentOS 6 above, Ubuntu 14.04 above or MacOS 10.12, with Python 2.7 installed. Simply run the following command to install, the version is cpu_avx_openblas: @@ -17,6 +20,9 @@ If you need to install GPU version (cuda7.5_cudnn5_avx_openblas), run: For more details about installation and build: :ref:`install_steps` . +Quick Use +--------- + Create a new file called housing.py, and paste this Python code: diff --git a/doc/howto/cluster/index_cn.rst b/doc/howto/cluster/index_cn.rst index c68b2655b65..a60521b4a96 100644 --- a/doc/howto/cluster/index_cn.rst +++ b/doc/howto/cluster/index_cn.rst @@ -1,10 +1,22 @@ 分布式训练 ========== +本节将介绍如何使用PaddlePaddle在不同的集群框架下完成分布式训练。分布式训练架构如下图所示: + +.. image:: src/ps_cn.png + :width: 500 + +- 数据分片(Data shard): 用于训练神经网络的数据,被切分成多个部分,每个部分分别给每个trainer使用。 +- 计算节点(Trainer): 每个trainer启动后读取切分好的一部分数据,开始神经网络的“前馈”和“后馈”计算,并和参数服务器通信。在完成一定量数据的训练后,上传计算得出的梯度(gradients),然后下载优化更新后的神经网络参数(parameters)。 +- 参数服务器(Parameter server):每个参数服务器只保存整个神经网络所有参数的一部分。参数服务器接收从计算节点上传的梯度,并完成参数优化更新,再将更新后的参数下发到每个计算节点。 + +这样,通过计算节点和参数服务器的分布式协作,可以完成神经网络的SGD方法的训练。PaddlePaddle可以同时支持同步随机梯度下降(SGD)和异步随机梯度下降。 + +在使用同步SGD训练神经网络时,PaddlePaddle使用同步屏障(barrier),使梯度的提交和参数的更新按照顺序方式执行。在异步SGD中,则并不会等待所有trainer提交梯度才更新参数,这样极大地提高了计算的并行性:参数服务器之间不相互依赖,并行地接收梯度和更新参数,参数服务器也不会等待计算节点全部都提交梯度之后才开始下一步,计算节点之间也不会相互依赖,并行地执行模型的训练。可以看出,虽然异步SGD方式会提高参数更新并行度, 但是并不能保证参数同步更新,在任意时间某一台参数服务器上保存的参数可能比另一台要更新,与同步SGD相比,梯度会有噪声。 + .. toctree:: :maxdepth: 1 - introduction_cn.md preparations_cn.md cmd_argument_cn.md multi_cluster/index_cn.rst diff --git a/doc/howto/cluster/index_en.rst b/doc/howto/cluster/index_en.rst index af957e06cd7..2640a09dcc9 100644 --- a/doc/howto/cluster/index_en.rst +++ b/doc/howto/cluster/index_en.rst @@ -1,10 +1,22 @@ Distributed Training ==================== +In this section, we'll explain how to run distributed training jobs with PaddlePaddle on different types of clusters. The diagram below shows the main architecture of a distributed trainning job: + +.. image:: src/ps_en.png + :width: 500 + +- Data shard: training data will be split into multiple partitions, trainers use the partitions of the whole dataset to do the training job. +- Trainer: each trainer reads the data shard, and train the neural network. Then the trainer will upload calculated "gradients" to parameter servers, and wait for parameters to be optimized on the parameter server side. When that finishes, the trainer download optimized parameters and continues its training. +- Parameter server: every parameter server stores part of the whole neural network model data. They will do optimization calculations when gradients are uploaded from trainers, and then send updated parameters to trainers. + +PaddlePaddle can support both synchronize stochastic gradient descent (SGD) and asynchronous SGD. + +When training with synchronize SGD, PaddlePaddle uses an internal "synchronize barrier" which makes gradients update and parameter download in strict order. On the other hand, asynchronous SGD won't wait for all trainers to finish upload at a single step, this will increase the parallelism of distributed training: parameter servers do not depend on each other, they'll do parameter optimization concurrently. Parameter servers will not wait for trainers, so trainers will also do their work concurrently. But asynchronous SGD will introduce more randomness and noises in the gradient. + .. toctree:: :maxdepth: 1 - introduction_en.md preparations_en.md cmd_argument_en.md multi_cluster/index_en.rst diff --git a/doc/howto/cluster/introduction_cn.md b/doc/howto/cluster/introduction_cn.md deleted file mode 100644 index 562008a8984..00000000000 --- a/doc/howto/cluster/introduction_cn.md +++ /dev/null @@ -1,13 +0,0 @@ -## 概述 - -本节将介绍如何使用PaddlePaddle在不同的集群框架下完成分布式训练。分布式训练架构如下图所示: - - - -- 数据分片(Data shard): 用于训练神经网络的数据,被切分成多个部分,每个部分分别给每个trainer使用。 -- 计算节点(Trainer): 每个trainer启动后读取切分好的一部分数据,开始神经网络的“前馈”和“后馈”计算,并和参数服务器通信。在完成一定量数据的训练后,上传计算得出的梯度(gradients),然后下载优化更新后的神经网络参数(parameters)。 -- 参数服务器(Parameter server):每个参数服务器只保存整个神经网络所有参数的一部分。参数服务器接收从计算节点上传的梯度,并完成参数优化更新,再将更新后的参数下发到每个计算节点。 - -这样,通过计算节点和参数服务器的分布式协作,可以完成神经网络的SGD方法的训练。PaddlePaddle可以同时支持同步随机梯度下降(SGD)和异步随机梯度下降。 - -在使用同步SGD训练神经网络时,PaddlePaddle使用同步屏障(barrier),使梯度的提交和参数的更新按照顺序方式执行。在异步SGD中,则并不会等待所有trainer提交梯度才更新参数,这样极大地提高了计算的并行性:参数服务器之间不相互依赖,并行地接收梯度和更新参数,参数服务器也不会等待计算节点全部都提交梯度之后才开始下一步,计算节点之间也不会相互依赖,并行地执行模型的训练。可以看出,虽然异步SGD方式会提高参数更新并行度, 但是并不能保证参数同步更新,在任意时间某一台参数服务器上保存的参数可能比另一台要更新,与同步SGD相比,梯度会有噪声。 diff --git a/doc/howto/cluster/introduction_en.md b/doc/howto/cluster/introduction_en.md deleted file mode 100644 index eb70d7cf35a..00000000000 --- a/doc/howto/cluster/introduction_en.md +++ /dev/null @@ -1,13 +0,0 @@ -## Introduction - -In this section, we'll explain how to run distributed training jobs with PaddlePaddle on different types of clusters. The diagram below shows the main architecture of a distributed trainning job: - - - -- Data shard: training data will be split into multiple partitions, trainers use the partitions of the whole dataset to do the training job. -- Trainer: each trainer reads the data shard, and train the neural network. Then the trainer will upload calculated "gradients" to parameter servers, and wait for parameters to be optimized on the parameter server side. When that finishes, the trainer download optimized parameters and continues its training. -- Parameter server: every parameter server stores part of the whole neural network model data. They will do optimization calculations when gradients are uploaded from trainers, and then send updated parameters to trainers. - -PaddlePaddle can support both synchronize stochastic gradient descent (SGD) and asynchronous SGD. - -When training with synchronize SGD, PaddlePaddle uses an internal "synchronize barrier" which makes gradients update and parameter download in strict order. On the other hand, asynchronous SGD won't wait for all trainers to finish upload at a single step, this will increase the parallelism of distributed training: parameter servers do not depend on each other, they'll do parameter optimization concurrently. Parameter servers will not wait for trainers, so trainers will also do their work concurrently. But asynchronous SGD will introduce more randomness and noises in the gradient. diff --git a/doc/howto/cluster/src/ps_cn.png b/doc/howto/cluster/src/ps_cn.png new file mode 100644 index 0000000000000000000000000000000000000000..f9525739cc8bc6506adde642aafa0a85ae3ebebc GIT binary patch literal 33865 zcmZ^L1ymhfwpMaF^-(egAuJX4Xu4 zu|nT;-#&e0pW0Qm6Q!ywgN%re2mt|sEGH|e4gmp$0s#T(4-W-=Vu1<64*Uh_rY<86 zQ9Dj}2%I1|%YJZ!fIz|o|3E@y<=_Gr%-Lw_y6Y+_@|!t1Fq@b=nOZP=J2(SZLqG_6 z^8>#+Sh$;zdOO%Vy77Apk^gfAKkz&FV-|AKe=c#i6C&4DQYDpeamPliMdp zcN1?WM>mRpzvREZN7BO0%+7|2*g4>s76t+?{}4 zaJ4a$b9A?G1#WgX0k=+={XcjA|F8JJze~l{#sYZjf9_`c&)xs~XaBigkOlnW|651= z+spra3UsqDq9DtE51BAx{Wkj=1cWGroTQkhH{{8hM}e=@67RW=4P*&H>KV%6nWO3@ zj2J#;+L>c`#x`f z-(I@kvc zJmo)!J0=u@mOJKS#VIHVeB=d+ien40DOIE(K!K07P~dzm)ZbP>j1)KuXh;)PrBoC( zj?9Ba!3Pf+aL!YhzpRWV3mgXs@RXWUm@}#TnWTxpF{uPTS2|djxfOYPn22HSI5@8B zt}Sz>y}I8{=ze>BYQJ3fyIZ@O5Xt`?o3QSCrKG6f`*hm6H1s&*Sd`~JE%&$U?&C|o z@5Anw24~NVB{JXp&4_%z+YgNC7~lpHg#!&Fg1t}7Uh(eCBR0~sq02N?(=jNBS`V?Yl3jrSVb=E75LVJWP^q;A3xg^R7k!z zH{w$KXnhd`N8I+XmwkNaeX-E}ihYRLiQoEjh$J9qEMPZH$22q58Uq+2PJ|wA|FeR1 z+o~Ez;6dNU`F~%GWlz37{SEld)cS6Ahq?1)tN7M4RZTk6Bpgo~Xw&-ZZL2=elIDev z3|DzRXARiEYoAu;S3S3F$3OMguXo-3dHPA<)Po=J@Y|?m)%CaE?b3e1+q3`6vC+EA z5K&oiuViugz~U6VzpP;_bM$NQ?C z^L|to^vkkb&*dZ;;`TC@PHiHeZ1&B|{I}XoXdHr!M*em`B$7z<_s5fC%bHol3v3_ge5SVZ z?6Ldpse8fD%kZ`e|FCu4-=`M_XEqUq#A8;?ZImrSnBi^FEP<(I(ez&LVcEdCFf5*T zD-!>vSm#E9l<&5jM4O1ohJ*L9#)rn6m!AxR@4yaz1DKHczLz6zM0%eOOTaUyeLwH% zY5ffcrXW0q<;Q(Efn$bUZM&d2HsYw@)o6QN-?no<@?Ag4EASW3we>=-%TK%*#zy$) z>1}0!(;Y2Z&$IUZ%JV>I6!?VpZX5&WUSRJ37<{j9;5tHeF=yzt7ABxJqR27EIj5;H z_?+P^`?3Kil3|`Kx6=ccqRd~BbABr$@~Ac(ar=|dP0?vvhmH*mqj>I z!1FeV01Mhv!_Z>tZn{3Ru(ndcvD?Ef&9r}UlJu`#pQe!n!OP(fR=IPT$^}@6#!-nL zk;1%|&lf{=&z0;nh-j{TiK}M25p-~%4ez#~MdyygQX~kM$!rHJ~ z(=h6e6k25Or^4~;Ryp{ciD2p~g?t`V$b%qapRFhwe_&LSHqFH(Iqhni-}j<$2IlFR zMo~>F2~J8y%i;H%v;t8{aK-*x94gyW3yHm0aj2_--_4wsDJmxOAsGtvw_pK0Sz>Hy z^ol273C&b&8aTsh?Y>KWo2&DOp%OK-o;Hi;`$?gOT>&-yw-1Shvpk3tn#WimvIWZ< zc)gQ=h%al(#Bs$`5J_*t6bZZwJs!4}JQ_zYt@?+-?SjdO$-mzTr1`@qaV$ zFo(a?{dlzfMOQX!UH&0|!6?A53hFjFlFE(-Dl7>37k1U?2)1m3hd60rG(Y5AyXm+) zy~Vz(HHOY>naE2NDX$}Ub%+LTnvymU{8s40r)63cRNv7ig!&#;lxZ8dNWn?+o+YI; z4ICBwCpt3=TtiO~!`)8Fkc7D{$q{i#h=)}M=i7g0Y{rdQn%glvngt1(tZa+0az2Wh z=WP3L>5xHEO|}YT3zDlLyZw0B>;Ho0D`0AjzdwAlV1%l%C#;z80Uqsz?|{vBLg
D&IYp#aYMgWCxQa%tf*Hgc5N|iG57Q z(3s~$pN~)##^nXBMmcKKyOQ~CW;Lv`QEZM^fAPINU$On*q}oZ}j^#kH)SHQAJkX;T zLEwXAGv|OwvH$l*qtKcQKaOEDWNTdyohQc0LIx9Ur6_wB(a*0#lA^We3YW zm3xTp43?mZRsXTDAJRI#CS_9@Xos5 z{GW}`ki*DQl*rDB#!z-i)0cd&#`+n4(IAaLI%tu~k2d=%!1TeV21(@0L(tHR5YB8$ z1R%(URAB2tB4J#Cm#YH0(eFyAuN**!4e5{tRR@h$t5->(7{dI)`5ODH{iN}?tcKg} zw}fFNNrY6(3Q(0GVOp1jeHdrZ1fyWp6k>7yZ+~4b9|UJZ4oryemP)58DrFgw{G@9tA4B;aB^MUW-zKk&PQ2B^d*(mCgGA)>^-#HBX z_vuQ-V=$NFwFU?HkE=1!Fpj?h3lS{=uK_~!rBry2;;jaA=ySQzk3Sfxn`YI_@Dpve zisbW{%uR{D2P|6OiIaP6hah>PX}#QKc!gUnG}t4Gcn=hy&M0`q4%JyH+(mu(5kn5L<-c ztk*^D7Ttm#SE?Bb03xdmSOo5g$Ft$Ps2-90gu+%S-@>vKi#+(o(+7$FS!E9i4n&}B z3c?a{{lfrPRTC!LS{IzU0l+EbA@_UMSC9Ec9a=BG+fSok>ZKei>#JN)BPG?t0%^XN z2vF7ZA@VMry77oiT8!flqQxpRNEYRrMYhGxy}yu3_35$lFJw}8_2fMVL2BBs(Lz8D ziCSWCz{!K&nG{1~A1pjqGba7|MHlrVPL27}U@dB=VIaVkDZWc;oY{-nqwH+{akzlB zAYNf+0AeCp;H18Pv`((61D4%tcn*&^cl=BtnFYo@44QO@j4ryouNjr9hMyLByr)i7 zEuBp%1VoPTDd^DLf`C5ssQOcRuf$xs|sZ4iBxN)tU$tur zk`pxljpT$RduXcQ_<({yzf`6(SDIh9J`rn4ys}b|-M54+ACwyu_XRqrwGC8EJDo5h zKY)pbIvDO2Bu&ZAZ@xMXqw{9U=0wL~ZzCRSFt(|d-xY>F6+%Yi%%sRCr!$NRU$^9k z*G%y>S+H8q3W3AW8`k!oIFA*nO~?~6UfDO?4UHq>b+|~qUdk85hVrarU_%y^-EE1u z$NEiI%bh2^fxgli#G#B-(BeojdO@pQdJ}FCNJiLet|S(Z|4Wx|9^))!iwLFuoVHaJ zmKU0Pby;ft{zJ6lUo45&-`6!#xRbF>Qbq5k-p^XUL}V56b9*~R>nfl0id)WgU*oZn z6cmfm?G;O+0I9?d#PqJrJ4o%^ALG&<)qer>JQ{S8&h`@*x>LjfzmqE0u+TRNC z2=y$=iH+93Bgj3@0Od6>5Gm{cM?P7wJjLdH3xz)c2CPNBr@-aB&LX;15HWc~T<#y3 zA3?k})`Xvg#oq9x=FAv$+?I%^Hsbwx=Qx9~Cwr?|Betg#ASNs8{dX|U$V;X+%8^-R z)OmLs!s9(`bRnOaI3V2PQ$7~0D#=r2wOXe33d15&Od~>G{=$^4)F2tf7sRsKx%|y? zSA$7X6rDLC{AAyQNd}kPqblw+Ut`4h9@E@ZT2jzrqb)9xg!WzTP?TQPHEke-&63hZ zeQNRFAV~7JWWJag@$nEPwJ_J>aYo|_R$tvQ8ZvZbXXPZTwD%AT{LA{VM2T@JrFqA4iFbf|?M z^Fw9(rGwF40qpQ&Mc4(VWLT&-){{1p@H=}YLI{lENy{Xw&oRZMAIPyKPc#Hf#3C=1 zf@bi7RIWo|LMt;z`1#QxC50q)#Wqr43VsREV|phMRtAnQu|J^e6k#`#n4({Q7%OGh zRZLV#Hqq<&Odx;BC{7a;OB(fyE`L(u8gY`CKPnH_!=hLJ?We+^IHA?=MWS#B$a3U7 zihysfjef(WViGE(!x+qYjD)sjn3d(o`Qu&91yLEQpB5buKgxU;dKX|^i(r-W7hc*2 zA!H`=%%Y>7>j7UTb`wNtb~X*D&mqb=P~Oy!4!nw`{o_d>ERz}WXfYX6tN~In z(3cv@>^-eMGwpZDek*l0P~?%leAdCNsm1&#s+JQvi^10f8yaR&?WD~mjyPd!bdPx3rxrH80)MX;8-pFT7wNEz;q9)8 z^h%Ijf*~l6A+alpMGW*8V)~gm|1Yi+#p`d6r-avSPmrEEP;Y#PnDhne;-?eiNqywu9_FN#s`2!vBjU^U#=$*(I%DKENE!<9?B;U3(09fk z#3`T4HPYImFoIpM&LHPvOL}Vdq#-+pA$f+FCB+Rw5Th)7k0IjzK%4ylT|!Bz%SFi` zLgUC5S)BH+ALO01E;T(CA}d-AVE!pepN)+hPfw-(X-+aI4gDg?U4?M-8}(8ppR^Kc zX;U3Nui5qc5@G`pH_nZKAwM;-s}us2W)h5=B8?`xm|8(-ZlGJ4*#clR=Xli6BkLzVoQA&{|33$K!#!ua6Wzsg9_!R1q?vr%^vd+nnpn zu+L3AN0b9^mhhNTYsdBH50Q@6*H{(g>Q5;sR|t_$o%h z2hk5H`F_Dz3x-hcQ#m#l9(Q?)OYD7cJjPfO_*CWjPHjSPq(Ol{mRDTObkn9=t|>Aw z$@qtQ6gwsKfs9N$rX=-myhhTMML8wJ*n$x8jdu|-t>Xp5RP}b^mi&&mIN=H{h-i`p z>am{K zk@4kPofzXSB=U$$NsBp_(u|%K_Xj6AA;X15FyxSZbNx;R=Kv|A=QJOS8WaQn#oDXj zl4?Swyhs-b;)6`^Koa|$=|*)$=}7#ff%j?9qpd=s$f9?P3-3!>s2SmwD^w;)x6~FH zb_3mJ2T214vN>;Ll=}t4c&e!Wk|6gdW%xjYn?kceNHJF_Uz3ojq-jFS!C!48X}D0r z6nZ>Ah0QTlQEF^(b8BH=xMNskKF-F}{~(!0zOsLzk~3ooKI>&SMmN3<35Ill4AaEM z4UVPwV@m4Z%is3J3^oZy6^S~K`dul40a7Z2g9L%INCovQ3f=~m653qqH)4;XO3`)D zEsP%;DeBTO!W2~p9DTHRupW}g=4a9rzL^@<5FMqIj9z}*aXslB>M7h^QR56{&szS# z#w2JXQ}^?&=?>7vj_{~(xU@w4;Jaf=!K5%1=9jL(e7E*PHl!fYLAc+?_WlCXte_E8 z{_!ciWrsFw4t*75$tG3tPn6uq{ogM8jei^Yzr5 zVr;R$Ys@o3kB;a)KziVoWk$4tJ)Et|@RVl4e=&t81`7Q^HW7;tuU=<-ae_IF@C((z zzPguPf(H~E77{e-jocF=?a7*6tQ)?+C<7A{o{_J#fop9M{6K7W~64qzsL%7 zLtjl<33Ru7?1DZ&`AdC-^XrC(5TlZI^|>^6eV$HEpu1*6DeaB{lw4b6WW^oF@Gs|X z0p`>!3KX%a{-Nxz89*472Md`>+w%W#?$&s~xiucGhSa|edO-{drGNCWoN2=Vfe$@c z;9L*(m!{gklzlgtWbw~k;;j9b@=q57&eMa~wNn43?CZdf`+q+k3lD0G-@FrtVqgTP zp7N=3q{;Y$$2@6ZF7NxLzF-Rr68&s$^M)V#`c|K870Qcr_W4hro34(HI^$(c#J0s4 zpVeXwC=u(dOP4A^C>WXzmtUADZj;d3mvR!{59vd6(&YP2f0d09ztv7hpHT%FJRmVO zCF*(`xH623hDil?Y5P_*gJb({x@sA(7a6ao1*Yr3@%wp!r(dmoT$XO?9sb~a zN~9W5ll{~;CCSqtnnAtTF6kt6O3GIDUJTo`G`#8jyW-bj?mFxMz%iLA%X2~m@$F@pwXS)76CeE=iti6d8VLKu(`2jH9sT}&@x<{83>P%%?aB?!&m7$Ojs1T z9HuA)Z9dGZ{AgP9T#@1w0SH-{apQ`T^yvd2?Nd3rIr%m*4v=#iDniYz@bEb@Eh6C z#7#n`-AAX=QNJ~pxFdt-Jv1+jUz(?XMh39#3<@OWcoD+Z{hv+`%kn?=wRa!ac3O&% zSmn5EQOt!=c;tQB12hhwdYIHdlMn{tNoccE4aK1p{&iiq(R|JrCtC+!YEr^LpUdIzX zrz3D|5$Ks3OEwDQ6gAz-RyaD-#QDhaGnamvH zHPPC{fVQUr=8w8^z4@S4eMW}&R#XFuJ4UvBBSLNPUJf0TU5v={Ch9DKHxyh)_sNnm z`)&g~$U=d!L9%@HFkmm!N*FK}JB7rFR zq?0Ou_ZDL1Um8uVMFu+Aw!bXWX=Z$4On&)+FVLKXs1BHE0+obh6C0<1Oy@En@FnVg zYOUJr5IZ%39zMP=BK8aSn!84D!TE4jyk>;O8fA*&)($c~DBSjHT%Zn$S;bgH5JP{H zQuRU?Q646C`1xug!KMJcklI9E<`*c*8l@`eVG{FJlTv{m59lX6c{zJPmF6U8)<1Bt z7J9Wfp;YP;3mJpuVdp^=x}Gfm8jl|{2ztH%)JzS_ft-vsKm-gT2co7Bcq<~N#V%J| zenBBqKHP zdOt*u+NNoFUSj%-#Z7ztX}u}a0ZdofrXqI+ecZ~PA<(Gz(}kgd)ZLlgK%ce_$w}oZ62u|FEa_ znV_i*_AP}9x9#H81`hb*-&cCWxSD>;o^u_d-YC2OBtMG)4EYgAQ!TnAH$NL8LID`` z40Do9)`z5((9v%b!Ye@xr}xzGrhM{vJqBE|`N2e32-vTr`UtNftTbNI`zP8B@U_Bd zq9#bs9+~k!vg9yyNUj%`mRD!y%P-GCLfpuRDNCvw8zHFF%9Le@)b&esvQ^Fq!q+_6 zDpbTG2@in2O&5nWCO;|{MGbZbq(Gcp@7wHq;cvan5p!2A07h@&$GQXi6<#G(ms06P3es4U+(QiJ)BG1L6>h+U(xJFHK)Sm2gv)mK=?gJ zE*>nOP9Ptmq zG(B#j(Dp?e9Is}A7@uq+8(6r92k~#}Tlyl+1>PYgN3|z?8B;v`ukN`7ch4uN&&8i! zzZgt=exJtWP(f^E5MS>?w1uuUM<|fJii|xX)aJ}m|3mjzALrP*=38h$E&cc4Cof7p zOeIb=?=a|d3YMas$jrouCkexP%@UKYr|;(e8&W8J1kby#|6Dpe~byV}ckl4imXZmZg5jTXq) zQ%6Qzd_al$l{=A_{9w6@D4#BeWmfi+q3DJAX8S952B8vDU`Sd0}t zqG$)_!Fc5ahCq5|dQNA3=Qwdodiu=Bxtw%%KJIk4C5Kc>6hoG^K3ne@j8o+U4IjgIG`4|31!vG5+nv0}@=U;dX19r5)qwCQ1sXHfoB-O#@Q~>=YvAU{7i#R{Vy=^E2d|^*7^vV%x|kw z&-g*f4JR8Kla4+^_NdV|Ps25wGDys0mw&c@YPa-z3O^?4!}C}UT1hK+R2*Z}bq0}} zUh<|g8HBn=IFPJ#c}KSx8w_mP^lP26I_s3Sd%M=?Wkj<+o$176ZZ0EF>@+L9H)%eQ z0QyEu+Hfh(UB459nr^=vjg)+YR5nyW=kzr@TMmIKa1L$e=Qxe?hBj>uI%@<+Flv;y zw7IbS$d51x`XHL}df0dDp}f1;wg7%ZHIo)PXB1ckd=7ezPA0S2&;YpS*{{*)+Jhns zZA32#3@P|!3GCyca+z)eDZ?FO8@mY0JzlUG#uQlICk;b_^UNPCao@Y003+k}F?yPT z*e4R{I$R=NUK0rFYI_x|FT>%epDqS*+Np3z69jf9#d?*(Mll&y%ubWT@Kle6yU7w~ zmu=`cQG!;-aZ;&gwLVF6{*iuc9lC(uFyi1~C)ZqSu_>^js!omT@pds^x6L;ovQL+@cPR}CFEZp87?BQ zui_#$sW1JTqpE{*R&`|`eD{BH*dA3Nj1Q_7r2YEW$Ce7VNTs?i&lLRoG~nr>^x;r7 zn%w_p(5Yy$t<4JHr~83|HUx?jU{k}@CBctu`v7F!&Kl|)D9yj}wSpotI<(=zpGVq6 z=E@3)1d~&NTOSP_R9r>D4Q}x#W#2DxxtUA;4cG+Jcy(XGl7&ik| zv~1qflUHcvUB;IsNLM*+(S3?iN3#EB;A&)vZP7*JiVTlb!56U{<49YEF#c69-nM~lADNDC70j1rF_H%$rP^Miq0 zReijn0@ZfEe!e<(a*-|*i65-J+h1|&o=ZWO#xS<#%L8B}%?`9{vw%uK)fiY0Q<;E`qY~Po zgc>#s7{HL=avCH;0OEa(Wsm-P-U~;AVmQCoH4Xxd zU$uTU#aYMTpzQ%wX%+nf_)Zo-AJ+t&hmi0_O9L5Vg2-zduVL%e&96`HK*s9Tr2{}} zxdRo!HnqEq*jtZxJb=S$W~Z8xI?sEHrTe+jFK%ArA2%N0?GUNPV||#(f7P7OIc0VO z-oUK=+7`grPG0&IuVUb!b5U>*F<|uexB%pJn?QE?`gmLyX9c9i1i(HP&B6LIg_mmp zbY=dhz05PulMJ)?kwQeokp29CAAViT<-t>uVV2H0;FAxB*OZWgiWldtm?;#PziA(4iBIo;t}^+aJ-(+{U1FR(jlO%rEYB)Y^ds%k;ZAlN~XF_+Y zSHMJi0c;1*+b!P}CmRg;up1p8}I*yahF6q6o1u3~Vfw z+;NCd7=zV)n#>|UCE5jN6}V=xKmc=NWv_jhn*1pIc<|K?U{GdMMB=>HfsKqTN`a-z zJ({kR^a@W;%7NAo;93sk(wl>OmM4`dv@D~D*S96h@;)DVl3q&l>-0In&RG^i34tIx z?Eu@9<){Rr(_239jbTU6fN{_%21bBZ5DgRO@<9AX)oNVi%@;8b2+yq#@tN`9$+I2D z^HI#+q$)m=XaI0@#r75OlBIUuF56YTKlY_IG2~6Gu@X7>uDa)&3n%NPuV^4;lmw?D z{IF96EI44xVZ|2<8xwhZ4#OvYBr+2_;pl(ILFjiq1)BUok{xH z2rdEsxH}+Qe!eafsZkT|vSk5v#8jvs*M#*1BQXy@)6G5r$#F}|<({3Jqqua7!Q!m9yqFNL^sYNwaEzIGL%cFbo@fZ^~u z*Pg#w3jx?4iZH0VvVfVrFv>ZjxPvfA0%5y`91fVnx;6}jE(S1ft^kUzvOAwQxuP@+ zkwC57%&XuPELd|sKzA`IznZ5g{q(QqeW5c^0p>`G9L>sRM}%4UN4yCXfUFxL_QC3I zO`R5by-3KUN3PWFjZ92wONFq)2{8D4P`L(PspjqXT_u-G4y|j^>l4CfCevC?IzJW~ zmM4Iz3#2K|0L^%;8>7T6ne7VX-&;Xh)n#k>n5ikr-S=BDU~*C)9-3#El*CLFnd@ee_EesV}d|> zTeR}Vwr6W^*6^;?wNod7t;36A?CKXshg^rsxF8ltcrGraalluNvj>k15M>VWtkd1# zeh?a%K70myb~QY;D;r@ zb=nYyxgx-sN!)?{LO-7kkdOJq8ASUPz~UGtix9;gu&y=1srnLfvj@PVnFqkSEV$q4 zT>rP@ArE~3Fv|9q^Y|ov%O;}#uo6{u@PGi&BQHFFyEm^NbK`6oCqz>(oj41+75?E3 z7htA%l{T#>y3T-q9G&n4wk9BA)JB|#Vw;9>Fok6MST|p5=wMbJoG!U<#&;yV6%iA` z;-#vJP=4>4egh^~>boOgj!LIt=58~6F4=F$A4vP|)iy`s_L-^|v}F4??a%E85%B#8 zYe4rbxsbd1xZ?t2*ExQNSRsorfyyAFHkyOBmmqYD>!b`ARil5Kc0mO|<$R6jm&f}0 z@zA*?UoxPyAd}&GX<7X<;CU}MmEEA%Hj!oo_MY`R|C=gP54*^&Lxf%K512k<07vuy zcn@eg5QOpCEe3VZm&b(ALzc0bxQ`TH|hB9}QynXOB;m^!stwf@&2ZtQQTnb}}!uaSjDzLU* zSqT|S-$ldD!Psl_Jl~)k&7?wWb~rW^*u)wEwo<5C`n>9c9WdU2x~+L-jv8(pz)-+d zc*!qLt_c#Ya#5+Q!$dRY+8H;b(L2&4U~UADMy{O77$bw?iwZi74v7RYhl2i##M7nLiQ}VndZ)N zv}+CLJ2q!xd_1I?($2MST5|v!oyomAl727Pbgtl+`{!v#_0?km-rkBn8s$J_X<0)qd87a|9rEBJ9*k=4LA)@g z&tE$|?$l$a!GtpqTg#tf-s4ctU=ByZNr;zGUxen7`H&&d;2U8X@|>*#WeC^BPUoQW z(oS!tk1rmYU)rMi12v1u`vfO#@}2t-q>eQ09$kS}U7Fkh*uJ&2qr(opzE=ynluJBm zMrHQJ&=4k5xu$tu&)kl@-g6MP*8%JZ&k0J;4)M5Y!$D+w-j-kLDyns}?}DhAzh-m6 zt_d=;vL(w0`%_QN~Bcktnqnb+3g3ro= z7m{~yE*yY+?$DQbmhHWRA-xcC{=C-@9cR6fr%yn+K)u%qDi3jU*=1?Aqw8k7XXZ~p z?!`?op$3TW`|-&et*3|6*7Y+WqS9`O<%Vg#d> zY6#R(aR2c(TMiDsU;k)Rl12=W96;>Mrb1m)w6l88L6!*HUpB9;-?Qu;uEJhEKB~~6 zGlj>?^Cw!AIW@)nc$Ar-uB7Y-*n4jf?B?|uC|0p)+@8$?(i-$a%mu}aG-^A$hkd~S zR?9A+bl{3>pZ+xOv^bDWAN9&O4OXUcy*)-UX9UpcA~zB6^g z)KSCAF>EHWKOe+AqxmbO74abxtYjuLaiZP#T0k);=td&y?GNk(w>Dlv?Xfz!Ifd7w z!`W;g-<<;_Uav@R67s^N8wofS9Prxf+W-7VpNP2jT}R!YtmH*~e>uKJ>Igibg2K#F z4nuOnk=Xf?t=JAS2dceMG{FympRUgf`t|rO1LzC7X2{TGmAm~05DA61`2-AH;ChtW zvUfKtf0$Tki6JQ>1|}W;#uwcSso0)wjiZye*+V|fZO;@qTg^oeP>Z>Sqv+uZ#UGHJ z;oL!G@>xHSDoZbJ0aD&JF~#1UT`(VJX)jaCl2txYiW;9{ z;m46&`5(sK2A1TjJI!PAi={bl;svC%3?3AsfiN+hzfc9wMWJLvAtsQ!pVs~g`U8nU z)9Ob5lfAEg9;kuRViZ7Bc^)wekSJf8Ry$2u#I*>^ENdNZw-N=G_6M|{aqQlc4bB~` zx=oDLNTKsM;zNs`{HkZG1M7Yq-hxx!Yg$aM>V^0 zb)^W=S?X_?IXIJQkDlFpu1jvP53)=g%a2*oMdUw)@~B!d`~)PA8RA8`K*f{r%ITSV zyI-vU01b7r5Oh-UMbJ-gOo!O&4FI21f62x%);_9hpg zGUbnQ2ZgwzWuJqF+fuy%a?9Db>3Uk>TPZdH+h$aov3k!bd~`YuhR&30BDd})fH?So z^wF8%i>H4e=@2oYI~cdlE)QF6@(0RhjKXfNc;y=cScjRowq@CNL*w#^$Hj4kW=$6> zEp%caBNKxOYzxtY`uSe75lFrf_0!KKA*h77Im&=)kz=rN0MVyq$&w;)>!=JS@GOQt zCn~r*I(7g$+3vF`-pycr77++lZ?&V?SZ1uR;pi@Rwy>-PHW-437|5T4tgWH3X;p-0 zygPFT5JZxQxbiD@n3i(b%cN{gao>}QhC!D#HZXIDZY{RQVhw<`v?|iX+Kpz!WTm-p zfQdI3sH{}dn!+J*8xN`@57=*@;2}+K98iTel=zK(_E&}yrZ0Hrl2`FN&Q1rQL$kKT zXO<|5VxvjhM68zv(LicyeXRPLnPYm%Gfvs`wdQ>_TkXZXf!i3Ms13o`6Klx;6p#U& zoSri_?SX+m0tw%DQ(XBotRZFHJD`LLNL*z`s{umxCc|i460vP9{gV?WqWs)E0;IT= z^hNCMG2{A|L2GM6=Izs3Zne=6`(&m9dhelaspcbjXO^L{Q$U0+Jz7aNkB;O5ijd}S zv$QZ&o18NMlqzn_tv>g#pFiQ{PGNLA$P+KfGYq9>FcG-g*9qv~M|4Yd7H#G@9N&K) z0Q(o2wx}RxXOP-g-GJ^bkW|m`_d|ctI6M|RgS7JWBEIf(^U(HFn<=M;URU9MKG;EoX zAZ3G7!tIsirU6On{XU^=H;_8n%1U@H1w*R^L|khHU{O~U=NlvFO*mtDeocvk&#3U>$(rBB%T@h2j#^MC z*XA7=yzR8cHIQ2iz%8p0f4Pa5zs2dkhxw*cREB4nQP|KsrPWp?+cUkmFZ4JEWawjr z>FYGg`Qe-(;-+#QX*>(8lSp1QfMqB{>Wp*)QXwPXi$Niw=q-0)fF907de_Jb4VWB^ zyt4j!SC#AG$ncKswnvg_^m~gBMQyBg6nsCuC+%3;Fo__t+>jW={Col%4o_n4_wJM! zC|`7pFZ|}eNyZLm=gNuW`e^r#mTYr zACo(y^%c825?V_0GYt7|)m~V%1p*RX$4^f?0DcZRUm*`4WP@IyDW%O3Ef2* z%aDuV{RQ}$RkvIg(}>P;8ajs=_jW=2wB9(lm0Pm}q4d=dn%Vcu?VaCFCn<;NM=6^oZ*gb*s~rRaE=L-;(FjLpBbmHbX{ZU z;mZB4%B6*>aafvj{G&QRW9`rkQyo`6#&rW!YBo@vnc=}8=qRr%oKD?ACP~ia0%ie2 zqkW31K#mnN!y|;yd6nE9vZeN=bxU8Mewt{XFe3`6W)7OO6ev|0)pTK8R&$#=D$eHx z95wnY4C!i?5fi=tAhYu85Aj&_Wn`8*BZ|jHZq=7D189stGb=q zW%B;c-T9F~H)bC-TuyY5eis8+Nc;g9QF`yN&gs!*xe1jC?cPP;HyLD!9V_%x!lScO z@lKMBLh#U2W_eoJaw|~+JhEAc=rkUjh$pyR8E-N)Z zpt87`q5zYLXT7O3_5;3}$pYL;DGFov^W{O*^dCJZD$OT|wVPjPZ5L=1j{u2zn1+H{ zk?(%PJkNIQis>Uu!flXY2xj}Hh9n!R+!%!k!c}rf#9KqvR=XEI{Me6H%OC<^k(M^?itrR~~IsTp?R zLxMmta_8dW7kbWGCHM<|h@Y>H&hG<+K1ig!%Y<8C#@b4+ z@NhH5KvjyMo`dPuf61rn(O(Lrb|b>dwOJP zY~qyy05TGdGLc^H+m*;!MWoePJaH%cyakb6D>Sm2ol6cCwLIMN(LVKKwhjI8L9iy# zh<4&D=Q?uUEAPIpL-Sl4uI-N&OIzo>4%$gYWSHl47Ab$Dj%l{p)=qfe?HO;c66jOw zR2fX$XU|!KJpPp0B1|_W$!*DFNrh9VkT)tzKF92DV@?5fA+3Q`Xm@cVJsVO!#UQ}k z`fY5!@As($QR_xC;9~(1%+$_6N}PU*yG&&ZE#X2e)@de71Y)ff9=M|4@i)L!wYId* zzQPX7xeR`(y#XK%MYVN6NJuN|AeP1f#??b_%&j;AZ-Qt750+_)(uo&2{V` zpIvLm)Uu~W{a4G)4+JU2tz!~_7CbZed&h^4$oA?ZpUUC*LJu5O==F!z3xXN0eAptC zV|43Cg}Q-?QjiRWW(koOgn*#*0CKM$$-Cly=a?}nNKKHGKE|`iwy)0`EV_*iR?fU1 zP-*M{so|DZG%!*iTa;B*`ecN8-?nuQKpXdNhPOjVE&`RN*yJbD%K~0K?aKh^^jB15 zC_^*X?A`Fsn?4$3@7Rk*kxxG!IHd{u1HL7|l7g&!L6>rc%l!3N6WK}P_fFbf^50WE!c@Xjb z{b#hkk3;v7hJYOlak0PBa2c2&$^au=Sr?su__zYpcMd?BCUwfQ?*;dv2HZ>dvs*{S ztj+~Jrk@nR29It$yurB?N4yt<&^p`e<(Lqh`Cp}dbySqy8!gSyjYxNcgfvJa-9w{* zfV4 zX7%skRE&nUR~ql}0VpmYLcR~MEiO4afBu?>^Y1C0pw#24Cu8Z$JqFAEh4OI-UE^y1QomNwA2FbcGREsT}u3wYV8fM9BKpU&RU|57D$pZ`9-j zXZ|_hl-M&&~Q4?V{IEuEeppgEd#K&IqWNgobirQb&G@2fRss>Hdx-g1<8+b*_t@&oqi`ok<0N~7_fV|f5i8f51L zmv}{;=i-y=G^+HuVfVbM2(pOw(hi8sIA4D;HpD`Fipdb@;g-l#OPOmoHYH7c$jY9* zbjw_~ITqt8ggh7#9xE%ao{2(6^E&ay1=KGKD5f%VkWKO0mzAlyZ+|N3oO5Cdmq>|i z|2VZTp1OLk3>z@1=?Ge zDJ8cnDQDxB^qM>X^v|X2N#N5P0GA-^TO8QO1rg3$k5SW&c7_xR)#krLyKR(C<1_y9 zo+hD^vOY4XwH|l#Td*p9HElM>B;yMZ5vvDR?Pl|TpqyMT;nKr6=R0U8SR(ZXeH`uzKF{I}lxCC=jcq(5JY$cA~wNE;fxhj=2$p3B$W zlM1WjUpaX&m_-gk7jVu2H7pcbl`sgoEyGJHAeNQ2Fw>}T8QwB|D|oa#x>A2vnJ7zu zH~5_|MbDT#hpa78+on0z% zlUA5bsVN{#`ab89E4K>OM#9xk`z~n>Vx^{zv-pu{WKV8T2U2U*P9npYTBInD!Wt%S z1yF(hodUf!cTm|OGK)UW#Th^y_bE&K94-U?ECl#b3YuCL@7`BV0CN54+Mvr3YM_iWQuXvFv8RdSRR?CD7fX}Z z{)#V|D_joe7gCfWi}IpKrip&QZn!WBX*R_?^+b=RY|1B~*RM1`2PN&}93$l4?>UU< z8Tx7%9$V^>pg|iQ0}IPx?1y_S|Ag)31kCwD#wNG5_5X0^mW1*p zUz&uPqwl6U1D3A?y9J5mz^MY&6(^EGvusTpAe_dCsA95rSOA~|H6BLo0ro{WhZs*f z<3y}t!fM*Tm1j7#hu>K2(11Pr6JR)AFp2*jo%XqE0E+GRpr*JEM$H4LvJSOo!&k-- zaeGYzS-i;i)>oj&m{OJua?a0y3oHJuv_LTH2mJ-O$EAQq`mkouNhfRSh6<;)H8>3x zrDicgUvQyV4vqmzdhiGopgep|BleNx)d-&0>~vRhAhvMW_&p$9q&K+dIdt`h!4=7T zfA=4$|8y9T`jNM80iZ6)|A!3@abl3@9+V?KjjB6lOEOm+2P_Rx zID;t`V3d&8S*|GrqF!#V=wvd|f$2>p%8tsjy9+H1*$>8U`n%StSHMH5*Dcd&j0O5( z6I5IneS?W~Aa7*twd8YK1*C+hDypxY%vKIdB zTE~DLIuw^-hrW`?zj{z6$?^M-_M__6MX<+w9vp_bH(h)J&K*Ggu@T4tb0^A$Z#4(m z3(;#qVhl9VK~TaV;@zQNu>h!Hyh7R;cq^P|7#zA%6A?7YJQZ)DO6A`?sxl@6&AsD> zHP%|C-puR!D!{Bp$L#SPok~txNu+ zuY16#@OmKSBZkEWZeqyFxK5t{xyIOS)dK8u)5H+#@)!VKO@&8!^)dua0M1IV48~_E zcute{mLvjz<5N}#R4srN8YT>Q(zKxYm(89(*XM4*7QGKz9SvWSU85La1s=JlW#<&n zzlML*2%h-{W1)Xu2F4ljWfhUwG1maT2-qP|-35y|mj7~IgcaYkyEPy=ZGs@NU(xKj zQe9uM8>rNPgiH+v+m5^hsrMI9ibMOrR7*t}Uge0dY+XkPONQjkVX>%#5}lMve^?3x zgBERau}$viiB5v)5>8xR3Oiap$0%2*@J(RK6EvTbP*W79cC!HsUq(HjldWXk7l$)U z6q+kUopw8*yx|qiw>0tvGwBn+e(UHEyP$2lq)sPO&}Zd#q;|TuD{EuJ_Px^Sj2Wb* zAx7QR#in0OG#_3e&VVTDo*&q`bc5)^G=5v~*ZLdre8V2#z!HGOQ+Q{g_{cM$&BdDW zYAl&lOYJe^a7A0_9f@9tA>CJi*1V>8P0E^Xp@qem7J=Jg zi%arJ&Z$?y{>yMmC%Ra0D#6$@fI7a`Qn=)>=>Ytlp2kx>Y;A!WZp?R3&7en21F;r| zXaaL4APLb_O@Q)JjA~P<(F%H+B{{tCtPFuYU=1BC1M-&@plo&nf>Yt+*X7UcpB7g- z>3s`Tvw7h7$~CZd{jRa&7~bG(zNsI8PoKzY@1!)Tkol}YdLBgYj>PK{gnEg$QR>AT ztbW!D&FvV4Ia?9~==A-0`vlK;l)xu%tfIogWA= zmQ_K|fXAiPG&!5VAi`Lo zFC=4#H<|8hhRS{!i!hd1A0DC9p%|bS?RC?p1gWxOy+Et%&qCX$0>!>ww@$z@aL0&| zW<45(u6GZE)msIw5ry>-4aH91Ghj=B=fDmXjA5&^@d@HM#-CRh7f7~|5^69M1#5<& zEuGAreUyvLPx*>~q#vj1F;DzT(d*Xp-U&+$NcCJ_t=lhOi|0$b@>TYvLQ2 znlH#aWu~;DJcW>Ll* zx1AaVc_+^kSH#p$vgP$d>;mbAS*5jD>uay^vhFiNOFh~4U|;*}Z04_A{&fQgrCe_{ z0#2?RaFsHF{hK7a=_;<0;|+hQUH~Cmi)FnT8f9678Cs$JfLVCRvYZ+YZ)G$H!Zkfqi*RLjDcxjfxQeV)Y@g`Vw*)JZYSf zdbjENq2ohl5F)Lv5vO#p;SyrsLbT#9u6{$GpLohU| zxU@aMv;$$>_r-*7Pe_r{rHH_SPO)4Y?~l73hR9Awn6GLgvSOAy>vT0L#N48h2REEw zSrYg#JEn#-)v+Cb!Fn!|0t;KwvKb^BL0pnStN@}PHJk<(ZEhGc$NlD(7?)Rs^F4wt zJk+uOyyxNf*+!h9>1ojEns;r`EhvJ$FNo$J8dx z$wR*%@bLQD7)Zd$>DdhU)iD{{;ox&AM@-pSgsniw=){?A^2nm~Zj@6b4M+Tz>y6*) zL_i_f)A%Jj{tL@!S-?gTYs$Zm7-1DgYYIwpdFIW~RtOOlnHSARNKYQZ)F~=jLCC2a z@nhGgvBn5rXEpbXJcVL{8T0|fhpZq5qCH?S*m|75*8o3`?+lA@REEjze{*%)d*p-2 zPiR9b*pgq7G;my;ehBawDGt%z0EVD>!h{_l2l*F{SQwZbpm96a|CW{iV4hlq?8@M? zKcYsU!H$>i+!OmSY&VY>&XCE!YKE-vYZw;JNyS!yXqNo9Bno*TjB+W87TL@Zgh%D8 z9y|!Rb;OJl;kmtleS#9dEyVgd-r*L+S=)c&=K{8dE6W2vNX*W_EXuSg5W8#?d_XVb z>p^ui{|1dZQ*U^M{_kGz$Z2$qUOAceNSnr!A_7jx?|=XiYOZB_0Fyd_Lezj6ewqnj zA`EF#vj#JjxA$vbf)K}PqRf$9;t>K=f`+>!F&vDi_tYH!b)D+Ju8S^+0KPTSqJDs@ zcM1TbKzF*o;yHHxG$}OOC|TSY?YNHG&dmFO2zf0l)Zq&fDX(6_vrD}kBjoHkn>McKJob4R)~pv}HlVzvAWK*o<>-ORzJ2;6^xyJI{oaG!+r zG#FPj^98uyzJmzg?X$3)Eb$J0sH7;=)gDhGTsAQ>MLA!Ws74ed5S#}VO&fWtD!ppZ zqc`@JpjQoYht~c@aDr7F3b3yPgQs10Cz~G&Da8!XW?>awH$WA(G&RMd0M!~owBy{= z-Y+^t) z_6KmJqrq(OuNO;~r{Q{~fc7Wq<=r>k1xm21-(r{io!?FWo8JoNGEX#=XudCIJp6hs zIZ|`?d3ZFs9F)d=K(gUjFvcPDh@INz(HR%2C`!mQOADSS;1H%hCkgN;s3-x~9-!-3 z0Tl1ADHuZ`Jhf8QvW+M=1|RF&ZcmvdID?-r>LOZ@>bm7xry|0a+xQ^PU|hDAD7@V_ zV3;-mv^Nsv)*P)`!-MO%-h;KHYh|s@0O6Ylh~R(B{neHX#;mX> z-f-|aS`@vBg1XTFIcT5wz&cnWakX|~Uc9EF4LLyF=K zEb?PD_}eH2SlyVUW<4hgk2>Js+4WLQF@m@S2v?QY+i6B^QQ{OA{oZ}^{XhMh{-yomgcCLav$B;G(L;XU&GB+?Ks73Y zeR*?sv50A1;~_&*T>BGlx>KOG z!iFvIgc^ghmAYgX7@snGp}ceSPTU!-a+3jy+)Pn9+&enkM4S+&`9fs1KIG~HK|o8tx?HM869rw#P1J@#8%s3+c! zLakqtxHCNcHepRQV}{xqmR0*v%@J+C1(bFWT=w_uYx_{7Xv*wC_W0IWMCT}``zKJ1xKCI=o9#}c zY&?I@HE|{RWR}3x{#|@FvtYTrxy zp7;Tc&e+_bxh-XHI4a%#c9bE68>#{CBspnY(@<|SX{AW2Zg&Sb!)n7GfNB`J-`Bv$ zBeP1FT4O;ie(8IZyM0z!#W zH!Nfi!+$p>DKbj{mvInYUcnZR2hVQIMAsh08kHj@(jrq0VgL~5Y0GqunN+kTAbVG? z{k|u&WTM#;2;-k6z+%skk%Ke98wtbKmw+16lFtBDLL!I?+`MlyIgCH)E7at{&6HQEAWefKg-g-T7eCwhu+bH!ak11&7*xNVV zntO%-Dzc?EV22_DXbP;4>jtnzGJ$3X=B+C+CHCAw=fqnwWR*|*E#N8e#=QinNdNBk zSGNu9Id_kGIBC|q{L4u$5uEeCH)eH$|BdO^z?&!EwiMxOE70M| zRty8!B<8ipO8=9N?9<2JUsyP!r&iDY_93LL)!SWx9ih4Y z*Q?V$U!E<2#f*#?u7%3M3d$L>6v43ebOJPuujyz2EK|-#vit1CAgx%#g>dHCd^ikj z0qb=lZ}K2MoSwAbEWJDGohZO~C6mx$syISy1Q5auJ+>710VQ;}EW-ZhRznZb4g~6< z#oUCS|Fm-WNIbCo1TiW}`LBJAJZ>q>pr%}sVgG-hFsJ6qHOSD{E7sd$1ewQUijCeG06^?hIsdAX*=}~Nzp>rbKE6tz4G~g#L4?lY3;ozvp1=`fN2LAd zB4Drm1n`X;h;9fC8-Lo5?AVX;k$>_8ndQMj!*T^12$z9v0sb4;cv<4JgWY%0C+|T3 zdqzp8&ig|dUE~aaJusF}gvk2LSgtQ0y;IJ!i_GD6$xt0%|LPfn_Fn+seO=OR6UqU{ zeZ-~^@#hdtl4KTl$%eexs(2(4_g#%mL7Or+%2NS=fPVR@^i%H@;*~Jc?iyFzGfGQ` zbm3@L@_n|-S_FI_07yvZ#$Necd}Q)XF98oC6@jBNa{~Z$-vBrerYXOr|KRwN5Vat| z?jA^`_PTA~fKBkKNAR!(K!hON)CVcyXL5_1Wh-eU4I1>t1WmPWBoFLL5kLTv49>AeH(sfBbjCH!KJ4bQs zqo3TZ0phdhuJSH^pFi_C*zNr8zpLB%Qw$y2@~Wc+CxAN8N#0gkFgn|iEK82a$Vie1 z2uDIpM&1&H$Ra}JwY##Y&;kAEkzS~EaoQJ9MGYde6Q39q{2W!l>hpL9lSFEw5^VZk zn%5lyLy3)DG)4v{ebz<10(=48-(fBX0LiOE5tcCC=pGF4KnREYyzSV!bD$1SS-T*P z{QQVj@*DbaBY@|QOq=SiR_@f z-+c$1GA1tpg?CJ#X_TlyMEMAKb`Lu;DaozyvyX*H;! z*w{pHZ~(1^N!aXv9!%T0%%kL*KOcl1gW=#Poyw?(x{j&{ z08a^k(OHSUKXd&y$wAmB)lK9Co-_xnrld&cKL?fp{KN1#5>P;N+Kh~aKT;3RvotFX z%4P4IVAPHe9*vu`w#P&>s$$&$a?K+xWo}uA0-YGty6F$*VV(9~u^=x9T8Ka+lx4#< z+@>c-7xXNp^;Yc_18POy*AYo37EMkjESb-}i9hgDO1nGOzKw ziYkrJs=#5Yy0`(?I#EK1y5{mj9Gy(5~+;|6|&G7-uPg zECl^hLXOLPzLraM2Tpp3IXXF2JRmQ3Z03|>XYcsBys0nk z?Hz(lemcAhVK-3DWlbf=IoW6@tFElrk=_4U?)g|S%-y#oMwVg6(Eb&7xZ9lfl>yaE zDtp0RP|q0=hp;wYy#B;Aql;gHPKSXhK_>GgyQA3}i}DBD!rPhVAwQyC0zpz*Dr$rL z)gGXLd2cb@JEMc$KujyC$(R)6?W-dER;yBok&FC#s$2vMxfh!!fs}WG3-P_)Ef;;N zD6aCXeA%GcRF_5eb2UqA8i=~oCtAs{Dk<)!!&Fk*l7t&kQ5ebK=0}hC4y+0p8fv+y z)ugw|Grv~57B3P*x)N|ZPkTtuJu9T`fAp)Nd(=DY5NUXBB5gOK<*x8G(11%K!sq`e z|AvnjE7ay#xuHNYk3U7R&If3Bj2uNh+#BzjI~xm7Da$%ACDx8M;TA&A3f5yc3$v-5l^Fq2kpw0n`e+(}NypE-{Zm{#zg)+D!x)mzl|tU(cz z!fzx^+TJ8_sIR$QETsWJwcMk>axu@IIIOo=ikL z*e|L6yF8oRF4PPf3-?>{#<(C`662abQ$1dyTEkH2rI5`#AkXJ=LzA-*6q>LgCS`=A zzZ2rj&`;B0XKr_@!M)k7{lYU)pq2t;OV{YW*2PX2K`lHQl+Yq&Q%lpPuoW|Ve9Ihe z0qqZy6Ge-Dzad9RXD*=3Ht>S4`g-r#kI%yiHCUNQ*dnE4VQOif3D)nN+HI@WRIdu| zn<2P%CFM6*-15R2zRtq zRGO;92z&?OwI=EGEH)sxF{ObT%TBO;D`Bd9y}_#??PkQXDT?Iz-rgJVzU=}IMD(2m zaH9X%9x#?WR;@+TBeeSSzgRR?|AV(OD?e}F9z`Jd-X;XZciCtZH}WsDz^N<}3JV#F z!P-?Sj4H*^RX9;+6^catp$_TgDyiDuI&z z=L~To`g!ZBWk#?e9yzajrDx^xK}H*8ScN0z?>N7TZwYcU+Fif2R#PsOjepd?!1{`u z{IV$1lX_;}Ht+g zKctRCeqPk?SZ*QA>*E3{A9Sd_7D>hSu|rfzJw%&X|7Q707Z zn~m({(g=rU20Wzshv5EPy;ltCX;La~!9&=))Dt#|6$)eoHG00{>f@u0PE#@p2;}9g ze}}82vqMAPl)RVGv?!?HpjO5Gj=ouMd~`c_?MR(~uoKYadikU>&Zr5XMy+iAZ4+b9 zlTiPl5fRyOH%6{futpl`1Jmdv8+$#HDX}2z0d!&l(ZO6MVg4#(38FX}{tZqG$U(6b zxee}TOPsUoh+*uW>yc-7ppw-@Q(#QsiKX?$4)`)pZtbv|c z#Z`@)_Jx$}(MgNR4m=#c(B*KmU0QUHQ7yr4rmfHoBHMl5!(~D!GBi z&(-4B*(@uH_cEAt_p?N}U!oJ~2YO@)IK2}iqUqP@PHXHO5s#~ool8fOH1)yNz?*0G{CVD!lHOwq)U8%sYD zN*_E<07VNp4yDN|Vw$-u#ciHi3V#W8em#Y}vGQ)8>;{9X7o7{&3{vqY^5t!SeKH?4 zs+gp3dHEP|d^Gwiqg^Pbh5I7;K{YTRwisdgx;8S!-XP&~(kdvWKy6lEv-Y9d%=lCH z$AzdQ>HW7qv}lgZ!D~!53-8JMao-^~W2wlE{N07#Q*ODZj&%}>eiA~i(%_wELt8i^ggmB*FP$uN`Z@ylq~TBA3YT+K8h74YjFKG~skYKSVV zop9Pm^^BsI#ny$UgrXR+$imCKkZeJVdHOmlGB7uEez&7E*rySm8-UIq_+Mc?R=aJ2oMj zwfXXqOLzqhCb1pqV6>QcF(Bj_tCGH((JKX`j>OFF+-0_W`H z?%5(T$hZ!ls6bESN&9)k-$kVwqwGg3&YUtSTs1^DS-HFKhOoZcvvdKne96>mx zW62-uI?DXiO+s*;ZRq=#N;Rl&Y~+I*sEC<1CxjJj3qFcAo7+1UMvs26iqXJXT$Z>G zwQ(s((_NC2BASG&73jAA0vU;^r5=`SR$|$df;??dom;cok^&wtBN3k&xr@nmR5>_Y9sfZBf1#@38j!cWEH-I&>lGpK7i20y2^& zB=52WpJ2EIgreg&J=k2)6$%hY?(ULHV)DhzSMU`~+3vJCf%jK4^hMQflnCL42ci>R zrLy$B6f0%;PP7NsCo zcS?|OBK4gA0%!Q7@M^mk{j~6tpk*238pn!_zfeFbH{tj=T@i+YrMoCil>&i6G?Olk zI3{b};HW^q*}SF(M8m1xws(fGL%m*&u>1^92x4K!EAM5sq{~$}fB4jK#CO@Lf-6ri zn*p~ju!wS~MG_+s+D{R&`bnUG%!-;piU^SW)2xbVsEc~~4beaF;~HuDSi=a6bE@yf z5Ffq0TeGd1`~)@Eeh(aLmd@?fKCozXM!@U@m6OYY`8_??G{3H+`b%i)(dYQy3nc^? zu*DOe1iDQxvYV93AKK*vV2+{p>B)F#Ph*wbV}JPG%s(TDTrVSe*Q>~&gzVp_s>rt~ zO>qp)vxB+E{uWul@oMxAM^YCPmM;5CTtp7)WiE&c=wdb*URx*vMzu&Q+V-$fBrXlu zj6T_$Pn-W$)l*>X#uPtpXu?0w;IbE6nZ^=AK zuW8?V-V8`=h<7hMDyG1OV}RcNJ+}DmDP8naaG016Hy))1ZUNSf$FXc~a_zKrR+8f! z8Hv-t(H{6D{vro!wV?qSDe!A5HM zne(66XUC6JoqZd6@Sc;;);SENR&klq;NACQy+F>SS;;Fe{HzvJtK<7{vqh6}ieY_y z0JGeBdSPyQcko<1fDdpnF~ealM6eUWUa!2_Fzo>jmA(OJjH#do1rti65Ig)aSBDh3 z@-T;K0^-I|VeojaZRsw^kv##!GTZka=BIfGn91h{U+uDh*~8~LG90sTD(o8tlRwlU z!~rIyP)Q=(k6f9TLoe@tegM?TpCEE>W;W#!hhUo}BJTImCSu*F0QU<(zq|y&rvTvg zkZh_OvO9ZJ&{ie#Jn<7A&R$Z;^0R5x`>Kxo=MJDko#5;}pfjol)yORhg*jNQ83L_2c~yAa83sEY6_x zI_6Ff2P_jYX4c|VGxU#AcN{gtPk0aV+nEeuRuMTsxd+_765GuH?{~mb1%OK76=6!^ zRf&zNyJn^FpvTXT3Y|;)e_MS|%bYV51%>M=74fvFV5{cC#+@ioV-idk2h2|QY>mr9jYjm^S7)_+&#O8V*C z&`*Tkb6gY=XK4|ng|>XO>xw?kwkL01qc!Lve=zO)UE4dc!z7P)p!)9p{&(a-++&N! zT z{yWT;5B8dGrX=Jalgbkrw%Cm+7y0~iA#}k4uU)V%%69%Ob}wKahh02cWjX&AJ7d`5 z2Rx*01ph5|y430}+B@Z0>rj2G_NI*Y6U81_qtD!^PlQ_x{VA-N`83E7tg{wE)I7-m zE~hZSmY1u0`n~g9^{poGaEJ!wBAC$j5klD49WW1bzQzK;NUamKrP%q^7jdOpYM-LL z^OZ5~m(_mHIR$iHcv%+`YehKb4__iefI9h_VG*aK5W8stRA9=rSvVRGESQZW} zalKldUj2Ie%ciE`YVD?n^F=4KuCY>+OT^J=*7}Qxennh}SQ*mQM?O-|IgbyE3l%0L z*yMtKyDVKatV@?i%ZX{|4a!5h;;baZMI7jn#2jlK@80+v?hJ%}oyrOw@`;6yk0`ah4ykzAu)z|m@03Gm zZa9|8n3Ug+M0|U#$##?^&cZ|`xz1v=^|{aOHMioacB+pG)qcrd!|UmKG4SPM(Di1k zd$B?{L*Mh~-e|RPhlJny<^K4GpX6Mutf|?;PJ3ZezAnk!?pMcYNzST~L7)0(rKoQ) zuPl50;DusZ2cHVx-kvcs)I+9f3dHY|$A^7=ru38%vcMX;?QZkdaRj$(>NwD zNz!zmDI_x5Z*^y}|2%e3P`&h^aTpnG^*Aq{j@C|nP|X(o4H`wpZ->kJXn0DR&T4ds z|L0g6`)zhk$c8%rUIuY(Qn#g(|Iy27MF#FqL4}%l7wVLj3ekrvKKga8S=5|binE22Y z_j)qj6`a0}@rZLzKfJOrvCCm9Q%JT6msJKdS?ZK*!R0`q#zluhCAf{QB3Td}|82(f zS@aLXPfs5FD&8K<kqV+r-43hBdXke$eg9L|R$B zxs2Sye2JP!mY7#_8gu{e^RJsZbP3-?-A}j2KRmIlz@~bZsW?}|!h#kPv9I}?A^Uke zBHt67HZMp}whPXBx69X=#21H?!s+Nwz!yFaY<(=JrNBbYfycts%OJwxu+iG5?I?ne z)ETOh!LMM$5xNPQD%*V0tImOSXNPFpr=pO2{?avbP5O1Q?R2)7QMAkDWQG9#r+vP} z37?TYgP@sn^xN2Aj7MJ>Xg>IXafe0?*eX(L!aoMn(<8zOnAW6+g>b!z{c$V#;gdhP zed_c;xA!9vb$4XtCSdq(FP7W#M`La~p8vzF1!c@l=yX5Z1ubpKWo+fRb2Vi$ z;)}G1hbr-vNIhcHsgx&X>uEP1O}Kr!HwPvt3)VL|0oXYIwdjJ8J&8rD(S4y?Wnf_= z@7>?1>ACK6_a}{%^*Bh%tLiaaqIOw-C=Qj~d_(mp<2N({##Y3VJ|x0{#QaA>HeV{e z-`Z`q^F1-Q7UcF&NDxa1nc!<8rwkXnd3VQr$d8c6v?8(4|sl$VGGWY#r8;GUD=2?XKf`Z^X0SFX&9}TA9%} zY<1t=-+9qEbjz9&Y5E!1eg4$t6^s$Up;$@VcLBQ1dzLT-yW<4>_G@7r=%r4$Rx9TEO*;qEZQ=}HrW3g|(zUTNvad^T z+&l+~EYVFCVw9n7W~~_|=_nQHaHQQqCl|$>E*}8Zb2Et|7pgB3&04$lcPg)ywm-Tp zS10ezieaXx$&VJyl2_aBo? zS&w7S6XdU6eVl=VgTGXek<{+5c)!@*F>chr$ zPfe6M)TwT;aMOh~;d!n#TI*$SvES8&``O+4++1;x;+tzRJH@Lvom`98a$V5>xR^;K z2j?n+wlS-yDXvwWY~sEe!SuhzIYZ%bT&TL{H4m2<@~=R|OB5{@vVV#FoWSCOo}6XX zkURZgs^(>n=h1YII32l6bj(X6j%uR~$VA4q+oso-nZ%jIY%ARZg;7tgPHa-iJFB>(2@GPi{Z%`K5V@+CE>G4(q;SYpRsOqpx}H{J;8({ z#sYt0qb$4rhkl5|V@+hF)}xZ{*SF_RqJ0$ z@B8t7;{Egc>wEnv9`>F+duGjAYp%K0S|?0ZSq2-E1oOs?8`yHPlIk~Z+>=naOmp2r5PU{lW~ZULZUS)-rqNYWrIK)Q zx1!=<<7VTa5y7OQq7rhqv=&sCl>X~+@S8A=Ed=5!$jK!BZt zlbw^36|`XW@NtHid9yluJow$o-~C8hd04pHxkBt*oT(7~nwh&ig$UEoAYSyZKflKb zv9td3CTEYorUfR*j=01Ah>e5&Uwwl|g%DQ-C0v|b-K{)4K>s2jH5BQgFAkkaLDuxr2uxW{A;=aQ^l5|GmY3_NC%(X9Z?`{q&>1p8n^xzuF72BPRY2 zLHw5T^;IBd5lkWWe=V5^X2l}s?2Q{@H{>LrXnG@U&HA-z4qtZdn|xz6dM_KigRa() zA}=Y!bmz-E0y(yZ993r~=0<)wbOz?7so;}?*{@|n)<6x5*NV)l6cMz?E+&A~U7fAYU>BPRDhN00jd zCimZR`Tz0C$%_HQ5uo2Y=wa_q5#Z_)l=8aP!z`A8YP;dFQUjTW;A-r@MfU0~Q1LZG zEd3xcdsKq3rrnlb>KgZIej6W7I=Fe7!WjIwIiBAg*7U*uFUv&Sj-p1A?zBQr8vb*a zmbq;TD=b`%0`W2`$>&cA#a$<2<$%2nm@ieqtPT>>eD-oKIn?S6`7$xfjq z5(qPezCr(Zb@>bH9$RmBAwS07 zz9Q-cuu1p5tRXs7A>{DSvros3(iC}dukBY>`N#3J3&Z_*GIE)JxgI>zBxaZYn>T6e z{&%+kZN~yrSimks0yF5wM;g=Wj=@Ten5DfRAE6V$`{3_6_#h#d5)X1C4qD;K~noalAeGd$s>@8T3SqM|D4*+E)3+|GfQ)e-7pEH~d=zOWEh9hW~GJ|F*RM zZ{_}5RsKI#OI*U!jpB0sTF0}`_$rds{@hr9d%)i=zz}HZPirzLLU*GApfwKTwJ!4k zm*-~=Vo9ld&4TCPP2_Xq-X&b zFde11H~Kw+zqaepYnip=AA9Y5+$?LNS4y&cIAfFU?w3ikHr zM|8`Y8$B$Ru;uK1#Cxn!#bXSBL$gUfWYoH%1D1Wy0E3G+3W?%p5bOwx5LLue2#O&!ZN*5&2rM zo3wn?EL6Wm=K8$B60c0FIBsqgdh4dOb(6GdzPXjPkxhBiP9xv^GmY#gC$nBA7hL1T zko6%1u-yUMssArerTH-vio>aTw`^zkkzyT}^V9vxMimjT#KzC9h&Wxo6N7yS>PP_m z>-BCCMUK1+^y0^-7lhR69x#g`Zhf^kV4GvgF#Tnk#mi;${o~1;U$J)SRFl^(5a~p- z8@=b&MCOh8y@if@EV@4^vYLkN+p79Sel5kQ%z5++ha1-WS>zICk#Jge_prB?Am1W~ zL@^zMr8f?|fTjB%CkorckJfivuUfKbD~ptzid!y@r_DM-gn>GcJkFblv+MMl_a86T zVGl-kDt?bs?pvCXDI^m?lAFG{V?3IXX;Z9IUYn?{{=UnRnU=YLS*vKGd7l=7d+QEa zLB{jOtvHhg_gr(2Z)%ErNe*QV>v_rID=7iz`(5!yo{7KaIJDLe*0&e59freS8vCy2 zdJd<{%5}`xYry7QH*d>5j5~@9JeRkQF+m~$SUQx^Lt#zCPYSijD2UAaw!(FgR^XaU;$_%Aw1ghKzg}XGkFGJq{RNWje z*ICmhD3bZ)Y%oV3UQx9WNaToxg5O2v@uqBRC|BVrJ9WkLzL8d@3l!0_r5L9>7yzLJ z0b7z>U>>-Muw^*F6($+T|I`);xZ2dwlaTJ#hw%L9e(K)HE);5lcq3mzsN<5Vt37K2H0p?z#S zemI;-Xdk?JswzPnqsXK+fW*l5P#D7}?dh1_1DUs)^EC?8$qwbs+V7B69(ifFH3R4I zhC*2IlG>fU<@9F+-&}!rc;V`+2XE^SzC9#VAIuQ{atdj@Md2+b;Z2~W)~2Z=k3u-h zm2_0^w&;7jb=E~0ca5qilXalWbgj~E@6Ruu2%glkLaXyf|S7_!%CrB!Ds z^d<9oagp-a=j>HpCOo^kIB{-~dsyc6U10U}@O5n!bs*8X5^RaFz9gQlVCUhQ%UfqV zZ%kW_D{+^WGwX{?jc3EH@hKyiwuxx7;*95~Gb6P{ZBs||BlDIKg6tKR^jK@d@__?c zV?_j*YubsIlh$82v2dUHapIIn* z9_0l5Q{g9#v;&e=_mxB&=7gtvpKkei`tXjI=w<5}S#QM#ut(f~^z#kZ_X;a+6IiqY z+u#l2m~13|<`R?j%d?{0kB4uO?MakY?hYWVdZIqcjE2Kgt$GE! zg~Y=zo88Mj)6Xp(G2*D|Kd5%pH;mR7KWOB8k)wl?W6|}N1l8uw;5d~LGWiF`NzQ)i zprQL>XoCdndyui#!ivc5MRgqq%_s-H42l)+d1{gpgmWqF3vb(S=Vk;7yshu61+1#lP@%B=~v4 zwcnXsiLNq2#;$BW%`Zohk(cQvJKLrBA5Q|T4^gl$DZ3o9kIG1v6I4P@G_8*YWm6`j1MnZW$^DwD5HbWo6Gd%G6PH$Rr}!NVd)=Jc@F|c(W|tGUvhK16yN-m)IzPKIA8%9?B%)2%xMdFSc5&GvxEs$v?r z&*jrMJvv|W@;tzC6RnpOu2AVZ{DRU<4?z-@XvPd zKDU{xZ91AhSnPlHGmOExVVS7zBhd%VrKvuq=<3UEA^2MMP9ZE`Y|(f&MKkyHl0=`8 z^^;bHzIoy+PAq3ip0x*IU|ql7ukQ>h)k|mpT3VSAs0z2q^*wiBVbrKAkJh!IZqW7_ z)8#0|G_?3M#@_2ardzQbE4xjCRcq}(MFbwi?9#W+v>VRzti&?NmwU-xh2xObTuS>r zWb#11zK4ZiWqV&t{q| zVW9PGrA1d~(hQEbtb1bOd7FG$Fj;IJ*d`8+hlcl42pOa2S*10gco6~W&&5QC&R;YL zLe?@KX;j{PV|yb=y2ZOw+6XczRT(gAm(wf9kZLK@T!@r}TR2&mwpsM!ZOd-sm!lqS zBo>uN7i2?Xw6o=;f{=o^?1Qb*6p}BPuvZu{<@ii9g$v%{n09UPzdHD9O4S!(qk5%7 z?2n{ugi41d82!U4AMtpVza7dZ6-r zuINO%WbEdOgZ76@>S-f|IF>M;J*6jtMvsF_30-QIXC+a2B#V$tuHfUXGoJReC3Drz zZy8Dla>9HczRGxh3cW?K#+1>A?m0H!{nGu&MIJ?AR)Qc~#~Yy)5crJECLigYvg{!8omThcT?Q*6tVI!LXR z$?JQ?hg>3cXZM%78hV4?4zZ5v&fiu_RU6nl`rcZ&R~d)2K2jJnn>#8Bt5cX~y_gTx zl1^WTYj3q`Py~PPg!h;>5v2|A$wh3A3y18PTbI8&S{6-<4rYq^dbgy#I>zEq^|@V< zkN4m@C-OSpfR4?q!>EEA*~JZ46^~D7_txSSyImpv$D5tmmzxy1QJ;hF85dyAD#mC# zIln3eX!}w&MSkNAkHZ*5fw^s|T&a$jr(ThL_LKJKZ>$+>(Kp{Vc-8VW9ln#HP$Nd` z5fYYnD%LzJJ9`yspU9z7%24zZFJQ`8Z$3*^ zRXJ0}$o_g!;j8VA2XmE$A`=^1Q)>Zd?n&Waq@U~RlU7ohaiaAtUQc#Z(yxZK#w8N@ z3U?Bj3HW>(lBCegX<4Yn)r0y@aa?0xCcD$xh|lYOB_icdue7Lj{P(MNagqm6lDL~Y zQccXg-hsmys%KBA@{kN5ZYrJ!QWUrgxjhVw;b!lDD^lfG^X9!$6GmT?8=nLPQvrXf zfqG;ngiqM6&trdykz`r==S=e@g;j~x%09ZYg^-=fp0eLIijy$icAhyIuM-|zWczDn za+1xGh_3dT{!0H2wxYLqXp(X{UV$kYiEHB~?tT{%M z+-4~pd`OazXGKK7IkrtUyE<^21v~x{90961Upyx2BrD4-X}q#bE63B#yvgpMTdB9L ziFU+6)OTwXyY5cYWRCUp9)s{1($~B!qV=1t^q~rOm(yNmyD&$$MTxpoFNKF;I z@p>1Y{wpoXMO~gfnT|jR2DhQ?F3~LWvkk=t64{&VIQ9a zLf5_WszVR@3McCG{$sqF5(@vO}FO(bE zhYmy(%Gvl*r2yOCh!HoFc#ba%(;6~5AJyLM{KcK4tU%M+n@P@3qV)_9en(wvkj1IP}3e(DTM**rI zJNn#)^*z3!GQu8ElRn-&4u)`}9las=qR0JXobtvfC7lxEaOgYHjZ&%G1R4XDD*X(v zk){3NM;Kd$+FS!3jz^(`J5G}ya+RiE{^Wt%Rwc`OJ0B+)k?fX8@xo9hL^neD718~ypC-s<~OtUSybk*Xe7$c&|~wboJI_J=|_-Bs~QsO|>9NjLcWuahH7J@}EX zPiXVFdZb59hJ5)>!_Ymii-3U`U11(WaD>IYVHak)$t<;13rYVg+~1RNaBgRsT0AvPES*onfX4P zpdR0mhfQo)Yldy2+B315;iV0hRP+c}#tG zTzA0FV6ZV~et{|7wmdFpWBZ+IzWRq=ac-UuRuREjjYrmIw!%KWRU?JJF4fxf`>h9Y zbdt}`R?{5_N+WuvpW&e|y#PUjXZ7EXaG{QP11{8j_w(}{c*8~S=aar^q$|^b`YUkX zUnPF8X?o!dm93E<7RSfzEPN2&h4xkg@~fcQJR^+^92yKM9;bF`w6zxM79kg2p0_0` zU7AA$?3@z5>n5B?o4&BH0z^}o??7gg&OX1Ar3RxsU&`WbPZ>jrMHB8h$*NvQJ{~r? zMZI(AQ#4N=EX6O}Xo~&)2<-9s@g{F92ZlqQ3~?acCr=GN?-Eov2qW~ME=;s zv;G@=SD&Q~hPcqV1IKjtSiJRrbcK)Uy518a%m^{+(%<8zJDlX%Qgb8r%XQ4+;ENfW zs>lMk^zQ2lNoVmMmbFS+C|@E?u^D*ALQdb}Nz6Bt`MY^liFy{)V}6`5@y1u~JmIaz z&AvNY$N4;Nb7&*+P#oc_g(a#e1w@=;-G1mEs{50+)R*S)#k2KjTL$4z^~kMb4EllY z%UlCRv@J6Pr!R&f+6!tb<%HDXHPP|;meAy2V!f!b5W!lFcn6=IgkkS~S1gS#bH$nWr#pzSPG z83JL>`MsQ2zQ~cug#&3uXq?LqEVRNUj|EG^+extC7RB)*c{M{pD|dbKh?01S&N~{P z#$w^Wd2OQw%^}SrwI(K=+&-hpEKE(3H_THcJ~4sH70c;ce@ZV;VRTrF>ApzebYS^R zGL~AzL-gV2P^jpa9%EbV@H#uXYO?%Q;l&-g2r*|Y5E9+qRM^Go7C{Q+V0*S7uPZ$B z!Rqx`$)Ju{>Fi?MBa8Z(6ve#k$s$R#qtRiHS1sy(?$L8By^)F4ye#V!bZ!~qok=`- zR?+7UZGeLqvlwzs257g|ev~=qV9mkS4HA5Vz@e&nV2|iT=IvLVb2W^J?4$XGx?9q% zRkek*G;BDl-5P=HKLNfFEGDiudZk?a!7BpIhyLO$b|>uE-k>wcE!f1G`X|X`O{X|L zd-E>_I@;1;s@Z1Wq=+Ut66i-n|{)QEI9ps0dr_SyLAiAQ^iA0`px~!(_rcJ!sjhs6oKdajSI&E5_cOE@~ym+ z?70mH(eDT5U4_#=qT?M~$Haf5;Lrh}j{ljyj#T6m&H8QNIO;qEW{2%QB+o7QDwZN^ z0dB`8IK=#|@0(q~IND5?+wjzXss|^k4c1IitjpjB+tl)wQZ}>(zsaeQ`KaPEzLp&vOpk-i$BRi*E)@Owz{{qQji3tg1BLszg8JOH)HU z;6^kQ`&}CLJ4v%^JH=5AV%H41uh1R@JFpZyx@_%mtL2!*eqBY(#AF?($Ef!qri%)dw`mgnmH^=*}jh~Kc0U|womKG{TfeGxZ zH}>m;Xk5j?P`?7QuS(_mUi9Lu#uD~U!^EK(TYeY5~Vg4`0uG|yKKKW-eD!n;1J+- zryao$4>0Xf4m)A2FVgxjRC_*-Rd&TY$Y)dlxlxvJ`b$zJqY9}+X%5%8`kCH3@-MUM ziL>Z@^9Wx;(-Dy2aI;-v%#&%C)`rl0KZGtXV-kpLx6<^)u|9lOlNXYcf3~&-mQ<35bo`)YQ$}_kWzNr?HZRXQu*!s`i0AF@_Jf- z8ia#w7F&U!Xm7cX6Kgd+Frb9^vvp#yTDJ6Ae=H$l$?NuyV|pwS4N?>`gL$6obcfh%4KnR3un_&#t{ZS{i#fsEnfO+ zkDLCjdy>F6HH)usJ57yfbbwgLqT4aStH3G_SWb=Ez87^V9S^wy{xjdBwS}|bD!XC& z3vW-AR4I})STgK^)A~>@37J_Qryc$4>GC!R(No@v3(J*|=j2CbO^a8WADZ63TCu-5 z=oq^h2N9e-oQ%0Vm-p(3CVJQlm1D1pA|xrPj2iHh;G5!U_XmueV3Ax5y^6@0rG9G! z!w4_t1J##c&kV;&*Ld(;?D=zsj10V_;0^ZtyD$*?8C-E_c@bDiu?@TZT0>c1^fG&r zx%hq2%Me?ixEF&esu2_Nau=7BT$jm9mCEnOLcR#vtsL?=9gZ2X_ZPNaUHb7cyyjNs zUwHZ8Eq}YyNBTMDk7d;2*d*NiJN1h*M$mPZ4M)>oUwg5k2=d{28d{k1q?Z%6nB%&t zk1x_q+;~NLe%Rv1gegbr8xy|`*9Dw^zu)6f>*oBU9u?2otW&nulw z@3t)dnb@;#=YtpOZC_!eEjC=W*5ZjxAv96$ba$VYdu<&=1dc6}l&;VY?($*?sV0KG zW#iZr=5Gund>LwH*_U5JXt;AFs5{f=A<&o>`>SW zVl%W89=k{OQzT%{qZ^T1b|#q1)EFrOFUZ&Zk*A;a)bgkxqK$sE%xK!s5D7u!u#<(=D(_gXi$?RH=%R?I^?h22vW#0!X*OkI13D z7zuvn3#%GyGWf$YoZ&K>#oB#+kL<2uYU9{VcG&aC`acUVd;erg%RZ7A=c*bN9s1<) zYPQ$1SiIe`Ryd6_P065_zPcrCg>h1ej@NPGhjZgP${l_&e7ae(T-a^sk$YKTexsa&5qJikjhCCuw-UJ% z*}VsmXli|xTRgi>ZEFp}w66snrVUNouTHwIyz<)Q8KWK{T|L(Zob8}tA5~JR{8_NV z$um!Y=zuJSW-Hz+&>NWvfe`h(n+_~5KjK4dDiz|{T$*8V-)7u_9z$uvnZ6G^-0Vgz5~3z5sqlh%3H4s7VM@bJE- z^f2W)UCpz8#2)0 z{~oLU(t5nR(1Ji+8_&35c9&tJfc&PK9;<#e&WBc?^k)ZQY4;1{F!hsL0f`QjBY#!5 zO0ULVtN*j*nO|0Q-V)g4>KrG3meVR>KkNl`d3O}q7slWWWZ&wa(R2#tu_IeXq;FpH zglKkIzYB3rZb}_Z;=G(3a7GFgd@Z=?^33$@<*4_8O@G?M^gJdODL(+mQNU?bbaS*qTthw0<~<%yby@pw;X!OHKugl zVcMfU)6cJP;bFz>w@9*J02fXo`_g;{p=}2~r!2pdolCLUSqFSO``DGQaD$qF3pjvz zQyPAF@aT)Zj=O%Gg!LG=?Gnx5Q%mWVMXK@Ip2I9CDq5b0pKeh)PE=&ha$^LuKRqK* z7KDvx5Grqsm#5P+rGQf*=1^fDTrXF4#v)Rf$l-HLOP$@BfJ$l-)Ekj;!2A%ia6Tei zCUR^`ij@Gd`o8i>eed~7qIM(Nq(dx|KDjj>VaomLno2<7K_48;=w1OaTNggzFvr0G zE;4Sac3xwY9g zuH$V;?L~bad{P3Hm)28kex3eRok6i~_;#uNJ8a~p$|S%rfHx+IIhyVmeaKPz%auvb zn))?2O-rYECtLHcxq*|;hh_ER^BYe!whQyq8u!|zebz4ObQkbg8G&6QI{B$vr&~YdMIYzHU_fhQND_%OsM=Ij) zQn|kK1Dlz1Jp?Oq(S~x@cGd)~s2ZoH1(UIa%k_s?1n#hJWo-_!7TA1wlHosea8#)o zzCgf<`2?iR-6}KBlg-K_vEvLfCYjKxb2c(aPL_2lK2lJeHyvUAk|s2t*(dijc^}|O z%{H~{(5)&Z9wQwj_6P~X3RCDbZ_HTq6(EG}$2l+URCZC&YiOWA?2fk2K*|e@GAlj4 zX#2f}vYKTnAkET{lk^J&a*UchpQ04zy*VfRbad-c&D-55YKithE#|n@))vgR^<~}+ z=?Ie5G{3TBi{V0h-J&_tS~EBLEp|a8@-J*WgA&=20K+-l8JGNWdGSjniTkVLM!<*m zRB9fZchalhQ@7D2VS+gtm7Z2=HYiOQ={q3dLUTax>iDALXD>C}AU|*g({RH-OEmv7 zaTdxGlUV(nEz~B_Gf=BBcd20-x4t(hFz%593#xfz;<~QI6R)rt4ciE`(*A-#J-GId zl_t9jyP;G8f4jl?XR z0v{MG%?7D8?c0ucz>0t6*3FLV)9Hswda1-_m-mru5jlKJZZWs>4Hr4+J)kc&sAB_Z zE2W{R|G;}0YiI<|1}}yJFE&lW(-`wIIKo?cnBj|hv5FF-)n%UtWhjkIUx(fyFFH-d zUPwG3X~~I~qYOA*fcuv22Ejv0cah*c%Y_;a9S+aN0L`CoMh`f{{VkZI{f~(hqjDyV zywgzLiFPK(QIHnEP1HLEf?;jM&6W-pkGy7|#zzdIQe+)(=I*}r?fuBa0Z8C}7ukL& zSPN2+4}qM7nXFxAI9a75YJ^&#<}&2jYTy@cr_po8#9O4WHNV_H$RTWIKa;g|;YTPp z@?QGF^@EyhVB&jjMYC_3&3xyqx4#aG;xy@8xn4GGYWa^Vd?GQz(2ShV?N<@WhAU%Dkn)!?R@iP8wDGb>GD7r{2obtPu~bRa`XG_);RcsA3HRu z?ntvy1Km4O2I!4mc&nUP&|>0K9fdc5w9fNSoU>~yxrCFucT?9^@rFBj^par7kUxFj z7ZwYk*pXgIs~vSh<(MvdgXn7OW;Pj!OsX`tu{nq|wGi4Sd~DJI0ZPVqNAZY}G4e7{+GnXMO91j2Cy@)Ah4EhQ}+~KMEV>A`b)14>EO? zsbyz#4@K{dj^OljV(H~%vePCNVV~Wm?Rp3)?gjJKi%jWSHz*ZZWpiSe8uKyaekZFU4Nb}ABM&9&6$(P6@VKR<^xqUF1+*4U2@MR{Wn4n=xu z6+F=;e5kG2(jvLx>Mx+J&%Qm>C+MF!?kX^wIIai5S9ym#eKU|IxKy1(+(;v)R{>ii z#i-UW*u>39Nw3^~m>tWoB@zmP|CSKtyjN(NSy?PKD zsiqM<@?4L^s+zoFx3qtL)s*`( zHF6L8?nu9K^9bG-mAfLRrZu&|r|i#&fDAD8)44qe$1{ybOfXFpAy|i2D>KZ*2rHs{ zK;hPg`FdBCw8O@@@_CM4W+tC;*IRPfuHg4d+RFRj><*0>cV+HV8f%3ZSv4Q2s9$l^ z;KZhhRVjaHARvCOkP*Xle>*sh}jv3`A7r zs-D|fE%CV4QG`<#Klu z-$s0w4{Z_9g5jp}LxH9s7;gDGM9<<=Nb^wyBhWnc%e3CT-0>6c*)`aC)q%-H8`-R` z;PHe7ir2d^u{&4+&si|?9_3P>y$2M{cVq%kAgU{-ELF)b$JY@4TFik_GpDZOOVh7zpE zRB177nU)k0un(Mw&Yoy*iA9g1q z)XLV7HZm>J51rze2D$vNrI*lZKgASE;cby%H9Fh}p>=_=i!WukHlT5W1h+CTV!m2y zCJv3co)KtT#ZmIq0Ih-6EngrlklUmDC+;Ft?e*Xz2_wq`X2RS;;@?y-*=S*=A7dx(j&WJ zS$g9N}3fOVhv9=a4gBB3|}Ig!jZYT<6pUVr%rTNO^<=xsn9-ah9Vd{qkpY7}?R zJu&!*hXtibF!)j_*EKP?+n%xaGYM9rj|`n}oMM0}-4yS))rK1n77xy`#ZG?LEg&+N zf8q@^ZppFqQ54&k78UB0G&=`$YCj&812l)u10}l%+u<0GBYB^T;E&JN9dy6>k?3Z| zy&Zk%s)LvKZgk*B`AC^3b4s@-Zx~h=H9FOgdlP->Qg{9>S^Zx{1UV8wYXmh5@&AbS z&nGCNj0%a-@?Z56|4e_5x&v;mtL7@B9vF%B%O2o3&bQ3QbRb0Yuig=#d>8>WcNgF^ zX{v2xkehju(viN%)b?NP{z2glBX5qC9;Uc&Pt~N6{Z-HLrzopi0=)QlN`>7$Z-8=z zRued_!Wlx95PFV;nnmBniKW&=Q+?}tCjVf$RV|T0Vl^2^TgGDl_`~i~megxFZ1V&h zntTr?C9ey#s05KOr3lYKD36u(uaYe&3nAmX-0i$cbSfMkD9Cyf{k2tfxYG9vWZEK6 zANL1d9)4v=F98{u0QEFM*70;ht}PA;HCz>>0{`Z)u2axOk>9-ONq10E!Idp<15Tr~ zp@1Gw2V`4wT7AcXxq>`a&v}2ZR78J>oMDgzGc-+LFY|kVOh|Ehvf&Xg zuICDJRUd&89l8}MJCMUl$MNiRFFo+G;iznU&Tn^{9USXwL87J0RP|TXm26p zK5!B?^gtUhonW8>4rwc=P0*d+3KoS6-b`$^LigSXLAC zn`Qp~_X=Mx^PeA7a!@=uiY~K%PC_*Y`LXBLeee&YGrz7TOd&%;{h*%V3)|S9uA6t_ zySZk$yqrBa$p%bsx}0kJPsiOFNK!wY>lZ?VwuNS;Z$LHC4ZRnF3kyO7uT?4QN0^Tq z$v-^6D|8NeHkx}u)>Kis$miH!hp=R-Jj8%w6mZ2|M@B}^(S5Eb%5*bV5cNO~d^&^F zJQx`VCp8|YDQEGA$xX3#`%*h=JLXvfa6LtknE1lUY#=MfG<*rsDDzvmr!qbfj% z$}!JC>|On*7q{|%v5bJ#YoHzg+3A3=aAR7RI>zKs;_)jdXO+1Dg}?|RXQ!zA_mgZa&!j?6_XCM z6W@2^K=z>rpM;uzK<@o51hD9KMRM(K_y6a*h9TBf^Uc9OSLa{ukCq9@UylEd#Glpt z^NC6gX#3xVbbl`Y{sg53q%j@FzpwhAewjhPYLCACs|4+zZ;#3W^P*kgzy0?F{^%Jf zNRUMI%Pj5lKb1ck5f7v#3;o~zzCbKY@@~ezt9Mj7!lR}L`yB_NVQFvsmKrxV8aB9t zV|}T1Y0{ZXUlPyjd+cfH{72VHMWqbp>?W@EZ(}>q5ksoaPxG~Je~Hl(&wf{}0L0My z(}Y3Ch94}yMwxq#gTg+(fOGGv$!5QkY3HWC9eI56sIsVMz_~)`M#kT=g$GI6c1_CpZ9X~O*&VT`d^!_QpQ?0l90*n~2 z5b8Kn?*;~**9+K?4y=6p-4JZjEx9`9*`y+S4Fi>T-8r>bBM2Yx8N`6^DTF*j2oun` zK*2o5QZNv5r^1DTEN6v|8F2bd?0#}pL?Ze>X7~ zdE!qn+x)3eXh;bg6y|LE~LqF$k$2;eUD|kUU!FF zu-@V);HBQEh+Z54O7bmXm>JZj}Y&bQEJN!1Llhvz*|lL3T!jG6w4yaa*3D(=>{OC@o#>TQ>ZhIEu&q>Ocfe9Q zPgNJ@6(;G|xX1t%XTIuOX_`@~=T1Tn>K4`}_)_EH1x?6s$r zjRoFJolZ{pdI8q)Re$uBK3f~eq}I@wjdpH)^5P~q^i>OhwKDkVT?s1zNvsY1@&XK6njaKK9IBPbG95mYOPgVwFM#)zIRF{ z?&oL!{apZ4&*pqqnQ9YkYka?NAxgSd4~)t|(HyAZvTikEe`mk4i*$=D$~N_SqjE!4I(-RDl=TB65QaXS%V_jjqJ7IcD-2Yu_=7Z95*r)iwEj<$4!;0Lji5?6v`uNhkL0Pj z>nCeba|u78xr759*9T5O_Z`1Rq~H)>!`v1-33az)R746_(;~?PIJLnO8OFT{k0kc% z5pm--K_-ODUP=H6-+e1+h#VoCEMS9XV=9vKN!QhiuBb$8hNqyaY2tQinQE5g;FY=h zt8L1w(=MREh&nUCr4FF;*SL-rfh-DMfaT^uQ5K*$@39-t%r2{xgBZm?#uMisidai_ zWt+fb0Tq#{`}nQ;ccmi<Pu(U!#>0u(T|}N*tSA>QRWjgocL0<~vy(9m&G7Gm(~l zb}xaRNBHFOa|V&}4#{8UhLCd45K~3H`4`NXY}<^LQRc2OiEDH80msFabGp122!$9#n9>*z zfEp~s2MF7u!5!dInLX47@wQo(=xXT#*!J{zn!Z}8flKq#u@XH@zeL#-Pjbk>6GWI6 zT;wrryW!QF6!LMs661KH?PbJgrWRhXACZP>n3b-04ocl)@|dVFYuY}(Y9-qR#bLs8 zpG>AfC1NY^pfTnA8P=_Lh&d%+_f3L+V?phq$Le?cz*uH2M{N5Br5J=z51#zEy2@FR za9MwV*s#Rjp-Xy>MaJ#g4dr|VTa++M5^OP@-NO3fXnnXV)nrj%zks)uCnifJxfn&8@bW8eLY*%$T zb@W^(xsYQ9ki|A2d;+$!HH{(bDK&RqKuiNSHjS*P(RZEQ1w<{2&aoM)C;HC&!4Nvo z{tPN4rL1bHqtoCIRy|XFZ7;ROJSb0bbUXXD1~NN7vB1pA zlZzvh+8wb6y=T(sHCA%l9xI&&CAk#36PCOVV~WL#!2|JdqGTjOqBqS z2<^3@Y^zPzI(dlhXywlmCJgj(B)7UPN+;7J9M+$NM&Mu>&7ma;uxzcTJB^!cUL{cL zi?z|lrw$QM1_-DhyDL=%ur(j4N2FnyJ+-;;?j*5&mSUWAnmw<{H|L!l2YUotTh%_< znSH2P@S|`{bhArztgRPr2R0uH1)HvWwqud2BWX#$&zF1XAn(aL%7SwJP8G@27{XrH#cF6R=LZVO z#|)qp7*^MM(U-p4Z#R$ueLSbslh$Yj`6YkXfwO#}BebdM+-}moJ54m54B_WgQ127R zGu%sZMtBq{rXO<`)?<-=bP2FU`zUFk@ZpAH*=g~t2G&WLYA(Q8Kqy6`&Q|P);`o zQIyQi6FW%S<*!Mb^!0_iwKoxnOKc92el@M2(uMFfy%4~}- zGh1JL#*li17+&1-Rkf+`;NY&JYMssS&LX8V5}SAkhqA+S<~W;&qFWUmj*V?EK_cMK zoW>qP2h&uFUL#Yd+UGzr!~jCvI8-jZ}0hi_&0O(@vr7yKBGnFjobVXa4>&SvR}0(3C=^D%rq*F>b2`8Re9 z!$d@0cc>QvH)f+NhrsUhwx+o4S#aYFCkO*T_&SEu?kDCmpXjnuE;xteE5tG{6Y192 z$2JunArK=s^&^q}&-l=ETVK^^5k*io%x^bD+h2LT%bYy`U$}AJFm^si+Tx4i5gc^A6ZC`ml@5bNDe0@B(71{K7(zHI_YCze|QXl*lbEv zHD;5auvSA8B5eYUVjHDN=~3U%)1HTT;{YrG3Zx+sBtg_u2&*-DIteq0^F@2Ib(W1jr(h{_!!z>rLw+!emF6^QsU78U&7$VJ1~ibU|bb@f9kZ zOs)XAO%L0VKL>do9nSPgx&TC-?l0mCe`76ov-BK}D4*=r#o%+#-yg?}_`FmsQ+lHNe4U4{>0-d)+~Ohk=4$xEviqEtucV!1Ex>%?yH0hpaRcGv`(7#0h*4jn z8-hGUl0&OszrH;zKi@RD`jCu4ca)(;nl8U4W-#Gz+wsxokbtbZsU08efi-9|@{V~* zis>T4-x*Riwsyo_c0kyoPk0eT^y-F{ANQrvaG1%QqwUZk_{nZ30NQ`2=>u8;1Bq+1 zix;@F%49iOp?8HZ?KyxhQ}0Y5B@;hfcq`9Xq$diFi(~Jz*jrA;RUGR_f9; zn3p+E^6x|2DcZL<)?)7z1LH?L!evu^VGN}_gGV?oyyZjQ7wVL!%{JK%W+4(7`l3FQ zx_r!BO`Jk@rsEx^54b$D5k4Dn4xuHet-|-#>P>Af&TnZ{K@nYi!zI4)sA-0XNH^hn zD}Rg7tx7C;#5eXpUtxlEU1|qEP(zInB3nUoZoTmIx|2t-H~=<90L!}O@vvVt!8eK* zDZ7b^t^Iwq3)R2uXz8l=}(05GfeTDe@6GKzj?Rl zg{Q_{E8q0FQOY!MU^2vKw)9 z36~dB@Ug$0^mtA8MLWolLyeGsn=co`pdzwSkbSfi>Wvv1&!qQZsSS$al!$PE0OC_a ztH>XF+TN&1%Qd|_nB=wmEw&1b6T99SSgm)03xKC26V7*A0gfP8sY(tvYU~;6OB0^n zIbb;YG!96@*oQh(Mhp41U#|5}-~k{vY}n|?5o9F|D`J5007CpdK-C}hi*JX$D?xGG zBrgldkOg08fX|?On|L?x%XrzxgKvODos=C0h1oU^vTTE3?Eu9)Z9(a?^bR0(fj!&; zz)c|PmE-Vhf z8at)1y|FykvB-ep65Y!76y>Ub>Vqk8+$MKFUb4P01YavbCr_}a)>D#>9QldX#8q5BoX+S124Xu0YA{>m6i_z)HU~QOWroCpf&G znfxdmMU&dER_HGrc;51wtx@)2OJQVqIaAIlAh(4;Cy3PsC3gd}HiijCfZ+Sk(`hH~ z5<{;f=(RJGr$nknCx*>DqVSI2AAev^m^=$EW#hy{PzeVA&5HrOiGdDrgkOplLu$ow zFDyO+-%|p7ye(QUK&!r$q6}bf0#*1m;82TJtJLbs*0ja>cr2ssfEu(&S72-S{Qgl@ z0L8PtPrA^m_FlLTsIeDsf41KXe-uq`PEM z0!nv-ihu}635bXw9SR6acS(v8(xTD|(jcL9h#;UyNWAk}-ur&;{XBbrKffREc*j`7 z0l4B^XUsT`W6m=#{_1wr-y7vIbstyAI?c*!mvCn&z*%3eGl`*?jF8D@r z_<6SLK7qrR=BA5F$%D?5wYu7t1a~$bDBA2}3D1<7@QLpEQuhIvVqcI|ZrSjw+48wb z*fQl>=J7${A=d3EP+|If9V+rVZBVTvq}((qEZzj%^f|Ahq(z9>H49A5jbiY;3m3+> z*pZXZKnV?@Fa$l!0|x6^U$`<7Q=)*xYvBbg5JQwLx$qjymq$>4Hqdk#-W4vRwa<_! zwz2Tp^ZDg?Y7!bUl=75pQbE^{>Rv`|cCG5u0#*m`y5STzyb+7si^Os$F+qyLYY2V= z3ABpDXQEQoN*DXyv?8h3Ukw67{^X;1NIv?n0IB{@KI-`;y$V6)2q^lT1|gwdQCFsm zC?L0eL2WdcO{5}r>$61()A9|{8cw`6(X7&qkKEjr^;H>YUs#6=5PYT-Idb`oQ}Pfz zOSQ7B_Y}(g;N%||0l?Z#0MpFOvY`->DE<@nw_h!YM6vQJ{wbO)a%b_uOq~tgC|!rV zx{Dv{t`TagtB4HC-Xqpg>sJ;RlRbbw5@PYTn#m?gLx)SO2%qy0b^=b_6Bzuag*aq5 z=rIJDV&!)IRz8a4oi|6$mt_}_J+^{YKe|}!pnw|McQfPA{D_7!XF1u2#?N@?S8%U+ zHOYnrJ2?`hTA9&n1r338y6l95SWskx72VAOylKTp)eQw2$yW!(vwz*pSb^+%SfPi} z@Pn?Ueh3uh8uYl4oH9_NsvPY59&+-`R~|~=qVJIlv`islYbyylJpg?g`RVhnuPA8W zULA2BDNW{!eHJYji^QO0_rRI;zrI)nI5=6S0yr~O>yhg7RhE=|jhXc`NUt78b-J>k z+{Y=~ls7q2Hhd?W>_)V~q(UlmLqcA#%v9ddIV~m?ydISogS@|=jkCBuB!f=PKm&YY zkhlU_4F2PczNN+AgJ6!n@ToH0tCGH+IK6p>4a1CO38Es^d1ecYubksKcoFHi5?{5k zy?e87SBALx#}Zcp(rSj=ztA{Pb;T3(sAM%FGvyj6++{Mee10EVq>%ad~hd3zjat^tFyyEY1gx3Q49&XK? z683*y2m4h9Nlli_1}Og;_5X|?2@QbTiu(UP^#8xpi;g3VW{}!yIc|i04YBK979eUn zmYN6zi1yOX$}=X|4%`%h6e{jUT9N<0qGb01L6KK(TG?LZ0b%uu%Lb^|*H*-DeNI~+ z)Rl)_qr(n}E-!`G|9&NAOU1^(Uf9|!9<)dHLLloI$T{MyQFQ;^jTd;IA*Zswe>nV2eywWIYhvxVUC0l5 zXMq*@2A-=C0yfQ5aYKL}?RrvxxUs6V>v5trUj-rqEZPc)`IiBWFveu$5V!)=-uLLN z%u`6BS#C?J~hK5audZVgf1T>Hb{@T2n#Fd##DfMs~CA><9BT-|BE#63R>BZ7#v-tg1g zkFq~LfIQt}#MJ&Fqnug4Xev{Nj*j+OgYZ6mjXfx7`UJh6piR>!_=Z5RcjzFbPI zdQZWmz*~^q??Orrz>~v<9)+eH#>ZBOy$n1chcM*G;->GvZD(~Nhpb7^iXK9Pt%f+s zSLIOebt#UM_<{c7!QPJ;sYM#%tdpmpAaM33oQwgV`j)IG=4+5un>@FYQ<=PKnE%%C zv0||F9F$hAG1Bwi1KomSX{J1d{rh9k69f8Ns$cksSasZzB?5u$A%+K0L5HHW7z8KO zd}iF|&O{02`D0>VDFhfut4r|2h27Ul5~$X*ufCVSWQlWF4`;-y@yrbl*mq>FTA-sH z^n_alE@g}jKIApPA9o?t6IUhd`Pm5B9#znwq*za1RRa`M8;HF&RNzl73Jx72rV*Te zB(v@+;%vsm4M4lRXs8QRAc3@96jiPqK>_)J`;Y2RP%}Ynzxw6%{mt37DNFG~!00W! zK3%8e)Tof+%vLW-xmDukTs#jMgJ3fB?FuR=1+~S9zN9OnWH*2a?9ki$xwI8fb@`VQ z>*JI|z7DC~!zDc{J;bMhf#{eZ0NC>=TR~1Y8DU^J0COD)OnbtsTokv2=@1Af&#dL* ztf+~`prE$&3OffBa}{i)QL7FHLpjf@6pvnD8lq~G)hv>QuZwc%2TGMu7B0+F#SVOR z0=j4kFm`+TXFZU%>U!7!kPdcEPk|Q_nv0*em;2{HIc^*>ZouujK3PW!UjTq*O`*0r z(VQ~C2VPCON8QcREGcqfcY!lJYG4|&RArFHlY0>Y^zG)~>vvbnUM1wt_|vu7k;XjH znOUD|m}nweE1Cl$_bb4&DuOJ;-m zU}KmgQ7Z7o&+7XJLlVT@{LX+%Q2;umCud zlkpDBN#gVpE(dUSd6n}e6Yo?6z12esJKKBe+<5T{!S;ykxWLdo$k)3M?OFfE;^uK7r9sNrYBV5T8+_}M5}dr98zDkxG0pFqE3gB~R+R#|g`QF- z?SfB32~1Nnw+8Ye_VVYS_9rm)^tpK?i0k`9F(c)XzH+7ic=gS~Y~qY$Wjea7Y|50B zylng$I|#}KJ)E@E6f6A!M&DgXw64ECv}soDSogZ2YV^Z?KNZU>_8W+C4>TZjII#@2 zq~MLq5s{cp&ktqPLr?v&%OawOAy0BQUDlW00=IwA_VY&mlFFDLU#qy!hK;6XwUl_a znl;2|V(T9k9e#R)<@{CvEhzlcfY)&BPT(mstA2TRD)=h{lqNw7!>JD@Hy(QHCw=^8 zSh@xz>dyUy)Fz@X%?#O37pN37zt_i{E-}NnSdJvZx@TPN4`qSO@MTV4 zpXW6~3QEA6Or}@PC2#+Lu-*?cV4^^tOV^JkvX$^(y-L}>W`C+~&#pU$UU*P|`!Jv% zH5^IkXcX65Za-7^y68hf=EiO31DN;*HNK&r9K=z5K14lz{oFWwrp|QneWgqdM~@Wm zM)usm3h{5^epGbq7!aD_wRCaeXUWBw36BQM;YDX{p!P49Oeg%_8PPAi_^eK9pUO2X zwXPxI0`Lx}pT11akm2th4U6N997omj=5Bf;JjMx!dlC(oGrcn$#mt67E z(}>FnPOxJI`uOWFlX;Cw`=Sjhi)QZn=9CklXjk7LPnD#Ii$Q?5hykn7gV4Iy~JS(p3#H(64j9?)|*Lu|D;v zN``vkg5VqK!LvM5Z7)(Iae!`6+*TD+I5rQMNb4~Znikw~Jww2YqmJL0NzbT!8Owr8^mH)ChX0OW4m7$gvc>^nbB&tHJ5h|(0H8J=pB|_=D zXRjb60GUdlMlZ&mCWBdtymfv&7-LVffQz0n9A~+4uD}ovmH5=-rtABLJRIwTj!Q)`hW5z75_>`jVy43C}=6=-Yu$hLBH- zWO7lHmdX6tHWr}oznU)5%N5iiJvmPqeRAiD001?JiYTqI)RX9s6NcGz%e7lSpMlDs ze4D`|Fa6Y19uK9535=Lo0*6bC8x~;eE(89)oo4hZpzMt~H~6<57+U%8z9L|fon5Nuv3Q`*oXy?y0Oi3YT^ zC4ssFIo2+!T7Yl2V}F{2bYTP`6_8I>Q?X-WX?LR*5nN^t@N{jZnQ8Yr`Vb;0OAT+5 z+}_2ntmR@|TtkZ3&HAA3seBfsVJoWSC7x{Vnp3m-P?3JeqQgP$AI6xB0NSNn5Rf$d zc~|9@-a(;5^{xaaLwH|?JNNsD5yx{CG*aJWVE3qXX&)lA#To)zFk}f$c*56*{jdOdz+-S`Y}~QK~k4`xUCv%U)y2g z$bYla1P7UD>4Jl2j&_F|f7EVjVdy5`ZKvueYCbv{FDEEKa?&adR&){%E);g*7>7!~ z84$oaKSBadYu^VTTKl-92jP=??VKnO^Kk}6QecCV9Ylo)SS()6-3r3hTi$J# zoIbt04uG!LcWBh+Fo-N@pe#AKtA7!ld;HVABSQNAvs8r1Te)OX;!pPOjr%);7{h~TCCn85Tz0%8;MPKKL+-z~-aC)ZYgssfGxnn$|z?)N`u z^T_g_7e!Fb+>14g_YokB5_2F+sr$$(KULb_)D{0-lx@!w{dqfb_<0bC^m4L&2B;Bk zl%kOGNk;!-Ze%GmlEJ90z9ISbi6jFDLg>;Z*-6hq%?!Vfix9S6xU*$@T+jcnL5;oF!2N4cA=2s@+AVNb^g`FJD}WHcSU0;v0$E0S6Z8|w@H;M8 zH|BZ!tfS7cN$Rh>)sGCU9i}oI93Jz=E$n;0N0X|7BV0o=Fjl*6ZP0pb-&7Si{M^?k z-c)tz(OF>|l4!97)w?~Vnc((6?F_W z`|t$5PuCcU5bSi*gN{4fF!<*<$mgE&TNYbip!>2^&e~dST19i#>o9jvg_Y}|4jVe;xABfL!NciG>_P{Jp^yOUoIy12iin}D{|gSf1g`O zd|bzsMbEhDeQ8i?&M&K`=*NuZ^WD`^KBgwm+-0y?)=j?Ec0rC(czCaW4&(+y2XCtT zh6Q;~O?3pd1Rg#{=Ih#Vu)-) z%+)UvWP-^+8usN*^KYzk*^O1 z?QY_Z(Ba=&bT|d3V7$7z-56lJ0cb4(#CF*vZO2o7Cy_?p;4c6T{S~8LWNJaoU@-v+nHO~M2*dk=V{J-Xla+4=r z{y(Km{ZuqUS6f>nD2Ho+!MZn)rw-VkvFL<)2@){;hZTXkmNeJ5ga8^ft<1o-gP!4) z7VFN@%?|;a2CPa@hJ!hb1~0zr2y8Q_cE$lD(4hQW1fuUbFq1P4R^U_haQTwyjy*mg zbu|usSzz9I=b$Tf)ai)J);LFlWJgFj%zr9L2>f;$#3W@8lnV~QhQ9y&h^k=0{FO^J z0ll~rw}#>R3s45tK|l*<%#y*-wyVLewFkuC=C@(H zdirU3HYsGgDiU_y(y`MB=N)7TO!}>^z5!68iiS6H&GX#_%9`YQJ zUhNo0{O$*ID7062dwjE%k5(HZ!cA^n%vE8`Pg{V<6Vn35*X;#S?OqC-A1F8v!CNCe z2!5Q@cy3X*1rsdM1|;-LL}#ERJcxBy8VP;S1Ay8;E|^0jCngXJ+wPEn1N4Eila=-# z5vQhEK)ui7C-}%zp7F|`!ZiwtWr%0!BX~_;+QRH2p#qd3I68g>w<3Q3=pb7utlX@+ z+h_vb0jZmD=xbS|N}_=pHShw4rGQ5!TKN{4Ul1X7{iufEhH6?+kJ44=M)l`BtDLvt zTFEOuTcc=qm=~Ggi;U}l+ys(7P^;ViHixn@z@vFw3w%tWrU5jHuH8I^KbQNjA+RO{ zzimHl!oWL1hTXLo%mv80l*NT$aBrXcrJZjT?4Ta#Ii~da z;4JcLfQ3#6R5mp4Ez0n9_VCS54TbD37-T$;>8|_7+$Vy1fOk1^z=X#3{dx);C!Q1N33lKg!jfY za*r_17tNfDGFvu#tt2{V48@YPLcZEM>gr~&h#VqujdZyBiv?UMbqrI7E5O;$Jy+Fk$Pm2p7Fyao{qD;XYXFk5o-aeIdGT6~g9SauFTI~)>oH8xHRF17 zNxr%6MoO19t>qfV95~%vdxGoD19R60c-{o->+56|57>8_ioP2yrly}=ODoWO%)G(q*+10LZ0ua61DF1( z693h6#VeDz1GG!un{xdqD$Zl?*7fn*fYdjeeED0!4@AasDR zoRBsjFVPyreSM{rSDkTQeq#?$gh^H~t=jvs6lSE2l?b7Dka+IyZpfFu>{#v03vb*w z`B`!)N#xubVOzo@UGARyAE?E#W+$PcGzzStqeRPU zqHAb(olE_B=RpG@ix}c1Y#g!r1vbVd*#z>gJtqW%F^m>Kj{4E`wKDehx$#=HZ zQ~A~#CSzzb6ElycH<|E#Uf0K4UpIK4^R&&Wn$9zw^0eG^hf>pb#+ABM$+Rn z1v7RT0rNcEn)9M&0J8L;Mm2UR){Ce5oi{+b4td&{Ekg~V-BFhWfu7>LB^=GG9YEMJ5z;A8$L;5;6h=~} zla0F677@SEoK3O_RZzD1_vhu`E(CX{Y7Jbz`*S()7Oxr_Ph&nO2bVr)K1Vs?fk=2z z(Dz$I&vXYK;yovGmsPsx&N5U2t|`2eGNlDC6pP=_i0uUNx}qD!k)&-ScqwDMpGrc0 zBy-U;WaF=&{r&}~b;3%8MT$Mx4iYVuNUB*s(5gDgxq(VNER1|z%I9<&mLF*j$=MH| zs%}vBT0pc8h)a!d(*;IRhgek)^3Kr5Uha{X?HPzR_{?iGqE)MFU1z@Cx^`F=VDJPV z*}qwYpq6!f5T}S7-j)sAgSagm?yvTTweedxN}gT&Z|ZB?-adT#j>#=?c}@nD-xpBx zo)pZ1Sd{4fLJJ|QMxSt=ph|V$Syw{FS9*mRxPq#YYSgq^>Iw?!)JfgByd;$w$z-eRdaLQ6fDYQ_1;rI3` zerAeaM3~an*bH`>(XJ!N!=Jn>>0EA=NNL2)OqYF%PmuMr!}Z-lZ@PJNl}yVyfCOT> zt|z3)Uh@w4E=Im};v;kMxMs32o{$?9#JgVTelC*Nnu9G-&oI_SF0g4SYxjYsD#_JH z42vr+OB!^Hb!4+-mUi#EZ}Iw=IZs9Dg{$IQaZktjk#J&zaY)2nj`oNM$H8owd0fSU z17_@@yKs&WTx_7Ip3_LHTDz=G=K-Q z?ItaN)Np00VF!TnIANSAm&w}AUu)*dv`;{HqxS zXS*U~nUQfz5RP4#cKghNjdFfn2SLE1A7O1+?Cy%~sp7|7B|bk{W7@DUVC}3;aA{Ku zSV50%*#P`Y84z36$$I%P`(9!mTkDKtb0DX8k0ywVUxL0#rnRnD_6BC&KynhMh7L9`2o3j?2`z3hxt1NXhv4KSl{i?or_ToCm|h#vVJ1?_Uly^d~FgJJGXlEZWlY zIPwNi49xH-S?F4*Go{J$v}@9oy6ogjnYr(Y%T6Z+vYpux&s=%^{i2_xEF}ew4r|{C z$5+3n?@B)NOzTi-KgJ@A4G@4W@FRqA3G)jr!LGmpzGp(uz8lv9{yshTYDd&gPGb|O zGSB=(;sW#;l$6I}!YXV1In_rDK6zATdE(s%9lOU&wl3WfETmhDJ@i|7Q?v$LvGzL8 ziRHIANFH-)Mt;-XoD;^Ox=reo8T_f6aE(Uy?gn3|*J*`MTNDqrEb38hHp(QSPoMJU z-?$x+DLKNXkGEuj_ci|2@*Om?(gy-hQxlT4}08`3oDG|rC2^)xPTHB4a1dZ zE)lMWjfZ~ERw$F<)49L5aAsyldG*A%mmX1y4PT>B*~G>@HGh)D^JEO=qhPiv@vaF6H_i#ckh!Auqua7 zZ`pMXP6%@G$eRqUmS#fOA;XpX){vIEP<@2-T-?M7nc8@-JQVI%Bz}=Zbq=-~pQd}< zeTzRSK)P_ZwVm`pY-O-eKqm_;=N;LPsE-m7#2WFO4^!FX=t3tAW%q#fKw^|lfv;&f zJh7@2^EO;x0+uIS*T%)9D9Y9EeA{ZNxmf)Ejn%0DrZVH_a1)FDYJ*0SAWQKe$b>$h z4cwYQ%4-!Th`bBN9&K7;o8OSd7?v`EKH9?5>fJ2JY0afTcayrD4*mt-b;ikO_gmj6 zr=I_$=x{@6CV)|w_E=X{n?3!fZ;ZtJNMI^mxI98CEh{-{^rZx%PUd)TXs%fEhD1%q zooh_N8fB3V?yVNLkDV&+{jzR=j-(9lhAHl=GyZ+U_>ZKNuVwquXHZYSfA5kv^`2Wd zXDoelfn&B}+DY--1jtVO;;zX^R5`lQ1iiiUk_lzyJ0D$H_Qt*;jk9CfCQ6>`TQgZ) zQKaqX*n7ig%Y7hjdKX3-b=j)QWy2CX%xse#3AuK)T2yp`zMs@VA-}~2%;Fv-Y~hro z%eIp~j982RfkeMMkaOSZCNjRo_dwwP%af zlyUFOVGF(2EKDl%^L|8*H3m5}yxExV>@d+HWGm-u=x(=k`*jFSL5_4FlG3yAOY` zrMUq1fo7*u9O#o7))LBd1unn?RB1FW-s%&H8?8_HE%y(9@6=osvF!{v%Ido5h zy}@Qi_HE+TN3W#!*SI!ayfkUnzr6mgm%b#cq2BnYy(C+AHE#OoP2y+Jt+DRa%6 z@BE2M+d+6Isl%^;kV#p#;XTjQ03)K>I)<<~WV+8F77ImJh?)CO1iNet_|a4!_uVM2 zV=X-CfwA{M+0>T8p*AqPMJRExtW9nvs1ijzId zshiZ~H_vKEoru<(Ql_%cULZe-WM6_Mu`A66%YrODo>!I19U=A+yJaNP-K=czOoiBm3O z>}R9m2?VPezHa2#5J_4*;8KSu7R?I-9Q-pgu>p4o!V@x~5ylMZK0rj z?_m1Y_17t9g;xu`zkl|b_XosAR`5GSA?^=iJ=ni~_bHBuPhbkCFqPLUXoi1?zmPWe z?kn`LO`ydC88r=OM5VkZ+sE$i(s9Un7Dy0)ZG3;V3+`S$0i#CMD{X1SdkKT=EP(aVeNAR*0?>V78G zI6tK>`cU&RdqkQHgn@S$#<8gYyHIp6mXjlmg2wI4_SyEYMuptS9%yC&!5wS#+W%hd zue|v_k}5;yDnp+P1OcdG1w_DzohJe*auyE1aIFgvP86_ADRA z&bCZ!gZj*e>Zdc;n{|%nGZ}JsIkiA!)&!;dUO2M8b&#ubX=jAgt3q!&l znB`Y|g*s3wDP=r?(>i&*GghHn*+e3xzB{o7AL1Y$aIlOUV;R9BBzSVpAAQ^=;Rr)< zb~kkS*9ulW^(AjBTGnTC7O(!903>f2o10kA-~wCb5YOd41;)GH8$dTjMSxKA{bP=( z%Pc9p)9xxHm_V0_aN|Wy43-r5ya3#2O;`W|dgX3yXpohbN|ltrw49|lLhAAZ;54=X z=GBgxU$IJ;#_bLzE#9uiLm>#=SuTj{o+)8HQJU#`d_~_!>h@Oy+?4DYJIHHi+aE@4 z%!850-TMfVW8HB*#|(bUdbpsqGckoJD>VH)eVBVm9~8TB0w$`14k8FUs7?z1&4$#vtXMM-zJMe(+4xHHmk#RfaA3 zH4Jbd0j;5g^xr!aIh{z5$E@eyM}aL4<^bA0$h_hbh5Yu1605(R+d!dqOfMG4rG(g`bcd(X(64nV z^7@A)H-M&(N@&A|H(O4mt6yI-61~{6gXhn1F0=p_&i!-qSL?v&wFUqvGKW1#VKTp?ed&R`ccuO+#Ab%6Rz@Qg7}B(5{CRT`3{ zLiir;6};2eV{_5@bZp87275bD1x)+s+T|{cK{$nS1cQ(c{#o??j^Mv6LKa8??jY6R z3J-8y$}{-P#g)xCG@z`1^E0v1(g>nJn*07fjtw8R(|0VYogw?VY{8a#cAD&dqKLf~ z*R|GsC_Uvjt5ST@- zxvuYzruMdY+OyyP>mi&T=R10I;3k@Jp?i+c>Ih%$ybf(o&2UVa(Z(FJUMX}2vA(x$ z=s@06Gj~rubd=PJ=RX*EVR<|z3B}ggXnyEdOwQ-rC1NzS+2y9R^*ywQ$Fa$4bk{_vv6So!mT!4A(&= zpjYyb>%tBjwbUoD+NOR^Za1ZV|+>RX1W<<>b#KO^W|M+8! z-^vs=>aa1D#+}-t#9=((2Y2p_mS1LURyK;oaQ07zi4X4KK;eln;uTwLaFx4BiADh9 z?K?ZXxxr+3V^W*kUA5qy5bG+wgkbG!cS&@H@(3w~`AfT=NYKoFf1VfHM*@E#T4j*) z6|EAQZS34PZT74CaRs}%yEGFEXUQG*Ss*)eWabHXmBgDY+@z+@cHN|e;i`+}3p4IX zqd4w|KjyZ7wrIC1)wT$9pGF44yn$;X!oTsMjun~lXKG-bAT}6PC#0XWWIk~P%Aq&Z z@meUk1S3kB<_FrLc7a_oZ>;4QPht{qubGHQ5U*G32Q)?%R-DKKYC5Jd!Mi@DOj(Vt#uwd1B|8aP{w zq51P~3REc;FRJmx{@Z)}&u9X084&9vbS8rXiS_>DqlXaEAt^l~_W%6f@R|+;iTMy> zkId_Te4ss0p6xdtC7}N6Gg^dTlQ4M9nEuBhAb;#j1E3;>Eg$_~pW&*(oHjQo`!^{5 zXC7?Gke+6M{}(DG|Oh4#|9-l3?Fg%3S{OwEk-&-V@6Q-(F2QS>^7Id)YORjV3Lz#6{y8N4ha%~ zX-T7i<>+Az3p;&oOMLGmrTv7^_tlO+e>yXiaB!x}_e9cIgQ^Tx~ZXs=$ z494(~3>h9|bb(NPQwmXJw&emQEyt{w7@?r1Vb8s6BY2K4g-vU7#?Lx&LYho0d8O#r zVO9TFkORaF`UhX`hi9IER!U4kE)2t**r%a6foh0l776kpe;!%FFf#_9&wjlKs1GUd zqRa2nOAwUhpN{}MBsD;#YEb^SWgS9D8M@zSzW=-}0&vg6Zk9ahB&_-0HkVEiOo!t) zoa+CwIVmczkTtC65{>(Bdyg3QUeOEEp$Grv2`wntzLhPUgUbJ0-hciqh5!QX7j#bk z^N;`h`6S>tz5l-_f=r88>=_<7879I5sGxvwaC;6#rHPqv8xGX2S%1HUf6)p-=H+-K zPkceJ{5$0Od7~lm78u=0L05iNlr)*-_Qx2KlOF1SeRf|8*6X1v&P#XxUbsNc7SUle z<&y8;fB9cOJmr8Dx&A666#10@^uSGl2@mA#-Ttpjb{UrJy^9<%*MG4G1ftmDt^}fP z<*KM7PMsM?ti{q79T<&ypJD1hTSrTi?mAhSkk%% zw`;9=uZ`c{{%Xi;>Lh6XF6876C{=+3Erl2hVxePjnZ$MhZA15dED~P;i0!Odwqgjx zy9)ra!p&*a*KE{0W~7KoNJ76)#EzRQtw8&P3@MLxx|)GY$U3-2LH+gQ+g1f+FgAE7 zkHrr+`g7WVRC@5>L8;AiLMP~;cpbQ-U+Yt;%nB&Vq-&QbGcMdqVNnVVBV~G94=54{ z((1vb0%AM&l;`CX0&E>-G956^41OHRh9IyOn(cAta19eYY=QbPxI_b~VuC3N6%Spy z%~b*X6#ytuzfR`jGW8@Iv!}99$O-Bz0(`gC=VW58K`l^k z0)%lAuQC7Rl5!Mv&qG><9GpNXe;z3@{^&IW2IoA$@6oqs3$#B{xX)+fWZL0mMVY$LNBNKu;dCs&qTS`Y8`IdLT}eW@>-##`T(}#FC(749H_=P|2FZr2 z@Z&_xequ!|MCZjp4a|aTU`crbsQTpg>_CC$st?1*bL!}Q7pN4Sgrx-Nhq*>qU9+rw z2remtPN@e#sl_svTfe?AlgX68#cN)3zR?{>-P@p7QF=5_8$$s5LnZWew#jLREEg@+ zNmfQJ!Q@^|3twUoZ!>;9*C`zh^!UJK%JNkb?3w6yozQ>vY9+L3_||*=5eB2AM0|K_ z>g7SqVDSRMwPbspmOjz#2K~n3I%IV+1Hz*zeUAVIfIGvWVRH1oqq)vIXyTjQ;s+o= z`q&X%g2Qoq5KK&8gAPP(bkE>#FQ#eCaG0ZNCsY%j5!?@=*TU8KMB@6R7O2p+s59Ok zf?;?WSmRbLYN$=NPWKeyd`j2}rE@FiO(Te0SnJHGP`?%N36Z?FS*EhVU$y^-gC zZ1SSNrr9_qIO(q7mNt;q*xx5$BV12Q80B30n8}pvsk|nJzOSupsBeH&Ks;Gf3@7m4 z+v1?h8_Sq~p@+NJn;x~{T%-p|nYf~lk;xPUq%f58YSSVo$2H>rSR$R}dXpDdy)q0qW5Kx>3*fj zf51t=j>{@paBruzEt-~cN_-jm$(f)18sJ4tKv(^v^@{D*eAh~Zhnhxud`+BNA5pKq z^3etV1Cz7FMTCek#!Uj#i+!SZ0a-@$eH2)^a>J6R@3ss*+gD!8WjK%x+zi0wubMM? z?cc<8RZ<*{3(dUVL*3~}L)&za5~n-V^-n)hqF5MvaZQ)9M@E-3^$8d&FfI;PsT%V^ znL4CQ72==;RbrsaRhb)#ef+73g`IM^msxa7i-W2geM~IFl|Dt&j4{FgzO6D^K)ny$ zOQYyV`y7*@d*Zl1i8ZeVbDjuEH=s~2pl}wSl+nP%ZM8U5(Rg`iCF*eO3n*U!8javL zj(7olY;)qNn;@4!w5J-3+$vaWbYx6)W@pjTa|z?;!@4Cs>sdG^SC=SBjAt+|PLK?2 z*r_U0gD1$w@nhZiI?PLz37Qr?2fb-Fn*wbI*CAOZSf}NPmzSPBytg^4A~sy}(nu{B zk82No8+JT-^@aQS9Y+58eUVGmj%2?G-)R6|{c}Sb>hukw~8jF*_R)Fc}A$Nzj-Xvpcw7 z#zUtnF#S@-A-$4IepHPhe86{p4%#13i@~YjeOA|Is2ET@TAm41!g;VeW>pT<_A~cu6c@|3D+pm&+ zhl32G0`V)Q74NGc-f9`Z6?d650dy8hFdFivr|y+re!(y;kIK$3>OMUaR%TK6GX5vO z)MNZW_E=VxNNk)FKCfo~n8j~X79JlXq0Q!FL%B8MsNi*QaY?DsV~$* zHNWSigavWaUfrx143sMNLJ#?^Ow!_~lXv$l7CWw=t%jg{gA!Q>+^1JD>tK)K_MHMx z4AtW~J2|PveY8uQ+_yvP=AwgY0A;s-asRfLrUL~OO#-lTXUGGqw*eCJV!Lp?{cixx zL7|gNG48JGK67bz&n5#k7HT&2{hfvrGGgFxaB>zeC-zD{Yl6GQpctd5NFMooX6%1U z$PNja`Cfp!9H^chwbRmbvTyU>+DN3k>71^62({gM3ie-_31P|$At2hy|A8Y)Yx2^T za6D9+$?j7Lpq7y6(vJ_-eo>~mK6g2R`-9@E;LJD@Mw+L0$mo)K4@!xF!h5N@Me0B2BV zmT~S)g`{>wk>KaUPRVS(?i+Q3DLQzA=W~?6{!e|BodUM%>3LJ2A{@vi*OT^OKY*k$ zOZDNY@b{?_0@A7o{(fg`K3lydC=6PywhdZm_T_DYMVY)cO6JZdJ*OzFyAl1nS5n;A z(DnB-3JBQcAxBsuEl$8tRQqJ;rYBIXvjwQA)p>11nC6)|TO=_9ipWmuUvG^|8uB@f zvY-d!vd?NOx_0!%&8sc29r|;z{cGv}nP!zm-Suro86uB*1)34~T! z3G*k3+dx-~w0s*k%peuSju{slTcH+n9J1|t{3(6@G9F>=_voN4r}E-y5U;cZnmpmAG0vbnFfp` zz!b0t`;qi~OIYF&)_|%WE^(7+XM2Erz_$4@gzedW;_Z@qOIRfER z5SFPq0e^#+&!hs&*sTD1G^^=oW?pEjjkF_y3mM+tYGQCo4}!XTyWX@r^PO?5%>3^2 zompYE(}+$%0R+3zmg*O+FO!*W!6L3~WHe<-WnJ-N z0~*bLhjQTxtxnVr|qkEOnX3582o7rL^tvfwNm7aORjDLfG!QdOEvW3br|f&=FHZH%0y zMZ#s0tUc%6SHSUnch=VgWzX4tNP^uEXVMo(P2Q^Jlwj|ZC4=obu?(BIxe>tJg|@^q zC)-z=gIvx{hlr3o?zcGo&~(z`!8;_*XfZ)f^N7E$Ibz*iFEYE$w!$RKL&Z=C(do2@ z9-a?-B~=Yhx%TFU7T5erlkq0mEABL_mg~CKM9@MEoq=k432M`HuzR$aY%6o{@CVFh z-nMW!S%Xj}MdH@c`jkvV+~VE3EALL!vUNP9=h6t$b_P-$gjj0n($$pLHm^W9b596s z&eF^DHfvezeg^CjR9!Yw*jZx7-gq@>M{FgE+p>|0@FiaSPAm5<- zS_NL$lV_A%?=wHqj@3Aud(I%dsige1_EVyFHp)Gs(rv}8FWmigDlQ0-VX{hxgt~sp zTE7C>9K(s$Po_c;_&{fEkP}$VR@1j6Mv|c`r$OQd7>%@TyPE?YxN|z%;oiGuB3fTR zE*4YZAz`P+MaZDzow<9E;63n|4E<{)5C>n!glNW{KU?s8F9+*&e4nq;N`ZX1L~o4m z9v$rUrpvI!Xqyh6e>Mho5facxa!5@KaPjG6Xf58i;nOx0*ZLZ;IOdH?O=fH(6$HTA z6O+>DELtze%wB~cA>{qVeOAlykT7D0`$8b7O=OcoUl1b@Ad5yM#{Dc1Afg`K9A*CD*~~hAavqtMLs971 zU76!~2sUh~Cl%ki>?O6SsZgHP+f`Ax!vv(-_45G^My)M5qTEda)NC`{>Z`piU|5{su~blqlKT7{cjHCqRycoRi$vmY zrU3qYw*YHGpZVx__H^CX-v%Cm}x)A&&%6EJL}beVWqwP{|c7TMNiL$vGpYdFJ=QhP@~AGBr{kl2mHVNnK=9 zr+nkN%pWPLAq3S+Ik!`=KBLiuAtSPPG(c(cwi-UIhZo@DVd& z(4QiLEal5c^BVPyR?u)08s)((=kX|a03Bd0{G$@lrNsZ11T7rXFfWl7QhA~%E&VU{=dGN8gz?1Q!$ueLqQvN zF2`R{JV0Qm7*M5EAcY)iqvo9=jC`$c0@O{Xh( zYh5|HrKcHR?@&5Lm{fxlxO6{bxvwuO>T|Fw^dO>7ma6(APC*doNtR9O)3X4MoXg9e zoPbI*^8F>P`X){yo$z&Bs3(RhHnC@cpWi^7L$Ti?6?JiDS1*cQqI^gYB$-=DkX5=u zKc#bq=MLOZi6uIWOjje#6)?aSS_yRpBJo?qZMsw#94vqpR|kC#fZ1BEunV=B3LHE# zZP1MeDSQJ^VosJ-$3&;=QH-ut&M|Xj`bp=Lr_TjaKzOxI<5pPc1J3hLHyK%I?`h37^mvZSt@;e$@!d4>uXXHriGgh&DR%(A^&M@3 zPA3SsNLb;f1|cWjysZm*5}3m(D6FNFL~zb2{+_y2%eT zvsRWYwdaZ+hzbsQvd-h#%OOaFW+H+fNjdN5)0_%_W)~h$zy<7)A_fU6uCy|1UGkaj;~SlkIJ5sWE1f?B zowag~ZSGVhAZ_yD(Mm(&52gVsiOzWnr~gcbWKt|pe?B&D0jI8uJ{FJF{VUh|r%Igr z64qIf&B8T+L$a9MPlMobMOimkEow-d>za?cDay5F<|!wTVI*S8SI?|_xDwv(yER$+ zW_E{!C;2=3`d#wt;-D?LLMK~OyX`z!@bJOY&vhP!fZ#yv8(;sX3)M4o^{Ks&4LK@2 zwmVs*u(K>wf?A=OT4<8Uj@98zdK~%h#lA5}nsS9i`Lj`rzy&cp`YS`Fg%DJuCR-FE zGJujS-&mHWllZP(*&B87bjS18eNhftVPtAa5$0#}ypum$(Qy;Dr1yidgs3S(1gPm` z$U5|RY#_YYho(oF^7r{gmAA$rV9zcT4a9%S8lZI+B7?9lNY9j6@GgIHal0m9QOi8b z&~HQJ=J8Uls_jLeAFR3u!t~VbB#r<#XcC9jc#HcE|M_DGs+S`wXH#3+?F`~5pHF9s zSxDWdm+@H^!Zx@r?LMyt9k|*w*aKO=5vZ3kp4@sD43i8OL=-?qF+gctW+vw6)la#E zKh{nLQPZWF2&FvndPh2?8;(WffsoWj&F=#qnEfIDq8?8ulB6eaf*Bi_P{LpsEhkYR z1#8c&OiGf!e*`rBU6w%o56W0YRt}d4qNb0~IkCwaxqSSDd6yH{!Ga4&No zPWE-+>=nboi=pD7;LKH)PCL6?d(QEa@A=+WgwUl?=^QH+naV!PH|RzNv4aL|{(F=k zKDBc~Nlh>!rwRq^FXwmci-+02tjOW#P;ToeA1hh7=kEj;Mxqo434(pzR8Bc2Kztl< z3c!da>HYkAhL6}r%zb(4lvg-e=K#dD!Hd6 zVb*IMC`}FytXKV>k|d%D!Y%-`(%rs(!xO_Gh}MMDHNPOVh7l3&))HfYPA7lU=KLCV0TxN3;o1jMq z3iBbDZF=0HUclN{sLiA+O}3r{I8;)%INcm!AQY@e&Oo}hZOhn!_>ce=B+l=2&Kb40jrY7?&+C5f`+n|cvM;s&EB-H$x}`Jv0m}?8 zPwfTO{+UzVqBa4=AhTpKAdhu9_rOx~J5?0VVwZteBW{Rzmf9ESbZ$U}*lZ>%JD;DrQ6|Ke)lBBsPl%Oo@os?d%v9#vJrr4u3t%j8F>Ua1F6AP|gBEFY$Vl<)ug=*xC z#(bK_6aNluk-Y~zC|P)g#}M_sSblLGvCjnSM^5r{r2ft2;(Dq0V)E|Qu5VBN{W1H2 zl7U@Dn4o8+YX5oUDY{a5(g=DEo{hsBKi9P1;4;6V_CbF5=E+DuR7`T#js5u4*Pl99 zXmH+|3q=<$IgoPp+tC+<#fkCW531$PAW|EUK|T4;?2pYnt0Er2mxk*> zqlPE^87w0oxLhU}279q#<;E8aOn*^Sgq1Ai_1%U$_$d1I1b8z?H!T*Q

>qd&Qx~cKaDi%@2sG0elMko_oO!sN4z=YP36p&C&MLtp2hPVGUQ<9gKN-C z+|9aL)f2OAR52CaNsw5eHJ`

r*zQskxWNV;c#^NX5Ru!a@>#4odlXRLh<y)6e9J1CIs2;Q3~hQ|E=-oo&qw+?*u>{j{@RdMx7RH@V-Q@QGwX%a z1xkCGV0vR4KqgS1_?v&z?+ty9*#6Nt@M?BJK?xEgVWLxUkQ8Sg$Il2)8yv_NPbDhu z`>uvrKWXkSL4fkquH<-&1czQ@-m=1ALOw-t_bK{gdDB8xT*;$drqH zBi$uqV?O>>j$$^4Y+lV`k=~|j3G7ZI2CD9bU>Kart6b5}qMQ!C5{R7UsX)m^s=K05 z@g}~eoAPz?JD|RV+Y~$R{#L8ykg>}~K#ieR01%0V; zyYx37vm4d-gwVs$DF>@%$uti_xaNpa%PqVy&ImE8LN8Ib;cu>3?D5=1;;gMYn?>!? z&Oq@qzs>3ouroL_htG7HRbc%#7FUi!HLv$HxFDw?ZMH zeq~I;>zCYJ#fWB;c0NPzxQYkFpG$YqR87dFuvmVl!>e^(h~f?Wzpuf;QXUM)N|uRK zRd~xEUD~pC-*T5Pzggu_6Y+zkg%n;7Zi=o@@+)`TM`s;2rx-&g8+@qf^Ox-#Hx)$i zM)(VdKqkTmeN>v0?$meGVVlXp1CAt0lUmN#22nk z9RMJLYTDipA)!7$OMPq5dR4I}YAK@!YwOH{zH~nWuM0ri_j&2T)T($Hfo&rlG@n@L z)0YxX9#j9T#(85uMoE^$m9{&4?JfhmCmKwSX@b{$k-=Sat`~=UubYFvMi`e|RLI1P zV=9|PN)oaQ4n@jsl~pG-%iG~R)iko7s_?#CJS+LIWm2I2tD2laN*UlSq9OKcd%p&k z%sv1PbvP-xNTeZXR=dvn{+d33gW>SL<*QGhG2jBJAc@C*jQ_>yA1}R@1w~d1aWM`~ z4U%pJ_7UMo~cmR^Y*|jV;v! zVP|rlUQJqDVs@*F9WIfpxJ34^gwKGexQ0;V#HjQJR_bqbmy+5`UqX%!HoTtsT4Msb zdEAG`UXQWj#Wz0g&IZF%_wz{ue}3HNvMfxSV@7xH-m9HOymGOcc;!AVXU(2@aRyjG)G#}r;MA{Zg~*0Ajs zI%k2fUCB#@O~8|&rZe{8uM%CSJh%}3BzK>CB7I51X9~9{W3P?0Pj{QessM8LYWvZk z(4DAS_p6*%1xT0s|vcHFWNLGcHTfrve-H=nGfTI~ESy{!dgmEA7sQ0dA+Y^}L}I%u)x*2>Rw?=5?D zYWZAEcX=V3F@bm2nR|1SL=UshJw1lz^VZ;tr0J=%B|jSsFquB1#6-+ zo=`EUEyWju{*$r0Ggn~;+}Je&2h^Mx+g$Bf@=N`2E$hzN^t{;&*D5xZ#H=;Ae((?v z_FPRMtf+UUpVmaX9_VH)aPXHxJHhD7rmNrN?ak{;<*W@XAF6YL@|)@=6F_P z(0qqcv5k{*EE$@ZK6ZXIS%QM-<^P;TqbY~i!p4dWgT$$^0wd9w4PytMBAYSqq@AK} z%ccg(Fc?NLf6ZT3@oHHqcWg<01C`S1SI>cI-l%au!bO?Ze`@(Mz3Z+ISYdAhQ9}L_ z{b(!A+mc{;B3Ptg#*|lDIqWar-#_x0l__CO{^!9OR6FYb7!c*}Khu8;7IFbI&|$@Z z2W;;l@M|8wP6uI47Q9vP*#eLV=ArVhI@O~;iGj;yh*5$*m%z;Nc4dX2Q~dng48I#o zo=bRIC2%=g7-gWPEV4iO3K;`&ecTz+jPn|R{=U5S>=^{qMM1NWLs4Qj0={f~oxAQN zrL*_|$wH&@_1uxFp>pFh6^$RPC@>bMH}9+INbOP>@c}tcr+>#J!H@15ph|rD9{BI` z&p}f{K!W0u?fnjCOE;R6$40R<#i3&UU=Tj2Nh!_8Ux6GwIl9AnEM@JScUtytGiRD0 z^nSAQwh*UFnNC4}#H_0WTYnKZCf9%OdjCuoIQ!MFnn2b)ajst8=ht}0F3tSOfrYSd zqy4?^BC+Nk#Q__TuoQ|IHef>JUq4$w;^4fsxf<=-9agNbD|9{>z%3dD)(Ma3*0@+`0QOt<#n>)g@YNa2U zi1SU|kw~NVDN+(xm>V(L7uRSIC;Bvv@*?c$KtlmCeol<42ZOyF$ElNi#jFqoG2VknpMTWJ;%I%<}nb zQn1?el@;an-6MHHMP*Ge9$~+itooJTFEg*zp$mN8y349eohmG)=g-MK@uuhSXwktk z9^RW&O_6bf#595|k|vQeh6oRce@1Z~AE`$jU)KL)03K53Vk?D8v#qOrLnO4*7Kx5^ zYC~C^sI<)Nb}%!SWP9H4W=8*+jx)V&m*OMlL%vdAlO+&Sllf_r9PYubFT2G|qa+1G z`z#VZ*Bex{o_(+T59D;jBjs3#cPw1SUmv8OIJBE{+sDrM=J)Xzhq?@8fek|f@Wupe zeZS5eS0*1d7CBpPw2BB!21fSRgpzVac&*V^e|E?UlYvR zmDWStoe(+CP949{r9#Jo9*PI;@RP1K{~d2UpP_}o4oa{FYgFzktb)NVZ-j#q9cMmx zMia3LXV*V6Xd-v&KzT9nuUBS&``xUS#$U!8Z~d1m!VC-QxQ>q1U}90U-8lvkvjlx1 zEe`*S>51aTJ!sz-Q6BT!7VU6z!TA!hFUNI`Qp&w2spB(b6Oyu_%=m%bcbkq7_O^j9 z11$;jAi;GtUV9gKbV6yM7M`B%UtZ^gaC3=G_mEywuSS$vBqYxlfr20rsk~?tN;Y42 z(St{k?@#7B-UdyCj*YHy^y*bT&jBnS& zBASYfnhn@!iJLy|dk5DG*sMV98L&>`$zk=)cMwm9m;$SZT0UdKFH>|P7%CPq6R&SI z#8`HZ`Y`G>Ih$%QPEKA6!~CA#+kV|joM{es6RKXtg*`V>8vRp98{LQrs;JH!Q)kP7 zYa0V=t-u3?g2u?$(zlWsIW(UyapP}{WsMbce5{SNagm_WaCl-KH{viPFK-gw4dvPP zqvjZq$XRti>-C|H&u(&WigCWo-RM^#mDmPr-BNcuop1rN<3+H6$LqAe?X%D49D}L) z!M$f5qxi=9)i}w{TG1lUVPQ!%KLG8#wpR1Ji4vdbfNP;v0kw`z-)Hc(^R|N-Nvgt2 zTmHL>0qquZN@%CAt!9Np2+8kzSSHB0%Gwhd|2kuTypCQ^h`Y^Rs$JF{8 z0gupL9Z&Y_Z%d?reccan{c&1WYV^pL!)vbUSKFzDC}5!QTu<=*rTF4j(K$-tSjEL& z=95&Z*(3dIjg)@hucdz7xPEOu=*Svedt0RHAo4a71^hYA&2iV_%9ENRAzlS^uN|n( z;`v^GYXRBeatgDkW)8+}bsoH$B)3D;eGWhWeXFqR>4s)%oE3Z>|Jcfw)4kGxHaGqg z6!>tVuHbODEF|8ZXlqiAw_TTCsd-HE){pPPObz#O9ulk(ioep5?N#UFZ{FX_1JL^N z>K0msH>e*r2IP^UL86*-$+H_*bXhgfADiHXhrTL#*2kYONpm6-GpP%w5+?2@7rrwj zj}_^*11GrYDxCZqn7vuwgJo$PR$ce%g&>7R?jo-Dv)}Wc;`Ke4O-fX1_+-|wvtvo! zji{=HS>miS&E@6FTC{&XINB1A9yYv70wIQ*44=hbbOj%uZ!_R9|97c-&xBKtfpjzC zyJM5u6VaBb)r$U6x|_1+L{|Q79Bu(B_(2gR%a%@VZ{jZTnt*fF{^TR^Aof0g-&}}n z1Qz-Kk)z{nNHk0MvpQU-1lK>P+rL1?Je`uL!I^%~o)h`+mtHoxQIj+-IS@^CNOlpq zCl>Y)hGFQmZ3T6I!u}pOXtLVb^e`dA_zP@%6hU3Q;i>7rGq)v8l{w$b91ZMmw4!20 zDi^3Y^0fI9!z6Tks5TQ_%j-k9_s_P>&~xv=iMc<&Qn3b~%uh8Md^Mtas3(9E6ph1d zPl%OYfmqGRkN*{f@CZ;nSrK+-EYffmu3Y;_9fr?rdcTQ-yo^eus%=Kd3fuVB3hp>c z3*Hw2-J$S~`_zusjCAp?^S7o-R6RUn$L+GF0P5W~84CNPvQV>9HB5B8#X4)yQjus) z9#8?l2N2{KCeRPC@Yrli1sfUxR$jg6)H+OIYjU=ynTTd=kY~S<7}M?M??|^nS@60f zzg|_T9+p#l?xKZdTtCP6@avu6mm^9c1@nk{cvtRytoa&Q_(qqDMww~h?YM5R<*KG%8_n7R`#e{Q>z>@EZT%+ke^Sd1Ob=MJ4{OmducyGUOS>cj#8 zyMo-O;MSYQ@5pAPx;{WcAl9Wgy>uQ`$iTh~w?iYWw@D2LH;+x{NU5|(v@pfh97Bvr zrpwm;uZvl9qWu*C_;|XwFB8&o_RVw=-sDz@l-Q7_%vik*A<}iVQ39%bo2bh3y)V#1 z<@h&IBaNZ)F91oFx@rnFi>(-FTJAMVeoMsD@=k$wHzS@me+AU-yJ=sO(RGztU{hY& zEW6zG9OhPlzXa@$BX<-6wbZ$?*Y7+?IS12P6_o@(w4VI8)01!*-UWI3X5qXZlozZQ zrX@~K)#7_)t4-5XDE8iy687X^2J#TOj$GTm)A(}|wA1dlsMo{n-*J^G=1b{7LeaV* z9v6+A%JGXT!t+IEj}9di3NMKm)5xuD2I4^F~BSYvUzskp7IT8V{u2D zzrB8N28X}hVFeh+fT6oBukV@I(UiGC5p$HT#M|i9X%XW<%6tx#_TPwO%I*0gPn;w| zdsi0LM@xby)EO4xf;g;t*2$Y@j=Uvg2zeldOuOv&NC~S~PMq2C#r)IK(6UGW^fJ8)$O1|`lguj@q+HRy4Km|SvCoX$ z=B*t4nEJ}hUct%2bUXajjziC~zHZh3mx~c9BWXUDf1zvZuT(ap=B!cUY>aIep9>St zlWT3n1GZrRyx776U9l+aJYexAN4e~yrtMcfcL?f}k7=GJkldZrLxUq_<{y{>ZxmqR zMfVkEg-hcIz#F&(>9srDD&L|7$?Ucbb;b6bx0&;9T$R0_jehP?jhD?>PuZ=m0h>rS zauIPa9J_d=3-`j9%MgN}P`;%WfuFmkbo`7ZVmLlfdHnrA?lF6d>$XNWCSAfN3RtgT z&J;LHWv3n|+J3Na54SQ$oo!m(DXkE+W!Js-LV}6sH=zI%d%X#1)uPh`ipJq zLyykCdEIF{GLzO_Ru_@l`QdARVm$Om)@sa;`6g_ukwdMyL2KObY%7kn5u?pvtf*V| zvBrx{t4g;0`nDmq(e{v7O@KP$j9q)h^3Aa!NJ4vQ_dJVwbq=Qq z6CBrMG~HEJh*%p(KX{TmeAbGJiAs8$B)y$B9Gy~GQD@~>0rYl+2VPHH-8oau{oDVa z1(51+=wJiH+<-UzNwohWHMS#rzBKsdCFHws9?;57FAJHmYr#yO{JY!p!C(042!~U| z+V@6FeOIQuJ-_b0My$jBkX0QS#-Z($!knX;JvZ!O!IKK|!$q&zg>5Rr+9$Mh-h>Mn zu49vOTGPvmsT%!k@{f=)dcJfsqHc;vgKxKm;osP8QM1iVE1jtyAPocqxP^O@e;fon{Q~YyrmvLrKKr5Y#8qUc zI-lHg^H4@!iqbq@q<`eXLPNB~)kX=p_>nNtQ25i-FM#~num=9^j}U=xzH{oF>4I}n zjkmIX%;eOOtw{=F5Y?D|?Lj$OT#1adOLJ)P?j(1Wmwcg$3?0JodWqCiuUU^M6iUYp z^IQxMVEu{A>QQ=ngjL}g8z$W{G2T|9@^~a%r^b{gkqV1qYgSw`89Y{TduZJeD(rk- zxV12Tzv$Vt+Maz*ti|N9Q+l$(ZQ|*!z8@*xu$2CNJ&ay z#o254pddJ@6b4+`ZZ^GRBwL`l*eM-`2&RIx)No%PF7i9!vKx#S@wI4*t=2o4?_r+3 zQQ|9%*KmK&kFIYwxNnbYbe=l2Rcu(Um$u7#q+0AjZvVZ!r}-=@dF;qR4L8|gYfG#p zfat~+8m2v>^p1-zY^{7~ZVy&rZt;E$U&Srir&`KjeTzj(OmCNUHk;;dD?5MPy^gnP z!LbKOB|eo8#&M+@RlvN7sv5UV<9Y#$MJ)yE8vjzvXve?5e~`~c2@f!s(JCy7ci7{H zImCYs#X_B9s?^VuI)`;Rjvl4QNehGLlCjW%O*-Je9b+8LV&x?F%e@<1bp5 z`MODdWe3gk$*}P+^)!H&(EJ>-TPZX~P0i`hIJVXR5&{V1tH;@4fC+f&OVg9q&RQbh zC*hSy)6uHuHSBi&Ah4fmO)@?-QktDFY@Dh|yvQtlT7ei$*h@~ z;{V4xIG19dr>Ote-u6oeUeYG%JGa!z8C>X(-6|`hMu&QO^x)Usf1%ghSI8YnO^f~v zJNz(xUcfUBrbA}zf@;urO!mQ1BZcC+CKrO0+`Q~c{+dZ`NF~MdH%K?b^)rC|6{QBU2 z7T)E~!wWx&BDZ0^-@J7P;MmB-R%5$R<)?Kw<7LF3y-!=GJP+9hKH@JL*X=u427_|V~A`yMp_X|Hd=juCTAG11-uc}@a59j2C(Z&OKgOtb_-T6I&u}AkvXz#V$9_`DR z$l*kd8o~XYr(`v)aXaeVMH-mhzL{5*`*Pz%;^!6ee7A6K-qHDJps6VEILZy7+sZL5N^nxeJ=b)L(;FUF(&Zs zXXvyGhvMwLGE$_Jkwz{-6>UuTvgXNUsVml?%XZ>-@pM;}OKO6D`ujUUE8D@LMry6{ zo!YwnG+vC}?Ul&U-@mYTD-N^`G1*IhW~&aMob5IQprNH@5XB0J5jz?hwXf!iFU>Yr z9_HrV?b|QJ(B<<*eK8jV*Gk1QQV;Lf{ic{vc#5zV>S^63@mC*qe-|!Zk8V<`GrZV6!@$IwI1 zB*qV=A7!4hO3J-!`fj$xIF9K0*N0$W07QeJ?2p--o;17y!U8Dn+Ll>DNc;;>0&xpv z`mF!4bacV!S6?ZueL1tp0c3^G9<2aR7sjp6d} zRT&tSUP3?)$Bs6%<_thWdwk($2Sckp$>mR8t0+G+FW;gy`0}+$&v~;6cIBmT&`3a+ zhW5wZ4s8W>k01<0cGtcCDPN(>g-@m8TqFq1R4F1n>}K77bC9oh*d$;KNCdvRAwied zRk{^2`;q8KRW@-?6nW7cw;rc#1qus5$f3dD<+HqmM7Z2&e`5cgc^)GlO5qX^C74H!N}u#!P43nh zyu8hDdQd<;=6-J7E?`5ifH%Cc-2+>uj!3R=8@(-XoZHspYpO=jo2If1G7;z}tnNKy zag8A6w*Lz{A<=%j41o}DnDdQ@jD*kZ%wz^7+m%#iZo6GTu-fCu#&B#iQ094&eA#G$a+Z6wLOkW`8{tMK2fr#UI0@RQPC-oeQg3j&X4!mCwX>GSpi$ylw44AMIs{Yj$8aDlN-in!{n!$fNNadG znxWfrmmkud@#{T<1>?ZPU`{HkC7P)4~P|u*H&)&F$9vV){ zx#BwspD>;hu9Uo~j@3P-GU{FV9S$C)7d<6Q4j{-J>Ag1`?}CnF3d@LX5vY}>`$9HW zl$X~lnufgjHdBxqXD~9C9B=DG=qDw`bvvzHEAU_fiI7OatU_yoWMZNi66y5Oi{LUF zyC^-f$RnRKOaqhR{Ft=1G+@@pf&zhyMEIobY4N8qTi-u)v!5-yQ*a&6ijrTuXxHCF z(?H2V-^S~wFpx}U+8tT3ht_CioX6CSBA;1r2yU7v$K%i3qeFKwIF}cK${Vpr-5Df&?+HEBT2C#+nGEM6d%17uQ%KWM8gd0`0>|aT(x9?D&o79|4Y%Jua=Y;s zbAz?SOE?j9ee2W=D(24bL7=_qOUHeOfwK4f=wW|K07`XTqj2Y}G8dLl6x!&nfP9i0 z5sG*O0RaEA#f2D=Q30;~In@318O+q^QH0iz9;owOq?I3tz&SRO|CxkdJx5$K_nEh( zLI$f9;5J9vcmzT|L#NVXHpo%`Wj?FwUh$I&CeASGNxIloom8hv`hKONdn*a`9ygf5 z6n6fcuz0v~UL=cbNoVc93(FXbR90;fKSA=yOQd(z;$2ZIUjlM}-Q7Y4E-yWIlKQb! zO<2UjraA(C-wE0NMNmpFBO4yJsvgXN$~_-MMNl=NCgr1BQ|totRCUfRGe0_h^7!;o z=ONq^`DywZxu}njFR+bw$|rYvbkcA9@!6xj=RloKb=0=5?(cOYQ^Vb1vzep%?!Lg} zsf}U7Mkt1HgcRHp>?w|4&!hi9FXCw-Sfp&n-s)|Z657b6u$dvxx|oH`Ipz?A2=)nE zWF{ejv8s%Ru^XY=K?XZRczi8-f%rcgIq^>U8CBX5cleKAE% z`CtP4BVe|q`+}?9R`^&|9pbXHnFKN>^WX`$bKv5Dl};ky zftq@|CoG8n67L#x=I_UPp~*5?#Sa%My}^!kjkv0Y51u*Po@vy(^ao;%qhu8 zUzG4!ZF_y*6*L3&*HOAwII5yhT+qQt1!TFAZO2YR|-Ww_v-meGYo?* z!e75D=R2Urw~N6liG+q!PnI7O%Kf6b?j~O2iQS}sy;Hko-^>t8j${~wZ0rx2zrYs{ zoghvA+C1VcVcea*75M)H3cdYVF>4R4ISar6R77JYsG#~HYA=GZq&q3^WTeT2$Kj@q z$$`$zTAJzY6B*}ex8LmyoGmg2!!m6+LjLyWZ#zd^&rTCUFmY2QnGL@qUfU1?8!uM= zFC!wDAq^r(Yg_?RV)4m$$MoD)`E0~kXZ9kZj{OSn;q5~UJd&QnI6qkJCO5ULK>cTE zJ77hxskfS1-6pb`kP#PzYTK`exSh{_f;E)xNn2awxIv`eq(m=$YWY+Uw+6%y=!^dv zJ2NsyDd|VvfpNaN{cBz1J1=7VT9=4exOv&*8IF>vDz@u-SDDaaCOVj}9Om^&*phFG z^G@Zq6dwW4Ffxrj4s!D*!$8tA+iOyCz(o!krC50A6T^2!H7G_?t(u z@*yU2V{vjIvv&@2J}yehW;m1P1o`F3FRU!DtEf$luxt|R^lhwSWkf`%CX`WJS*$g6 z2Ei?lDX)7ecr5n`cx<}d$p*(ORy7)lqNy$!uaE%Zd8 ze36>ABd{(KN`yKXt<06r(3)TY&I-*nD<0NRdgEfz2#u-g2{pkd*_J%})pD1l6F8Bo zE$OKEHsYaQC98L<4GD7Vd{3yHlw|ajE!i`YcQs>V1wIR-Hy?mYw!MmI#P={*!F4BF z?!!B>XKDG9B29ol<>?^+t#=ccGMdEx-xO*&vOxq-MJK+#Efet1MZvc=(~CM(A$U_& zMSHnLQrj|8vVj=qX#e6DOxK9V*fkYH+b>QGt42$E?B%QjB=`&qmHusz;`gI;g_(Tf z#M7W0aumnYm{N=W{Uhe*MTJ1)hR?ZWhuY4^sdEZFeBN*cB5Il=`c#7Aer}s2I2UF2 zHJWhV$Z>a|jkdG@*_sk>OAZ!`fUt_OK)ROZn$gS3X#X>=PCEN@G+|Y^`>**(^#$Q-rX8&(+!DlD9kO1S z*VkmPq}k@kN{X)OU_g$adLJm^PN1lbM1Se#DGxBiB7dXM>5tB>^@x#@tfdQwS|y)x z;)PKV7{OdT4`APS?)!q|bUy}Y#cBB3Q^~Lg!N|7LDXuSlPhcvs^A~<9{a@UnKPy4_ z!~R~k>My2z!~dF(jHbT#RQOs$qy5;=`B2*omp?oiKjZWTX0nsE;Zl4fqW)io3qQs4 z%JRln?fQ4htyAQ)hqM{>iUd`JQ^3s=Vt3oX>I#oOY~ znEAG;F|U9#r0W>dVLxbRy&sNPVzJ~MF6XAD@L*3F#h7NjapW~pCN4M}wmq2MKHG7< z(W%3x2$$oTqnym^3HI{Fx!F(U-J%1NV%)D8mAs}(A&$xSoJ!bQ;PmlV#@WBwmq)=3-sE|wQru6EUM zD9?Mv@;V3HhTB2r74O`$P8sL;8)c5DqF&^+(KF4E9V1#v>U%i_v?{klQf1W8Ho)cI z;R2B%%+Chc@pZ4Q0N)@go~nW&i0qw#g<(#-B=V2s$B+|s&NKZ)2v!ew!8(iXw&jnK zm{;>!w!F!W`j+!MAXe&IkcK)j$)KFDV$OAA?*|!+9S=M4!YS>PAEM-a!$%k58K(vn+8hNnP$VDDSqLceT@z>B-w>Me zdrQxRMjw%8xQc;sK;!Q@frJ2NOOo8xt17zZnkr{xYSFs3)lNJTC7-TYGQZOGF07p1 zRgS}Z?xAO#p1iIEyAyNy&@uPJvI!#-5kFLlwm2#CDAE7&c2*->M(RDbwk6?8-YM`7 zi(Y2#(X@%h8NDsor91nvKVs?x7{oqeWImKjmmROX9)q@0!e_4zF+#var}V$<7?vEl zM%)mQ(wgr>*Q=H+b>ziT5SS()rAIxN@BeO9w0!eXDJ{<-EIY=OXuK4$X35BR`-toJ z)5fjVHA;E?Uc-W%^8nipg9A|QOfSl}>f!v(4HGLs*Zn$GySJR;xk?Qjg{R`(cnGxR zhaAs4@#KkOcir-y03K4SIIG$NrL4mQ#r@_HtJW6>UgE$-&!Ai>_*D5XE(!MkGCAt- zGUX`mAq2k8DNw~g#&SQCpk+?7m5&n<^FxBXUjeUg1|b+~rB>zjqHLFcogS{qVuG!b;R3KmrTgSBf`=LkLT< z$(hTsZzfr6s}G~k#Q!BEH^e(EXA+0#15LrsFWRM*Fr4* z`u4tq@y8kwT8#_qLLU0SKR7D`UJ@eaO~XTO92b#Mu|4o1JpwKM>dv7@hu_NEkmsmy zp8BE)7bLm&$x3hf3*O;J7CQ-w=N_0WH6u~hTT}V zwiO+J(NtYq%q+Ddzt$ia?@2j+(Wc*B#usli=7OXDKU0dVh2$oO?MUM3cZO{8C8_Y4 zqE3|sfgf;GEA8`a_Iuq^EW3=k3EqZuK18RDKREFS|6%uRak@7asJYsZ#&H{6Jla)R zPyIM|$k}GKu7~gnFeiwfFxJJ^3_QiJv5T(n1C;E@Q)+cBHfi_25L4e>4J7HA1K?fv zk@^+0q@H_t_;b8gEgv(5mDBAc{(15rhKz2cjo+1G^zLQB z%pewuWXIVR3b*gqsAm7z_~mAMvAf?Nge#tnb~f9!c*kDOQEGQdUA9kLf3tT0F(8{0 z^X}wqb>fFAAVpE&oJ-zF8PDk%Q99 ziwJZvWP6L1*IfuK%*E|RT1Q74n4UXE^37^L>b6C)w96+A8N6!=N(xWJlT5 zBi{H>HSQ!cc{|w;m_zN}6jdka_S2EIgpAF5clPVfeaV(ST=ONP(QhINGlx|_B=;yR zp8tV5;f4sq$R!F+eX|8oha#U`j1YU^J z*08P85vVBCUFP0#ze23*6Nq9}Zc<}JU$U;mT2;O{V)R^O{(D@)0%DLtSAfZp*c!cj z_UxSl4S~kLo9reIH#d8kFg#OONVC9CNeLKAz-)}YS!$&OK9oC?wW`$HuVg!{3qNd0 z9*a$czwH>mqWN4rQ8x83z9}`XsMT-!&!K2Sb|#3A6tci#gvyiuTEaNKZ%G0O4`pgO zho(bC8oG4m+&=PMg{GD^H*iSL3^LHysr}wQu}eNMuVpU{PL`NBo5jU62G27FBf^MdwQ45*cp_q!rR#?MgTiZ+}^ zjNAvfX@Y;9n?;aTlq^!~-#J-o*KtUociap7iXHc3UjQTSuXQ+b?3e~qg%dT^EyI7r ze4*9N<0=dbK=y_-V&|KfSodD_Tj*rv%p%nm5xGKb&nxFof|m&Gllo|++cMcDCnnqu z5so`SnF!Q^d=()oD423=aU%Z(f}`Tt5kECmFP+dXjk!eJ7C@Y*GDM_%I%`)YX)U6q z=JGIml&^mpMnf1jOr6ERg2hitFDK%ehq$DyQlhAUIaE=S50)KXAo%bivEZs-)&3WZ z_!ikb;=LQDDKD$d{;GrFQQ~6PlvVZr66Mt&B)%*z0Y?@GK%lnknDfZ^W~9v`yHv zw*zLjkB`nAX$YJ7heV{E-U{c?K6~|aSOT$J3^{|NFGRQ8D2Z6yzSkVRBIp<$xHTfYSdW)RTO5&G0S2wy`!ao)nm78>K( zCsPP>+0NhbOzD9UdUaiSzC)3-eX5|0zx$HT6eC{Clxa( zz^hD=aXVvr)_l`VgBB#!%~a$95<0}SQ2y5!z?{&oV9<_;M9Q=H~Ge*o`#XF3^78Xgxe(Bq~-2EJm!TdHlBzGQkoB z|D}O)QSPldx3z&kQ0|Q_aj&GGj5esjILMOJ>O!1qF2)vymd$Z`be_AMbA&jTbzDwab~76JtLi2zxg3*)jVVQc^^C8_wU+yf@bINZ3Rv zND;N9A5U}%?`U6zXYrm|Us4wrwa^6L_hBlA8qNdbvq#{KKzLHS`#oi+YR}<60_ZEd=_TGVmMsO3Mf0XPMrUSUZ%*ERSa&`A~$zvi9#aV6paMmASJ7VC-VAA)X@Qg1}nJFu3UUEk2*sT7o z^}$PfU^<}l&dB{9O10GbsjnAft`DWT-3QW~z5#mFbBJhy;!8jN8%7A;JiZnc#>#(0 zSkR*D32Qn};ED2~=&xWIZ6;*i9pXf`2{HAkKz|K#kiJ9L=B<;m79~H#7&>N6fzBE% zN_+t43JOpuruFB2xn&{}|60t~`}RPmCH}OUDMU}ZMe__5+P!jE`A1@4`N;@LTQB<5 z%kvcmygJgKf>_9YK2x)ba0#w<^Szk(7>zI8pDg4ptmlyn9xKF(3~njuRH0UO)5TcXlOPt3k4vt(LSLT1lz6s1enT*sT*axt){hMIIE$6Qi`af!Hf@5+7d&r zatzbmdrKDKN=nT_nPS_OeZjA5*h83yWE3?4n|;N4QOUDf{InVeQrlwa?P>a~z>$f8 z{Y>2U744YutzMnnO;LTDtS^&o>#8e3<< zPcAXrmG8^Hw*`OAgt@1++D#U@PK;wC-+mDmfIXXY*z;`fYC~0y6?r9_+A!YoMMQ6XuwjztYVnQK5O$RY?7Y{=)L_(n2=UF&}U^FT(lc35K}GcRD9Go=(FkS2>Nq$Jd^4w0i@652xLQ{zI(v%1+te?J%M7LHpQsk>zaLY zv#=7Sz~?U--Fh*tql;!U;Y#Ek5LWVMS7SH38qkL2jRP2j@e%q5T-+HH{ZkuzeJqwI z5$KG^q=dUYK)F22IBO$yIC3w`k=iRgm+_&r5LeC=W#^x-%le}wHY@v^VIs2PKqe!f zV?vn_TngWl#W!@lbaor_G^~u#x3zKBNvl)y@?6<>O+@%8w<=6kjnDuUcroEe2etm$ z<}%Dc?f)cv^X#aDP)564=a zvsTVnX{jGjxlYZNEkNkFWcBf)NfbXPcBfJjQ3~;q76O+6iumg^=#7%B4r5d|V9{Iv z=cYwS2^Ji7NC0({VcO#f$RU_uLkU{thcU@H<^K;a~WJSu&1BQ9| z(PNu|(!iO$uaJOGi)ejwV(Ly&U*R;>my%3AXKnr(U&sC0_W7qd=uHpOF-bV+?jc5e z(lmEv4&wAbz`QxJS1|blmFiq-Zmfaw+YlQd+}=`}3;@M>IayyoV*<)}p(i2<44hxT z%zqj=^SWKO8)8~Y2nPr0_>iYsh&Q?3`x;w6b!}Ch+uVY%eVl2)VeefzT1;XtfX4fC z96xKAZO1R(cg_5xTl5(9a(w1m7aA1kc~)J5BlNh&Zy62f%xtminoJkm-Jp^ zq#IvLt~%PGRR_wl_Hk@zu5fDr6L~5gdo}&-<9km5pF@rlwGen~o#S7nDiYCm_+Ye0=1KDn>5km$GsBJWj*+vo%2bCMIlvm%Fp);ZPdW zD<^=F#W~gsDYVD=q<8a%7AlF#IleIH0Lc0Wk59Ug33AyF>aY}iJHx3$k@J&c}Mt2}5!-l~DTGJ}JI z6F>AzSB4~AJ8G#H6CqR)*pNMo{Uu*VH+64xB!jw;usVThw9n{6Fh+q--_lwT!rA!` z2$UO;?=BLDB?-?X-&V<&Vwbb>`Z_EjP;@`4v&Z=ffhiqvs287>T}^2J>7?M#x7Nya zyuuy8JK_~DN_$+)L$6$L{tCo%_$uhzl~p8TYU7{Ijgz#gS%HEEYl0|WK3g$n3$rlQ zdmpQth6#yRI85|enQm|njt+J2GYDGb|AMd=R$ohBgNgNgOUBjBZDO2r`JAN`IQp<3 z!9622_8PP~ALsc3Hn6Pl0e+kM+tv|2fGkqtE?a>?a3fe8aj$>L2V1hN7}Sd$P#}ku zbbj}E9tp(^WX%Ikmat9)&80NfCPao`(l6B{HH!!A==F8^ZA-xR5ZRkv+F@u7fZLO> zNuEh6T{~>|7kPWcWR;JpxgjkzpM~7Bjt|*B!oxavxV`&X5?4Rmi(6Ma7wETu^qUhB zL$^+d?@k!M@;!T5f-8BuI7dcS23A)&O{*!f?D^0fVZzZmX3k!1_uAZ_f+vs%kAjZ6 zVj7dHOy?PD?-cZ0F?-9EuMd}3)NR;~CIL{nTjf@FcwU}+VUP%!&)u5*j!x{sM0Sg^ z=+f?CZ*5rC-gVnM5#N_Prz~dcO6!{#HpG)*e@Rsr5nb5WQOxdgZ>B$nA0=!9)>P^0 z#lV6kgH>p_Dt*p%a2F_k9lo4CtAoPaQfi>fTH?s#C&NG{Rb+17ku=#k*K$Cn_p}j9 z`BFGiY&vSFiVpFMZx-;!GDPgjb>b|&**Xf=&WW6R3*Emys7zD~SpfL(WJ9NxA)d=P zh5q>q#Epz!^rLTnky=tHQBI^+DbLQYPp`{p{rC5>ClLz+zSVk>PQQnbx4H})m(IOB z4>vDuNgAb;RO(O24ZAlysFP%L%}x~fG7CR~-26%8fFW7Fsp&mT6m;6zmvHRPv!a5c zdrs^AO#@syciEuu2slt}AYk9fg<2TO=cvz!Z3})&%Vut>^6lAMY_3JqvLTt#LmoF{ ziDkYP*2H6~^EX5kvGqr@vo}&u1Kptnc}9>Q5kdAKe-<2mY)dX-ur_-CE@NL(bJ|mzm=99#;=;9G?LO z|3*j}_SHIk`K#sGmkZ2y=$|-R%wnOR>z+1hzj(Q2ZG^|rb){V$ zmPbrhXZ4yw;l;e#0E}x;Z$4E1a!^WAvqks*7Bs;JmHDsKxQ>jmho=rr(9+(!!K$vh zFfe4(^_6>ZEo1|X!nrB^6W|RV+DOa*0^^Vui#e$B&8Brb`u>*E(o(d;6kEMEPIy#v zw8SBFR}y>&)!fT0GEX}OzQCxs_xE~n+T(qs4}SILAS3^vYhtIM^H9Ai93IuIX)^PG5C04y2{^nvu#W*=nPk(DSu{TiPxBB==Hb5ky1t@!NTx#v5 z4XbB9Z}E7G>g`^tx8zq}vblLWK25Y9K8^h4DW6ixlqN^*KISy&+qYt6#q76VS8nEC z-xSsWJFbgcd*wojV?QJ)oLoy|^Bcy;`?+K?{a<4d-k44)NTr{|BJr94#P ztK%1uCl)#!oSD-QkW3a|-%A;?Rih`8yN@V;Z!s<5Un$gs{;gbwD0a&|>rnV6tjHL0 z!wx^49&}&1p9}t7=BYAzGk9>w;HF!KIR$iH)bnt@e7j}X>bRpwAr4^|711^v6{|l} z`xJc3Z>xork?Kr|^)-JAp2cG7==CNcr@QE#;9_QsH!JH?SbLp+2*;sqN?@{8L^ah> zm}*wGUOl)P_L`GZ3X~y$kWv;d2F;TuacX>;433Puj7DNQZq&@kA{`ZbfgzvEI>m01lV zP--LADrtFFXJ;2ITRGd=k!&f(TYehf=V%eyTqJ%yIKJb_%n;82qujVhUur&ajhC@d zs>P}W%!09q(Wj{Kilq;S^4Ep*`t`PdKQX)4-1HeI^ID$Jy3xPS`cALKO*ufv9IN5w zI_pbLJUKoePdf$Qsdg9D;=s0NZq6%++2MAd&=m1|yv+0H_`xJCW19Oe1>nAbg|_H571^hz12I6Mx~?+L_H;j(Y$s3cerJnWX3lHcR7- zHqa9Wu1)*0BJ+7JhQoadZ-*xwIZtbo`5VkIMM+GIJ`xfw`?$O@f>~IyD6VeJQ!fRi zNaALU0d$imY}}w}wss`nj`{!idJk}_`~QFZoa4kHaqP{p_a>CGXJr(X8B!W{5+xx! ziXxPdk&!}+N;+mzX;E4_RzoV9D%J1tc7Hzq@Avcley{&^x$f(_@4GnO@7H)fAM2U! z+I=el$DVn7;fSAmvV+r-_fK6_`&I%4Eo*UvY3yD+z={Dou79v41(pDoa9l?>5p}qj zL8PpL4d=9&NoM#yer6t;X-^rZ-7Z-sp}5A?*gXoMqT=w~u^Z9D2E&5C|2{r$uvJd4 zt77*U7{*n@-JuG8B9x&)0&#m#KALmpM2jm~SoSnp+AA&`?R7MdO<#qs2HW&)O*IFC zK8e$Bcln#lBCnzOoK91;2mNf4zQh@$Nbb5tyPO-+jM++;1`;igF!ymXss_Bmt_>oa z6;`Ko_3)JaR+7(0|2%U&eI7aq7UzbVE*@Ih+q`0`Hw{NytB@7ff`Zeg^LD3FbdawFw4w=N@CjmO0$4px zRqWo(;E1{dD}I1;B@5c$LFzwn73&R!HFUmIg@~FH+eSn8?8@CFnWJ;udHFZr6^?&5 zP|%emz2yp-y&#TSbf5OquiX86i5&WPFPj%PQQfz*(s!}r28njhh&+;zNRV6)t5&43 z=g~Sjnnt*IK|k0tgK+%+$jUN8@20YX^zv8kKD+cZQZxFme;{Rj0%c26FodPMV8R-} zSNh>vmEvu$q8I&Se0Brvev(FG{;5f{bxWk06HZ!O2z)9oWE13eF7dBj*D#E&&9_E7 zuVckI%M>n{9PO2*Y7gjhGCpTS9o$%1r~jRFQ7Msx&8(H6V*)RNc{(TK)-POe3~K~O z5N_&#t;WkZt&>Pej{#Lv_v@(%W%~QZPUPdF4I7W5JaS~_IMdU|m1A_Uha}$CnG2u2mwczaJ>9${j zAJsnr{=rgu^}|-P!E#sEjKxK$4La3bR!G(6RnSlo?}R7Va#PF>_}d*I zN_4HT^|>6mU`NGt91%b)PYqH`?BxT9)(zSbzOt(!&lc;h26+M3w+`w={8!u1;H$&g z_0!;SgdaE;>2;so<`T+}zy-*(fXE!spNrl#H8k^b^RmBo%Qyzy?i8Dxcj0|?*=RUs z45-Y*HD22$xG9Uo4d|J2%gXf)P94RzK1%E>{(4CQ6X~S_pkm)xTu3<10a9_DBNZQ{ z$~lvGYY;CriFmol2a(XaWl+0!GHY5Os%Y;>{Vuiw_G)=j6EFBPa+)^G(@?RojTqLR<-}ThY+i zGwi8&r^=L{K;V*DZHzm8TkRR<^HF@=A{!X93$zph?2Qix16}6xEm!eRc-8byM`isC zFg~lF@LlO@^qxI!fBCQMz1lU(#>~-1z|4dzl_u0G>xF|3e#3Hzck^dr7wza#E{#hv zH%p_J9b3ZJ3kwx@-#yy(TlE^15BpY&@YD2g2qtkCKSXm!Eg`V8z|G%L2;D$cG`%C- zn!hoN^r=ip+m|qv+Bqq%Oq$yQ6x_(Mc50v)$0N5vKf_wwF`4Fq z1KSr4J}(A%4PA*PLGN{|@8WiE^lQi3ACXmNbON|;JjO&5K=*yo9Jb-Dbkc=;D#o;D z#e-{}g~km%Kfmb~d(+IqgE79wN<&TyxTeA>vxax`)gR*)YCnl}3Fm8+QVy<6`H4q( z2`N93{AeS085owy5%!ChR1<-vS2>sZ##|mP@Z4;3p>_#T?J*RnZ^ZtMuK7`2{9Md0C8ZOK?D>cJDJOP*~*Kw%}i{tkW-lYz z5r;3?o~(!}Jm%c7x;~1Rf)^5~A?SLD^dVP8v8LDQRyS13ziJ-K?L2JGEa zM2i(;!r(L}h;$y0Ehx*M&f{wL-h(I$DA^NI;Gx}((Tq!g5d;>S#7vjn)4=7}E1Y&A z!m0PwhwlM>dOYE(6szkuMzV==E}R3Jot#3EuWg%ZHcuC5YDC-a4t;xukiWa_v#uP1vttXj37v#gHsp!B3|onJ!Kt6t&9=<{DK`Eo@yXAQ3s_w4;Lup^A+FWzZRK^sFi z-*gi1!J=@TZ|s7IN9Y1wtsC!UCayPgYsyL44t{^ZRapvrh?nhRG;4+V%c5dBouE|WCo8t$i76*%JMnwAf`NAfr-dC3? z9Mj`ozdi!Cu9aIg`DV;LDE7OEMo3m#-aX$(iOUwy2|hsIX)6M)Fy%ueP9J)eEa@0) z0(ySF$n@(gLw=enS@Z>N(rAdSWf%1Ahv8|_B@@YHH?8BVcq(jBeKHJ+{RzhxsHWqp z<`Gg3WiYFh`?gDjwKf4{d9`@a^@T1$!djiUNJsf6P0H%nvZO}#oJ*k&$Re9Mj;}s5 zfQN`@Yq+#zg_VOnQy8l_=)k7(9GpjIsR(SphtB2xQ+P8xN4?22-eMHm+2j{zaGX5g zSMK}6BdU{d)Of=>RTKiak~)2A{Ed+2vmq>KA3tq55jNP@caqR3ja_-BSmSebvE}Th zvX>aVlz#mPw!+S5MF3@Kf;$VbXR%Vzi;w)4my}mk~4Xc;41QEA0gVwo$@85EUNUkGz`MdjVusQH~a32TUIH81~Ei9*?;MBLeog}ei* zE@cT(IrnXsss(H(h)|~fI0T3Q!^2dP?9~;F5UGz8nJ2+#3bu8KAb9HL3t6FnB$!6e z9*fCKLyd?(t@rXtj;s~yzJtbczI5cnJGvt%g{Oz-c{-Db7pzEL;y5~Mz_av7!XVY! z!u|HS;3s+?oKJ7v5l1K-vLlw9QG)LLH|HE~OjN=AyX91u-+J)&`uO7e(^!VMXxve6 zbFjwQeS=jKhPlIK1gpssF|lK-H`FK_a5t=kH_ zT*t~vx*$vB(C)qmABXO5scF@t=12+;H_$o6+`yQf%<>k5=)im@CC<`iZ;FZx0Ra+1 z|E{+Lt=^e7!+*$u;YM|_WdH+0yDm&&p4^BQnb4X?FkLb#f45F&3aJib11wCle*d*9 z>6=_5)JYybfgrx)!UT&Z_Wb_(*!AZr5FY_KXujIpE(2)o&APdV*i6%Xi#FMIB>?AI z$m*2@aAO{Z((vuIwt<7x3vvnHvr)gKIL0*kdn`F~^wFh0hO8T!CMe1>`${Y`;HZ|n z9|LlKb~fZ~9gIA;mAmzLj+Vb-weiRNnKOAl+T~-fepAInmOxH!U1P__ee4@Z1Zd%BhmZ0kVo*h&(>RwT9fr6EEdzpF z`~3RuM{BMD4@HXE+%%iyC{g4kC&j|Y;-GR$NxsgFf31~AVC5vlj)?p{c{0`QW(jqA z7YnU_j?~u_Fl_$Lig`!G8;o{;o4O3C6re-S5rj^-Ukdj4A!lupp_+4qR>legI$DN4 z;1e++4+oo!hkCvjjmakY<>VmICDH*{zyVB1qZeTpZopmIGk}(8tl{F zvs*F2yBO}KP%5mmAL zn}UuXlQdF|BGGZ+&!uX9#}$k28dqjaJ-v9^egWxej0j~4=cS8hC|C7TvRG|qRufRO zK_8z<_y)jspqn7crGceFwFg26Tw1aRls+9dB(T%w(rEsZ=ocywf<{8MWib~b?(m4Q|&sGWT9q(3S9J+chZIk2|k9E(s zeS#zWz+p=7Hax~P&nEqO+g&#ZxvzEI?w)=i?u1SXpp}Qx57szJaU#6vhI^G0;z99p zG?H(f#JZtalEg*{n20+cV_^krx#vILZGjf#BO%uwz?<@E>cCka37&}XTY5mCJ*~@7 z@&p79Ipm(bRqkP|fya{Yl%g^zcO_eT*%BNyBm82;vnTyc5K_m{ZR}$MVwW=fIG2es@r*b@2D}8$CU@;l6kdmG^ zHBftmE_CS^DxD{m!Hbg7hrBA-5Kpx1*lkt>3+gXB-KL!&_-9H{(y z=d<=-dvx%@e=y-p4RTjixtrYX*S889a7=6t+i1q0Za_BnU~qG}ufbiT$$r|aSZ`kJy+xjmpptW z?-_=h1`?V04bdm5>{WNU&MS@D)0HN+oax6ou)4{*4DZs{aG%0hq{m2QJ`j0PKu)0c-=kiF7?7^eP zn~ZD-b;_f)>2|M;Hx)Q+Yc{ z5^uM!ZW$MJmY{$JDR{Z=tz`9`bF2CI8`E(`3MWo9V(H{6PQ#T36~9;ogL~E5HQ7^T zt))~sCDJYJlF?~t3HKk?4G{4J$dqqW+dgZXwJJkBW?VRuv2y@w`MjJAOXK|{{^Piv zo!%_*VGZZ=-s`A#X-vUU~&aM%Oa^H@UW$rRWL>JJy)&hH<5?pGtrvzNXOy2$iU z|Eo)=tc-+hw{kd+8ZVKrIj`lELzN~1W>QbtuUE?MaB}h9B!zqB`o6z^b`8hc(K4=& zIaST8c!`%R_-!xJwo+ON zn(7cMkHAfasOy=sLtQ3BC9#eIB{@PIp8@%~e;yhrOs!ABQJP4dlgA0GTAB8&X=7T8 zlf2p?9CxNU8J)hu4p`)>d4>!j!qd0o^D3NO8SC$>7V#T zCCgi`4a{Fo+uh?T62@UY^M&gZIMMs61yvVC5>Z`tn(^q#SE;I--Jfz`A;2Ek{IobbdwWS6(3DDt$eYh=AUU31M)9YX+shir1%>$cTy^Z5uEj`>@DcsAHfuM%3VBuxJ4kl25GNS~Zt>zc# zmvd#OVV*du;!24x2=DANhW7FC*%vrrKoyA;`NxKB;q1-w0R<+cF5ww#e6@#4|EBU% zh{&yj4oX~o`>sJ3RwRN%QES_nE{(d3=ALU8 zRuST?At(`{wTXII@^gnsdR-|gDI;_F>wMqdJK6bHYdW2ZxH#txD*aw?fTF`VFmrD6 zoy;z00i~7cko_?>vqWX(;o!K4J8a`mz|RbR&A`ZTl<%UL@P`+2Qe{}Qy>E<^{r-&H zZNznd^*Xp&Tl6X_(!db>lo5NV)6lffmc_YkYK1 zHV0GW%C?TysmGJ4k$E(r7$+EnX4`aTKm=H?F#Niv4&VXJBgN~2O9(i}7+v`l-``Yg z8*<4e!Z~-!F`tXVTJV{e6>RBXY=|X(j*y~6Z&eW`O2qPb>#FlOJ`IA0?q$$w&Kptz zv2azdsQfT|ec$!Mk&BOgpIzReTv9XEm9gas@2;zprpEeRS?J_@IC6&np> zpuR#};9>wa8FXZHLeI~dLKs1jMZhx5a^Y0H=67;`fn?mo7uUnbRLn^>5-ujzv<`ds z1;WX4ISHA>CyH=a5#Th2LCT-#Xx}Wy9G~20-9;)jJN*DAQ*3zAZ)>i+Hk+9hwABFW zUV}z~hzmIa=0ZHlC7imLi6ttk^XPz9FLn7Wi%nOaD-{8oM!n~EEWvyF%6U(;`j5D)C4V?qBFeJ# zbq}!!>}1udYgR2gqdU4 z2yC|_$*g`*I|-R+-h#M`Yuxswpf_4zw>7zz%N^tUQpTX~^VoN%*1wU)&}1Lx^)tdLjyG+MY#{!JD2M`{WP$HUNn4 zyxWAD$?_po9(ii+lPJvxd?VCE4JZ#xaBsz09M2OVR5Aah)IrURbkq*^lvM0db+?+F z{->*r-f^-tqo4ygAf!$Zpy^MLwR^~Ji|DI>o~*6E=ZalrwA{q5I8;+-VIOTo|7FE+0P}_EL88hUVLo2?FUNN zM=LvAal21BV%uLmD>uNt$->lDe(cuc)?sCwI zzP=?~R?SLq>Ng*Lvni^ zd!9OnJfzlGw4G|J(d!r{qW}5}mxl{^=bfAfw;o+_nWgg+zT#oCebAJo?@kq^Ef-%B zrP4Yc`MO^UVWK;0X!fsL&_|_LCZ0T;{CU!XM6RQ)NzsBm&PJg^+R=5Pd?Jd|PH&`v zTj$;(-Fg9q)|_Iyy46M_O!9K3`iH}%8I>o4Kl1#}3s#nV15I0D^U*Fnp}JHL)sS+_G>fQEGO z2b==>`WJeg_QJzE-mD<$W)XMCPb?;`LBB79gKv)kE{aA^Oi0=pEYF1^CO<|-qm~T3 z+(j8$G3%7{i&eYoweiazGd%qFiU|0kL2IWxb~(f9Is7M#AoM0?KgI>VE;yB3#|Np- z8emtLP?ykfIY$%02{CTNxbbiQsW`^WQ#PhH3J^YsR{>&aES<+OM{!zbUe`f^OiY#R zIkwu6Zr;%X9Sef3eeM~DpHfnT@5!0={4 zIrW2oVZms9&)cMq-D~Q>}!lpm;?CR;t_lOGx!$vsYGw!H8IxNfAE}@aWMc;+b z3|2*c;H+F(6p6xO`mn&qJ&;xW-wSdGFZ zvAZOj$K494Zj6NL08tkPXWP?*qPms~a#i+^Qe9%2K?M;L8*#^|TJQaQ$LNsM(nodf z>Pxy*+oEL!d0%-#XiCx;qj#hp4G}YV7x(z*+iB~F-rbq>GGG%|2{BBx z(kZbtReqbbAJPbrfB|y#(mhpN$9VldsHC8^GkkEkZar}*BLQm^ef`nf?G`%~5_TUW z)CR{!s$A>WH$@E`Uh>17#1ldD&|F=a-oo)w&{dAT`|4iNBp<1jgc=z`14Hw9hG}jm zsDJDddRqQAvH0-P!fCfb=wql~;X=imNNA8Ob}%`xij%>hz1;9J_Vo!uxg&(o;)4N! zpxK*iZ3UP1Qm4l?4u3#MXcYAR79}ShJ}DqM24!-*LUnf?YSC}*$%i>|NW-KMDzG4IS8R0kET{>d@KqPdkyCl51EW zPRjIEf14a!5rqzSCw|bfs8@<<0_!hq;nVB7gCZ#0JdQ5u&U%@*IK(sU$jP6}`EvRr z!fOO;)%IRFaYD+T@ZTc>NsLU!O@6oXtR$(IN$3F)jvG)a1rT`9nXy|7iFqq_n* zopp2L3932CWGu(Tsm!lZt(i&~obHLt#xl@na*-8OltvL$Irt#>&Sw{P_XE{^NA%FaSRa5Ni`pHMFn&V3(r73 z1GBhAHD6hqFDGjZFxy^u$oZB*k>`uhlH|pEuH{MU#@+A+2jPP;dY#qa>`P zvBdz#XbNQ0chE32!#{(|=mahc9B|nc_)Ze4IW`L)^g!49JSP7Je430LF`fuyYw%SM zDZkwNUCIfzb{lI27DpoC(f>ZYXVms+#57yk!%(rM(Nhra@SJhYEVwLKeCg+_uXoV% z0r&AgE7V+!1&SJH_SHt%BBI0-;X+9-zdc<68NtPe1J!M)>$ULtc91&|%93c$XT104 zg%%kx-Q`e~!e&3Vvq_TgsPWKP_%R43oXvi_5k5C6dHiLqYX$pUA^oF43j@Rdp*7Xk-YU>Y$}eZpkNX22F5&F8y|s5$-|mmf^5(X}i@4DJ`e-b)GU z{vatck@x=D&AyO+tYhDppm%}A? z0)1HW##ZyEmrxO@ble|Wo6@zFAhQEvy}Q;jir;8MVfN=06QT(!9TpU zm`HejuGZ>z&WrGWMs#6mCrkd)VS`ZIuwSa~>o9%Rjh_x*tgg)grOHuC#_fg)IJYC>gMHeFT32Ps>eTmnt6 zrS*)@)kp7q<|@X8a`!H6Sv8h&&&{~&WK`De!h_;^>ADvhj^&wNpwiQ@9NLj)wkDmd z{2c9CUsR3KrX9cPnB&)`PB9V*2e0P^TH@{AG|+?_8u(Eh3h}`n#*X9Vp&uV4BQ=)H zN?oV)JrjLGrVN zmqyoPdzXZbBON-Q^t=p($#zpt#M+2H+Bg1YW=T?(_UETZq=FG{k|K9l?9ySJt7B@# zYjZBCmWSfV(5DjP%eM`xQOW7FEd#l*$SOS$JeDENoDDBg!|+(^R=u`WtbpxIo zSP0}kbS^mvHnLpTuUcYhv@v*7r|&R0VS32)@i1roLr>U5nM*t(iGdyjZ4jXrExNI&6pM|7+)Xu6v7BR6zy)ki)I zC@N5MM82YOnN5}7eB*%1xC(5`=Q;e_W=9X8sXxiKTqa`bq_TGHO48Zz>5wm1X>7~J z+fmHWil zC3GNZ6HagPaSp9t3GNHgj&WXF9A!hSNl)73#5Bz_{_$>CPkxsI61Uc$!G-(N1RY!DU38&?ovgFF`J69- z(>>55@^ZRor2T-R_{_=!!H=-kZEQ_0Fd|=L2E0wr7i^BJ&%4*~(`aN<${9>^!1?_r z+xD*U->2r}q$W7>Yc*1Q{3dPA zx^KJ-0TnpRUp;0Py+7}M&$G4uw#3%er>hmr(KFtrty!Ph#KUCC&tSXnk6vM$=wR+j z(dnBVaZUQDf}NCBW$SuIWo;L=?Z>O**s69>Qro$l9a`t?KFr~e%?yP}6E(~)RBL`( z8EXY__kkFvj6=xfbBew~lPL2l$_u zF8@eLL>vFcef*8`XmO@mw&fL3-4s}-b$Jf51JNnpYTJGBi;@7v>D}Pz*M^2|^-`(9 zKV#<1pBn#7GHlK0e5b%FAyK%0)A-82AK7#4TiDlUqG{e-pz%*&Y8x7fsY8k4$%Ulo z`_Ol~`szcaMqlvgFAS;FSP6C>NzvRud;J@vq;%F$`CdfS;X$5tF2~6{uNyv2XdF*1 zv}vhrLqA%pnagYsS;nC{2T&TS zP+8|#KW&Q6o6cj0EU&!kKJ~9o;urt)$h+FT`IY0SNi7(D!=fD3b?81BwQJ~=qk?4R zgh{^*@jt45_&4d!o_JuY^^My&5gk9#<-JO)BEHL>uU|U#dD{DHLOqapzGtiB)({H@ z06F`b%*;ibD^qGepqdiiDs%fwkjx-0eY6fLzs0JeJ9>(!gd}`Bp*Pt5L&X$BH*r&) z1xOq2=HGb8QvLM-m0ryka5-=I5W{IwhfdV1G}J$cpQb@J?Sf17@p$2Fu^|4uYgc4j z{y-1&-C=WHV(LfYfR4KY{*Gx%AWp{{^oc?_aF33e@t!(cQn$OS>*K&Sp0;kckRK|% zoa+hfO}!=(<8&m+ZsetVzSRd^1V?w!_<*ccD%0oX4rgEYBc!!rxrPYe_~LG=P>LUL zu3=Y`89XQE$ffc-tgp|&1>$nfrxa^_RJ$;?X{I?8?+S#8!M=s?H8oJ_4{ll`F6NsWlgs5TI@N+8} z-_^w=?pjE5W~_QaF(4AW?~nw{#nc>9$LTv=5)VX0%wJ7%Z(43E)No+BndisXx9w;8 zKW$aL2`5e5SA(?uK>+tJR*pzkxp-g1{p9- z&8g>z9zk)uM>MS@LYg)7{jk_Z$yBCCgNiwf;pu-$n2wKERmv&Q9VvWbW9Z`BAiVxOSYcVq_WEF^F-Zwi!~`sR(I&RvO}=}7;&utv?MGVB{VyNsDNLnSpPPrB(^!m18RXP ziYq;y2qO99&Jm~rrZrx`{`g}2?%T27Uqw?>Yp-+%ytB;UO@@ zxGnZtc%g)YxjyB-;ls2^f@>}%wRxAT6c!r+G6g`30{K$#Na89^)`58VQRiZUnFYOw zW-Gv$F`~CNg8gf;qIr{{k_(zb(4ggQbqBuQnQ+@k>37vE7j-udy<4PzV`a9_>&Eg^ zNBI+GFof$pc8>KY$GMp3p)VRY2^%8r@m|5TpG=oGu&0sQHo;I62gAD7!~6RE3uw*` zu-Cax3!bS8ApCWVEkrC%9&E3=FThc>!ibBRq12FQr$7Xy6sGQ}xH-f3C9QHr$IsM) zYmGOLwdO)Am`tU=9|R~|K?*N`Dx|F`pmm@P0rdL-H_HL*X9m zV}P%S_~3qvyT%!o$M;W{M8xc91RM-|>$VDcp(go6G*c%9+2Vn{0|ruCKata~r=fAX zl9*a#(Kv}l@yE#a!-dCQG{ym8G{sK=KUtalCF_Vvj5$Rdu$9wTHw6CBj&_KygWSvX zS=nm;oXrykYxxtJHv*#TxJ%k+6myO9okYoIdB;SVKem?$nh~HyG$gMrvNX=EU>vLF zgl~6?B7?1FlNdVA`9K4Nq!0qPOI^HNy=Cmnd{bp>73=kVgPuT!eE8ty->u&emr-M7 zF1W7yyLy}`XKf);e+N%0Am@pkTI+e^D70W}Xw2Gi&@0w=R<+%DxbdcIY$MDK=T=MT zwf!#r{LJjhOD;hbrgr-7dcBJ}{N{Q0IAr3Wx2N1skI{8IsT<8|I>QMrm5BLqAxo=( zKU%!R@~NjbSA$U^F3$yRk+V<04lXZ{UoD*43egn;nB>ZNf}0m6qLMqRy5<$$G9A{< zCp!I4Z6f>NCuPZyZ)HP0&6JQ3>wZ<5*NQpA7&JFsZkZUJuUHwpoNwsjb`eR3##0bq zbgX%Y2$so*@4=d&IZtSSix3zvUq?k0=LXH82)-A6+|B~7R;d%>&p-KB{G`Gueo&Ip z*axN+cEUX)3sMK!FCCwLAKWBGZWXX};7HGAQ#gG7#GD|JA*M_@DlrB+;ZeEG!qUKi zAmFt4vY{~wPeWyqNwd9AHXYWoP2DoKUOtCmkp%Nlr;y;*xHw3vzCH0Kx8=m>#c$;lu0sW_LK6Y^zV0Ejd^gz3ey(x}NbkhF5q)$s-+Zj=!&)Np57p1(CUCdGI!gT_js zOJ4Bc*Q9Hhuyf8jGias8?Z38BR!B-u&ZV_4MeB$kon%AOc1_mRS}(vJCS9q!=~FUm z8Q)?s7gAFGB=>PsQ&p{y~qHwPmgU#lD@BjTzfV%&rjQ_I;|MQ=T3?jnJ zz6^Tw|Kq#Q|2!2><_Hc@aMegTl3bTg&6CT4@C65rZT4h$=AA|L19h&V4$N`(qrZvl zzW?zB4_Bu6m|w;_+{4U^6A21*It3*5XHy=d#-Qb`;+_;=qwp==V^jb0FM`X|=2?Dn zb;vv|Fl5LiaRZ?IpP=}$`bb<{3 z4-=g|f{`ZPO+9B4P0j(@Y;&R}e~)da-uQFd0~0E)t5S=1ur1xT4m{rxi6L8YSGRvX zrRn^H$sXhNqnP?Sd3x=XGli@SxNetOR>X5Ac>|5-jSTtk;>LYD-bRU7_3!jt^*d@t zJhFdL{mb%qmAQ`>)gL)WNU-K#j5ke1{M4kJqVvB#Gh>!2Iw%qMs`$PFS^xOMl`=;j zHc4<&kCKdn{9+>w{`mmU_*`D-_eYzn>de0$C@n-I&%QkN)J^`cW&h>GkSouBX1ssN z{^-ztuWZqEA5b4yIWYshO^m{O*JV;#6#XLEu3mdQX)8WfuXKCnpPwA;oFM!9(gsT( z!D;RBVu|$yUz@K<{|WJXb~?m~+>?KNtjqVLEX(k0v4pX~!cLDneA8DF?<^8v`65|M z$At%ir2l-w%lZ7?oONy)GAX3q4tCPto207$T8`{s{^X9sUE|BPrt?SNe_M5k?(y@E zCGuiNM`G5SK^c>PypS*SF!gfX02I991Uzc&- z#-4e7k>p6Yu(w}f-Cq~X%P4S5#YBjoeSe#ZcfW=~O^y$7vR$>JmK8=u;p3Ngu-TQ! z+Wn7j%^-s)2`c`p{s*Q7MAi5G0w%A0DAw;V;LXCo7%23P>}v8dr-ifGNz3Eg{FAcw zAD=K>9MyzLNOvs~ApZ9j#eV^Mj$NwvrFeeK9@c{#@eXo>dUHPa>Sx`iJ@3ptnYQP0 zqWnK8d&D;}pXStAX*he^1pjURds4GVR{ipa4ikS*w1}@IP2P(4dnB73KO<~DJ0F-6 zi->`nF4{qiqWnL<(uhX-fia+CtTgZYsar*6ppmOFtiG}?1%p65BdMjC#EH|b?51! z<7q^-kF82r8YfCi0sSnUFf+rPBoOm$&8V4~D0;wTQ2{fwS-907q3oP`m@ri@M(+NuW7N3$S2j^Q z|Hs!qNkLKBpphFOZ;VHeh7N{TdlU*(FdU!31%PTZgr|RGX-kL+rVb(+1v3ubNAAQ= zE>{GZ50~#22-s)Q00?wy4bkNA}pQVX5~qsT!TD=x*N`{WY~h13Lx`GZ=& z<77jlcPmG>6Zl{D`;kKc5sFZ-6A{vZ83N7bf55JGO7DWYb9#TH%|=S*NB>a2?C;X< zL|*KFe06i)(|08anCt2?8 ze;>Hgu?VKw-?{Usl_5g0<|w!tQzt|IO1!1`s|UqBRxvYUcJxXzFK}|3H1Fme+8d|- z4gc@Ot_2hSUh;IpM4deSV&$R@`s*KMiC{530$746BvL%vx7cmIgx@3{Cj;0N!52WC zbU_XvI~3Zv!I%nDY2$DfGU&#)B3ud)Esv2;iLcBaO!9(G<~rz46gwiCjW>w#X`xYo z^a_kJ_Y+175StIyB6@oi!sHrM(+$_Yc_IVST4rbrlYx1Tj3Q;&`W6ZH zYku{`d*SzTBz`a2zkaVMwkLVF4&*6+`@5c=0u#<)NgJtgk>=Wo$(JaULK^H(*7 zz>ABfH$*3%bA_Z9zCX_d#>=axt|SR_BF)%mf*=v0Q&IqSS;a)yZ=- zTosHP6ldLqw#TL7KCo6QhvjjgU%rCg$~a{K{o7SUE+KJw4gM&n>g!{RL%41dV^+hu zWH?>1zl3Pcx2)pbTa|a@5pNntAmMmbFgewa@)a%cZpAb3ZJS-q90koLa?QU@bqo zdZckJYTJV4p9I@>A~3{!r`vr@W$v41lZ$izc7uhbb+lTG0|)#z=wGW6op;~795q(! zL>(Y@(~|c_lL1m>_0c=BA$|{)bjF`wSya~EyLJleebHiecbC!xA&ZH7$08;swXZGa z_iReLoOtnxw5`pSEd2+xi5Z!FbN*5O`%imXA2n80^d29M`+Ns0B2ze-qny4C5y63J zX!=$b8@ras84T`Mbc(XdsW<46EqIPb#8uw%;51GuVBvG|a$=i;c3S*Ed5 zBYs%2dh3N^pJP~1@Kn~CjTawc$fj7sm1VXHG_%mB7Bl-v0 z)XlBpe?)JNbr_wqSx1>9P6MCs(qFkW=AWGZ{F5_nJaG2OmiZ@lXEEoXYxew;%Z-?* z)4b60X+81e42N}GvU`l(AAQG8%P)tBk!z>Kcx~E#AWINgx<_MYB%Ah-B{{~o!mC4n zq~^R#k~zrJkuGEWP;8If#ut_Q_VpZ+32PPnBidupTEO$NU)YD!@2>sez;nFK$V7C( z(zN75MgXK#`eM_r+l;SF@LB20Zz3>WJHu2A>41iD7HPE-OGOXmE++51wrSx5rQVGZ ztnIy*TThq>C~#%-))PxpJWRAs*pot2yYW$DbSskhcZoQ_XpjqLzra@E(pkv6z(3GU zK1AM($vmAtTZ=0LuzS|~-e&|%jDQY@H1-IgukWTW@6TG+)Bnl1yo=WR4hj}6+FQM+ zgqjrXdAgb%E3t?A@1IyJo*lt&lg>-$&{AZ+jEbH5${hNEJOTZ0gpJV@@v6n9m%fnp zW$9?&4xc_w$lI%SM1)t--VjeW|EZgx(kylEpix2j=*R1~j>~#~(hf4+a?HzU>1C0v-34qNs^o@PBDm(3#3gK@sKNI zJGakPyDDsa4O#|)E5_ge$~&=yH#v_5?GNVeB`@E@%3Ii=V!C%^LmXq|hz3J`2YnA; z<6G%h?3W%duDGyPV0!6r95#A~gT?qtoy`_0x8ti<^NT2#hz`WTJfctkzzrT@k| z)VS1GslI9+NPYx{zcIfr(k_U*l6&H!g5TBoXSlp_b*?hKDCRRU<%!&}r{~9s#qDQo zXRWRJsLcf(MtVV(F-p6e=eOY3Vf_KEWNHUTzMWf#lY*`JqZ9Ln5Dk9$7SaAL#exN= z60*1lF_S-xk-XEx6R8rjO5&N%6*27}yR&!|TE9U<)&=$8b7a(NEo8TdIRp{!z~lBO zFHcd&djvk6ZpHcyxpG4#I-Dlzg-r65JHFa2@{GHt_gksfyO{?gv)ciKN@EaU!QfFLFTJ{L@-q5l&GRZq|H@Yk- z%CFb_vitZu-OS_%r|pMI!7zcZD3S0ixd$kTsk0Z3laIcw^+ zaFQ82f64Vfu{Y>ZZF z8LeLPmquMY>>VH15);m$7DAEq`Ss=Pc@!M!c{csjaX(@_Y9#l_s@2vnhRzC*=PNO% zrO8)RY?!lx@@9Is?bG}9R&1SpeJZCbzryq#bSAmWxt&*UcJq^2M*C;U4oPgjfDVMa z%I`}%8=LRP14S6{`G5~qm{9)2bpZyoHf_y@1l4&O~(8)^jlj=JVW!f-q$VWpW)B<0^2HS#Bo&k zj(Iw!r+;M@OGsqtdOPCK>Y_b@V_W{AXmwxa_}$VCnUsZ3iRr%uKh7zZ?+1(IARzry zl%u_NW5(Huds;HmxR=i(;NqQ;WTtIi{AMn_uA{t|q}z!^|v;+|t()X;|R3qIUk{dWPnw z=LHxJruR~>UPHYGXITDuBETV9H+J$H3-mQX_O~W$m%(0DP;E>&iGkz?-U5p;5n)Y~ zbceUTf;ww0{7p{;Qr3V1(sBkX!8r6nnyDEIt8b`wlk19q+5pX6Qu$oQjo?UXJs!&sO>k3VGsu`(6@p|6q@?2t#%$ zN|Lj>48cu@m+qXT?(njvAmy|i)!{Txj#SPPW#j~W668QpEj}fG4x@uj0FvibkMK?> zVC)pf6JMla6eaRx*W_(8eQ|k5^_18jlK&;5%mEiXq1bTA=E)I`#foLK#Oe07t$fHr z$#3N@Mx;`qAXAh*RMPtS!3&&`lBExn^8S1eg5x5RI_7=5h|x4{PS**&ZvGx+(FZMl znsMT^#l)-XaNYHyY3H=gJ{L8my6!-x(tSG_jHsnJT!6gy8>^jd?f|zLL0mS__B=h7 z5HV)KR}k{n!?2(~QYF;y?sJ2y;F2sCK1HiRS-@qVNK79HqBl!ZA<0A#o!+0HxB6#~ z4S^|(RpTYnpTMX@a9v+K+lM(u-oA#?Q)otDend4{&i7ReGLoRFgox={c`sIB!Xe4n zZKg~YLhe92)5O-cSjrkz$uEHFXKTX6sK&8-Z+CXu?S1yg0XINBiu2)N7#$c8zcRf+|{3uaW&Hl>N ze|nHKS37fxW2;N@_-rq8qb;m>Na^0toTl*DDOYS`k37{4|VYce>xVxYnQ#qBIYqNIOPKT_7 z8}e>d7jc~v(i1Cf|m@88W%6^Ixhz`*Q>G4!99QHX2Uzpe6T8lTNfq!P;(m3KpI7h`sa7)^gptVK#7J0>nuB_xSv zQV@1%`lJWq4byx4`;*gqw>%G>ziDGST`to3&0Q|ne@hJ#nCm`>FkrD*)-lFaG5Lo! z@|4s{&CcpB8i;oRdC}6fSvz!fle+rZEas1psp1#iI_?vX#FvK?h(SxK*v)N^>SJf8 z{dtcXb0>-O;1Ypzgrk1S_eR$NhG~?n<)aX1Y7>#hp!Z=gMCLxNbSYKtTGONwDxooR>R5 zul+1E=15jRJ?-ySWSCh?(_8rb&jW9;JxeVJ;0su3{76`JHMQL?=4!rCn6Ci=U~aPI z-G5amD?9q1PkU)By8QNXz62Qqe!tJ%y?jsfL}7i}{eYJR64H{dTGf zy8s+L#r-$KM_aevGhQW-`uvrjJkWA}C-)i}dRY|%G}NaEkj#0kGpm35#s?EDzEL74rQHQX z9b{o^zP}^CUI&E|9Z_{{SVs~Nc)Q%_`9g~YEGhO+cU} z-S@X`SUndj^b+hk0BLG_ulKXh&rOI_oqsUDpY>#T&I_p5R4$yy#bVHi!X9Tk(}Y|#0;4?p#0SMAVSZF zp^ev)9(DD&SRNbmnA5z`Fv(m_Lm+|(II9eHvKKk!L+0*)li%0$^K=R^R?r9YTVUkC zcv&&khBZjRLdHvD9(aVJ*6M#C7+)5`uYO)=*ovqOb1KLgQ|%OW z|K~G{X;=o+(In;x^Sj%yCV48`an9c*5a0`KZ;&f!cPb#pB>214_@AykV5VwPh(r{1 z=TPctHe>c>-r)DM-#|zUBd$Km5}%u}xcsPg7d1rU)#M$;$WOqe1Y#E5lF7|=+?&>6 zK!;>jifgmGn?`R*{-Gf@!yCAkidv7!q3}%Meii#Zx}E$NaT)3sP2G`v7MqstVbA&o zM7RHJ#sdz!9oqfIIFjSeUJ_nz<2JN>-D?GhbCK;LL8Ecc|MlxJA;s7TiM-3#V5K&> zti%`2HKQufnhPJXKJw}bB>d%s`ai!b{0jzX*lW?GU`o`V)%^DlmJfk*^j{ZwEP2o; z@wLG5mTw@^Cub!m(b~W+C8$(E8)xLZx`l& zh(#HvDo6MI%ZYQ)m!py|v#X%axDoiJ;)iJI_X{9=c<9hHIy8~-eQVX996^hQjwOYL zK^TJe`^PpZG#k^4gp?Tn`IrBC5q`A7vW&!)jS%|t`~Q5E@DUQNy_NzL@Be<^zkjcl zu!Z8%Lt@u|ed?cgAv}j3>A@#ui}l}^^yfvm&@j3XKJJT5lmGQk|NSHFc?^mr>yS>u z|Hsu~$-q}#*W7&UxslWG? z6fHk|cko=Y|gRBNOg- zKZvW{H6G1%D&n%Ekw^B-*h(yQGsy@3Yc>A-%}vG-2UACPP-*P}u4Ori0yfsO&pJX6 z7s9VXt9=(Hw53DE2MyDPBZ&GnvjJcsF=v--=0Yug1XR0#4w^fSdxI)hgZ5(OO`MCX zCaYZ=79LU`09XQq?P{a0RxtNDL{0BHT#nD+Iy{C>F<0{4)1MuZJMgYR;^7tz%|*)l z?bNM8S|Q*(%87?E64Wy=^LgPjI$;xi{b1UwACxE)LpP4IA@QvZ#Wn>vuRLes zXg)MD!1%@1{dD_+t9;gNptk{k`L`fB6n2{)AEN!Koj}TW4@|&NLm*C*lFfW4s;Arn z7am;NsoyL)Z5VD-&wNN`SoCROoa`a`zxOk(7DiZp(o=(b^5E8gS=cSjIl@Z;WS)0y zUE)a6*df1dp0H2tLY8D=s^QsMH!R1gENd8ia{dyX$2%{rG8CQk=qpw3@Kv zAmbEYruzpngU*5e+YuFJ4X%8LKF=K($_xFG=Iv2F2d9CTdv`UrkGu20!mS@qsk?>Z0B zqL9!0W6;f!l~2Mn9F3*W zQ;5{?(Wix*`(f`YlrpPj`}WHi9At#Zc)0WVvs;Ww{UsMCjoSZ2R9XyFh>@;H5;=Ur zM(hRZ7Xz#O?4&q<^}_*D_W}l$kB=Zl*1sZb6X87gI^q?--7}qZk?i<*HD&Bc?%Mg# zhMoEz_0k>?dzkT+RkxFR)4qw+cgvpMfeAJZBl;$e*G78qMIa^GENR$@U`glgwOx%< zm{R5__8v8IsH1T;@~lrn30K+G^*@VdKNfIhT!0$#^sqk|rQIxuEzM{gcllwBDTsPj zx6M)xDmS2$U5C4R4>tjI}COFEfwk`a>g z{me)zd{LHJiB$GWrMi?_mRye;uYTL%%=#1136W?Jzog@JQpMJ6?*r-0+;K^0*3ZUh zQ8!DLB1&w7RkXJ5eb*(`6lLqO$!dKo+mT)e^LTSLFAOhv&@ib| zSIoAoA5_ewUUMyK^ygYubh_`PV@|eU#za(lD@4L&ZiQUKMj#-RKV!X<9;-~^3S&`r z?*|@CI3()#`(A2C5nIf)6%(ZJSj&E9zK>wXzQ6iFsy8f+|9G8#e(fVu4>)5dNUWQ-s>^1krIxJe(#Mpe96X0LpwVO?=Hr3A6xw8F`+N})z zB??&uOULQ+y9z^AMAh;UmM$!^m2&5*5_w7Mj0(vNX%3uYXT? zN{(^`C1kD1#*ll;k>R+MGT^$NBv?hGgDKiucSlb@uV^bE=V^QLm-JlZ*(OtH#KExfg6S2X{qv{fwy3l8U`5wV9^c0Wq<_s9$f zM{`kEKVyL>8pJ-*PSPPk3X{47?8H5S6>8@y14v3sjeb>SB5nj94P{7JR8%)zkq0)a zVVNb;)uQMtR1JGTNf{TR(%jvG6Bl4o|2IU(`_MGh!nw^6nM><{aXn(MM3E-(O~)+P zLED?9)G-xiobCfp+<;r`kGiXQ3_JHF$5l@ld@@AzbS<|%&DPyZ$ZFX&U@+wjPuHN` z;S1OL^&IqV;~KZ^S-r}fz;&L@7KDkT*|O@>dzxK0?Y?v?^(LLed3>%)gY)ET?Rrk? zbo*}cP;zLo)}uVG3#8u6vMpvaad`n4DMDf|tDY6U5P#HM%Ehsd=Z{e&j z<`=tcHMg!Eh}gSypFDf+{Icp-7j97!38`}blyVGQ41dY;d~C(UOBi|)94vVfJR7k= zqJ^(oT0`+2EQAX<=-O*{e{QNioW~bi7 z1kTy!+tVJQn6dj@g_&6tTiF*Mxa|m;eBrt5-hvLHwdC#WXl-nr6{p8W-X)_FgUnZU z+qX#L7-H^Vl2dw%i0)f1-zDOuTixaP`oz_fxptq)i@B4dx7wf&VIqVX+!v<8)1=!s zmTq5v4Ip$rhubM>*aw5;65l?1I!r+cvSn6d>B(&B)hp;`_oiIZA(X$6+{ML{`5I&g z(}JQTIMuKDu`wLk%a#?w#~*h6FvJxSI2cF+%D;iHk`>4 zT-WVb=`woP#PP-P@L7*RUl4xxn8`bN94>DYa*m!uW38L%bEd}oK+?K>ol(`|u#7`V zONl4knndgBQ>h~yPSJsdTd8`tn;_MyS)z6+lXk!GL)^e(2Q!$tj-hsYCLqx)>wc8< z*J-?3Z|W%tx11A@+Bk^s)GkF;UpD?>v zNy8Q+pBBg=z5!|F_39Ik4jO}9xxTs6gPIVQSV|{=IK)0oCG2-QK)QCC5$vl?)@BPj z=4yG#Jji}rc-R;fXw@)B**`5TSI5O}B12y3koguQPPSo*F{qyv9Y+vB3%uw@GstxF zsi0F)9=c9sdAg|->HZG$601*0#>;dne&afQfm8}M@&|ut19^To5D~FvwV!Bwh-;fSO$f3 z7fSKCV|`BhsyGs2hKf|U&0NRx%Y-(j{!>&jSS=Tq&m)suin!eZqghLW*k9wOQoeI7 zQg{+1v1S+h>t5HHd=EoNpyN-7dn0yBH zI^&))rBBT=xNhMTeH*YMu;t!_vre`zLb%jqL=aa)2Pw$lFS%hlXL#=cLOY5h8GRFc zl56-kdmrE9XQcz5#hL9dL?@32Os@VupPQ2hBi#8(C!4EjzaVJx9e7r%VceC-zK?~D z<7KAcnCm;0L+S!t=Z?2j&k-{izBb~1UDP6j#CI^d%FjgIUaRWbafXvM_Um(*mJZXG zowFu`f@LsXdVUKKww!Q@_L)j=JIbYL&S&X|vi0tP0sh>4?fxOlC;Zr$}r_gtllDHlW{p; zDV7Fbo3)#Yz3FtFDYsMk!CzYx!cc>x>GpdzWl~sN{hTYA6R}+Fg?E>!gKbobyb0}U zp>a)CnbH38+!@u3$7nDgGM)#{EU*nPduBUrRaeUT z<6C`wrZUZaTHbX!Uuy7l^HHvf=+g^^J2+96>}2ZHY11s3JT>L=Elw|pILR?bHP_rn zg{b@swX1f+1eDhx_&WAaPIticOlR?fP1E$*`XD3{9K{@52=C6*j^E-z7)!d zHEj9uB{E4aoc_}X_r86yk|c4WB}|tBZddI6ZlB9x5$t+7bx=fgsrgWe^0mT{fy+`# zuh@yw9X~0l_F-6YmQcI7*rFrhiQgv&pNV}B;;6%gTO z8a1zW(*AJJt&1Rwy>9!XZ753Z3J7D|Q3 zPjydHr|^z!?c{Ij%ukLfD{tNK95uOp{YPRsH3jik?{69l->Sb~`?R23-M-venM}Ho zBt?wF`E1R7#)%ZS6bC(;z=xA=wJp`aj$_kT+q>4SCjCK&T+7K&;rz25BAkHR1zx(i zLKTT_fhLYPJEauD>eE9XgV%))j@>*@2z;kK6oThM1&`3l+{0?h_kh?r0`|ObmTDGm z!1$%|rs)rp$~bRG6m^hHjSa;m#!05hQxr7Z5rslj^OVUmLBlWc`mg^;i~R!QNp!Km zh&_zx4nTU?)OO5re7`JYrLH+=>wHx z+d%)=(8LP4Ffr`otoQy}Xv;--7)q$b*qpjPHd!z8z5fK;@<&eeA5Hh9>1IT}#XS`a zothQMbnbMw*bI1n$TQQL2&z!cSE5*yN!>#m7HV3jE#P zVnUcYR138`Xj&YdACuWyS1?Odd0ZICp8Yxv5k05Wv=(5KLekqG9+vMz*J2ZFC~SC6 zFz+M*zcS6MPZS{P1L5z(fX*mL@8@VMJOj1zr=gG@Qxf+odTvZ|LU_=PkzeZ=Vs3Vd ze*0XfbKev<5`Q(|7h!V=7NM@W!IWA26S;6_Pm_2PQmcZlH^ttz69uqlHVTJOi=0Af zG~N@6na#O9R+`b2Eh%+&Q1f;|g)HjE(XBflAq5zQRsrW~m2*j5&ODci|IdZ+8rP{& zQ}!#%1%)N$HIK<4o{OjQNxPozU?uxns4-g=Fs5D)OsWVP(D_q0bod{}$P^(+=*jiWhc{q1qFo-v|qX zp0Ro0b`>5K>1@^9ym(2YZh!iecJ*1KFD;vymgIarMIK8~BjD^s(YDAyu{^9kRx{o(d4_~2t zc*Wa&MME1z92S&@jqM4HOFh!z0?pEoE}+YXWO`ns`-1qi(D=o&d-syi=4(#G5E;WK z0K`BTb_5+LAw0LnZ|$gw)87h4@p^sJ#P5_)HFO+xHu@|^uuPt>cK^uN(P?-9;pG{X z(GT4#f=&sNWxD=~1S0bxU{5^NjE-4X&30}Eva1}SjoSD4;++zAKBqVw_1^WIi;o@H zPZ!_F%fgWVtbY%+$gg0L4VYpiqW`*HiRkAs7AXS@$2&A}V`S)|-J4Dg&Yo}9+|}Hi zJ)h943W`?qMRKYnMe2j}s?uit=Yw9MsjYFFXqYSVEYch5iu%Elt|7JXpS4LMJ}5iu znJ8a`UboZd`myVKpJaj45pOYtCp`TXYPPk6k=*nEWpf{~!4JlZ)ARb`2_~N+%fC%5 z(b{@!&b>WLGI8_Y$j_biopR+L>}o>U1sj-pJh)6hz2_c&*|QUz55M5c)!Adg7YV(O zKNk70EXI^jksejanYsk^VnyJOebg_HdCnrG>ECoiA zYsaZH=3D5bfCXOZrnX8YQyf0{F-@89!}b0Tb-YQlY*?9-PzcE30fwH z*~#W{qCB-JJHG3Ka?(X5EM`*4Fzm>zqGyf#Q~PvWe?nByCy$bDVS{o$JWn-RNJjn+ zIEVC^7h=pu3{&>cEBgBW=zZLjorWDi*okBBi~+?&zFvZWbU)9FZ5#2)Q0=Q*U{DK< z#f)NXRwyXVbyCj5F%8{W{ZAir=VX8>1n@E1N8|bj_+x`^47_< zQ-d@&6O|(^Z?n}^-wPV=32B8`M;I-rHsaAvN> zqGHt8xocJ3;YLx@!Q^Q}f!4Ev;L6Kh~d$8jlkT=g773MGt!F*bZ8{!!*9Be& zLsR!C-|h{FXMmg3^<|Oj6#SgVTU`p~o+!4H!h7&8B5f`}Um>i47VKmajt(A4zSCq6 zzh8G;KO|l&FE~Wsf+-D|fvFaH+2+<*_Sqnln6rbyv&ls&W62mGLuN~Hs| zp->v$s(vQ|yLgb9l*CY!WCt3VXd130w@W_06dDTcO3AFm#_Sj-IcPaYAh8rFfHPZ_ z2)|AIa%0_weZ{OK+I5}>ws!yP1E)_5lT$kSx`7B)%{uMrXW1!jSe`Gq$+Jjq@P%r- zr1Gg*BM$L8F55w)jce3=pHUtwL`R_J z)57zKZc_44K5ooTJ`FmX2{;@seaU>0e0PyXCkt+4#(210izx}}xkS+PRq4~-<;Hpz z^Zo7Ve(8ruyB&5?uY&Inlmnbn{k&4fVfM<7?~i1)zA%(qUvQ6wjVCiJKcC1_v>26x z>&YO4L+DkT?UBs)RW-Rqc?Vc)g(fdW5AYrnGq7HWvOWPgVbiE+8f$p`dMh_AUudM= z!AWKFcRl^i*f~k5Lp*$ud9=W=7Y_3M*~AQ;A52z<^Jf|5WBhlZAHSD+m_V@rk2^D8 z=mpDS^H=Yy>MP!q;@X&QDN+*tg|%p=B2+sM!RX;SOwDBC`^u7$+5*+-LuAT!qtMF-}L*!;Q0Ow5qY3%sd|?DnlTt1^+}%H z6~?Yz?r^Ho z?SN*wwkY8-tEx2ulcI~&e&@zt$HNdV-C_Cq5SY%^*@Nx0R{r7^kuNlQ!5kwh_?k%; zrQMQ_z7ENy>~}D%P7-7ZhYRcj1vmH)x;9vBUVRL9d!<(w2D*xGzL#+X<#^c-lG+u8;VieG1u$M6P9$H6 zsdeT5z%LRY4NND#m7^4iCYDAqd6_`BQ zpO7jtfaz1F`K9zk9~9OIr-Z;DYCg9S5r>&5YVHEA)#>r!x_X}r8Ict70Rd)-V))v{ zf8yVneJJGDfx6H0f4}Vo3UJ%^#{~rb`N@C3iPQ_C7K;D=>l+x$!KB2yl1F>=!ep$x zIBC>&a+-fZyDh{R%aO7~f8!1N>LJI`gJ`>wpMPYa<3wVt8U)5({=K*x@o4ED`*mN6 zsQ$gyzyCB72jEdXbTJ*7e?rxHvKVcN&O=I(F&4cFNUizW%4FUVs4?*Wec=cq+7eT5 z#Yn=}^b^hD7+EbIj;!sq%1S6-5AfMaZCd_5;Q&H`(IT*L0}7Jy?WTR0sgy5MiH#a8 z0*6E}${qCc$3ti?3T`OmN6NfruSczMSQ=bvNF(Zc;*AK>R;3ZR-@OomFr<5T@41|K*x6vQ5{&{E7FzRT5aB(Em)hP%z zEC7Yqj}tloVhv?m;BFVo7ie33nEbhEH!HRfMsnruS81bii92@t)WE{cl)is$cTgo6 zi!U8in1W-bUVzVTe0bt;*(=|KCEafkUwq3Qd~|xe*KU6YT%y~_eq*ZAk-<5g4Ggm0 zJEZ#qtFhU!;Vz_p)h@VDo-IQqMwb;?%!fG=4ZeMlqkjR0DL57b>2S-j#m)&4_sNP> zq-xuS{@+Z1P6j0L`4H>@OS z#DK?twJu5IM+sMtK?$%S;T>Gdzz63EPcv%h?JiqoBIzJwOc>A`9Cv8F{y>F)s2WmN zuz@{N?hK23BzbnIQM0P8!j-)?3IJKl^_>&+way-6tCn_}u zf`^X}2X2zxlKAImHny!Ceq*}eRjFvNOc%#B=$ezMn=%VIEmv|sjD6%>ZGhDpbX5|? z#86s;r?R!35A)y8#qtY)J?*|^s+xKJDxXR4+KyAU;AAsZGks%6rVp>!aZ_C!fPSF;^myL(l$zi6k;Kx+XOFq` zB2oTWz1b(FcSm6wi5s33KavK@tl-n{8FpClA3j~vAr_Nsgsi_#S<%6XaSFs$dNv*3 zE+y=359?_7Y$htIV{b)b4~}Md4=Ul@t4!u^V$0EEmH?DEv3!M%>_yUdz{d+eqwwIZ z@*bB!95zBlMaXGM52Dy;H6x{nyRHgeI=uP>J@Ef;9PNRLb$vrCOq$&?K>H`C$cE(# z$AP9%?4@5;_+l#AlM_Ka2oFutj1?hP0+bxf(Q(M*=oL4_HUlKT;T2get*_pC(X%Wt zox5$@^orrHTTa`9z|!Fgyff|X2oQfp2k|QY9k8Ij4Ly-+`SnVdIz#Cl8=PDFhKnvX zCrr5_`hkZ~_K%lytwjr&z{Jk7Ibw$|PD$Ww1EzS16@qTk!NXL+!YP=EgF8?YC1|8| z5a%B7-!Sf(gkUCe@7OPzsV|Bxeg!Bp`3%f1cn^lL7;_4y-6uE{yj3fq5vcFi&F z?z_;{24MJ*{i6INAI@SpCctF+y%IIV2ZoL~aom}(yJW&`sL5DVdC0%=nCfO2pzTcOo{CqB?A*u0DZT(K#no*XV4+n^pRa)?T2z=Xaf}tC0~nmC5XD z;k~g6R~((Zt=|J9U~P6kdEcbdf>JYU&kF+Vh7X)<6sa#LrC%1I%TAa$qN}e7taX^VT#e*Rd}j#s(_m!^9uIzLL^67%tIyAGkMb|`WBpMQ05djH7Cb`Rp zqPoPoB?FH*elYYJs}YN((O{BqD|^G58JS`0a8di^s=si#+N3*Y{e&w4sdD8)^&l6@ zm$@tWh9|=P_F)f9YX4>s;IyYaH@fzr`No0v1Hn;4S@hSRb@(xK!zF+MGVOtrWM`D@ z`evR>Cm(QFGtrgAYnad5J+E+*Cpjx*5%M6z-~*%Xq*CBvyhBX$xW~8XMp+)P((R8# zldtg52-rvVninX?!#D=p)HAIvLUr-&Ybz$a3fTF{u$ksmd=c?2!{(ErlE{31mbIX` z>&l8;Y8X0oV=#@W-S?WquW^;&ckrfA)!{sK_64@PYG5#i&PiIs+N&p9S~5=d#LrMk zi*eTaO8xrbYBsYk2KKspJDDMc$_6ja<%V)TOHSw^vL{Q;XEayiDh)uir71KD4`N0+YE<|b zGN;;gMyYcl=xGF_JJ&-h(C#HXzYC25?_i!o6DLEHb&p3?`=_DzxHmrH1YZ~IBKjSs zm#ZUV>E>BtOm`J}?4Me2hz~OL;^&iJ!LFro}5L z+%1qaFAd)z5-+?h)-tVYO$}3Q#@`XyeFe50O`&C=ZCcH@ zp*oh49FLgtv^}0bR`+Ql7&f+8!5K`bQ{KTo>Prj^RllkC>DDKy=0He_noGGK;zZE# zoUdcN9JDt8{lntR``2PGKDvs^wX!ax8t*;wypZ6-e>@+c`tY`DPd7H>YY2-gChxHb zu{2sv*0njWbVM;3r|n6G$TxM zLy`v$abL-Y;Ygs_w@E6;`gy+_B#0#$l8@{3ZP%a=1mm+XvCRL;%P8R{o;ib*__SA^gr@4w+sE+_@Jb@6Ms#9c$*q45R!*{u|HL3Dxm> z&K`zK0%Ta6q!anBWPqku3L!kAsBoH&uH@I%k{Z*@=fZ?I zmR8=?UH$*(eOUSs~R5T{q6MLfH#n%E1q*>Vu`HEuzw6xBEEh=&U zCkcZ~-AYor5bPg$oz`0Ln0eN~Pm4-w(vxUOQ?xd0z1rNxo2#?1 z9L=1;|8|0Vv5l^t?fa|rYr)4$SD{uJ4NVd;I=@mD1SO#}HeLI#4CZ(3B29-L;e#Ic zcI@0IzC5}zo!;<f?Wnp~Vz&v1`X`*@$ z|24=`Pl?FhvPo_$T=8!U<~X3DN1fii*&jelVs8pSK7co_D zcD!QgAl%=;DcW|Vz^bnUxQn69#u8JEAZ(MR84LMBD-3L=VA419uMwEBNf&SK{-8(Z zXDBs|z~e({0Q4VsVsrklqf?S$rbWVanVwvdaOF(*yO|_BR_Zh@u~YuK@{Kr%Z3YHN zl@4`%gJ#o@CGNUSGlD#R`}c49a${y&!0G>Zx^kD}33AJ$`#x>*H51A}Y4q|V+%J&k z^E33=m+Wy>_{PI{qmtKj>sND_4iGSDq zds`PZVL??@TVMZ}kK6vVJlMx{S4$ne?q%2Z&{13WRke2o5mZYFvhN-~DXAP(CeDP) zhp)8kr#ld0kb>=Oc8=d3&WbA+Zxk=q2OtXa;Ct*>zew>?LZzA*(x)A!PO>*1sLP||a@<5k=F-dy^z=HXq@PgA57>P!){aJnwzy*9mmlr`|yZ{_`fzsKoOsNle2H`IMD z3OZYTP$J#pxsU4^?d}zhDLgtepXn)>$5_%>l%c{Tmo3eUdOl${GQWeHb)isg@DA=> zB=dE<7m=kBFFYR%#m<%(Jde^*^gaafO~uj|pdT`Ed5rqn2kL3N&&hExkJWxix~!?d ztoDrerL6$fYYo~USh%4%JNu8~$dAmyq=wLgFl!i$m8=j;i-BMi!q?ZVE%>!(*AcR2 zF)EXc3Pw_0?q}#xuRdr(6riBoIEVGuMnk9LlL^t+Jt0r`SiwWeU-;t%;Q9`#((H4T zs#lvxfpF~P9()p!AB+a&Tb1MINvcqB`PyHAwHWb8_!w}P#ob?P8;Nsm6&|-wb%m}j z{*bwrqUX8cNfqBQHWHKWH~enOdxkG+O#akU9!AYZyWKJ${~QNWMj zs^^FtXkO?ny=F@}juvtaO;N{Ih4MDAPHS`J>FK?ZTo@ZsueWQpp{#<^4ZQVbl8}QPvU-YOUmB@5}s%Tk@d3CIa#LPPnB!h=_q@35TiV+ zJabh5@PuEE5@n0&PVEgCG)nX&uyXlf36G!>ev0YZxCSwik?V{NQ2M^hQu+zX*Xy-v zMtAIT8iCBcWwqdG!Q&Vi-vVs~HKU48wK5nfBd|+bUu26T_~VYXsHIO8D*338k!$X+ za5(vQEFNCbw4L8aTi_Q*KnV?2f&cKwRY5kyu*C4rYp zuxUCV@73AUXsDJ2nuceJAc6Q+S$R}I*?pZys=HWj{=Et_CSWB zzTNo7)rR=lWDg@w`f)VM^$8NN>DPEls!kmVt1nvHbS$}jr+sBK;E^>(HS`D+z z;WL{kXdd~vbO6Nc3$#&#pD&BwJOGbg{o5?Sh`(VXODBjifx0+BOv_jG^fC8#6&8HF z_+wfsOZK=TKID|5qS&v_XM(a)(knn#sF1PRyT1q>lF_;EAfDbY?E_-O@$^^WC)w?7 ziIV%nIy<+Hj9$w!g8H_IWP2}FQPOK~4*lHuN|%X2S8`K)EOrBh#qRs-b_Z^wzjq=z z0(ujsK4eU)Ra;ylzqTs&O%H+Os;Xf&>pi5l%MZ4YoACc7TXkZw^YEY#NftnN3I$jD zVqm%eZ)?h&S!*0%XV=MEz_g9&X?%vYE?jmGIVI}m|2Y~LeI+e`nKE23Mbh?u#k*qQ z_5X+x-(h`6uke=RS3kI7*?J8d$=M@4FAi6@qZ&spM{DlcC~g5j*ve$Np>UX~QBzMp zN07Jgp^i@#Sxw7+djMKP5&WsgT7z*g(JD!6oi4@*Ldbl6POX#Qr$jpGRJ+TRs+O={3G$^&e-<+&fe_&$axA5TIIhYuE z4~!ckK1ar*DG%kuD=|eA$xk$t8t<~Prk);Nhuj{z#KWl^LsBdWg_#EO0lj)Qd^}o ziXht3xI)a}H^01A{usX3s-lMVr9|ElR>hUKkfQwvle~SeqE{#trm@4o_E)O<>Q^7U zgI?VkzRm8CR@v^%&|$8LYn7g;K$sJUq{!i%4|G{=H$l)JNhMeyeJ-+Sa=0AIV@ zbOz*zKR`1v>i8PHy$EbaIJoyghwu!XR{VO*ZX!>I)E>SCRS+Bg8~E>8K3rbU2G6gt zM??&uDv@h?JBYW^sN=nTR{it+RcpSdY+5zyF))dySg;%I#|&Mk9@}HeC-o9+>O28K-u&?okg_ zUUN777+4QaX+MA|#J9nBaCa@qp=z*H`7is991)@ z%(0I4IIfpoc&%}nq)wbhhc-qUwx`bcot#d=-zT^poZ!Y)+9Wqn;W;iI5~jG}~@$MWnV!>Gn5cp#uUdh=%NB=o-@bdt$$jTcRR zI@9wEGO+yBV#{>H?hJ;2kKUkxF@z>US(m6r!;Ey4A3n!dE=!v#y|O54>hB9Aqc~d~ z2=tMV*DP8^syydi`dbYaM<3I>9Bs^Xcr|`<_c^sNYTJ*cy?T@R7>hLzG)SL)vEs>& z0F@6s{??I5rU0r^#jLCaln95bX4(_Q23~=_OkTo`1OA9mmm^=pc!~vR_{a0^-Ux)y z23sHcV#)h!1F}k<=|H%zN;s5NI&w`0QJBek_5K^d6>=&e7ZKY|LgDr5%jdxH@D2#l zrrqD;!T5s5p6Jl1)@jX7m;gbL&{7Qq#_5P#7TCy-&{Z()s{Y)>V^!ckC%ya|lSI}t zYx=`!!f=*F8*}!?_$-EfY=M)bEKff!Ecs&Nh_=dSAJ{K>rZU6^J3FK+)_?8tH(Z6` zkZA0e^%VW3J9NsBIv+2IBL;Pf#rH)MtMjOg=yo6_+Z>P!{GnNX1Ee;*9I=7<$Dh4J zO`o+VR`PkQh`mZ1p!Ftj*l>zrVZ@4np6f31|I(-Lb#nLQOlkwt_}JXBqInzkCE5=aPE|&lCTJPW~;V)lHGQ&)(m^mrqg)64afy zXZO^mz%B!2Q489@{3k78Fa-*j3q&`>{)a4754C9Qr|$76ngp1L7)$|Etr{w}1u-(W zedsrtdv-Bp+}(|E1IM}REyO=iOd-GuQM7xA822Dh;0cXq&cyXYyPJo@d|@E}0bsHb z>>7%YjhsKA{2y>DVG68d=bH8gYC<3?1+y=QBOzPHCT+{Poj4ag8sre6&VHb|Qx{nX z1838&jfLBvDjkO)$xFMt+lBs6BACZ%BTNdXTSq%wC+4NPZ@k^V7eUjWZ zpQ@Cj#Kq9063U|S`pBokRh1yV3v%rh4q-5bs3|u*%!+4LTuI zRP)F177?V}a4>;gE`jJwnMZH6JHTd4E)Pk=o|p?_4EyWJu|+N7)=nP~YAgG1{JION zw?~ACb=xBl9m{12-ug5T0>6Wc;$Y1>Mmz@gN8n$!1^GU#ZI+DA9LT@Gi-G{gG{O+! zY8#k`Qw>NaN@zlbv#&%exPtv@Vc0gWpUZxP5Oh*vF+3Pbxtz2PGR=GFf0!Ib44?%c z^<6&eVTcy^u(%E90QBdKOm|N}>I}1ng|oI%vdPlsdhuOo5<;MHa)yC%LNH32@`i1N z2{e_!#h1wC5LnYTt5FVmu6aN*Im# zh~{Y;_M5M^gak-ZZF0n>I}V0l(nlr)Euj?ff6+vyyD`zdMJ`S0mc;nAC#rD99>A;$ z?fF$e-SV?kI-;+ZCb*(N@kRj@`iK)CXmb?pCbF95WosmGCsp|N4VYmAguI`ahK|{Z zKy!KyLne-)*~WK=31vc~E_vb|Y=hc=PTeG#@AJDfSpvKH>E`LA`Va(!+yW}JFA^}9 zRXd9?XbpP_Y58P&UF=Rc!`iU+E5S_0)HeLoI34rP2E1>LQ=Cobh$H z39>HPK5YSItNo(6-NWx!XTCy>XN>#-RXj*|d(Fu?z@qxY?9t(B2BeYBfl4n+u>N!R zt139FZ(y^2Z-K!lFu5`aK(7tNOW@M77rz5M)%rCN;q4M53A$CNs$NH7@AkU^+0HS) z$s1k768PDxA0JRFkM+%el8_;1eFP;<7!>3yyl%$bn9`+1w%1%h0luw*dh)X*;mMCe z^QoDdaq>z`X@icWe^o+EN2qeBx@=ePM)2t|Xf^s(vs^qtZ!rE1m#49eQ*`h3T`+xcxdach z91Xd)W_p}hS}26L3eGy%%2JF(z1OqbtDrkC8n$D3w~pIt;Kedl2)=bvd05xU&SvCq3~M)aNcaQg>qLb+Q#8*7v=VQGOTpHpB{VRXd>+%LJT#C2F+(AQ@e^N; z3YtxtO4j+F(dW$oR;^2X={Cqi8$l^SqK1It1P2p5ql8$rglhhaBMZ0q5uD&+8X~2L zaWKa(N+Ls?Wn_fVRx~0kde4TXNlr*5o7snrKqNl7=&|Bca)pdr^3TH^Lp-BG40#l> zRupyP*H&DV0KrJ=T^4n>St>rkBs8TGk~_Rj?NjWg8*^8pV#!*IX(*sSsl>zid_=NO8Y2iH0(=p#L@ z&}8VM|h28X8^KBGpduGK~l-VZii!MdrRPT?a`esRA{8Y&MZ zl1JBPO)*G3)Ave74t}m|Xl#j@mY`KN(G->7W5c`S3pd#(sal5tDS3k%VPooZ$RhF! z5(tH-RCdgoi`(F5AfK-4;0OAhudr#y7P@^2Y38+eR4k{GWljk5Zu&0{E-2gPWxTM? zCR9ua5iE)qpwsHRcqL9MECKV|hC2dXhWn{KVMqwpcyJlG_g;;o{S&uC7;E_;32zr6 zh?%Twge~0r(od}ZB^V~gUdp+z)9*#9dQsvfj_1t4Jmc$$J3-SRoGg%iVmA6s#VgmT)@)3&_#0|?eSZYrn@ zlBBkzSEy2IJ)b1YATa*i2cl8MWGu+p_PM>CYjcpCW=(w6#3q*25v+^R$@P06V?Wjak!q#vMI1Rg7ix0SC)6G{ieV~B+@ z{QX{bqi>`K+c^yzvATJ|gmz$GET0gNu<=OY=-IqeE*5`Z?4fn*b*chfvi6KPQFp*nrJf37BLX z8V|%w>(KVA?I5Fpr8u)(QjP>E)a@}EU_K{qsWk!0a3kYvtQ=Re;3Kw%*F zQ%Nld3I){gJ^Q_KYhCctW>T4kv;7v+=5*b6P}4MjYp0%~!Xac;q%~iO$a(**Thl#7 z7s>@YqCDPNQ!lnG#sbMO1*(hXkHo2%cD$T=aDwblI5@b-6 z+2Hd4R)GYfi(vBOJ8pHsIl}o<<~wkv`2n1Gn+RO&vh>ssB@#s5BEFOx{<22mh(&rr zQ3wsJ%g1db8YnG@nM#@`bL5rl#s>kM9v<6T#Ze^5r#D9zY7u!Oc-Cg96OL1Qg;>>N z1?Z87AcOgAy!h}UWhfJOgjrz|AICt3p?57Yy3=j(@I@~7tWx8-el`JhcM(k2A6B^yB z$F@w+|Gkj@ZJQBrt7>Wg0ZAagteCz`=58Ybo`n?*-SWFfNpK37*0q(-&NjE20mqcA zlSF$O5?wdw4CZ{p;V24W;${?n9`WbHkT0M`z|KO*HEa5v*wb9}IN%DM{0ZDYJ{-;d zU!9Y5@Mz)O?-r!-SXtrEZBohfJjvG#=eX2jhQC6JMVRv(V|gz})BPvSooTDjFfumz zqm$KV)QIm3cvQ9~-J;|fA}aS~c#-3YETQ-v+_GQe>TtnJjFFIT)|7$7^9nIfFAnh| zgJAkK_eKm-Q-(x{DAE`Qt|hzwhr6%->uO!vmam|cN{1lb4N8NgQql;Dlt@U2G$J7_ z-6eu_cc+R-3X+m4p|psgg2X!u_I~y`?&p2~fcN~cKkT#Dcda|-o_pq+nQO=>od%^a zrih9lng~(5S9rln-gK+>dMN+f95%)XuT&UMkwo128%U5LjPlbxEz^)Yz)7*3vyX{n z`R?fWp$pUEHEqcPk&$T^mS=~*WG6H}zpBnw4#yAHcZUUB;~RNTRY8fV6(H&F_MjW6#cm1|nK6a`wYnpHt>t)_ z*)lZJY7OYIr`MpO(k!wZIV7Mh-a<+;@eqFMZ73EqI0(sj^$j~}wWX_I^WnP%r+MsP zFEE3X=Ic_gSUAE>KyqI_e+4r1T#B5X9($7K7I%&D^xxO?L^sw+#}uJ#!mX)OF2+PH z69(hianWK4t9s{IT6F%s+?+P&tb8Sh(CN_Ql^EuFVrLU4sU-gTsM=I&{KTk(kBnJe zuGow1G6(3KovR8BPa)t;S=pv{N2WHL931w!o9XvOD9douF&webFlFi0Y?~X*XcxpT zT6&P9Dppk+U=*j;IKWv=m-NqYJg1`P?)I{JoA7T@`$a0RjE%k-riA8rQL`({2C6vB zasiPM*pCA}XYi}D+TTa%Z%y{GG=dM_6pk#bvTT?8mC0>B^tsVzFx9eC*6jb!kBGqK zmh(z@k3-;`-Xr($aa`h&c9)iEN!RfKrd9yn9gui zxS{91Q^z?#&`~O}xMIJCT5uQK;uP(yfB2Pp=r2eM5OCe*h6UyYG11f{IR)d9XRr}j zr`#7J`|$Efd@(^m#@BCIuIt$JQIs9R;d8^t4_EG9X5;BxbehMb_$wup3eAxz`%Ox6 z4>R7q9;iHwi+o0fmwyrYeGL4*(B_u%4a&cNA0pnpN(aAp{u1@~@A*+3spd9vQF(f( zCHsbs`xksI@qk9O(Y`|*N* zj9tPgCCBLcE&ZI;epV-Z|8F35gw6N9)Z%j~J@=AAS&`LHb?@iP_F<@OnXucT_+5>> zwkt8JyvqealATx??c^V5I{u^5r;7-TR})K7yiOUKPQhsisFs)1^@d%XAA?G0k14FN zR8rL4xXU+En~O<>t-?w^*J-jk>mg-!51^?hwG19o87t^&9}Df!P?9tTDF3uKy|M2!n0!!qxM|CO zjRzDlk8jRny9cBvJEm5L9=uXARWfk+@)e7DlE8!^E5R58T3i)VUHr9U)LPOFMZOb; z({>6GUt~QA=p?(`rH#mIrBQq)QI|8N1D(p~e50`Su3;64@;fY!E)J|ENLJT761_)d ztLdZO5eX?L^LFMaq&6H6f<_e`gmu`KwkYxx_L__|2D+o9PyH69H31z7vk!{Y{M9AQ zeQL1U4yh^i{u=?ulTSXC*~EI2b!0v%=+7C-=#&n_7 zJq76Dpfc~EPU`V{CnKO2{;>ZAsd4dws3BP9E8qh;gw+49#F#5jB&`FJ_Fg8u8hx8C z{u_|vpEyYR9#`GMJ!~jzKzuZ!?_~o}?qG`3cJSppT%4(M;Lq*N#|!|jnhjJ%2$d6`=W_h8;Y2I6Gucj{seGY z`Kidw^N*pDTMY^cG}E8{BWC}+FW()&ealBrj?8_5J?a}bl+5VNc&wHX*IQl%G((f3 zp&1~9uSkVDIQC-tL@pkC%XtWCBoOp9+jc8$0FJILTe#abD+owlT`v@o90Gs=NN`t4 zpY`rb9B#_159{&2LTrtU-%<-Maz1uWKSX%DxY34~7F@qZAyrJ08FyHQ}H?#h^m2??Z&pAQ|tx> zAW3VtAvRSPj~zLx_k*u00AkX&mD2q6ppU~fQhg_gs+(vVp(FYuN)wf>+_l_ED*zD} zUonD@nXzy5Wy~-~5KdNzpm{?!0cqS=KSvD|H6i{7ur@!|nFUA=B7xperDIzKH%en- zdSy)iddtECSe;qG-#>5kC0~AUH!LeN027}Dp`tXsjkJR76+~?CVoqIn%98;f$&{tQ z>_AM_X0a-vl=H%nU@ONs0cA-UTK9os#dt_#1$PXI$b*$Lgm#{2}VviC)GQ5yU#wqy(sgN0qxxQ3;eh%H3x!h@u zp3i6ubBQt#EGZzKGy#x|_10Y9mh;P3yl*J8w!m+2dMh)QL=^W0fIhYKdI-PF56WBs zdZ;zXO3^7N2jdi(MY0dG%}cQSb{dy}ca$rseWcHLh{??iH&);-(0a;II{2?IsmdAI znHGLbjIG8>o}}>yl14{a7*SIHTmvCO!iy&gTU$ZjKkaitd^^fS1R(NM0h^Ru`#pyo zawr#r!%dwlY`4&`FgOlO6_EdRj!G=)ffoI}Y@`sx_Y}%@(jR&%#@{fq4BTQ?*c>NH z)3P80E>qP9?A+;-hYVP-Pfv0Pjy16uy4#YQ4^i{Bgy}v%hNuSEOyHZ{(_`Fr9Q+B1 zX2UY~0dGY}Dl|x31BkVSAck?JAZ=bFJ>}$}(mY0=uEqi^(7OvgH=d&-Tlp3wSn|so zggG$T`04XoAcz^VJUw*nGRzEI>7uk!?{HDy2x!bt?!qt>z#& zA7Q*yLu4r?*!pz3CaPTR`m^Kk^!_Xd8x@g({p zHA2@PCvX>V?YBg+elL@%vG`sQH!DF<14PE37KVR9} zsKtRihd%#w=J@FjW!TPXS@H|ub|*v4BkyTbp}+5xeGtcw=F(uO!D|_ieTQ2Rijjk# zfHpyxQ)E$3AekFlw?M)N-4E5`DQG_#x5>4?iLehNX$p|cy{XSP#8-7E3jb;_8X7{n zQjO&hZ9;kc4HgA>=gFxml!uuUf4aOw4fQgGGE^yocD4hTj(}AMx}_Iu{s4>;;k#89 zm{C^Y^oORb zTR^kA3()ddzslE#9toLp!X6M3n3XzX)YW?TZf5D zKxQA3dD)Z=Atk-B=g^THG9Lu1<85CoJgx_~ZFIck2z*pSu#u6h0RRaS4LmF=Hf%}b zO#^=eOC!H=2d40oWxhWA>dHfXxQBqfxO}pg#q7*Oa1S88g!0lS6RsV@?yQ8*j!5$w zXi5MX0qZk77|F9-ZTpVNtTiTeEgX>u3u1WlHzd|NtRl)8_TG|uoTUEcuU-*!wa;D9 z=EoaoKM$L1!xs#J+?cIGt1(MG5X2iM_#bmMi3Pc_-cagS%z3rOq>|=5dilB< zsXB%eeFP~wkn*Ry}nhaPRJk9r7{U+s2StJc@8oG zxt0k)RvVs2^NdwBT<`sYjJep$1xy!MCm=SZstq@bj!X)f)5`IqCgxy$l`LU!Gq)Oe zd%KLx!(CmX;(tRPi_6aMxJk?zs zvBzf(xvKj35G)F^hK7Jy_}bUT@I_Kq3OWf-JthPI-p1-6t5Tixx6W1GWQjL8*At{a zyI6Y&o8|%ZL{0^QmPops}qi5%8c`*>7BPIx5Z6KVNkz?<%ZcKmwVCQ~?gN+oK z@?hCpBoh!gTq?tNsN^N2ouOCnAS4yCx%A9Fhc$=Hi|*?>-pO0<`3wagsmE#d-C80V zg3^jZ^-npaP05qJ`wKrb9xgsR+-9cu;RZ$!)a=%b44gU$;A~1-&neSwIhX|)4ycp6 znv`u#(-j|BOi%CEp-7PWqx-a#>`y&b3b5<60)_5fA7M`ffl?y5k|SSrdL{DFZRVXX zttG6tqQZx2I=VyFz*_lH7pI`4;rtiK=G~J5LRJ$|p z65V%ePLBpztvIX>;;OQ*-is29Hh#6-sTPFA2Hnp35}t9dUjhvg)X&x!{5pWF!q)ZL zskl!g4-%FBTCl|i!mt0iU=`L<^8AA`auWU5BPeedw>%o9g0v54Su<8PX>5~UrhqaC z%1Y5=c+!JH^J?hAUxQq*?{^HgIigcb^<3hmOTB?89C&wh*=n^o7msq>ZtOmqU}U+v z4V5@`2|}Y^;q?3{<;?#LTwa#kH#db$lJ9JF4L3P>N$lmYeI_Su#+x8y+=uQ}oMrb` z2bq5gjAi?BbRRp0aPJ zQ3Mp~sSE;BJ%r}mHc~BP5aOmN zDD=2AVEJ{@FNIYq>?-7N{*b@fM@CwXcYAIDO{XtM#T5zM{*G1UTVnQ&@L+M#E*|6h za&aHc8H5??W)!_cIm_a+ATY?qUT+CU1`Z^1X)A(=^(2xFQ&NC>|%qDq( z_X5qBiv2}6XQ)1wwxKyC#YBjXQ(P6la-f?xn0d+N{R=PpB|90!Ch?6yH-5O$u`?Lm zH=#OCs8dSCDbUHu(lAr&`*1E!s4H7h$I`l`RU^WBkyl5T7K5}6PEKtOE6XGKvbnhS z5zPV95+ndvB$_(oybhvE^{LTmc@F5rny8f0Y~3lRQRhFzJ$-v=1uOsuZgJB#EyfFk zo38Kb%z2g4l2wAp>!G7;k6dFCuS$@L+ddy+wMM>AS2}oZZ$%`)ek8P-VQ#m}$ZdtQ zCWMczNcCm;W=vI{__15T#(;OPueJ4+Lh8qf^WuY-L0op}!oFu1NOeQvRKC;(0hr1Z z;O4DXaLqs6YBvu1Zq17XsvuJ4vC;8s@ErkPdMR_Kp17{H-W|2EF?NMC4u`F19T7Xm zw=Uj0OS{kgKV$nk5+?Ry$y}E?&{rShxbl`8?WNZDCRg`(y-xy1L3XKEy5t`A2n^C7TzWnrQ z?bu0`YNOs(G<@XUV>^#!9FKGN#W#BS4Mk}rjZ3l=(Lz=v>P*h^=4VJ;ycVn4+RCTIdOE_u zs%RPvE8}?<*$*v@IB`I_?3umw@ZR23QRi@u5k;NGm`p?90bmNX8WtC=CpeettuA@3 zkKGqm9Hw8LQa#$O=hm1`u-yed`I~)ZckcBpDsE8wLXhVaN|+H(r|+r-A&Q9Ru?1n= zp>(8?Y5nl-{A&gs4T&6#hX8=soIXey(wzD_W<#Nd-2ZAZk{kn1-Ka%kgsYZ4j0aBU z$0#^ylkuU@<4so9%C=umiED39O!h){>DiF;;m#Clr8Wql;OTw2QN(-G!cEGV%#YyS zs%E89L#I%r*4;3NbS}ngL-!=3vu&B0jP>_-YisQJ$Gnv@Z^_jeXe%+kePCtXpx139 zckJ3&21*9-{szx^&>uHfc`#GK18odwgHBaATY+EW9PY4pPjX=6BiA(IbKu2DG7dcT zrxJ;o=DykeLF$l;H+NNmOGov^Yo@qTIf)4;1GREv7QdUegd%(00-9_O?&PX{NAA#l zlJT#onMB;@YD1Ub%CKdQf21ujQd|Mx|1Q7C_M~R_iMJxlWMuq<-qVuZ#!o!x%8zQS zQZ1;eyLL-{&4O^Dy5vo%!DCi;KR)Ip&7`$4aC9E2u4Beb@%D0dDbU^^eag8Zpjo*d zb-}id)D+ng3@oj!oJ6>?9QSexHy$wGC{4eA;i=yf&U(#)n=6^iZ$}F>nSaJO+8`N} zQ@`&-79p83i`uzi+3AFq+ezK{=YMwGqIY2o`Qcb?FN-We6qB!_@ehU6&1c0}5~CWE z=;Ja{h3`QTULlSF5X?7)8E>~Cc3MWRPh2DZO6P7<+;n}^vPaC@Q9`M$7^NrEl4sq$ z0T`(D1mX~-^l@_v&3rUuLyZV;zdT~jr~{_+$ngSm$jd8!MD;6kp_lB-04WrJwdIW< z#&dp@B%_LIs3fh8wEUsFLTZNWHKfYyiLUp-j`Q2cE!l{X4OmF@Gtq8wr)rkJC8FE^ z#(1n#DbM$fF~q(Z{j`F)5mk?cG40WF43k8$B3~SF@h80=r1IXsWUEX9 zQjlK#%t6E#!1|Awnw(qleV$B+h?qoA^lPVnzwjYEG#q@L-2s1pU8E7gP^; z;H+1R&Y0R2#Horpk47c3Y#-rDa>*Sb3xL!D{Z}qagHn;H0JXBuvXXIf!wFn8&5Qc( zjrkBPi!Gnu;K24dgPV@j_O&r?AS@~n?$)#1?IF*LLlc~~f6RUaX$g76-=^A4PxSY7 zWF+(y_{w2WY>^K12cK`e2a8cfyW!zMKc3C#w%;lAEd-xZ`DqIjPs~r9h9Je@-a;a| zikzFE)tGfoymrrWC|(l*NIP4m+y^$i!Dqc=1bNF~6Mg_5gIC-^Z#Y%XZS4f@XhS;u zOKkS~H$p5fK&HS8zHYg9#j*VRNY=nWGIbZ`Q;_t(gYVCd_JQrxQZnc1Xe!U8+hpS( z!w}dQA^>7ebBoE7_mM^=V)CVpD{McLGJ+BxLJdLws|jb5DC~mnSF*Qh%AH93B?GMq z26Y1Ecp}S^zu5j#8W7qt#+lGD=SM2JZ0CVBC2}K&Q;w(89k?DQqi6i`!oLjJSoEd~ zq$^F`$bBVou&nxHhzR25bB2j|;Vt`cgOt_JVS6sKx_v&^hLP@2k2w2k1Ly$MK)P)k zG`IoM1XD6MKy=Q7rxHn8^`%JEkUPjy)aK>f>nD-IpOXMHxMSH2_S6Lm0bC_L@uMeZ z>v&y4u-)VzGK}Yxp{FM-osLtXRY+C%PSzc7c}j4q_IAzKS^vgZ)yL|MUZ9?K%`X?! zy-82GpO{{T^vPgb5E{!HujT$c{2MN^34vU-t(%YK*amXUuRK_`r~+iguYX!mm0|$X zwiim1X1_mHrfr1wJZ;kguX`p70Gz*YNu>IHf;%#|*Jil4zdq~QIm8jh>_0wszCH@6 zMubUY_LJxrY+?moXePf=Iye*tG%%i1<>e|y5>*Q0kyh6{Uo=<-`R8Dx92+A6bj_OG zYX)>Z#v7daAN#FUPasN=V(|GB?Oopy8MJ$P;%km^{6P8Cc89|pbBVM1jhtj8$&nt0 z)qXBOO`nOZEiQ!Cx{7~hzGH{|sDNnbtHpdn9C3G5lyL(0l=`~3KFaJiz#ag+IZcfd zL5>37rN`^!9mCfR7ei;g=EDV%E*O^v=NpQYj*MT~`FdAZTJ95m!_MdML5^CbOh)Mt zQ~i78N%T6VNK?~b&AD_So|1mKD)~c+tD$3lV2MjnVNnI5pMbn{mt-TnW;Dt$y`k(& z^Lp3|NEVqbRKo)UVJkF-0v9*ePg7NNz52|#X7X3t9*Bl~;$RurBLpZA>~Ud;D}Z`? zU&iVluL&LQRa%;`ZB>tx#8DV0%eY3&u`Z`_Au__t!;Z>YBX7OZ|twRLNC zUqgYR%`QXqGjPRxH7G3BD@?yQkb`TIe$k8m?5#1VF%&wI8dv1typx)6PeZxsG(t57 zlqirOD{i7R1EezpfCmBh)NfLXM>uOV@34E+SC)mGSG4|8zG4U(h&I=@%|N}WcWgd& zKCoySF_}bfpz5lJ9RJAfQ?6k0Al=GbS`Pj6CU+dIrt2Lxs0j|ukdw?WL(>oxf#=?8 zE#o20m{RnHJ6^8KPjnUbENq%_IVi%rg0?Q>W9Un*l`gG=N^KR5*fUZB^~!=X+kob@ zK*A98BVQ($QdrzaGe9WIaS`aOD!zhKHjqu>c-;;(1IVOGH+T#C7K%eyzs)^j4in zRK@UX!35B(it~HqtzWlgYy1JB7xf#he)q*VcLaYT{Q@7vx1ResVNNZKJ9S2JYAsQ5 zSv@|h|AbjPPL@uE^{2J>%dm8BzIw$mi{P1>F*_`bDwleXT@lO7hf;eC-7SOMM~j_ zQ|#tITs!Tk^z&>1xwg9OVM~P7_0H~#Xk~ULV+%6rPMBSx*=fU-HQkr`vU#}#zQT3O zayhE_7lN>!p|H+4TA3a&4!3F1>w`|u{18n7thCaIBky5PoX~AUg{Pvf2|3o#53Is# z<|X*~O>d|?!>A=@OSHK9kUA0aT;ffZNS=e}64Dt2qy-}#Ze@!*E6)88RsvL#xkfv4 z{Cwp(1-2F|wqHC^>()S52Nys~^ApDOy_5h8kh?Nzf;&JmhY$EAJKw~239LD zvdV>^Qn|uAUlSICOA>7+Ut~pyu+fA08eXc#P5891O=R18=uP}Y!j6iH_oZdIXa}S5 zXJ`-!ta4?AV^O1s9E;#i7SXXVRMZ=yT;5vvoTVj2}X)| zW&s924}WwLi!xlPL)lMf8cdp^=Wn^~Za=^E?V=^OW;E);>7n8zUel1_uLf8D<^qU) z!RYqTJ*cI8fgTp1J(Rx{n5q_cm}F9?Bxeowk}d{R_7IE%@-1Gy(D9|tyT)>K1%iu% zuFqP3jSO9SV|WPVAtvvJ<-F^AwTR4UD~VTzt1UDqe)su^6nJbcbRWL33Fq@zq0dTv zM8Hy(S0adU@HYB|N`xJD4!-+<{!d2;0uSj~E^DVal6A}2`g(8rT*T*EGF@J#V=up2 z7)Sq8=7RV_LoXOraASt%C=c@T6H`6B&0*#ZN}3C+*=W)qAmI1I6}Kk+uX^nzC2UJoouVY#ezO?o|E1jftoAcVN$fbOvUdASBhYozksX@f*Mni!$jsE87oSozAxcwoe zjZ96o!a@mG0*@skJ?kz`dHK4XM_u>Ah6e`xKO4U<1`f)^no9){Vm)_@G35>l-^sOM zZ%4-~Qy(mI|18Z(&{b5q6+eJVvue?*Pbs3z4cFn*>Z3Sf@mD}QA z`dV63+-DIUvp8Gx!?-QWcp4>Ym*hA0s2%OBd{*_t5)~X$H2hct3oJ*$!L9X;C*OgZ zlEm!RBnbMBq-0^IMBBxSl|m3Gl4>+Gn-tShRq zm-8i=cV*e}eShck)!mr%2h3JiWo&OW@1xxBLkT#Ez}#5!s)jw)VCWYq#4gF6 z+16FCp3PQX8ZY~RP;7sFe(`Sf*+yZ{5dj_xLWZPS9i(t;?|foaAltg=WsngZp2&i? zPG$Bu#kjOM9@AY?1N28nP^~t}ey%_d&HbY3}UvRPEj%jeq_F3epoMj z`R5JKXQ^L22Y#j}ch_@C5ZOTe9Xb0QWi$*&w!xu{#|re`+qy(fLWQN_qRJOU&Iw38 zxjG80;RKgf7QE(ebk+nn;s}afXuDOrUH)sMD*$)RFZ(M%>SIG*4c`n1GtO?oJsHi* z@U_#ePVwOEt@`WyN<%!{J`3fTU&IC?9+EJ>)nYxHn0R?hs7cwUZEE|OFx#Q+<+Yrt z^JUTVylni>#{-?KJ_yiK-Waw^Z8r;HIz21kYq@VbP)5&6$W^K^Bk(DhK0n zvZ@ExibLqz^us0lPY|!J0t@K;t$yqh!Y&IfzPaiR`gg^gbs|glY5Qga#B6$p8p|%? zBi&naWg${QA1cl?>tB!?{Z>C5moePIWTaV=R6ipuVf>mCL6VUGP+HC4Bj4I%4aAkL{Ybo;HM6qF%x0fo zYM8Jk?Z*)_?$)9UURc%}foZ?Ty?$h& zFYTPCG3jpLKF^cuSl1+Xe%tXBmaaFv;MELq<=5U6WQe}nXD_j&IG2$f`th$Ch*RD) zupNK(;met?qcHjVug|6qLWFRDq0X9*t3Z9Nvb)~6uFO0S8A66+I(ofPY(WqHcTr)@ z98qiS+q}&+)<;^k7k~28E5|ytJHJO^UE-QDxwu)2H7$5~gu`pEkGn$8DUx{b+VkF@QmI!_B^4n;p;aK0inG0dTV>p+Hh zB}i)9Wa<7`c{ahl1`UHHaEtxb@esj(>XckFJ{bU*G2Ezg^x$_#Gal)u_3_Cj#t@ zRn`9WaHG6WQ&ze|C;2y@)~$F*!xiRxtIN{t(_Q~ku!1@Nct!v7ANmaTJ*;_v7>j=L zfuuN_4bG`+XTM}t8?>9~t%lD$Mt0yop9h)XZXa$uf)*thVc@$S!nWz%&kU$uW`!|{ zk4~o-|LZ4CH;jI)O(1n=P~g*xzuC79WFwx?33KKBJMiJLlEPN;nwxIIQvJuIk#!=V zAw;qU3?OYfZ5v8C5f%XmF2w`6BccQZJAx?Q{_M}zI=~G&L`Xa|Ve##1o6Jl8H9ut%q@M<46Qz$_55gs{ z1Rm2%(5V#2V^vxft?jU37X2Snvq%^W$>H-ZS^xG-(lS7ByI^1 zm-w$$Jb9gODL$wde&tWTe+m_{bwv8`@ppILf7kr;eYho%A>|O9xjMR^~KnyF!0YDz4tqk^$QKh1s zaR32(>>Pw^v3r!&7&%BxtdidL0aF5SZ;~gVtF-J+`in+Y;YJ2H;?Lt{M zAuSAQftp{72@WXwn&JF%E}3|4TV{?984W+Bj$|{;@7P)~DEeh1`FIDMGEaT$iW&)6pQB}k zX|&MTL*_vmz?d!ex#?LbW&BOP(OjO!=yXEh?;Qe;^EnG>8sPwJyWv$N$KxcrU)2t2 z2@TK{xqP@lGmopXi_T`YspVcrc<)P~EW)V{el%CEhwwE*Vcz-^?>_-TyK>lXxuP+&K2(mO=+epJ%vOHcYVx z^nJrnO!Iuwx0QV(F-ifd0BCA?e%E`K)a-VWp7HHtP3>EYbt7HOegD_DyRJk@HdCcD z$L9sIkqzALN`BmRf$mC$G}tUW8L+-+A;lSzx*2O3k1KJ8twhR>6)XsW1}_W$l;5RB zg_9F>mEfc8q?jVVV;UepP=g+ZEMrD7yr7bUODp4fJCHI!`^dKV)_&<(HvJ8Plb`9b z@c_*|TyCNbIR809608m@tye#gJfF&$9Fp~!ezqu0&}YInLKm9up`do`-nR4d#_g^< zIlYLrfe5A&2EZzfNO`O43l5xEC62$=p3c`T+;aMm8j$&0wXhwgpl{<%9WqhG7=?Pj zH5W@lQ@@`(8O}vQ77DK(E^BLv(vzD+Qr8HWw<4h-ndLHMWgWp?l0za=cU9 zfiw$plHPZ;)U#rQG8^%gy4kFd*Qgn_`<^+iegD&o1`A|JzO#fvP;dn;$qhVaITlKW z7P-`qwOK|8DrY=DFnpMEWp&kRE@C0(>i9KFroKpG894E#7?1@U4#_cfYj?OnVWQvY+~rlHDQX9nfde3O zyIH1;QFya#zUf4EuUzxt8Uz!I-6ph-Z6}(oWbRlwpc2YEyM3haKZch+%0Iu_;52LAN5F>-LluiK$(P0 z&{i+u2Bl}t#rV`_Pr(zTx}Eu0yjGHh3N?K#pXm(#O7jJ>r-Y? z69U-?u+mV?=ST7DFKcQpj!~A~Ii5C4y=3RD*b3bj6=LGY+iQlS)pl3!J;f^`AYQIE z`#Q1Cs)mlnt;s3P?s2|^~kawY2{yEq?-TcbgasC*`pROj@qM7H*P~_@%{C8_0AZM zEFZh-DFsr3rVV@{qz5c%D^QoiU=27nGN-r zcE%DY$k5rB>4vXR&W857KSR$^S5h3?uq>65m%imh+zjPdYCvuc>?<@;2Ny@nV8 z8eE2)TN}Jd3|~wvASqWD&tbwws04ujj3W}Q@Lnn$_YVM*BE%{pW6s1U($Rz6m%{%f z^{#3%U(Q-nJP8+6#7@_k57KxClrSuuTfpN`r=jJEn8*IoTs91WPLpnrcr(;2GXW<7c{;9f}9oihqTi6V5u(gX^dogRF}` zHX%)Lu%gh*t~68fE@EQVT8LNVVqS66SAAS0xJWd8Y!^yKqpw78IG5dG5Ffa9y#G?Z zI9&A^t(?ehxl^*Fi%q1_-m8T(QJk*z03B&10tWDzI(~<_4hUz~y$G*wJQ)op!k*L1 zQPl_iB3B2={*lLjR>Q9ya=rGT{oPpJi>wy+c2}>}#stt?_9l8SE{+Ua4bI2 z1HTGr%55*8{AhxV;3Dfcbwp~X;2V!{PfQrNKPWNnmHZhK^(u#ahe)yQ)blk;hE}+A zPVSJ#b|@#=e@wlfq45;%e75GBKUOKY z&9Ix%3@hA%qEoN45Ucg?*C2nB4pe)=XoTz72X6Bqqur~lU!$Zi=8W=P2doj<3e8xJSF z0qcq!o)fd+pOwp)fkm6-+&J9gFyiAHDEU(Hj}V_sbr1udXw1MD^PfgsZ)HvQj6WC6 zDG$ECVNDuYD2nWod6S#gpN}kDH+o-w_tPye9~s-pzSHYz@Qb$-P3C_t6*etg7SdO; znBOtM4%zs*2>$sdqMNWousoq25$TDjegbfOITY$nl-jXoJ5g_u*c~Qmi3LBFnA?X- zufgm&b;tgVgb=_-7#XYDLa(&`+x5N;5@4!c8G({&H%WVj{^xj!?#siwWgeDJ z--BiHD1ITX|K}qqoFK4>>(T{Jgibf6r=DoSe>;01p_V*vxHgz?@SeolH z-kccG|1qZ^4A}2aXY7AAz^oICK5rTQiAOA5^###~|Fvd)4<=gTg6YkSQc;Ef`Namp z;BFyq&3^qU_x5#CSi(mSe*Q~%GGXe(g~H;=B;ava@s_8a=!5?dp8vTdFT=j`YYy8S z--rKkSOy;>t5W_>e6bOp7*+lf`p>NY`in;nS=iHH5`uKunkxUfd_*ktJl5~a%SX-% z#l3TgMm;0qA^)jx;mh9#;eSq0#&tBg&QVOWopchI&~X3P?th7Lz2uF~ot+*2R{pvS z$qE<|ag&$0=$~KI<-&_~Fvy*Bv~rE|h85hklNtDD(kFlUMu2=xLm?FuolLyD=kNvF z3E4JVJK9Lq{~6I=k8+X1HFsZ3x=bsah=hoR9mIj-3C^Eehgi|ToctN$oN80-H>(P7 zm+8O=cPx87Xa8KZE2=aG1(tlJ?)kO_Nr+TUl%~i3xyS27$5QpQyWXX;sZ0zYtloP? z?tOp0%PG-^e!j6tTr7nPS-02W^O=9{ViX}r)pxY>!WO(2st`FBx&Afc&+)T_!Q#3n zTi%@B+#^*~O`1^(K+$s~B9b6@YyAB%;Cakyv*P6ewPO%^;6rl$Qb z(8@OyM!+`oEAamz3NEm1HK`5PBN5>4UEt5^KSz82c{?z#y&QY!BSUy|MvE4ah5z*) zV*`Cu>)2E!hKT2bRrj@jtzXcIP&vQBcPGMjDu7XQ*^pgO`eS0glvu|K>2v<5*hmC& zL18wD{;7~M)L?4ZqiN|k*kM358sj$q5@8B>!XxXccrO#Y!wU60^t=>5YzI~d&i`_jj_)^qSe|f4?XSYHoZ{mP>E(aQm@+9C zWy1#!{*%*F z7+$Fr!3Ft0*Kitc&R+dk_x7iV+GlnD2`=_O?|{D~9T1(B$0;8Bf7MxM;fWndde6{) z1T&?St2@C3{&|rkH*EFl;@Hn=EBJ~IVPgEh7clHtdWN7@${y%OOfBnSsC%#kyHbB{ zhotXy48X823Vu4d{QvfK|CkyYh9w~x0DMp#Lp`>{soYQ;XNglu*}jkwOAtE$WN z(Pm!v`f4B!BWM$~{^jofP`Zc#$})(A4W7ohw(TN!McI$7`+v@2NdunHpX8y^PDDWZ z$}`{W|CEC+l(DG=1%D(<<_k+h>!>Y)Bzb*TNm?2IC^jM z6?q$T-!-a#PW2~nwB6qGhc*d9tv^+U}>XrhXxnb%d?L^Y5xRUU^d~S2k;_zJ!+oct$Qe>4n}+8!XT_2I zQWI05xw3|?uI@{M4D6Y(b#)pwpLZ+Lm)`ZWO83^Z>4QU4_obn}b94_5!Pnlp#5&dNng_IfE6`-X!s#-5gq_DZYiXwAhT+^l?)mv;`c- zvi_$AkYxkufR7pPhA!Ts@ppQm?uogGvBboQc*NVPrZ`Wa@^9_|GutM+_U^d`EjVCH zX{TgPeLj;TeAIc02)lb6udpx6xwPcW`^(++cws$(41PEH*woDiJ5Jr%wgI`#SVt|q zhIVZvrwQ+}$kG|B72=uoXVtWXRBl9tQDx>S8qJk7=;yo!^!k%zP;sUB?Ew=#RRPb> zu}lQb2+!Y+DwOapejvMM>^nGAV2hjB&O^z8d+jr+8`I$|Z-xIj4@J*if%DGi>C@c9 z12(nwVwzuGF?dUe{V``Yd>mW-V0C z$|)i@AZO^(O-`*&CU&6d^mik~QR1lR7zvwJsyJGi7KFK6fVuAFPT>%W+^;{ z&n(Mmu*`)YAM=d`orz8-Pvx2YE9w?J4{0N8G?oO-cWmSy8VB^Vid?J|KGI^LWwb{4 zfr_Wq)~EweS4Xh)Gs$A4#jtTSD_DO7#-Oou7L@b!@ySO`sO(;#zAAPmOD&^JmVu7y z_513NemE{Go6~mYASKRYZQni3(=H}%w?Ua`GhYitBx#pP{IEl6wJGVm6UlvFcRWiM z3Yb`sraP;j!V}=Qv{25_M(l91@m)8<4sN8V{z%OIQbQqV_{2 zJ30Ls3}g&L+GTyWmFI&L9m5R>2z6pN*9Dy-jA=Ux7K-nkzbI(8M?z*v`de1^8D5ff z8g^A2D^D7{Y$R^sQt}vH{1O-MJ0)W}w&?QjMM}e|_5BoA$j8k#UDhHDEvSI6p*xmY zg4cB}^O>_8{@!IV^q_CmH;96%GwozJlBfD_hyB7o!&W!HJuT;oxk*U)E?YQN%=FbPa&PKi&fn4*#W`xqws<@&v#j3QM``V1d52^r&1JX#&rBnM-LB%VQ z09n5cOnBdW2t2OxTQjQ1F9BhtdWd0I?-?dP9piIP#AzkDSxgp=CMaY4!tRr6cNNKp}E)jty8FgwHW89_JV|`;QEjIZ6zk&m|Da&s7 zQ*XV#5jJM>-dmX=+e@LlWJ)3&oTh`F@jP~eVUprvACwKAkrRb)NInsYP*^BX$n*nu zT{c#oCYEo=ds!ZA37mj63gSw;{uFEmj9)ASaO~(9)M>~NVW`?3irpznB)=|q6qF$= zKj#dMjX%!odJ%uPDe3zI$A0xOY%V)5uZb^r{=|jHyk}*f9nKD=k!qEf#GM?bSz&tfCb9;TM~)zCvAsIB>Zl zxeJhJPkk@1s`&bNz)>he?=G?6G#TSGLx9eswf;(sRhB^~w3ip|GEGy4QrHjre)-RXO7HQ0)cvQqIQE%V!HWL0OwTniZ|F5Ad|R=&+TNYtk&*O2 zuOoXRs|Auh1Gd+~^jWS->=J8Qs!_47cOcEO zzMgw}$)8W=mO`UDqs5)6zNh>&?aXN#YV~cjm3{!A&Ao<3^-O2@gY^|RqmaFXij zQu8&JQ%S~g26rc*-Usw5btz50RZwZR56Tps%+MZ?KtmwFhd$4Da(*v9peFRgr)BM6 z>e;7RFskEA;p(Y6)keW9O)JYNO+ZNAs3?)R^~@NSjfrg?vV2^AeV<;V@*KyCV##Zg z)oO&Ro?Tz(MZJKnWHYo;-uam@meH4Coux5elXG??>C_C}a{|}T=e!snvE=A2A2!S8 zK)7oQ5^CtwLn@wyuTRvh0-mD7tXHJd1jWk{zyyQPrkg&-Uamd&oB5FgW6SzEsXH^2 zwr!#qZX)Py4;?iN&r-ShSmDW!##1pzn?Xvz?z_lGS6tTK54j>i)(H$-=q>mG!nlwc zadU!_(kqoLX=&Z<^wYKSF^;fPr03r4(y}29EcD9y?*|NN(z79zwJ@^U_s_TW6TzBx zL94Wr%>k>q(pt;795o(x+2K3KBl4|Uv5C7Sx5?1X^n8=S|Ea*hz{~4Yx;;YH@f}Dp z((`3)OX2s`THul>xvx$L9VsA5_eg}OhEL)n!n4glR6jlSK}%EUEt}7&teUTlt6T&V z*|_zw(mf_`F1d<9U}@JZL3aakt#Db|-JaQb|DMu|ODXIg6~~oYj0i_(;&ze$Bvd5X zoH+LZ3fG68J%LpFASl1?&R_T<>nnm@FW{H&gs8W%#m{2AZ0>f&L=jck{+_IkL58J` z^Lt}NUC>`!Rcmf?eZYg9IM*stNm_t~1GRY#yOc}}s&GL~9|)BK31 zyXJ`vfbe{p1$Wpg?R+O^g^bN3zJsy4)|zgnR1@6^MetEne1gs+B^;0>W#blmb}BmW zfwhG&TOzC4b)WW*oxEvc(dIYYGU!ZPa-5o@*C0<(%Ew%kAbt)5?|KfdnYQ}Jd%jv) zbLN?LkG`jhacs?BOn_OEpxN%oXwmVKYR^fZeRQR=Obh4kf%DnXwn8g`J0BqgftxI3 zTZ1q@rMpa8n9Zfwto`s%@i~%3ViHH0zY2n4zqGxF!hn$n*nm{*U{eC694+CSSXFo1 z)8pJpAw}auJ48wJ<)CDS*P%91&n=}~y*n}h#gCQtXm6R94N}+KkqU+6`jk^8#gd9X zM^gI|bKaW4iu*@%-nR=QsyGqcf$qJBK<}{XOG_~!es?J~@hQhk7)0`z_T#g^IO2w; zx*6GYL6@syc6c5HiY7do6E}z18K7}}GXQd{SCiLsJc#wnNHM^dBTz|I(mZg;CzU`; ze_)0F>qEy;LHYWxwqpJ!$0BNgA%4GqKF^qboifrFe;kpwXFv;4-vRqNLdyevr=A_{ ze-W!reS;M+3N2Sx-e(lC3~Hj_O0f|70JJp-g9{=So-5npw=q(<+QUo9?luBD%j;Bg ziuja>wKF&zcw9~&(HpSD2R_ejv_nP)$L6kwC3dy$Uvnos%6r*5cT}nUb#nbeG(=$z zCnap4S_wL$pFI+XFyvTG(26hXpbaW}w!V$~D5(BU+J~XW5@)e=N-SKk+o?ERjTorw zpjE~iaO_NrP5lm$CC7o&CD|F?^!nSWv$+f5U(V{i17}|isD6mNzleU=9W8rw&3Y2l zr`w8tP71`Kd^OP0Ih>Yr5*_o3k>VNTm;)fMM-gQ;i{V}eVW#qn13IR2bWLzC?O%Sj z+edQ8`bA5%)$DV2mO%>CL(V41z;XXI57*ECY45$msgC>q@sJ2*?p32S?T|i1F9RO+0-@+dz?>m^gf1X-H=ie3)(!Fp#Fh=FuLFRr0p8vO zKK^FY>t;%1gbUNWm0m7lda3l`KcBt#C;&WkFL>b(A7@@8I9-5zo&I#v>&sN8?{qnJ zrN~DV<`c5OMjdfC3ajJ({D@?A(0XI)u2k(GgVx>a*)NatrMiHzTgU=sr_YXfuTT(e zJ>P!*E}tqt70~F|huf`nGtUuETRbIm`_7UHa&;M$pO6JKI^#wbOae$pkVlCo!0O(4WT|UV2#eYdVsSH)C&+IsNd#e^J$P@YPgn)EiBR1WKNE7 z6mZUA=d;@@@ynjN3mg`=eU8@3o!=g-rCf4wDof9?pvVj-I1A{6 z9$BDdvG4D?regBZ1(vsKlesTO+bLX4SsYP(vYaEP6cUx-g8MwiylhtIN03)o#0 zA&$7e5LnMb`1Fk?ai`iq!MAdB`d0AKQ{}#=i=CF5D8|u>7v{K3wN$u;x#sP;vUR^0 zKC$8MxZ~H+qY?65+FIAGi#MB&3y%*rEp4i8A)YzY)|*n$#juMx%DbzbXZ;~2v=Me1 zZFGHRO55TaDvhLH3EDk1ADYwUVhGkUfZ%E1k8aP)U0gQda6jA!&2%@1yMPcrwl3bi zw8#TgFpWcdsCFWB$mumd>D^1nD6q*2B3_L9efIkxewo)9G!Iy~c#9vCON6&(U366BGwf2S&oQ%pb;* zXY$+5pGtgp=J5Jwijab?C5FPTqgBz{*ZNFj$9Yf*;TV-h=(0&04ptJU(Rst{ACGZQ zP*SIuLAkjYrjx5AWMqUWnd2W=is|O-+0Xgv!9LPGGb4{QA~(CE1WxfMlsvk6wcaJ# z#QRhh$8c~u-JwU)e2mT+jou^99aQ_LDHfL#`CYOEC$>=;nF&U(4N1SyoWY3}L|*#} z3%iTpWN8&fM87mTd?(LzqhV=^&VZE$S1*&YdUA)#py(^0PHxs5IYdpYWvCBUh_nc~ zC2f19o2HcT1nh|1PH36X4f+0V)jEq*lvn}x1l^p5 z&WcS53HL(XP)tk|ESP!hCox(={db;L^u-dc?xov2#JTw24l3q&F$$>jZ&Psmh7oYR zm)qE!K3Bx@TyGZiy=Gh|Y4eLAnQ1J#=pfI+O|*R)w4}wIgiq=&jUHF!7ah3KP&tLt zV5Z}?{eW6k!$jdtMZ3iVLOE&aCpvdq8-+}%g%|JZIxY3^MH*zQ`>oquosATTJC2s{ zxSh?VpdNGInj(R21H;!_asG90An^=u+o9o_LI=aii1tz{0g8mHG=!b*u!64b0Zgb| zsb97l&M4qg3XQ*&DCQXNJeFKbUcP+1R4M^r=B?W5R`cf>gRp)kyZoY0rOQY{o#oxP z8hRqV8TdrL7b=flXSf-ClxYY`c z`(G(_F3~n4FMZCVw%fN~L!f+AQ4%qEa>3~MMJ*GVxRQ8u>;&blo$;+jA!e)KgKJ?@ z6{8;kstjdJ6ndegu$q}oyOt%g;H&%RK90=6hI?#O*lXFPl(;JfYyqV5>XR5t! z7PmiYcyL6lB4iX0pEMRR4Gp9#SE+0=SW4}Vz6`M!%xQKdHOf1_pwNnrv-J*fN!e^7Q z-Z&Z31J#hz0Ud8v#n4avtzX`Wcdpor7|$p#j(`665~@D_?+q7Ce6!v`7OP z$WHT~7srY}2(TO%gN_>m$yn;g3sP6RL%2Ah5mRp1l4PU6;xL-|;|{r2R}KxdW31wc zC|`_M^J)nSJ~1b9W4&9}al^HHT!2RJ6Dp()+J0C(>L{D4q;11;(90`(R+u{tUxkvI zXrh;GmLE}5&t%)kT4|7L^tGu_3#y>>MVbtm^=UH$bg_jO=xAd;ad)@Hy=t-jZKrFy zp69$d7>teQf)<9#?d+VcE}>TG__FyJ6J$8+@m<1w!$vi+zE$J1zS1H&kKJ*}EE==p zd|d6r=4GRd#X_I2O}{XBsH04Hv*PI|zbkc&bNP2w_Xl3ioIb}ytBVZYc_+)ejCidW z_26cV)7>}pp zC8e&lM$3sDB-7p}SLUKak?2KWd<=w5EBfgUe8npY(H=$3-Oxk#xAKOV5r`#M>{H_J zebV(cHE~>l)^6fmNSfMJ>i<+;( zvqCL`jVGU|aLjTa3o)4YjZkAqv`x9Lp7Mvu-<2P;mA!0}RO`5QbtW0EOvAr*)Y8Rk zGIpLhkS4;)BD_L?3eW9Gf54Vt`of*d?^b7cp0ll?k_&#bpt`LgcvdGuo9pZ*8|7Rn z&BNuRSqB1-vc_*mT&kc^))lvKT@*uSU1lIB;{VB(kgr6|E2bVYt7k&jsJ7Kc#;=f>2Fizu9kN zCgNk|Tyo5ItVUP9;#;lHYm(txQ?5%AmP$AB&ueP0UeNUan0!i7DT#(o+-Wmr@!gv& z=9!ZHMkv*E&Y;>1-fh_Pd7OS^BhjK8Y7qzhz*9N8{4`lRp6MoaQR2UOpULRE6I73~ zuqd5{B!EE)7v9eAb2V{kM%8l>!SN}21Z<#^E~)DmshZ(^btc#7+WC8Obe`qw-MYk@ zKz}re+=X@3xKx06QSP{uR>hb~Lv>CUT{h#x;%8sSBNRH( z44)I|$yw}RA@B#6Mad-*`-#d*kW%oreJFks*XL%&70on{eK`i>2lL~KVw~L}a~(Wy z2P-L8adaR5kXTP8?JkU}iHOOfm~~HX^4op5Q8#L5TGr+d3Cgjvl}hwSckcFd)vS5k zF7E#{?{b{#_@zLaHysQES#6FbI=52q-rx+C3mv#CJuVt3b28OVa$z&JGo1cuqPUJV@FTMbq2Ky0cuhcRK2CPL|U zwwAV;wJe4So-QrAh0Zl5L7Z?xgG-Wh3TF{sRl;IrMNs~U07o+?j_$Az=nZ^%;KE9q zbphRt%A&ciT#*1LDxvfC&jN-(T!fyjh{WKmOIM(c1;w1ek4H-C;Y&1l-6{y6N?%Wf zD3RZ2X57W(alo1DE!frdRn1A8G#TkB8^$>0UnicshHh1;b|8~*k~)??!ff^JUET2z zwmN$cWRxBtdwfLDf`wTx;gov>hD)Ot8)DJr_-Vzx&-Fgb-vN0k&BtpGS++;QBPZ$x zcOGNIbl`p4a33U1H3Q~hvM*?sa$mKSk$f)r;l>{=$L+) z=e~3E$dL3;4pkAmMs5LfBxxu7tLWq{^x`K=ZV6C{Qqz1QuV>EO$4Z5 z8=&7ow0dtEs5Bs$&)~cr&yYy#8GD_W8Tauh5F38S_wb)UX*c z$%$3-xT~{QMsP6A)n5AgED~|gg$mOACr>hxxu0OKRXcZFJ$*4IIH#Vf}X6V|@7(1XjfDj}>8jU6%@$Utm@0D9=9U{;! zodz{v_N#Y6<)QU(f1T2=KRuH;gjp0_Q;JVvCrIBWiNzM)$uRTERh&8+fw_|-iLMEJ z-!@M1M$&oA#w4YEs;&uIM9sj>kodL^nr~SLBU#Fbxwo^=0y{`T>uVQpTi6C80@Bd9 z^3S+mlyLy6{)qQ<>)w(`?qbXKi}ljy9TbEw^kr6^h$#f2i1ER1m~AoBA}klHj1v2N z#b|OC!uyet(lE@a+JL@-@kiInn{+Y<;igIYvlEO$Z(BH|fp6tkxrO(tX(=2}4Y=?8 zm;@j_$hV3cnA9HDufv$7bmb~c;OzfUpIP7p=**ok78)rFUSTH}ASy!taT6ScegRag z7irH!bSi@PEc$r%1K=bi{_@G7PmHz=mVmEHqhSQqWS`>uk-inCHHZo0;j&cgBs>zb zvYBSK+~3f}`TYC^^e~t2)F&C=2XUH15Eom3{+@=DJK?(i?+)e`QJAa)Yvz7d2-CcS zUW^koeicNTzE&a~>$g5IZ)p8l>im>oO=D$p^DnI@v(X=07#@u+q=<4x@SLSIqUh^b z3H0@LV6z57;5Pex7EuN}>yH_Zr{n@PzZ0X%RDnF%({Lxj1x~A^hnfvcEJongoTt0y zniT@{mcx%^3iUu~=jmp1EZKxR<_6+DcuoEi@X@ej@^M0YeJRZE5ftXrPVVXeeu7uu zZaf?|0kg1vB`QiIG1^`3dQkVLz|-KZZNe7f6P=3Mh|QiEeeBWBV2gI-PXoF_3n-z4 zn~a{k^wkAO>IeJlek)l-K+`gDvmMof*`uDDMsxzGX8i)jH8=N0?l^2BDFXiSx-N^n zk2E;%!j3}Zg6dQM^rEEb5{!{YbYCU&!r6+h0-V|^JK>AsU4(ga24#W61dPi$;)+s+ z7$0EZ2)HJ(-NvU$GaqG#@Tlxk?l3uiUpUr~yAw8C-)v8b3KoylaR-_SSUE#}WJ~GU-vl;D|>VpVwuY4ZwL%lFuzW50N zqRDRC^ot9Z=)MEpmHDRW=OXt)Tk5A_r~p|lrmY~37+8tj1OzZp7wwtEWv#u)uhn}Pzhde# z<5cojT)-{91Ejc;nr{%QAdb~km8cIE1C#>B8Ywa!=-%fqxgS4wbi##x0eoGYEoH@3 z9-Y)n)Vt&<^KY;A4I~+QL%$QThl4RL-S2*$DCz7U^7l)8K^VZ~?x2LMXo3tC)hY1& z0Q5?$$6=R7cThEw#NivsQ>BB-n zLy*>P(lmVE$l?LO(;N%V;Kis%EzBw%ivt)qx9Qf zHTT;cgMAM#4?6o+fjq(457|B-!?5?^aNpd4t17s-+-FJ9^1to-Dwsvl`pL6a^Uyb+%7~yggiJITGi+Zv#fR;uHmy;fg?Yp491PXm$f% zm^_~ZYbZ-SA0|>XXPvp?9x{-U`Rpq#S zJ#0il*TwgyMYAKM54^y=y#4CCu|f?Cpde|^(nMMsniv{3l8Ot+MtFew8$sOFk^n7l z#GL>h@dW)fS#0Ce#)Rs7>k&ij7u{@R--C0m2@K6$J=;2O=Q;J>dq`|h3YGn zKL|Gl)^r6-D-s~R_WUC`_f_(pMNmDX+nqd;b|*k}jL$3;C;)ac#t!i{LdF{7p!;R+ zoAioS!MtlwQuou#SqR-Li(-E34?b8gvJKa(3*W~42J*1|0@#@APqPJWfim2nbXfga z29_*@>6pjFYi~?;#eaoF@tH(JlTDH%7BTKLhe=CBk*4rzd7mBN%{7AQ5na`STbZ-n zA_uc{FES$rNdbSXKp}1aoyVB3WrlDn;@(FALpVT|f{KM8UHEptn_Z`br%4ixnzC(c zp(A_~Ms?rM-*Y`(?=>`ezEtDPMT!W<1_nPMiAES()c>plj6k9d*NaLx8^wM_UO06t zM<;S>(gdE~H2*o?kKQy#ql3;2GSnf1=^ifcX?iZRmkof=yX)#$(xcsPm84YlD}CB7 zeyjEHPpdh^LK7u+!3;QZsm%VopJ?f6D+5E+^TN*0k#S6HlMRn-72XlsK+2Ax6ncI{ zlZDLd{jL1XKgW3%I?U>GPAe}pxL3R9mj=ym9Qp{g7z=SUK{8tt zUd|_elS9Y%CJx#2=!de%o|kCSB)qSCH=vC$NK#xp0Su|-CTR0Ers}LAScbfK9vn=2 zu@H`X5yJrS&Uyp|;^c~n5nf*K7E%^x-7Y*3C_$S3y*8)te3cIhPxMUMeUoz%wGI%X zlXCi)J+OFF`fB9VoEUA4!uO=%C(;Y4>YBFGc9RaW#Q{q;3SgYFlh{ZhaqoWTHQz0I z+~=SW6asnvU1GB)?o{QW)4J0wGT$br*z$YO*)7#1oZU;?fiOkdW9UTX1tEBWK0zn* z0Dw51ckQBOz7&AqXBz5y&m_vA8W34>{BXeB`B8fX{V{A8XO@f*V4|*!JzS_LOiD=vys~$*>qeRiBews;7u>1rp5TU~6NvZ;aL!bhWTiYpLc{Mt)L)t(~Lzv!IK$=97pSQ}M~Q z_XR2>wx0#DgKN6+)OWd7VQbq|Nq)~QI?D3MYEwP5z6v>{+$M>Gy63l~Q_;eOH6xQR z?pE(9buZN|zVCmw*zwtA{k*U_s=e!FIlydUXkXUIHwVr+NAMO0cIKR)Z?ZidJ>tNB zGCJCt%nDFtC<6DL3x1UMX*`d>D9`9n` zwzwto@Uv#$BxcF^;i~a8u57dkncj0k0(AZNk!l%q#cB`oIWUyF@zAEenu)>9n;7jg zms1#!d2vMd7aYApXrWtwZyKHv&nX0~gm%^PZ*hNicVh%MR6?W&s4tr&vDg6ag9^RgIUpz=EKZF89%q(ITH~`)zX0=az zWc4N~giLInUYR$ZZjJsm?=VsytfZNx-l4CNiV_*J{kZ7=`Mt|K;MvnBEX6cwrH9xe zu>mstV|!<-h5zvhZnZI0L&mhIj(h?kqh03mMqM(mG#SwxU5c|89lZn!Upv~yy1sZj zFOlKA#Y$R|3N*q=rUt%Bv}|m^inTnbP8;gqvPzR&PczQnJiERj>g-h3E71%#H3}=f z0Nijp+xOk-S8nW8?r6`E%Vem8{Ql{{b2T3UF^Y0PJ_orag;zoojNI4j7ou!9IA1h^FjTMR!&v7Kw;*ckb2Anu zqe2DfpczO+lC)a4R!ye=_=hY(e%B_WkCX`vqUbjhYub3s*TyTzDP>*-#BU0BlgzgT zT|`o=B4!*bv+bt&QEv;5hRx)S=VwpYY4`jzdOYFNQb^{mzpJsA{$On`eEYAk6bSkb z{<-NLGPL%vtrLOVHJGxE0mpd@lCB>B^}!dru-~Th+|y!6I7BQu+2A@{V4`q1BIvYXfi;Gnze!Q{JXtj>KXf%gi zT2^>mn{yF_yVFxk&CDPCM1>%DtQI0Ns%4|U#?HGLZJ5c<{Z1cw+k7asDJ)Aw$ zZfbwA8JCtGj!A8T+enuA-?!1TMfUIqRA5o7B@N5T%8z{J&v$;<7V-N%U=wG>L4h!2 zz=EltAf%N$$D$<#2~K6IaXzY8!p!M#_MXsd(4wJ_Mo>p(F_GbKVn{i)UB)L~x9)NYzx!%*{`fT%*fJ>>9TkZYdh!libyBEk2Ns&fi3O%LJd5VyF)7Zl+dBh~D zs9&><>vJUOMt{8S;4l&xLV*e-+yZ50Df=o`c@tM>zjNsLca1?m#kwX^@;!gNfQzQI zYGb$7wCwj+cQ@L}zHco5n%*epd9c!PaJc{BFp?bUHI(*AznuY<+Imf;Jyh-+e15!= z*bQ-qT#8hl;roz7PjqfA&S>aKXxf`2n46HldMa>z!?2-9t)Oa@2I|>4#sW^lI}gp) zPy>aUsITt};vC}8SbVZ)g09I6Nk{Rr38Usm-}iwd4m!-Q7AYV-GI09^!Uj>Sd8EuH z8*zG9e@{TxZW9t8f2ndC6@Mep{$USa{M-GmYw3z?pJ&0YX8v->c`F?oAkW(TM68p> zyCMZM6*=Kv<(1mu&=Q4}P}7nkl>SWp0HPc^_OI%ps8n?|&pWuHpL#YHMb}Hzf08l6 z{4!yg!UR4G0;8B-hTn`}vbqc@=y|omst{><|D^iF1iYgJlAHE*p?dd`vdJTrIOxP* z)EPH5E^*vqze46K}YJt2T&KWOXl($WosCXXq-~*JB3q*IBn?dAPZpcn(8p`6m7$!l!st2x? zRTLb-^9+(u)_8LRqiYdTNP7EX2I2aTfuNe+^k3J<2{GongL{rHDrs~{HBXlw$!s1X z3r%vWeSMB&8Z!K6*Wv;SdCLN?OCm}+cZNPs1HKD6rxyl4zWem9!Wx42@Zww6DaP`* zHorkXFJ?VH9z4T5LD?!nxZGKTY#RX!@kX;2*yxJ?*3T2S?uv0n5Mq6k%kh`UyYV2+ zE#K$r2N19hfBW7v-UE>RI{_M(5KZ);m`4x7(+iBfKLEwJt3DCw7y6fUgm-Rat)X5n z6RLU1^0;HqlItn-ClOMA_3N=YOJeY1?3!r#RfC)#PLBUj>ANSdY@!svRnv!hqxQn} z;XKrX$<3c6ew&0X+6jcJl|KsED~i1@^{Uos-PS9?lvW zbxGA`0)K{KGs=Bi;wcQ6V-q(;{SqkTsd_wVt>w_Zq(m67Yw}%}Df4r8!;?mU0C9Ah)IqoR}k;)4bbK`_PL!gl4Iv@GWdskWMOXVGo?8 z`G`1_>^8eoeZVw|S^lntRzwpb^2YjYxO5(@8rf*->j|ehjgQXTK~QIh&I+rwReX;) z^us7TA~2JsC54LDj#qP|iC%imJo8HUDk!lXI~gun^qKsGmV_%~;a0CeGHxW4N8|xXV;ZjE5I3;x54UGQtlf#1lbZ4evBwno~zbL%^NY{udIW^5i@-NX@4am zHaUq2f$k4UaYXdk%*~ug_0I=i3|c9Sry%aKmKxp~m`wPd@iH-;2p;~aNyJo941U3f$=t`aCvbD>z<;F{g1+b) zK7->eglr-~y}5+eJye2=e)(kM3xf~NJ_&QCNK5(im0g^xWjrxCtT&@cEtB{+j-_L+ z{3jD#T>!q>-MCtI>0>+Mp6s{NdHNBR>&TivPG2X}0C~2?a1PRjJE-P;)qe{(=s0B0 zho9T!bTSus!yoyowjk0$?kp1jNedwUABlW7BH)rHORcn|giHV2ccuocUd4Vy5UBu9 zN;Upl87NCfM9v2eUSTrKt(NaxeOc;a`589jPz1^sJet7S+yv@Uo8Dw=rZ?^fbu2s7OF%AV2e}kasWq;@%pTW|2vpoRg!K8Sso>v@ zy=Gs@cn8-D8~cmmz)v*DMqC-3=yIL-rXX1M)(5&5_h^nL+;Fp&_O<+2Yo2nuvJb?_ zbjdo;vx4rPLwaK0U8tHUu`5ph*d2SQ4&aTMC`h1c?)WP$u z{FYw?Sis!gLqiZH*h6~s` z_H5{&VHJwJ{MQ9>`+j*?0YnhsQ;DK)4xJMR)d`MikG(b)fc?iS{UM2cm?!jYbXvxY z@~z46>U^0>)VL)=W}rFk!jj?DF6+t)6ogq@|LyF^ln~Ta`K1?cbf4d=bhdg|qVBmJo`Es=R{~jy<VTo&eN6A+uE=U^aL z6go7mcRf=q066$Ow-KXUz-LplO(lZrS(_TMDhlKpF7UfRLW6K6PB?YF0Xj1|4FQSY zI<@8Fwbw%W>T9vo%{e3|r7d8%cH*Zm4AYQdE7AyWD>hUn0%=pkS_zq4!0>Nl*zX|L z2+_-9ZrKRwfAdg{h*X~IIV3@$zahJhI#fecOA+;JJlhcLv2(;=V)O+VTgExM=j`5I z2a*xIx8N(SMA12f5z@z(u?MH#!@XJ*DMJC)Klc~p0SM_0N`=@G2lGPJ)<;Fd=fMl? zk>tmIZN@|yk1vq-Z$r+}TZ)LJ0;8fA{DBZo!Qa7eW+O7j2NI_p9hnV{)fyt+1?S~^ z#JlUEDZ)YJZ}UU6NmpRnjn~|H;SPk+mHZq??f#qBh5Zc8kGWPlW$Fmbn^GTxfG@;q z93JDeYNu#dsn{iN!)$F9N;5c*zs3wVz>Rmt?y`{j>aVxBQ2nDronPNdiAA8Xhk+ zGKQm-RZv^5eO0{ZG$4if)MyGLO=QEP!eHF23`GGP)>rir>Pj7)u0X&^F1P^G>_uak zmYyLO-HX(0{w#UHPG3aYrV2wZJk5?)^$D6*!NKVXpZ_O1#~fgA4%ZLNqUw+r~VzzBIN@23H1cSGkOtiBrbK1+b59YjVgT)IFMG|l) zf=&fcS7O{hvZpUB_bW9#KSI1G;thVr-W|usz}gtY7S$A~FH;V-Caw?akv`3yqETTK zBJRu^(9$AbRs{me-<`liVw}3j^^#94&sl_f8PZk_ni)M_=lFHb5~G-gkpEr)Kw9qw&(538)#MA>g z6>(w;_p%du{Cq@-k~TUF)5Y6}0kw;U{+ipi%;=FCVu%8~9H?xBLY|1;*Jm3FbT;2R z15}skz5asn$sP&T3B$wtzlIK_0#DFCU2Z2ktu+&v`<1DolHhYH+CTHK(}j!IH{G@m zM0D@Ol$9h>P6@P$B#HdEwinY$L&94oZSj)5vxrQVN{QS&UvknWc5s)kQ3kLW>;8_0 zFse`HSDa0K(napXTgHSK;#A&F{a}s907N@c2{$kA&ilFYlVN@yvCkvmSh9$97sPF5 zMNWH$a-IyT#A}+MRygR4-rsOC0j8a=a3d^PWs635)H~}YM)~f?r4&wVhP~@jQNy=& zsVtXOS86ic%s=va1#WL{&R;^045sT@#D{uysonyWc1P^gr01J$Tk*2SYjv#y982{= zfyS{~Uo%+J+x%XCB^!KxXUp|R*r!7si|pr%)3>0)I&YZHF_wJ&Xni@^jE9qM+YjBt zuArb!m79g~C1+|30itOtS*N||MzTiI(M5P)C<9?kk&Ki@Pz56lPa>(*Jtd@)tWQ)% zMgh?=@b+D$(-gXVbN*aLoB`8dNDak?^|dfeGhG~LjF9erdlSl-`>Q+l6dn_NmUs+| z`GzxZx5(#xLE3s8z>t%~-U6Ki_qZ^ONfUDhN77OOiSh`d zDT|r1h`>=^C!9oG#)TF0T?}luA%EjA#vLuU;wSG6xY3(G1zH@lia~VBQP*$@#}`0TK4^9;*{UcZ@!3OoD=ht=^9;+^p@&S@(luI%8#XYR%}xf6reNlaAQ7@WViOS37sJ$(uM6Op!UO-_Cdb&M+NKz!V5 zQRQ?t9ll|MP28Qz{tPET<~?U=Km43zvO6d5_7_5roBz&441z&F?pB&y*P-Z8dmL~c zBGrEW4J#bF#tlzwD%eI^x`$!NrUO_jfn`u<-?B}oQFGb^qA8rg_Wx>9LqG;@k?@6DcfTgkmUW9 z``#Y6%J`hsmW#&4iTLwngx94J^F!6XOPD=_Xj712B2aNW#ZTNIuWH`S&#JVoYwq-- z-EyP~wUcHq&!XvXrqhf`JJtVYz5nS_NlXIvt_L+&umSIV>8QXyVxT9w@wJ&l6aqyg z@eoSko!HTxh9}Y1+{(;jcV9i5$0nlC_beYQe7y6H*xZ0zac}yipdPu_KGBzROuQuY zr?Yl_4>ZzAHR@bgJ?di_5K$Ym{kF4NLV`LhL_M9OSPB5^ALVu1{)ze=BOZ})Vw2x9 zISf-#4g?i`>2YC=i5gqq*BTyAt%9>@b}?d-60-}-d;mEgJ#Q7hSCKg%4;h-*^)x?)(s0Gli}~*veHK*cILj=~K95;2zoZ+gQY`jN+=NV{;sFUJ)K zswn;K;R*l9j)~(kjbdopT)1i70<3uc?_wWIb$|==RET6P?=8iT$j&9Zu1uO}rQ6(S z>_&d3kz8rHA{Xh-gExMwFITK_*@`&ew)wK(+%=5djQ-qU!z|#Q9CoEJq_Bk!+oxbo zB1s{2Kb#A%6sFUsj+2zF5dB3kdaccSg?;TcZ4O-p9--sxq$rS{cvo_-1nRr!(zj98 zyd0e@M%>!ul38tn8Zx@xmfWl6V`=`$ZqthkR!&jGv|%W!L%aECoVTrp5EyK}Ha^Pd zyLPoDe&`X(y-ew?2m}B55KaT3P-4E2>xm6@(Uj z;gNeufHYU*k7KPBmEwP)`fk45_H+0-Q>8dO7XOJJ5$#aPrilPupRU3kj19b~35*}xa##4Xf!IgdG0Qj#2w8WW0LIU zS}((}0sv_6h_dqLHVTpHtEbPtmWQ%^plVB^_lYc|KK4FHN73g`f1pqPPDi3QiE17I zNwWy;z9`)rEFJW3VmMGn!5-{hWw$7Oq{6WH7CJHhNi4gJ0_m{J1S|#bEeLw$e z12UL1#9hgiSOL~(rQmr&`DfCL-#DdD^aOzdS(px$%VVqnlUDW5Hogu$94P-r)M2im z?3C3(#^&4b^b$#hV06s1VdY~6CAxAgF_vy(P!yCsPF>UqRl3adkr!1De2-ZQd3_}o z>rcR=vYy;OF@QJQi=s6X*yvPWcaiXZwukFJy)P=3EJt;7Wtk!O9VWj%mB=ZpxYKc` zqPV@&1wrS7H%FLAg$PcLGvrJ+3VoF48BOA=C0l0`--Y8O7s$0hoXA=3^B8T*t@itaPziTqp@o9(P}RurfsSYlGj-?`k52D9iZEK3cA)JP6|R zoBd~G*su3DcZ)hV(pzHU4P%m*)*saO3;Z@}6lJ_(^=oiLXYuejqDAW-dSo+O7ooZ` z2E|-PH($YVGvsyXe5ON-(#8}W&0%@YkE2#?W`Bmmp(9m*rgg&-^C;KmVhiJWCoaxS zw%v$=IvoI~vn$aN- zxEvx)c@5t4-?WZSA&rL1Jom!>wH4z}+zkjxg9y(bLjU@M&^kH;p@l4Rk?Y^Jju@1o z#1eb|y6Rt=RY>b70512`g_zKPd%0B5I_kcuHvI3d_Y4`*IvP>u%>H++BQZ!F09YtT z`JWH^pC|hNZs{Jja$MpkK62#DnRl% Date: Thu, 8 Feb 2018 16:53:22 +0800 Subject: [PATCH 097/138] refine error msg and add unittest for cpp reader --- python/paddle/v2/fluid/executor.py | 29 +++--------- .../paddle/v2/fluid/tests/test_cpp_reader.py | 44 ++++++++++++------- 2 files changed, 35 insertions(+), 38 deletions(-) diff --git a/python/paddle/v2/fluid/executor.py b/python/paddle/v2/fluid/executor.py index 0eddcc3a5ab..01cbdb3ec48 100644 --- a/python/paddle/v2/fluid/executor.py +++ b/python/paddle/v2/fluid/executor.py @@ -47,27 +47,13 @@ def as_numpy(tensor): return [as_numpy(t) for t in tensor] assert isinstance(tensor, core.LoDTensor) lod = tensor.lod() - tensor_data = np.array(tensor) - if len(lod) == 0: - ans = tensor_data - else: - raise RuntimeError("LoD Calculate lacks unit tests and buggy") - # elif len(lod) == 1: - # ans = [] - # idx = 0 - # while idx < len(lod) - 1: - # ans.append(tensor_data[lod[idx]:lod[idx + 1]]) - # idx += 1 - # else: - # for l in reversed(lod): - # ans = [] - # idx = 0 - # while idx < len(l) - 1: - # ans.append(tensor_data[l[idx]:l[idx + 1]]) - # idx += 1 - # tensor_data = ans - # ans = tensor_data - return ans + if len(lod) > 0: + raise RuntimeError( + "Some of your featched tensors hold LoD information. \ + They can not be completely cast to Python ndarray. \ + Please set the parameter 'return_numpy' as 'False' to \ + return LoDTensor itself directly.") + return np.array(tensor) def has_feed_operators(block, feed_targets, feed_holder_name): @@ -306,7 +292,6 @@ class Executor(object): core.get_fetch_variable(scope, fetch_var_name, i) for i in xrange(len(fetch_list)) ] - if return_numpy: outs = as_numpy(outs) return outs diff --git a/python/paddle/v2/fluid/tests/test_cpp_reader.py b/python/paddle/v2/fluid/tests/test_cpp_reader.py index e71c3a290c9..970f57ed000 100644 --- a/python/paddle/v2/fluid/tests/test_cpp_reader.py +++ b/python/paddle/v2/fluid/tests/test_cpp_reader.py @@ -32,31 +32,43 @@ create_random_data_generator_op = block.append_op( "min": 0.0, "max": 1.0 }) +shuffle_reader = block.create_var( + type=fluid.core.VarDesc.VarType.READER, name="ShuffleReader") +shuffle_reader.desc.set_lod_levels([0, 0]) -out1 = block.create_var( - type=fluid.core.VarDesc.VarType.LOD_TENSOR, - name="Out1", - shape=[10, 2], - dtype="float32", - lod_level=1) -out2 = block.create_var( - type=fluid.core.VarDesc.VarType.LOD_TENSOR, - name="Out2", - shape=[10, 1], - dtype="float32", - lod_level=1) +create_shuffle_reader_op = block.append_op( + type="create_shuffle_reader", + inputs={"UnderlyingReader": random_reader}, + outputs={"Out": shuffle_reader}, + attrs={"buffer_size": 7}) + +batch_reader = block.create_var( + type=fluid.core.VarDesc.VarType.READER, name="BatchReader") +batch_reader.desc.set_lod_levels([1, 1]) + +create_batch_reader_op = block.append_op( + type="create_batch_reader", + inputs={"UnderlyingReader": shuffle_reader}, + outputs={"Out": batch_reader}, + attrs={"batch_size": 10}) + +out1 = block.create_var(type=fluid.core.VarDesc.VarType.LOD_TENSOR, name="Out1") +out2 = block.create_var(type=fluid.core.VarDesc.VarType.LOD_TENSOR, name="Out2") read_op = block.append_op( - type="read", - inputs={"Reader": random_reader}, + type="read", inputs={"Reader": batch_reader}, outputs={"Out": [out1, out2]}) place = fluid.CPUPlace() exe = fluid.Executor(place) -[res1, res2] = exe.run(prog, fetch_list=[out1, out2]) +[res1, res2] = exe.run(prog, fetch_list=[out1, out2], return_numpy=False) + +test_pass = res1.lod() == [range(0, 11)] and res1.lod() == [ + range(0, 11) +] and np.array(res1).shape == (10, 2) and np.array(res2).shape == (10, 1) -if len(res1) == 0 or len(res2) == 0: +if not test_pass: exit(1) exit(0) -- GitLab From e4d9989af0ab4d49d5b04dc95834cbac20c9eed0 Mon Sep 17 00:00:00 2001 From: Liu Yiqun Date: Thu, 8 Feb 2018 09:32:28 +0000 Subject: [PATCH 098/138] Simplify the cmake of inference. --- cmake/generic.cmake | 14 +++++++++----- paddle/inference/CMakeLists.txt | 15 +++++---------- paddle/inference/tests/book/CMakeLists.txt | 20 ++++++++++---------- 3 files changed, 24 insertions(+), 25 deletions(-) diff --git a/cmake/generic.cmake b/cmake/generic.cmake index 33ef6860e1d..1cb54ba2164 100644 --- a/cmake/generic.cmake +++ b/cmake/generic.cmake @@ -179,20 +179,24 @@ function(cc_library TARGET_NAME) set(oneValueArgs "") set(multiValueArgs SRCS DEPS) cmake_parse_arguments(cc_library "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - if (cc_library_SRCS) - if (cc_library_SHARED OR cc_library_shared) # build *.so + if(cc_library_SRCS) + if(cc_library_SHARED OR cc_library_shared) # build *.so add_library(${TARGET_NAME} SHARED ${cc_library_SRCS}) else() add_library(${TARGET_NAME} STATIC ${cc_library_SRCS}) endif() - if (cc_library_DEPS) + if(cc_library_DEPS) # Don't need link libwarpctc.so - if ("${cc_library_DEPS};" MATCHES "warpctc;") + if("${cc_library_DEPS};" MATCHES "warpctc;") list(REMOVE_ITEM cc_library_DEPS warpctc) add_dependencies(${TARGET_NAME} warpctc) endif() + # Support linking flags: --whole-archive (Linux) / -force_load (MacOS) + target_circle_link_libraries(${TARGET_NAME} ${cc_library_DEPS}) + if("${cc_library_DEPS}" MATCHES "ARCHIVE_START") + list(REMOVE_ITEM cc_library_DEPS ARCHIVE_START ARCHIVE_END) + endif() add_dependencies(${TARGET_NAME} ${cc_library_DEPS}) - target_link_libraries(${TARGET_NAME} ${cc_library_DEPS}) endif() # cpplint code style diff --git a/paddle/inference/CMakeLists.txt b/paddle/inference/CMakeLists.txt index 654a6119bdc..bdb147955ca 100644 --- a/paddle/inference/CMakeLists.txt +++ b/paddle/inference/CMakeLists.txt @@ -4,19 +4,14 @@ cc_library(paddle_fluid_api SRCS io.cc DEPS ${FLUID_CORE_MODULES} ${GLOB_OP_LIB}) -# Merge all modules into a single static library +# Create static library cc_library(paddle_fluid DEPS paddle_fluid_api ${FLUID_CORE_MODULES} ${GLOB_OP_LIB}) # Create shared library -add_library(paddle_fluid_shared SHARED io.cc) - -target_circle_link_libraries(paddle_fluid_shared - ARCHIVE_START - ${GLOB_OP_LIB} - ${FLUID_CORE_MODULES} - ARCHIVE_END) - -SET_TARGET_PROPERTIES(paddle_fluid_shared PROPERTIES OUTPUT_NAME paddle_fluid) +cc_library(paddle_fluid_shared SHARED + SRCS io.cc + DEPS ARCHIVE_START ${GLOB_OP_LIB} ${FLUID_CORE_MODULES} ARCHIVE_END) +set_target_properties(paddle_fluid_shared PROPERTIES OUTPUT_NAME paddle_fluid) if(WITH_TESTING) add_subdirectory(tests/book) diff --git a/paddle/inference/tests/book/CMakeLists.txt b/paddle/inference/tests/book/CMakeLists.txt index 63afeb18aeb..124e0fb9ba6 100644 --- a/paddle/inference/tests/book/CMakeLists.txt +++ b/paddle/inference/tests/book/CMakeLists.txt @@ -5,23 +5,23 @@ function(inference_test TARGET_NAME) cmake_parse_arguments(inference_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) set(PYTHON_TESTS_DIR ${PADDLE_SOURCE_DIR}/python/paddle/v2/fluid/tests) + set(arg_list "") if(inference_test_ARGS) foreach(arg ${inference_test_ARGS}) - cc_test(test_inference_${TARGET_NAME}_${arg} - SRCS test_inference_${TARGET_NAME}.cc - DEPS ARCHIVE_START paddle_fluid ARCHIVE_END - ARGS --dirname=${PYTHON_TESTS_DIR}/book/${TARGET_NAME}_${arg}.inference.model) - set_tests_properties(test_inference_${TARGET_NAME}_${arg} - PROPERTIES DEPENDS test_${TARGET_NAME}) + list(APPEND arg_list "_${arg}") endforeach() else() - cc_test(test_inference_${TARGET_NAME} + list(APPEND arg_list "_") + endif() + foreach(arg ${arg_list}) + string(REGEX REPLACE "^_$" "" arg "${arg}") + cc_test(test_inference_${TARGET_NAME}${arg} SRCS test_inference_${TARGET_NAME}.cc DEPS ARCHIVE_START paddle_fluid ARCHIVE_END - ARGS --dirname=${PYTHON_TESTS_DIR}/book/${TARGET_NAME}.inference.model) - set_tests_properties(test_inference_${TARGET_NAME} + ARGS --dirname=${PYTHON_TESTS_DIR}/book/${TARGET_NAME}${arg}.inference.model) + set_tests_properties(test_inference_${TARGET_NAME}${arg} PROPERTIES DEPENDS test_${TARGET_NAME}) - endif() + endforeach() endfunction(inference_test) inference_test(recognize_digits ARGS mlp) -- GitLab From 9efa8d86fea9fbcc87b5e7bd2d5cad207d231739 Mon Sep 17 00:00:00 2001 From: Shan Yi <35982308+shanyi15@users.noreply.github.com> Date: Thu, 8 Feb 2018 19:36:24 +0800 Subject: [PATCH 099/138] Update index_cn.rst --- doc/howto/rnn/index_cn.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/howto/rnn/index_cn.rst b/doc/howto/rnn/index_cn.rst index 9ecab5594cf..bcc8c2f46eb 100644 --- a/doc/howto/rnn/index_cn.rst +++ b/doc/howto/rnn/index_cn.rst @@ -1,4 +1,4 @@ -RNN相关模型 +RNN模型 =========== .. toctree:: -- GitLab From 7c880522542093e0ae23c49e3ed24e3c4100d3e1 Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Thu, 8 Feb 2018 19:51:20 +0800 Subject: [PATCH 100/138] Optimize return all opt ops --- python/paddle/v2/fluid/framework.py | 3 + python/paddle/v2/fluid/optimizer.py | 15 ++--- .../paddle/v2/fluid/tests/test_optimizer.py | 56 ++++++++++--------- 3 files changed, 39 insertions(+), 35 deletions(-) diff --git a/python/paddle/v2/fluid/framework.py b/python/paddle/v2/fluid/framework.py index a12427258e9..a517db68c58 100644 --- a/python/paddle/v2/fluid/framework.py +++ b/python/paddle/v2/fluid/framework.py @@ -740,6 +740,9 @@ class Block(object): raise e self.desc.remove_op(start, end + 1) + def slice_ops(self, start, end): + return list(self.ops)[start:end] + def prepend_op(self, *args, **kwargs): op_desc = self.desc.prepend_op() op = Operator(self, op_desc, *args, **kwargs) diff --git a/python/paddle/v2/fluid/optimizer.py b/python/paddle/v2/fluid/optimizer.py index 7844a4e2df1..f8a00e3a5fb 100644 --- a/python/paddle/v2/fluid/optimizer.py +++ b/python/paddle/v2/fluid/optimizer.py @@ -190,6 +190,8 @@ class Optimizer(object): # Create any accumulators program = loss.block.program with program_guard(program, startup_program): + global_block = framework.default_main_program().global_block() + start = len(global_block.ops) self.helper = LayerHelper(self.__class__.__name__) self._create_accumulators(loss.block, [p[0] for p in parameters_and_grads]) @@ -203,19 +205,14 @@ class Optimizer(object): param_and_grad) optimize_ops.append(optimize_op) - # Returned list of ops can include more ops in addition - # to optimization ops - return_ops = optimize_ops - # Get custom finish ops for subclasses # FIXME: Need to fix this once we figure out how to handle dependencies - finish_ops = self._finish_update(loss.block) - if finish_ops is not None: - return_ops += finish_ops + self._finish_update(loss.block) if self._global_step is not None: - return_ops.append(self._increment_global_step(loss.block)) - return return_ops + self._increment_global_step(loss.block) + end = len(global_block.ops) + return global_block.slice_ops(start, end) def minimize(self, loss, diff --git a/python/paddle/v2/fluid/tests/test_optimizer.py b/python/paddle/v2/fluid/tests/test_optimizer.py index 480ee709157..dc6b84dcdc0 100644 --- a/python/paddle/v2/fluid/tests/test_optimizer.py +++ b/python/paddle/v2/fluid/tests/test_optimizer.py @@ -42,9 +42,9 @@ class TestOptimizer(unittest.TestCase): type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}) sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.01) opts, _ = sgd_optimizer.minimize(mean_out, init_program) - self.assertEqual(len(opts), 1) - sgd_op = opts[0] - self.assertEqual(sgd_op.type, "sgd") + self.assertEqual(len(opts), 3) + self.assertEqual([op.type for op in opts], + ["fill_constant", "elementwise_mul", "sgd"]) def test_sgd_optimizer_with_global_step(self): init_program = framework.Program() @@ -72,11 +72,10 @@ class TestOptimizer(unittest.TestCase): sgd_optimizer = optimizer.SGDOptimizer( learning_rate=learning_rate, global_step=global_step) opts, _ = sgd_optimizer.minimize(mean_out, init_program) - self.assertEqual(len(opts), 2) - sgd_op = opts[0] - self.assertEqual(sgd_op.type, "sgd") - increment_op = opts[1] - self.assertEqual(increment_op.type, "increment") + self.assertEqual(len(opts), 4) + self.assertEqual( + [op.type for op in opts], + ["fill_constant", "elementwise_mul", "sgd", "increment"]) # Check init_program init_ops = init_program.global_block().ops @@ -121,9 +120,10 @@ class TestMomentumOptimizer(unittest.TestCase): self.assertEqual(len(momentum_optimizer.get_accumulators()), 0) opts = momentum_optimizer.create_optimization_pass( params_grads, mul_out, init_program) - self.assertEqual(len(opts), 1) - sgd_op = opts[0] - self.assertEqual(sgd_op.type, "momentum") + self.assertEqual(len(opts), 3) + sgd_op = opts[-1] + self.assertEqual([op.type for op in opts], + ["fill_constant", "elementwise_mul", "momentum"]) self.assertFalse(sgd_op.attr('use_nesterov')) # Check accumulators @@ -170,9 +170,10 @@ class TestMomentumOptimizer(unittest.TestCase): self.assertEqual(len(momentum_optimizer.get_accumulators()), 0) opts = momentum_optimizer.create_optimization_pass( params_grads, mul_out, init_program) - self.assertEqual(len(opts), 1) - sgd_op = opts[0] - self.assertEqual(sgd_op.type, "momentum") + self.assertEqual(len(opts), 3) + sgd_op = opts[-1] + self.assertEqual([op.type for op in opts], + ["fill_constant", "elementwise_mul", "momentum"]) self.assertTrue(sgd_op.attr('use_nesterov')) # Check accumulators @@ -228,9 +229,9 @@ class TestAdagradOptimizer(unittest.TestCase): self.assertEqual(len(adagrad_optimizer.get_accumulators()), 0) opts = adagrad_optimizer.create_optimization_pass(params_grads, mul_out, init_program) - self.assertEqual(len(opts), 1) - adagrad_op = opts[0] - self.assertEqual(adagrad_op.type, "adagrad") + self.assertEqual(len(opts), 3) + self.assertEqual([op.type for op in opts], + ["fill_constant", "elementwise_mul", "adagrad"]) # Check accumulators accumulators = adagrad_optimizer.get_accumulators() @@ -288,9 +289,10 @@ class TestAdamOptimizer(unittest.TestCase): self.assertEqual(len(adam_optimizer.get_accumulators()), 0) opts = adam_optimizer.create_optimization_pass(params_grads, mul_out, init_program) - self.assertEqual(len(opts), 3) - adam_op = opts[0] - self.assertEqual(adam_op.type, "adam") + self.assertEqual(len(opts), 5) + self.assertEqual( + [op.type for op in opts], + ["fill_constant", "elementwise_mul", "adam", "scale", "scale"]) # Check accumulators accumulators = adam_optimizer.get_accumulators() @@ -350,9 +352,10 @@ class TestAdamaxOptimizer(unittest.TestCase): self.assertEqual(len(adamax_optimizer.get_accumulators()), 0) opts = adamax_optimizer.create_optimization_pass(params_grads, mul_out, init_program) - self.assertEqual(len(opts), 2) - adam_op = opts[0] - self.assertEqual(adam_op.type, "adamax") + self.assertEqual(len(opts), 4) + self.assertEqual( + [op.type for op in opts], + ["fill_constant", "elementwise_mul", "adamax", "scale"]) # Check accumulators accumulators = adamax_optimizer.get_accumulators() @@ -409,9 +412,10 @@ class TestDecayedAdagradOptimizer(unittest.TestCase): self.assertEqual(len(decayed_adagrad_optimizer.get_accumulators()), 0) opts = decayed_adagrad_optimizer.create_optimization_pass( params_grads, mul_out, init_program) - self.assertEqual(len(opts), 1) - decayed_adagrad_op = opts[0] - self.assertEqual(decayed_adagrad_op.type, "decayed_adagrad") + self.assertEqual(len(opts), 3) + self.assertEqual( + [op.type for op in opts], + ["fill_constant", "elementwise_mul", "decayed_adagrad"]) # Check accumulators accumulators = decayed_adagrad_optimizer.get_accumulators() -- GitLab From ef1aba39a6cadaadc133e9370e5b610596c42cc6 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Thu, 8 Feb 2018 17:50:16 +0800 Subject: [PATCH 101/138] Rewrite mixed_vector.h --- .gitignore | 6 + cmake/cuda.cmake | 3 +- paddle/framework/lod_tensor.h | 24 +- paddle/framework/lod_tensor_test.cu | 9 +- paddle/framework/mixed_vector.h | 399 ++++++++++++------ paddle/framework/mixed_vector_test.cu | 59 --- paddle/framework/tensor.h | 4 + paddle/framework/tensor_impl.h | 2 +- paddle/operators/adagrad_op.cu | 6 +- paddle/operators/adam_op.h | 2 +- paddle/operators/ctc_align_op.cu | 5 +- paddle/operators/lookup_table_op.cu | 4 +- .../operators/math/selected_rows_functor.cc | 2 +- .../operators/math/selected_rows_functor.cu | 15 +- paddle/operators/math/sequence2batch.cu | 4 +- paddle/operators/math/sequence_padding.cu | 8 +- paddle/operators/math/sequence_pooling.cu | 3 +- paddle/operators/math/sequence_scale.cu | 3 +- paddle/operators/parallel_do_op.cc | 9 - paddle/operators/row_conv_op.cu | 4 +- paddle/operators/sequence_erase_op.cu | 3 +- paddle/operators/sgd_op.cu | 4 +- paddle/operators/target_assign_op.h | 4 +- paddle/testing/paddle_gtest_main.cc | 2 +- 24 files changed, 316 insertions(+), 268 deletions(-) diff --git a/.gitignore b/.gitignore index ac56a3320ec..59e650bdfe8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,9 @@ +paddle/operators/check_t.save +paddle/operators/check_tensor.ls +paddle/operators/tensor.save +python/paddle/v2/fluid/tests/book/image_classification_resnet.inference.model/ +python/paddle/v2/fluid/tests/book/image_classification_vgg.inference.model/ +python/paddle/v2/fluid/tests/book/label_semantic_roles.inference.model/ *.DS_Store build/ build_doc/ diff --git a/cmake/cuda.cmake b/cmake/cuda.cmake index 6bea7cf3022..de94bd5008e 100644 --- a/cmake/cuda.cmake +++ b/cmake/cuda.cmake @@ -181,7 +181,8 @@ elseif(CMAKE_BUILD_TYPE STREQUAL "Release") elseif(CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo") list(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_RELWITHDEBINFO}) elseif(CMAKE_BUILD_TYPE STREQUAL "MinSizeRel") - list(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_MINSIZEREL}) + # nvcc 9 does not support -Os. Use Release flags instead + list(APPEND CUDA_NVCC_FLAGS ${CMAKE_CXX_FLAGS_RELEASE}) endif() mark_as_advanced(CUDA_BUILD_CUBIN CUDA_BUILD_EMULATION CUDA_VERBOSE_BUILD) diff --git a/paddle/framework/lod_tensor.h b/paddle/framework/lod_tensor.h index be2b3016196..9de454428d9 100644 --- a/paddle/framework/lod_tensor.h +++ b/paddle/framework/lod_tensor.h @@ -46,29 +46,7 @@ namespace framework { * 0 2 4 7 * 0 2 5 7 10 12 15 20 */ -struct LoD : public std::vector> { - using std::vector>::vector; - platform::Place place() const { - if (this->size() == 0) { - // Not Initialze Yet. - return platform::CPUPlace(); - } else { - return this->front().place(); - } - } - - void CopyFromCUDA() { - for (auto it = this->begin(); it != this->end(); ++it) { - it->CopyFromCUDA(); - } - } - - void CopyToPeer(platform::Place place) { - for (auto it = this->begin(); it != this->end(); ++it) { - it->CopyToPeer(place); - } - } -}; +using LoD = std::vector>; std::ostream& operator<<(std::ostream& os, const LoD& lod); std::ostream& operator<<(std::ostream& os, const LoDTensor& t); diff --git a/paddle/framework/lod_tensor_test.cu b/paddle/framework/lod_tensor_test.cu index adea02e3b3f..a28b7caf86c 100644 --- a/paddle/framework/lod_tensor_test.cu +++ b/paddle/framework/lod_tensor_test.cu @@ -20,6 +20,7 @@ #include "paddle/platform/assert.h" #include +#include __global__ void test(size_t* a, int size) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; @@ -36,10 +37,9 @@ TEST(LoD, data) { lod.push_back(std::vector({0, 1, 6, 8, 10, 11})); auto& v = lod[0]; - test<<<1, 1>>>(v.cuda_data(), v.size()); + paddle::platform::CUDAPlace gpu(0); + test<<<1, 1>>>(v.CUDAMutableData(gpu), v.size()); cudaDeviceSynchronize(); - - v.CopyFromCUDA(); for (size_t i = 0; i < v.size(); ++i) { EXPECT_EQ(v[i], i * 2); } @@ -63,9 +63,8 @@ TEST(LoDTensor, LoDInGPU) { auto lod = lod_tensor.lod(); - test<<<1, 8>>>(lod[0].cuda_data(), lod[0].size()); + test<<<1, 8>>>(lod[0].CUDAMutableData(place), lod[0].size()); cudaDeviceSynchronize(); - lod.CopyFromCUDA(); for (size_t i = 0; i < src_lod[0].size(); ++i) { EXPECT_EQ(lod[0].data()[i], src_lod[0].data()[i] * 2); diff --git a/paddle/framework/mixed_vector.h b/paddle/framework/mixed_vector.h index 5202775515d..2a80079695f 100644 --- a/paddle/framework/mixed_vector.h +++ b/paddle/framework/mixed_vector.h @@ -17,176 +17,297 @@ #include #include -#include "paddle/memory/memcpy.h" -#include "paddle/memory/memory.h" -#include "paddle/platform/device_context.h" -#include "paddle/platform/enforce.h" -#include "paddle/platform/place.h" +#include "paddle/framework/tensor.h" +#include "paddle/framework/tensor_util.h" + +#include "glog/logging.h" namespace paddle { namespace framework { -/** - * @brief Vector support both cpu and gpu. - * host vector lifetime is same with Vector - * device vector is lazily malloc and modified. - */ - template -class Vector : public std::vector { +class Vector { public: - using std::vector::vector; + using value_type = T; + + Vector() { + size_ = 0; + flag_ = kDataInCPU; + } + + explicit Vector(size_t count, const T& value = T()) { + resize(count); + T* ptr = begin(); + for (size_t i = 0; i < count; ++i) { + ptr[i] = value; + } + } + + Vector(std::initializer_list init) { + InitByIter(init.size(), init.begin(), init.end()); + } + + template + Vector(const std::vector& dat) { // NOLINT + InitByIter(dat.size(), dat.begin(), dat.end()); + } + + Vector(const Vector& other) { this->operator=(other); } + + Vector& operator=(const Vector& other) { + if (other.size() != 0) { + this->InitByIter(other.size(), other.begin(), other.end()); + } else { + size_ = 0; + flag_ = kDataInCPU; + } + return *this; + } + + Vector(Vector&& other) { + this->size_ = other.size_; + this->flag_ = other.flag_; + if (other.cuda_vec_.capacity()) { + this->cuda_vec_.ShareDataWith(other.cuda_vec_); + } + if (other.cpu_vec_.capacity()) { + this->cpu_vec_.ShareDataWith(other.cpu_vec_); + } + } - Vector() {} - Vector(const std::vector &v) : std::vector(v) {} // NOLINT + T& operator[](size_t i) { + MutableCPU(); + return const_cast(cpu_vec_.data())[i]; + } + + const T& operator[](size_t i) const { + ImmutableCPU(); + return cpu_vec_.data()[i]; + } + + size_t size() const { return size_; } + + T* begin() { return &this->operator[](0); } + + T* end() { return &this->operator[](size()); } + + T& front() { return *begin(); } + + T& back() { + auto it = end(); + --it; + return *it; + } + + const T* begin() const { return &this->operator[](0); } + const T* end() const { return &this->operator[](size()); } - inline platform::Place place() const { return place_; } + const T& back() const { + auto it = end(); + --it; + return *it; + } + + const T& front() const { return *begin(); } + + template + void assign(Iter begin, Iter end) { + InitByIter(end - begin, begin, end); + } + + T* data() { return begin(); } - /*! Return a pointer to constant memory block. */ - inline const T *data(platform::Place place) const; + const T* data() const { return begin(); } - /*! Return a pointer to mutable memory block. */ - inline T *mutable_data(platform::Place place); + void push_back(T elem) { + if (size_ + 1 > capacity()) { + reserve((size_ + 1) << 1); + } + *end() = elem; + ++size_; + } - // TODO(dzhwinter): below interfaces should be removed - /* Get device vector */ - T *cuda_data() { - CopyToCUDA(); - PADDLE_ENFORCE_NOT_NULL( - cuda_ptr_, "No data or Insufficient CUDA memory to allocation"); - return static_cast(cuda_ptr_.get()); + void resize(size_t size) { + if (size + 1 < capacity()) { + size_ = size; + } else { + MutableCPU(); + Tensor cpu_tensor; + platform::Place cpu = platform::CPUPlace(); + T* ptr = cpu_tensor.mutable_data( + framework::make_ddim({static_cast(size)}), cpu); + const T* old_ptr = + cpu_vec_.capacity() == 0 ? nullptr : cpu_vec_.data(); + if (old_ptr != nullptr) { + std::copy(old_ptr, old_ptr + size_, ptr); + } + size_ = size; + cpu_vec_.ShareDataWith(cpu_tensor); + } } - /* Get host vector */ - T *data() { return std::vector::data(); } - const T *data() const { return std::vector::data(); } + const T* CUDAData(platform::Place place) const { + PADDLE_ENFORCE(platform::is_gpu_place(place), + "CUDA Data must on CUDA place"); + ImmutableCUDA(place); + return cuda_vec_.data(); + } - T *data(const platform::Place &place) { - if (platform::is_cpu_place(place)) { + T* CUDAMutableData(platform::Place place) { + const T* ptr = CUDAData(place); + flag_ = kDirty | kDataInCUDA; + return const_cast(ptr); + } + + template + void Extend(It begin, It end) { + size_t pre_size = size_; + resize(pre_size + (end - begin)); + T* ptr = this->begin() + pre_size; + for (; begin < end; ++begin, ++ptr) { + *ptr = *begin; + } + } + + void clear() { + size_ = 0; + flag_ = kDirty | kDataInCPU; + } + + size_t capacity() const { + return cpu_vec_.capacity() / SizeOfType(typeid(T)); + } + + void reserve(size_t size) { + size_t pre_size = size_; + resize(size); + resize(pre_size); + } + + const T* Data(platform::Place place) const { + if (platform::is_gpu_place(place)) { + return CUDAData(place); + } else { return data(); + } + } + + T* MutableData(platform::Place place) { + if (platform::is_gpu_place(place)) { + return CUDAMutableData(place); } else { - return cuda_data(); + return data(); } } - /* Synchronize host vector to device vector */ - void CopyToCUDA(); - /* Synchronize device vector to host vector */ - void CopyFromCUDA(); - /* Switch device vector location */ - void CopyToPeer(platform::Place); + operator std::vector() const { + std::vector result; + result.resize(size()); + std::copy(begin(), end(), result.begin()); + return result; + } + + bool operator==(const Vector& other) const { + if (size() != other.size()) return false; + for (auto it1 = begin(), it2 = other.begin(); it1 < end(); ++it1, ++it2) { + if (*it1 != *it2) { + return false; + } + } + return true; + } private: - std::shared_ptr cuda_ptr_; - size_t cuda_size_ = 0; // device vector numel - platform::CUDAPlace place_; -}; + template + void InitByIter(size_t size, Iter begin, Iter end) { + platform::Place cpu = platform::CPUPlace(); + T* ptr = this->cpu_vec_.template mutable_data( + framework::make_ddim({static_cast(size)}), cpu); + for (size_t i = 0; i < size; ++i) { + *ptr++ = *begin++; + } + flag_ = kDataInCPU | kDirty; + size_ = size; + } -template -inline const T *Vector::data(platform::Place place) const { - if (platform::is_cpu_place(place)) { - return std::vector::data(); - } else if (platform::is_gpu_place(place)) { - if (cuda_ptr_ == nullptr) { - return nullptr; + enum DataFlag { kDataInCPU = 0x01, kDataInCUDA = 0x02, kDirty = 0x10 }; + + void MutableCPU() { + if (IsInCUDA() && IsDirty()) { + // COPY GPU Data To CPU + Copy(cuda_vec_, platform::CPUPlace(), &cpu_vec_); + WaitPlace(cuda_vec_.place()); } - if (boost::get(place) == place_) { - return static_cast(cuda_ptr_.get()); + flag_ = kDirty | kDataInCPU; + } + + void ImmutableCUDA(platform::Place place) const { + if (IsDirty()) { + if (IsInCPU()) { + Copy(cpu_vec_, boost::get(place), &cuda_vec_); + WaitPlace(place); + UnsetFlag(kDirty); + SetFlag(kDataInCUDA); + } else if (IsInCUDA() && !(place == cuda_vec_.place())) { + framework::Tensor tmp; + Copy(cuda_vec_, boost::get(place), &tmp); + WaitPlace(cuda_vec_.place()); + cuda_vec_.ShareDataWith(tmp); + // Still dirty + } else { + // Dirty && DataInCUDA && Device is same + // Do nothing + } } else { - PADDLE_THROW( - "Unmatched place. Please use `mutable_data` copy lod to the target " - "Place first."); + if (!IsInCUDA()) { + // Even data is not dirty. However, data is not in CUDA. Copy data. + Copy(cpu_vec_, boost::get(place), &cuda_vec_); + WaitPlace(place); + SetFlag(kDataInCUDA); + } else if (!(place == cuda_vec_.place())) { + framework::Tensor tmp; + Copy(cuda_vec_, boost::get(place), &tmp); + WaitPlace(cuda_vec_.place()); + cuda_vec_.ShareDataWith(tmp); + } else { + // Not Dirty && DataInCUDA && Device is same + // Do nothing. + } } - } else { - PADDLE_THROW("Unsupport Place."); } -} -template -inline T *Vector::mutable_data(platform::Place place) { - if (platform::is_cpu_place(place)) { - return std::vector::data(); - } else if (platform::is_gpu_place(place)) { - if (boost::get(place) != place_) { - place_ = boost::get(place); + void ImmutableCPU() const { + if (IsDirty() && + !IsInCPU()) { // If data has been changed in CUDA, or CPU has no data. + Copy(cuda_vec_, platform::CPUPlace(), &cpu_vec_); + WaitPlace(cuda_vec_.place()); + UnsetFlag(kDirty); } -#ifdef PADDLE_WITH_CUDA - if (cuda_size_ < this->size() || cuda_ptr_ == nullptr) { - cuda_ptr_.reset( - memory::Alloc(place_, this->size() * sizeof(T)), - memory::PlainDeleter(place_)); - } - cuda_size_ = this->size(); - platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); - auto *ctx = pool.GetByPlace(place_); - memory::Copy(place_, cuda_ptr_.get(), platform::CPUPlace(), - static_cast(this->data()), - this->size() * sizeof(T), ctx->stream()); - ctx->Wait(); - return static_cast(cuda_ptr_.get()); -#else - return nullptr; -#endif - } else { - PADDLE_THROW("Unsupport Place."); - } -} + SetFlag(kDataInCPU); + } -template -void Vector::CopyToCUDA() { -#ifdef PADDLE_WITH_CUDA - if (cuda_size_ < this->size() || cuda_ptr_ == nullptr) { - cuda_ptr_.reset( - memory::Alloc(place_, this->size() * sizeof(T)), - memory::PlainDeleter(place_)); - } - cuda_size_ = this->size(); - platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); - auto *ctx = pool.GetByPlace(place_); - memory::Copy(place_, cuda_ptr_.get(), platform::CPUPlace(), - static_cast(this->data()), - this->size() * sizeof(T), ctx->stream()); - ctx->Wait(); -#endif -} + void UnsetFlag(int flag) const { flag_ &= ~flag; } + void SetFlag(int flag) const { flag_ |= flag; } -template -void Vector::CopyFromCUDA() { -#ifdef PADDLE_WITH_CUDA - if (cuda_ptr_ == nullptr) { - LOG(WARNING) << "No uncommitted cuda data."; - return; - } - this->resize(cuda_size_); - platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); - auto *ctx = pool.GetByPlace(place_); - memory::Copy(platform::CPUPlace(), static_cast(this->data()), place_, - static_cast(cuda_ptr_.get()), - this->size() * sizeof(T), ctx->stream()); - ctx->Wait(); -#endif -} + bool IsDirty() const { return flag_ & kDirty; } -template -void Vector::CopyToPeer(platform::Place place) { -#ifdef PADDLE_WITH_CUDA - if (boost::get(place) != place_) { - place_ = boost::get(place); - } - if (cuda_size_ < this->size() || cuda_ptr_ == nullptr) { - cuda_ptr_.reset( - memory::Alloc(place_, this->size() * sizeof(T)), - memory::PlainDeleter(place_)); - } - cuda_size_ = this->size(); - platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); - auto *ctx = pool.GetByPlace(place_); - memory::Copy(place_, cuda_ptr_.get(), platform::CPUPlace(), - static_cast(this->data()), - this->size() * sizeof(T), ctx->stream()); - ctx->Wait(); -#endif -} + bool IsInCUDA() const { return flag_ & kDataInCUDA; } + + bool IsInCPU() const { return flag_ & kDataInCPU; } + + static void WaitPlace(const platform::Place place) { + if (platform::is_gpu_place(place)) { + platform::DeviceContextPool::Instance() + .Get(boost::get(place)) + ->Wait(); + } + } + + mutable int flag_; + mutable Tensor cpu_vec_; + mutable Tensor cuda_vec_; + size_t size_; +}; } // namespace framework } // namespace paddle diff --git a/paddle/framework/mixed_vector_test.cu b/paddle/framework/mixed_vector_test.cu index 7b571788ad1..6adad6c12c3 100644 --- a/paddle/framework/mixed_vector_test.cu +++ b/paddle/framework/mixed_vector_test.cu @@ -11,62 +11,3 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include -#include -#include "gtest/gtest.h" - -#include "paddle/framework/init.h" -#include "paddle/framework/mixed_vector.h" - -using namespace paddle::framework; -using namespace paddle::platform; -using namespace paddle::memory; - -template -__global__ void test(T* data, int size) { - for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; - i += blockDim.x * gridDim.x) { - data[i] *= 2; - } -} - -TEST(Vector, Normal) { - // fill the device context pool. - InitDevices(); - - Vector vec({1, 2, 3}); - size_t* ptr = vec.data(); - for (size_t i = 0; i < vec.size(); ++i) { - EXPECT_EQ(vec[i], *(ptr + i)); - } - - vec.clear(); - vec.CopyFromCUDA(); - - std::vector v = {1, 2, 3}; - for (size_t i = 0; i < v.size(); ++i) { - EXPECT_EQ(v[i], vec[i]); - } -} - -TEST(Vector, MultipleCopy) { - InitDevices(); - Vector vec({1, 2, 3}); - CUDAPlace place(0); - vec.mutable_data(place); - auto vec2 = Vector(vec); - { - const size_t* ptr = vec2.data(CPUPlace()); - for (size_t i = 0; i < vec2.size(); ++i) { - EXPECT_EQ(*(ptr + i), vec[i]); - } - } - test<<<3, 3>>>(vec2.mutable_data(place), vec2.size()); - vec2.CopyFromCUDA(); - { - const size_t* ptr = vec2.data(CPUPlace()); - for (size_t i = 0; i < vec2.size(); ++i) { - EXPECT_EQ(*(ptr + i), vec[i] * 2); - } - } -} diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h index f0ea709a5c3..a8767a75430 100644 --- a/paddle/framework/tensor.h +++ b/paddle/framework/tensor.h @@ -128,6 +128,10 @@ class Tensor { inline void set_layout(const DataLayout layout) { layout_ = layout; } + size_t capacity() const { + return holder_ == nullptr ? 0UL : holder_->size() - offset_; + } + private: friend class LoDTensor; diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index 1340c5e4852..6dcaa024245 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -52,7 +52,7 @@ struct SizeOfTypeFunctor { }; static inline size_t SizeOfType(std::type_index type) { - SizeOfTypeFunctor functor; + SizeOfTypeFunctor functor; size_t size = functor(type); PADDLE_ENFORCE(size != 0UL, "Cannot get size of type %s", type.name()); return size; diff --git a/paddle/operators/adagrad_op.cu b/paddle/operators/adagrad_op.cu index 00cb6e9cafb..9a21e00b12b 100644 --- a/paddle/operators/adagrad_op.cu +++ b/paddle/operators/adagrad_op.cu @@ -101,9 +101,9 @@ struct SparseAdagradFunctor { SparseAdagradFunctorKernel< T, 256><<(context) - .stream()>>>(grad_merge_data, merge_rows.cuda_data(), lr, - param_data, moment_data, grad_width, - epsilon); + .stream()>>>( + grad_merge_data, merge_rows.CUDAMutableData(context.GetPlace()), lr, + param_data, moment_data, grad_width, epsilon); } }; diff --git a/paddle/operators/adam_op.h b/paddle/operators/adam_op.h index bf536687d39..af2c3ecd725 100644 --- a/paddle/operators/adam_op.h +++ b/paddle/operators/adam_op.h @@ -201,7 +201,7 @@ class AdamOpKernel : public framework::OpKernel { const T* grad_data = grad_tensor.template data(); int64_t* rows = nullptr; if (platform::is_gpu_place(ctx.GetPlace())) { - rows = grad_merge.mutable_rows()->cuda_data(); + rows = grad_merge.mutable_rows()->CUDAMutableData(ctx.GetPlace()); } else { rows = grad_merge.mutable_rows()->data(); } diff --git a/paddle/operators/ctc_align_op.cu b/paddle/operators/ctc_align_op.cu index cea595d7c5d..6406825d4a5 100644 --- a/paddle/operators/ctc_align_op.cu +++ b/paddle/operators/ctc_align_op.cu @@ -69,8 +69,9 @@ class CTCAlignOpCUDAKernel : public framework::OpKernel { auto stream = ctx.cuda_device_context().stream(); MergeAndDelCudaKernel<<<1, 1, 0, stream>>>( - num_tokens, tokens, num_seq, input_lod[level].cuda_data(), blank, - merge_repeated, dev_out_lod0_ptr, output_data); + num_tokens, tokens, num_seq, + input_lod[level].CUDAMutableData(ctx.GetPlace()), blank, merge_repeated, + dev_out_lod0_ptr, output_data); // set output lod std::vector host_out_lod0(dev_out_lod0.begin(), dev_out_lod0.end()); diff --git a/paddle/operators/lookup_table_op.cu b/paddle/operators/lookup_table_op.cu index 07372808bbf..9684b6d4612 100644 --- a/paddle/operators/lookup_table_op.cu +++ b/paddle/operators/lookup_table_op.cu @@ -125,7 +125,9 @@ class LookupTableGradCUDAKernel : public framework::OpKernel { new_rows.resize(ids_dim[0]); auto gpu_place = boost::get(context.GetPlace()); - memory::Copy(platform::CPUPlace(), new_rows.cuda_data(), gpu_place, + // TODO(yuyang18): Strange code here. + memory::Copy(platform::CPUPlace(), + new_rows.CUDAMutableData(context.GetPlace()), gpu_place, ids_data, ids_dim[0] * sizeof(int64_t), stream); d_table->set_rows(new_rows); diff --git a/paddle/operators/math/selected_rows_functor.cc b/paddle/operators/math/selected_rows_functor.cc index 8a1ebb58c26..4e15d01a307 100644 --- a/paddle/operators/math/selected_rows_functor.cc +++ b/paddle/operators/math/selected_rows_functor.cc @@ -128,7 +128,7 @@ struct SelectedRowsAddTo { auto* in2_value = input2->mutable_value(); // concat rows - in2_rows.insert(in2_rows.end(), in1_rows.begin(), in1_rows.end()); + in2_rows.Extend(in1_rows.begin(), in1_rows.end()); auto in1_place = input1.place(); PADDLE_ENFORCE(platform::is_cpu_place(in1_place)); diff --git a/paddle/operators/math/selected_rows_functor.cu b/paddle/operators/math/selected_rows_functor.cu index acdd87cb355..5c3a53ae1ba 100644 --- a/paddle/operators/math/selected_rows_functor.cu +++ b/paddle/operators/math/selected_rows_functor.cu @@ -126,7 +126,8 @@ struct SelectedRowsAddTensor { dim3 grid(1, in1_rows.size()); SelectedRowsAddTensorKernel< T, block_size><<>>( - in1_data, in1_rows.cuda_data(), out_data, in1_row_numel); + in1_data, in1_rows.CUDAData(context.GetPlace()), out_data, + in1_row_numel); auto out_eigen = framework::EigenVector::Flatten(*output); auto in2_eigen = framework::EigenVector::Flatten(input2); @@ -153,7 +154,7 @@ struct SelectedRowsAddTo { auto* in2_value = input2->mutable_value(); // concat rows - in2_rows.insert(in2_rows.end(), in1_rows.begin(), in1_rows.end()); + in2_rows.Extend(in1_rows.begin(), in1_rows.end()); auto in1_place = input1.place(); PADDLE_ENFORCE(platform::is_gpu_place(in1_place)); @@ -216,7 +217,8 @@ struct SelectedRowsAddToTensor { dim3 grid(1, in1_rows.size()); SelectedRowsAddToTensorKernel< T, block_size><<>>( - in1_data, in1_rows.cuda_data(), in2_data, in1_row_numel); + in1_data, in1_rows.CUDAData(context.GetPlace()), in2_data, + in1_row_numel); } }; @@ -283,9 +285,10 @@ struct MergeAdd { MergeAddKernel< T, 256><<(context) - .stream()>>>(input_data, input_rows.cuda_data(), out_data, - out.mutable_rows()->cuda_data(), - out.rows().size(), input_width); + .stream()>>>( + input_data, input_rows.CUDAData(context.GetPlace()), out_data, + out.mutable_rows()->CUDAMutableData(context.GetPlace()), + out.rows().size(), input_width); return out; } }; diff --git a/paddle/operators/math/sequence2batch.cu b/paddle/operators/math/sequence2batch.cu index f27631271a4..eaed2c30a80 100644 --- a/paddle/operators/math/sequence2batch.cu +++ b/paddle/operators/math/sequence2batch.cu @@ -45,7 +45,6 @@ class CopyMatrixRowsFunctor { const framework::Tensor& src, framework::Vector index_lod, framework::Tensor& dst, bool is_src_index) { - size_t* index = index_lod.cuda_data(); auto src_dims = src.dims(); auto dst_dims = dst.dims(); PADDLE_ENFORCE_EQ(src_dims.size(), 2, @@ -63,7 +62,8 @@ class CopyMatrixRowsFunctor { dim3 grid(8, 1); auto stream = context.stream(); CopyMatrixRowsKernel<<>>( - src_data, dst_data, index, height, width, is_src_index); + src_data, dst_data, index_lod.CUDAData(context.GetPlace()), height, + width, is_src_index); } }; diff --git a/paddle/operators/math/sequence_padding.cu b/paddle/operators/math/sequence_padding.cu index 65c9cfe4a0e..c2bd56448aa 100644 --- a/paddle/operators/math/sequence_padding.cu +++ b/paddle/operators/math/sequence_padding.cu @@ -121,12 +121,12 @@ class PaddingLoDTensorFunctor { if (norm_by_times) { SequencePaddingKernel<<>>( padding_data, const_cast(seq_data), - abs_offset_lod[level].cuda_data(), sequence_width, + abs_offset_lod[level].CUDAData(context.GetPlace()), sequence_width, max_sequence_length, num_sequences); } else { SequencePaddingKernel<<>>( padding_data, const_cast(seq_data), - abs_offset_lod[level].cuda_data(), sequence_width, + abs_offset_lod[level].CUDAData(context.GetPlace()), sequence_width, max_sequence_length, num_sequences); } } @@ -196,12 +196,12 @@ class UnpaddingLoDTensorFunctor { if (norm_by_times) { SequencePaddingKernel<<>>( const_cast(padding_data), seq_data, - abs_offset_lod[level].cuda_data(), sequence_width, + abs_offset_lod[level].CUDAData(context.GetPlace()), sequence_width, max_sequence_length, num_sequences); } else { SequencePaddingKernel<<>>( const_cast(padding_data), seq_data, - abs_offset_lod[level].cuda_data(), sequence_width, + abs_offset_lod[level].CUDAData(context.GetPlace()), sequence_width, max_sequence_length, num_sequences); } } diff --git a/paddle/operators/math/sequence_pooling.cu b/paddle/operators/math/sequence_pooling.cu index f66534a6812..c69bd3da7e7 100644 --- a/paddle/operators/math/sequence_pooling.cu +++ b/paddle/operators/math/sequence_pooling.cu @@ -73,7 +73,8 @@ class MaxSeqPoolFunctor { dim3 grid(num_seq, 1); auto stream = context.stream(); KeMaxSequencePool<<>>( - in_data, starts.cuda_data(), out_data, max_index, num_seq, dim); + in_data, starts.CUDAData(context.GetPlace()), out_data, max_index, + num_seq, dim); } }; diff --git a/paddle/operators/math/sequence_scale.cu b/paddle/operators/math/sequence_scale.cu index fd4e28f6113..7cb9242db93 100644 --- a/paddle/operators/math/sequence_scale.cu +++ b/paddle/operators/math/sequence_scale.cu @@ -46,7 +46,8 @@ class ScaleLoDTensorFunctor { SequenceScaleKernel<<< num_seq, PADDLE_CUDA_NUM_THREADS, 0, context.stream()>>>( - seq_data, abs_offset_lod[level].cuda_data(), scales, seq_width); + seq_data, abs_offset_lod[level].CUDAMutableData(context.GetPlace()), + scales, seq_width); } }; diff --git a/paddle/operators/parallel_do_op.cc b/paddle/operators/parallel_do_op.cc index 89045923f9f..edb9de82509 100644 --- a/paddle/operators/parallel_do_op.cc +++ b/paddle/operators/parallel_do_op.cc @@ -79,9 +79,6 @@ inline void CopyOrShare(const framework::Variable &src, dst->GetMutable()->set_lod(src.Get().lod()); } else { Copy(src.Get(), dst_place, dst->GetMutable()); - framework::LoD lod(src.Get().lod()); - lod.CopyToPeer(dst_place); - dst->GetMutable()->set_lod(lod); } } else if (src.IsType()) { auto &src_sr = src.Get(); @@ -92,9 +89,6 @@ inline void CopyOrShare(const framework::Variable &src, dst_sr->set_rows(src_sr.rows()); } else { Copy(src_sr.value(), dst_place, dst_sr->mutable_value()); - framework::Vector lod(src_sr.rows()); - lod.CopyToPeer(dst_place); - dst_sr->set_rows(lod); } } else { PADDLE_THROW("Expect LoDTensor/SelectedRows, get %s", src.Type().name()); @@ -152,9 +146,6 @@ class ParallelDoOp : public framework::OperatorBase { auto *sub_scope = sub_scopes[i]; auto *dst = sub_scope->Var(param)->GetMutable(); framework::Copy(src, place, dst); - framework::LoD lod(src.lod()); - lod.CopyToPeer(place); - dst->set_lod(lod); } } WaitOnPlaces(places); diff --git a/paddle/operators/row_conv_op.cu b/paddle/operators/row_conv_op.cu index b3825212e1a..d1a6d119d3d 100644 --- a/paddle/operators/row_conv_op.cu +++ b/paddle/operators/row_conv_op.cu @@ -307,7 +307,7 @@ class RowConvKernel int input_dim = X->dims()[1]; int num_sequence = batch_indices.size() - 1; int future_context = Filter->dims()[0]; - size_t *idx = batch_indices.cuda_data(); + size_t *idx = batch_indices.CUDAMutableData(context.GetPlace()); auto stream = context.cuda_device_context().stream(); if (future_context <= 32) { @@ -345,7 +345,7 @@ class RowConvGradKernel int input_dim = X->dims()[1]; int num_sequence = batch_indices.size() - 1; int future_context = Filter->dims()[0]; - size_t *idx = batch_indices.cuda_data(); + size_t *idx = batch_indices.CUDAMutableData(context.GetPlace()); auto &device_ctx = context.cuda_device_context(); math::SetConstant zero; diff --git a/paddle/operators/sequence_erase_op.cu b/paddle/operators/sequence_erase_op.cu index a5311f15f0c..4a7217cfd65 100644 --- a/paddle/operators/sequence_erase_op.cu +++ b/paddle/operators/sequence_erase_op.cu @@ -87,8 +87,7 @@ class SequenceEraseOpCUDAKernel : public framework::OpKernel { // Copy LoD to GPU auto lod0 = lod[0]; auto lod_len = lod0.size(); - thrust::device_vector dev_in_lod = lod0; - size_t* dev_in_lod_ptr = thrust::raw_pointer_cast(dev_in_lod.data()); + const size_t* dev_in_lod_ptr = lod0.CUDAData(ctx.GetPlace()); // Calc output LoD thrust::device_vector dev_out_lod(lod_len); diff --git a/paddle/operators/sgd_op.cu b/paddle/operators/sgd_op.cu index 29f5aa3542c..d27befe4460 100644 --- a/paddle/operators/sgd_op.cu +++ b/paddle/operators/sgd_op.cu @@ -102,8 +102,8 @@ class SGDOpCUDAKernel : public framework::OpKernel { dim3 grid(1, in_rows.size()); SparseSGDFunctorKernel< T, 256><<>>( - in_data, in_rows.cuda_data(), learning_rate->data(), out_data, - in_row_numel); + in_data, in_rows.CUDAData(ctx.GetPlace()), learning_rate->data(), + out_data, in_row_numel); } else { PADDLE_THROW("Unsupported Variable Type of Grad"); diff --git a/paddle/operators/target_assign_op.h b/paddle/operators/target_assign_op.h index 82fca5724c0..574919e1ef8 100644 --- a/paddle/operators/target_assign_op.h +++ b/paddle/operators/target_assign_op.h @@ -137,8 +137,8 @@ class TargetAssignKernel : public framework::OpKernel { PADDLE_ENFORCE_EQ(gt_lod.data()[i], gt_label_lod.data()[i]); } - size_t* gt_lod_data = gt_lod.data(ctx.GetPlace()); - size_t* neg_lod_data = neg_lod.data(ctx.GetPlace()); + size_t* gt_lod_data = gt_lod.MutableData(ctx.GetPlace()); + size_t* neg_lod_data = neg_lod.MutableData(ctx.GetPlace()); TargetAssignFunctor functor(box_data, label_data, match_idx_data, gt_lod_data, background_label, num, diff --git a/paddle/testing/paddle_gtest_main.cc b/paddle/testing/paddle_gtest_main.cc index fd8c4a69da8..ab84f1c292b 100644 --- a/paddle/testing/paddle_gtest_main.cc +++ b/paddle/testing/paddle_gtest_main.cc @@ -20,6 +20,7 @@ limitations under the License. */ #include "paddle/memory/memory.h" int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); std::vector new_argv; std::string gflags_env; for (int i = 0; i < argc; ++i) { @@ -35,7 +36,6 @@ int main(int argc, char** argv) { int new_argc = static_cast(new_argv.size()); char** new_argv_address = new_argv.data(); google::ParseCommandLineFlags(&new_argc, &new_argv_address, false); - testing::InitGoogleTest(&argc, argv); paddle::memory::Used(paddle::platform::CPUPlace()); #ifdef PADDLE_WITH_CUDA -- GitLab From 11acbe687e80bbe1be2632786fd9fa29ecb9932b Mon Sep 17 00:00:00 2001 From: kavyasrinet Date: Thu, 8 Feb 2018 10:30:10 -0800 Subject: [PATCH 102/138] Updating the copyright year for the recent files. (#8018) * Adding more details to cluster_train * Update copyright for notest_dist_image_classification * Fixed copyright * Updating the copyright year and content --- paddle/framework/channel.h | 2 +- paddle/framework/channel_test.cc | 2 +- paddle/framework/details/buffered_channel.h | 2 +- paddle/framework/details/cow_ptr.h | 2 +- paddle/framework/details/cow_ptr_test.cc | 2 +- paddle/framework/details/op_registry.h | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/paddle/framework/channel.h b/paddle/framework/channel.h index b679387b112..146f0e9e71e 100644 --- a/paddle/framework/channel.h +++ b/paddle/framework/channel.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/framework/channel_test.cc b/paddle/framework/channel_test.cc index a307abb4ed3..8afb988914a 100644 --- a/paddle/framework/channel_test.cc +++ b/paddle/framework/channel_test.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/framework/details/buffered_channel.h b/paddle/framework/details/buffered_channel.h index 77eebc99249..c6e4bec0f32 100644 --- a/paddle/framework/details/buffered_channel.h +++ b/paddle/framework/details/buffered_channel.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/framework/details/cow_ptr.h b/paddle/framework/details/cow_ptr.h index 7e308ffb5a4..69bcea62528 100644 --- a/paddle/framework/details/cow_ptr.h +++ b/paddle/framework/details/cow_ptr.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/framework/details/cow_ptr_test.cc b/paddle/framework/details/cow_ptr_test.cc index 936954a2333..1f4a12bca0d 100644 --- a/paddle/framework/details/cow_ptr_test.cc +++ b/paddle/framework/details/cow_ptr_test.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/paddle/framework/details/op_registry.h b/paddle/framework/details/op_registry.h index 6d50e820b2b..31a40bcbcb3 100644 --- a/paddle/framework/details/op_registry.h +++ b/paddle/framework/details/op_registry.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. +/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. -- GitLab From f605d00f66f27066d68033e2dbbaef806954e24b Mon Sep 17 00:00:00 2001 From: kavyasrinet Date: Thu, 8 Feb 2018 10:30:29 -0800 Subject: [PATCH 103/138] Fixing unbuffered test to a generic test (#8162) * Fixing unbufered test to a generic test * Update channel_test.cc * splitting over functions * add type * fix * --- paddle/framework/channel_test.cc | 47 ++++++++++++++++++-------------- 1 file changed, 27 insertions(+), 20 deletions(-) diff --git a/paddle/framework/channel_test.cc b/paddle/framework/channel_test.cc index 8afb988914a..35567649b88 100644 --- a/paddle/framework/channel_test.cc +++ b/paddle/framework/channel_test.cc @@ -25,6 +25,26 @@ using paddle::framework::CloseChannel; using paddle::framework::details::Buffered; using paddle::framework::details::UnBuffered; +void RecevingOrderEqualToSendingOrder(Channel *ch) { + unsigned sum_send = 0; + std::thread t([&]() { + for (int i = 0; i < 5; i++) { + EXPECT_EQ(ch->Send(&i), true); + sum_send += i; + } + }); + for (int i = 0; i < 5; i++) { + int recv; + EXPECT_EQ(ch->Receive(&recv), true); + EXPECT_EQ(recv, i); + } + + CloseChannel(ch); + t.join(); + EXPECT_EQ(sum_send, 10U); + delete ch; +} + TEST(Channel, MakeAndClose) { using paddle::framework::details::Buffered; using paddle::framework::details::UnBuffered; @@ -137,9 +157,7 @@ TEST(Channel, ReceiveFromBufferedChannelReturnResidualValuesTest) { for (size_t i = 0; i < buffer_size; ++i) { EXPECT_EQ(ch->Receive(&out), - false); // after receiving residual values, return zeros. - // Note: we cannot check EXPECT_EQ(out, 0), because C++ doesn't - // define zero values like Go does. + false); // receiving on closed channel should return false } delete ch; } @@ -166,25 +184,14 @@ TEST(Channel, ConcurrentSendNonConcurrentReceiveWithSufficientBufferSize) { delete ch; } -TEST(Channel, SimpleUnbufferedChannelTest) { +TEST(Channel, RecevingOrderEqualToSendingOrderWithUnBufferedChannel) { auto ch = MakeChannel(0); - unsigned sum_send = 0; - std::thread t([&]() { - for (int i = 0; i < 5; i++) { - EXPECT_EQ(ch->Send(&i), true); - sum_send += i; - } - }); - for (int i = 0; i < 5; i++) { - int recv; - EXPECT_EQ(ch->Receive(&recv), true); - EXPECT_EQ(recv, i); - } + RecevingOrderEqualToSendingOrder(ch); +} - CloseChannel(ch); - t.join(); - EXPECT_EQ(sum_send, 10U); - delete ch; +TEST(Channel, RecevingOrderEqualToSendingOrderWithBufferedChannel) { + auto ch = MakeChannel(10); + RecevingOrderEqualToSendingOrder(ch); } // This tests that closing a buffered channel also unblocks -- GitLab From 36da52950d53fdcd6903913a627b5e622648f5f4 Mon Sep 17 00:00:00 2001 From: Siddharth Goyal Date: Thu, 8 Feb 2018 13:46:33 -0800 Subject: [PATCH 104/138] Better version of PR #7985 (Modify load() for inference) (#8024) * Refine load * Address review comments: round 1 * Make API consistent with python-save/load * Add another unit test * Remove commented function * Fix GPU bug * Address review comments * Modify wrt PR 8147 * Fix filenames for combined case * Fix typo * Address review comments: round 2 * Unify TestInference by keeping default param in template * Address review comment * Fix spacing --- paddle/inference/io.cc | 73 +++++++++++++++---- paddle/inference/io.h | 8 +- paddle/inference/tests/book/test_helper.h | 17 ++++- .../book/test_inference_recognize_digits.cc | 42 +++++++++++ python/paddle/v2/fluid/io.py | 12 ++- .../fluid/tests/book/test_recognize_digits.py | 44 +++++++---- 6 files changed, 159 insertions(+), 37 deletions(-) diff --git a/paddle/inference/io.cc b/paddle/inference/io.cc index 1ed14b69c83..784e87970f7 100644 --- a/paddle/inference/io.cc +++ b/paddle/inference/io.cc @@ -21,6 +21,17 @@ limitations under the License. */ namespace paddle { namespace inference { +void ReadBinaryFile(const std::string& filename, std::string& contents) { + VLOG(3) << "loading model from " << filename; + std::ifstream inputfs(filename, std::ios::in | std::ios::binary); + inputfs.seekg(0, std::ios::end); + contents.clear(); + contents.resize(inputfs.tellg()); + inputfs.seekg(0, std::ios::beg); + inputfs.read(&contents[0], contents.size()); + inputfs.close(); +} + bool IsParameter(const framework::VarDesc* var, const framework::ProgramDesc& main_program) { if (var->Persistable()) { @@ -44,12 +55,15 @@ bool IsParameter(const framework::VarDesc* var, void LoadPersistables(framework::Executor& executor, framework::Scope& scope, + const framework::ProgramDesc& main_program, const std::string& dirname, - const framework::ProgramDesc& main_program) { + const std::string& param_filename) { const framework::BlockDesc& global_block = main_program.Block(0); framework::ProgramDesc* load_program = new framework::ProgramDesc(); framework::BlockDesc* load_block = load_program->MutableBlock(0); + std::vector paramlist; + for (auto* var : global_block.AllVars()) { if (IsParameter(var, main_program)) { VLOG(3) << "parameter's name: " << var->Name(); @@ -61,15 +75,33 @@ void LoadPersistables(framework::Executor& executor, new_var->SetLoDLevel(var->GetLoDLevel()); new_var->SetPersistable(true); - // append_op - framework::OpDesc* op = load_block->AppendOp(); - op->SetType("load"); - op->SetOutput("Out", {new_var->Name()}); - op->SetAttr("file_path", {dirname + "/" + new_var->Name()}); - op->CheckAttrs(); + if (!param_filename.empty()) { + paramlist.push_back(new_var->Name()); + } else { + // append_op + framework::OpDesc* op = load_block->AppendOp(); + op->SetType("load"); + op->SetOutput("Out", {new_var->Name()}); + op->SetAttr("file_path", {dirname + "/" + new_var->Name()}); + op->CheckAttrs(); + } } } + + if (!param_filename.empty()) { + // sort paramlist to have consistent ordering + std::sort(paramlist.begin(), paramlist.end()); + // append just the load_combine op + framework::OpDesc* op = load_block->AppendOp(); + op->SetType("load_combine"); + op->SetOutput("Out", paramlist); + op->SetAttr("file_path", {param_filename}); + op->CheckAttrs(); + } + executor.Run(*load_program, &scope, 0, true, true); + + VLOG(3) << "Ran loading successfully"; delete load_program; } @@ -77,20 +109,29 @@ std::unique_ptr Load(framework::Executor& executor, framework::Scope& scope, const std::string& dirname) { std::string model_filename = dirname + "/__model__"; - LOG(INFO) << "loading model from " << model_filename; - std::ifstream inputfs(model_filename, std::ios::in | std::ios::binary); std::string program_desc_str; - inputfs.seekg(0, std::ios::end); - program_desc_str.resize(inputfs.tellg()); - inputfs.seekg(0, std::ios::beg); - LOG(INFO) << "program_desc_str's size: " << program_desc_str.size(); - inputfs.read(&program_desc_str[0], program_desc_str.size()); - inputfs.close(); + ReadBinaryFile(model_filename, program_desc_str); + + std::unique_ptr main_program( + new framework::ProgramDesc(program_desc_str)); + + LoadPersistables(executor, scope, *main_program, dirname, ""); + return main_program; +} + +std::unique_ptr Load( + framework::Executor& executor, + framework::Scope& scope, + const std::string& prog_filename, + const std::string& param_filename) { + std::string model_filename = prog_filename; + std::string program_desc_str; + ReadBinaryFile(model_filename, program_desc_str); std::unique_ptr main_program( new framework::ProgramDesc(program_desc_str)); - LoadPersistables(executor, scope, dirname, *main_program); + LoadPersistables(executor, scope, *main_program, "", param_filename); return main_program; } diff --git a/paddle/inference/io.h b/paddle/inference/io.h index 962b6c4e20d..a7d7c499690 100644 --- a/paddle/inference/io.h +++ b/paddle/inference/io.h @@ -26,12 +26,18 @@ namespace inference { void LoadPersistables(framework::Executor& executor, framework::Scope& scope, + const framework::ProgramDesc& main_program, const std::string& dirname, - const framework::ProgramDesc& main_program); + const std::string& param_filename); std::unique_ptr Load(framework::Executor& executor, framework::Scope& scope, const std::string& dirname); +std::unique_ptr Load(framework::Executor& executor, + framework::Scope& scope, + const std::string& prog_filename, + const std::string& param_filename); + } // namespace inference } // namespace paddle diff --git a/paddle/inference/tests/book/test_helper.h b/paddle/inference/tests/book/test_helper.h index 32db643fca2..3e66ced94fe 100644 --- a/paddle/inference/tests/book/test_helper.h +++ b/paddle/inference/tests/book/test_helper.h @@ -67,17 +67,28 @@ void CheckError(paddle::framework::LoDTensor& output1, EXPECT_EQ(count, 0) << "There are " << count << " different elements."; } -template +template void TestInference(const std::string& dirname, const std::vector& cpu_feeds, std::vector& cpu_fetchs) { - // 1. Define place, executor and scope + // 1. Define place, executor, scope and inference_program auto place = Place(); auto executor = paddle::framework::Executor(place); auto* scope = new paddle::framework::Scope(); + std::unique_ptr inference_program; // 2. Initialize the inference_program and load all parameters from file - auto inference_program = paddle::inference::Load(executor, *scope, dirname); + if (IsCombined) { + // Hard-coding the names for combined params case + std::string prog_filename = "__model_combined__"; + std::string param_filename = "__params_combined__"; + inference_program = paddle::inference::Load(executor, + *scope, + dirname + "/" + prog_filename, + dirname + "/" + param_filename); + } else { + inference_program = paddle::inference::Load(executor, *scope, dirname); + } // 3. Get the feed_target_names and fetch_target_names const std::vector& feed_target_names = diff --git a/paddle/inference/tests/book/test_inference_recognize_digits.cc b/paddle/inference/tests/book/test_inference_recognize_digits.cc index 48f887e6bc6..3a48db7fe08 100644 --- a/paddle/inference/tests/book/test_inference_recognize_digits.cc +++ b/paddle/inference/tests/book/test_inference_recognize_digits.cc @@ -59,3 +59,45 @@ TEST(inference, recognize_digits) { CheckError(output1, output2); #endif } + +TEST(inference, recognize_digits_combine) { + if (FLAGS_dirname.empty()) { + LOG(FATAL) << "Usage: ./example --dirname=path/to/your/model"; + } + + LOG(INFO) << "FLAGS_dirname: " << FLAGS_dirname << std::endl; + std::string dirname = FLAGS_dirname; + + // 0. Call `paddle::framework::InitDevices()` initialize all the devices + // In unittests, this is done in paddle/testing/paddle_gtest_main.cc + + paddle::framework::LoDTensor input; + // Use normilized image pixels as input data, + // which should be in the range [-1.0, 1.0]. + SetupTensor( + input, {1, 28, 28}, static_cast(-1), static_cast(1)); + std::vector cpu_feeds; + cpu_feeds.push_back(&input); + + paddle::framework::LoDTensor output1; + std::vector cpu_fetchs1; + cpu_fetchs1.push_back(&output1); + + // Run inference on CPU + TestInference( + dirname, cpu_feeds, cpu_fetchs1); + LOG(INFO) << output1.dims(); + +#ifdef PADDLE_WITH_CUDA + paddle::framework::LoDTensor output2; + std::vector cpu_fetchs2; + cpu_fetchs2.push_back(&output2); + + // Run inference on CUDA GPU + TestInference( + dirname, cpu_feeds, cpu_fetchs2); + LOG(INFO) << output2.dims(); + + CheckError(output1, output2); +#endif +} diff --git a/python/paddle/v2/fluid/io.py b/python/paddle/v2/fluid/io.py index 613dc20b6ea..0f43e46082a 100644 --- a/python/paddle/v2/fluid/io.py +++ b/python/paddle/v2/fluid/io.py @@ -342,7 +342,11 @@ def save_inference_model(dirname, prepend_feed_ops(inference_program, feeded_var_names) append_fetch_ops(inference_program, fetch_var_names) - model_file_name = dirname + "/__model__" + if save_file_name == None: + model_file_name = dirname + "/__model__" + else: + model_file_name = dirname + "/__model_combined__" + with open(model_file_name, "wb") as f: f.write(inference_program.desc.serialize_to_string()) @@ -384,7 +388,11 @@ def load_inference_model(dirname, executor, load_file_name=None): if not os.path.isdir(dirname): raise ValueError("There is no directory named '%s'", dirname) - model_file_name = dirname + "/__model__" + if load_file_name == None: + model_file_name = dirname + "/__model__" + else: + model_file_name = dirname + "/__model_combined__" + with open(model_file_name, "rb") as f: program_desc_str = f.read() diff --git a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py index d8f0ad89cd8..6f9d85faff9 100644 --- a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py @@ -78,7 +78,7 @@ def conv_net(img, label): return loss_net(conv_pool_2, label) -def train(nn_type, use_cuda, parallel, save_dirname): +def train(nn_type, use_cuda, parallel, save_dirname, save_param_filename): if use_cuda and not fluid.core.is_compiled_with_cuda(): return img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') @@ -143,8 +143,10 @@ def train(nn_type, use_cuda, parallel, save_dirname): avg_loss_val = numpy.array(avg_loss_set).mean() if float(acc_val) > 0.85: # test acc > 85% if save_dirname is not None: - fluid.io.save_inference_model(save_dirname, ["img"], - [prediction], exe) + fluid.io.save_inference_model( + save_dirname, ["img"], [prediction], + exe, + save_file_name=save_param_filename) return else: print( @@ -156,7 +158,7 @@ def train(nn_type, use_cuda, parallel, save_dirname): raise AssertionError("Loss of recognize digits is too large") -def infer(use_cuda, save_dirname=None): +def infer(use_cuda, save_dirname=None, param_filename=None): if save_dirname is None: return @@ -167,8 +169,8 @@ def infer(use_cuda, save_dirname=None): # the feed_target_names (the names of variables that will be feeded # data using feed operators), and the fetch_targets (variables that # we want to obtain data from using fetch operators). - [inference_program, feed_target_names, - fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + [inference_program, feed_target_names, fetch_targets + ] = fluid.io.load_inference_model(save_dirname, exe, param_filename) # The input's dimension of conv should be 4-D or 5-D. # Use normilized image pixels as input data, which should be in the range [-1.0, 1.0]. @@ -183,36 +185,45 @@ def infer(use_cuda, save_dirname=None): print("infer results: ", results[0]) -def main(use_cuda, parallel, nn_type): +def main(use_cuda, parallel, nn_type, combine): if not use_cuda and not parallel: save_dirname = "recognize_digits_" + nn_type + ".inference.model" + save_filename = None + if combine == True: + save_filename = "__params_combined__" else: save_dirname = None + save_filename = None train( nn_type=nn_type, use_cuda=use_cuda, parallel=parallel, - save_dirname=save_dirname) - infer(use_cuda=use_cuda, save_dirname=save_dirname) + save_dirname=save_dirname, + save_param_filename=save_filename) + infer( + use_cuda=use_cuda, + save_dirname=save_dirname, + param_filename=save_filename) class TestRecognizeDigits(unittest.TestCase): pass -def inject_test_method(use_cuda, parallel, nn_type): +def inject_test_method(use_cuda, parallel, nn_type, combine): def __impl__(self): prog = fluid.Program() startup_prog = fluid.Program() scope = fluid.core.Scope() with fluid.scope_guard(scope): with fluid.program_guard(prog, startup_prog): - main(use_cuda, parallel, nn_type) + main(use_cuda, parallel, nn_type, combine) - fn = 'test_{0}_{1}_{2}'.format(nn_type, 'cuda' - if use_cuda else 'cpu', 'parallel' - if parallel else 'normal') + fn = 'test_{0}_{1}_{2}_{3}'.format(nn_type, 'cuda' + if use_cuda else 'cpu', 'parallel' + if parallel else 'normal', 'combine' + if combine else 'separate') setattr(TestRecognizeDigits, fn, __impl__) @@ -221,7 +232,10 @@ def inject_all_tests(): for use_cuda in (False, True): for parallel in (False, True): for nn_type in ('mlp', 'conv'): - inject_test_method(use_cuda, parallel, nn_type) + inject_test_method(use_cuda, parallel, nn_type, True) + + # One unit-test for saving parameters as separate files + inject_test_method(False, False, 'mlp', False) inject_all_tests() -- GitLab From 6c7ba81c0ae79275067bf1bab61358681087ec30 Mon Sep 17 00:00:00 2001 From: guosheng Date: Wed, 7 Feb 2018 23:58:02 +0800 Subject: [PATCH 105/138] Add python wrapper for layer_norm --- doc/api/v2/fluid/layers.rst | 6 ++ python/paddle/v2/fluid/__init__.py | 8 +-- python/paddle/v2/fluid/layers/nn.py | 105 ++++++++++++++++++++++++++-- python/paddle/v2/fluid/nets.py | 5 +- 4 files changed, 114 insertions(+), 10 deletions(-) diff --git a/doc/api/v2/fluid/layers.rst b/doc/api/v2/fluid/layers.rst index e24613b94b4..58c493fd741 100644 --- a/doc/api/v2/fluid/layers.rst +++ b/doc/api/v2/fluid/layers.rst @@ -323,6 +323,12 @@ batch_norm .. autofunction:: paddle.v2.fluid.layers.batch_norm :noindex: +layer_norm +---------- + +.. autofunction:: paddle.v2.fluid.layers.layer_norm + :noindex: + beam_search_decode ------------------ diff --git a/python/paddle/v2/fluid/__init__.py b/python/paddle/v2/fluid/__init__.py index 3ee58393c72..03178ecdcbc 100644 --- a/python/paddle/v2/fluid/__init__.py +++ b/python/paddle/v2/fluid/__init__.py @@ -29,7 +29,7 @@ import optimizer import learning_rate_decay import backward import regularizer -from param_attr import ParamAttr +from param_attr import ParamAttr, WeightNormParamAttr from data_feeder import DataFeeder from core import LoDTensor, CPUPlace, CUDAPlace from distribute_transpiler import DistributeTranspiler @@ -43,9 +43,9 @@ Tensor = LoDTensor __all__ = framework.__all__ + executor.__all__ + [ 'io', 'initializer', 'layers', 'nets', 'optimizer', 'learning_rate_decay', 'backward', 'regularizer', 'LoDTensor', 'CPUPlace', 'CUDAPlace', 'Tensor', - 'ParamAttr' - 'DataFeeder', 'clip', 'SimpleDistributeTranspiler', 'DistributeTranspiler', - 'memory_optimize', 'profiler' + 'ParamAttr', 'WeightNormParamAttr', 'DataFeeder', 'clip', + 'SimpleDistributeTranspiler', 'DistributeTranspiler', 'memory_optimize', + 'profiler' ] diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index a79479f469a..fa7062a3fb4 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -65,6 +65,7 @@ __all__ = [ 'beam_search', 'row_conv', 'multiplex', + 'layer_norm', ] @@ -641,8 +642,8 @@ def dynamic_gru(input, Choices = ["sigmoid", "tanh", "relu", "identity"], default "tanh". Returns: - Variable: The hidden state of GRU. The shape is (T \\times D), and lod \ - is the same with the input. + Variable: The hidden state of GRU. The shape is :math:`(T \\times D)`, \ + and lod is the same with the input. Examples: .. code-block:: python @@ -990,7 +991,7 @@ def square_error_cost(input, label, **kwargs): label(Variable): Label tensor, has target labels. Returns: - Variable: The tensor variable storing the element-wise squared error + Variable: The tensor variable storing the element-wise squared error \ difference of input and label. Examples: @@ -1214,7 +1215,7 @@ def conv2d(input, act(str): Activation type. Default: None Returns: - Variable: The tensor variable storing the convolution and + Variable: The tensor variable storing the convolution and \ non-linearity activation result. Raises: @@ -1565,6 +1566,102 @@ def batch_norm(input, return helper.append_activation(batch_norm_out) +def layer_norm(input, + scale=True, + shift=True, + begin_norm_axis=1, + epsilon=1e-05, + param_attr=None, + bias_attr=None, + act=None, + name=None): + """ + **Layer Normalization** + + Assume feature vectors exist on dimensions + :attr:`begin_norm_axis ... rank(input)` and calculate the moment statistics + along these dimensions for each feature vector :math:`a` with size + :math:`H`, then normalize each feature vector using the corresponding + statistics. After that, apply learnable gain and bias on the normalized + tensor to scale and shift if :attr:`scale` and :attr:`shift` are set. + + Refer to `Layer Normalization `_ + + The formula is as follows: + + .. math:: + + \\mu & = \\frac{1}{H}\\sum_{i=1}^{H} a_i + + \\sigma & = \\sqrt{\\frac{1}{H}\sum_{i=1}^{H}(a_i - \\mu)^2} + + h & = f(\\frac{g}{\\sigma}(a - \\mu) + b) + + Args: + input(Variable): The input tensor variable. + scale(bool): Whether to learn the adaptive gain :math:`g` after + normalization. + shift(bool): Whether to learn the adaptive bias :math:`b` after + normalization. + begin_norm_axis(bool): The normalization will be performed along + dimensions from :attr:`begin_norm_axis` to :attr:`rank(input)`. + epsilon(float): The small value added to the variance to prevent + division by zero. + param_attr(ParamAttr|None): The parameter attribute for the learnable + gain :math:`g`. + bias_attr(ParamAttr|None): The parameter attribute for the learnable + bias :math:`b`. + act(str): Activation to be applied to the output of layer normalizaiton. + + Returns: + Variable: A tensor variable with the same shape as the input. + + Examples: + .. code-block:: python + + data = fluid.layers.data( + name='data', shape=[3, 32, 32], dtype='float32') + x = fluid.layers.layer_norm(input=data, begin_norm_axis=1) + """ + helper = LayerHelper('layer_norm', **locals()) + dtype = helper.input_dtype() + + # create intput and parameters + inputs = {'X': input} + input_shape = input.shape + param_shape = [reduce(lambda x, y: x * y, input_shape[begin_norm_axis:])] + if scale: + scale = helper.create_parameter( + attr=helper.param_attr, + shape=param_shape, + dtype=dtype, + default_initializer=Constant(1.0)) + inputs['Scale'] = scale + if center: + assert bias_attr is not False + bias = helper.create_parameter( + attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True) + inputs['Bias'] = bias + + # create output + mean_out = helper.create_tmp_variable(dtype=dtype, stop_gradient=True) + variance_out = helper.create_tmp_variable(dtype=dtype, stop_gradient=True) + layer_norm_out = helper.create_tmp_variable(dtype) + + helper.append_op( + type="layer_norm", + inputs=inputs, + outputs={ + "Y": layer_norm_out, + "Mean": mean_out, + "Variance": variance_out, + }, + attrs={"epsilon": epsilon, + "begin_norm_axis": begin_norm_axis}) + + return helper.append_activation(layer_norm_out) + + def beam_search_decode(ids, scores, name=None): helper = LayerHelper('beam_search_decode', **locals()) sentence_ids = helper.create_tmp_variable(dtype=ids.dtype) diff --git a/python/paddle/v2/fluid/nets.py b/python/paddle/v2/fluid/nets.py index cb63d43709e..be7878f869b 100644 --- a/python/paddle/v2/fluid/nets.py +++ b/python/paddle/v2/fluid/nets.py @@ -194,7 +194,7 @@ def scaled_dot_product_attention(queries, Returns: - Variable: A 3-D Tensor computed by multi-head scaled dot product + Variable: A 3-D Tensor computed by multi-head scaled dot product \ attention. Raises: @@ -333,6 +333,7 @@ def scaled_dot_product_attention(queries, x=product, shape=[-1, product.shape[-1]], act="softmax"), shape=product.shape) if dropout_rate: - weights = layers.dropout(x, dropout_prob=dropout_rate, is_test=False) + weights = layers.dropout( + weights, dropout_prob=dropout_rate, is_test=False) ctx_multiheads = layers.matmul(weights, v) return __combine_heads(ctx_multiheads) -- GitLab From 0a7ae369f6d6a6be7b87d13834b8ff0a7e3614d1 Mon Sep 17 00:00:00 2001 From: typhoonzero Date: Fri, 9 Feb 2018 10:27:47 +0800 Subject: [PATCH 106/138] fix CI hung --- .../tests/book_memory_optimization/test_memopt_fit_a_line.py | 4 ++++ .../test_memopt_image_classification_train.py | 5 +++++ .../test_memopt_machine_translation.py | 4 ++++ 3 files changed, 13 insertions(+) diff --git a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py index 7ad5e2c594f..045db8390cd 100644 --- a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py +++ b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py @@ -15,6 +15,8 @@ import numpy as np import paddle.v2 as paddle import paddle.v2.fluid as fluid +import math +import sys # need to fix random seed and training data to compare the loss # value accurately calculated by the default and the memory optimization @@ -63,4 +65,6 @@ for pass_id in range(PASS_NUM): if avg_loss_value[0] < 10.0: exit(0) # if avg cost less than 10.0, we think our code is good. + if math.isnan(float(avg_loss_value)): + sys.exit("got NaN loss, training failed.") exit(1) diff --git a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py index 26673afd83c..9fbb36d3638 100644 --- a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py +++ b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py @@ -18,6 +18,8 @@ import sys import paddle.v2 as paddle import paddle.v2.fluid as fluid +import math +import sys # need to fix random seed and training data to compare the loss # value accurately calculated by the default and the memory optimization @@ -152,7 +154,10 @@ for pass_id in range(PASS_NUM): print("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" + str( pass_acc)) # this model is slow, so if we can train two mini batch, we think it works properly. + if i > 2: exit(0) + if math.isnan(float(loss)): + sys.exit("got NaN loss, training failed.") i += 1 exit(1) diff --git a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py index ffd53e7a781..48abaa8d875 100644 --- a/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py +++ b/python/paddle/v2/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py @@ -19,6 +19,8 @@ import paddle.v2.fluid.core as core import paddle.v2.fluid.framework as framework import paddle.v2.fluid.layers as layers from paddle.v2.fluid.executor import Executor +import math +import sys dict_size = 30000 source_dict_dim = target_dict_dim = dict_size @@ -137,6 +139,8 @@ def main(): " avg_cost=" + str(avg_cost_val)) if batch_id > 2: exit(0) + if math.isnan(float(avg_cost_val)): + sys.exit("got NaN loss, training failed.") batch_id += 1 -- GitLab From 67e4ff84d144ba10c1e2484f6bf30652bc9e8322 Mon Sep 17 00:00:00 2001 From: wanghaoshuang Date: Fri, 9 Feb 2018 10:33:32 +0800 Subject: [PATCH 107/138] Remove checking 'numel' of 'X' and 'Y' --- paddle/operators/compare_op.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/operators/compare_op.cc b/paddle/operators/compare_op.cc index 930c295a9cb..51b5bcb38f9 100644 --- a/paddle/operators/compare_op.cc +++ b/paddle/operators/compare_op.cc @@ -58,8 +58,8 @@ class CompareOpInferShape : public framework::InferShapeBase { comment.type); auto dim_x = context->GetInputDim("X"); auto dim_y = context->GetInputDim("Y"); - PADDLE_ENFORCE_EQ(framework::product(dim_x), framework::product(dim_y), - "The number of elements in X and Y should be same"); + PADDLE_ENFORCE_GE(dim_x.size(), dim_y.size(), + "The size of dim_y should not be greater than dim_x's."); context->SetOutputDim("Out", context->GetInputDim("X")); context->ShareLoD("X", "Out"); -- GitLab From 0999347910609334e06caddc5a010ce1af568d5e Mon Sep 17 00:00:00 2001 From: guosheng Date: Fri, 9 Feb 2018 11:04:28 +0800 Subject: [PATCH 108/138] Fix python wrapper for layer_norm --- paddle/operators/layer_norm_op.cc | 2 -- python/paddle/v2/fluid/layers/nn.py | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/paddle/operators/layer_norm_op.cc b/paddle/operators/layer_norm_op.cc index 76d5d571c31..d9b774272cb 100644 --- a/paddle/operators/layer_norm_op.cc +++ b/paddle/operators/layer_norm_op.cc @@ -116,8 +116,6 @@ class LayerNormGradOp : public framework::OperatorWithKernel { // check input PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of LayerNormOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Scale"), - "Input(Scale) of LayerNormOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Mean"), "Input(Mean) of LayerNormOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("Variance"), diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index e8455a8b41e..0b64e09cd35 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -1637,7 +1637,7 @@ def layer_norm(input, dtype=dtype, default_initializer=Constant(1.0)) inputs['Scale'] = scale - if center: + if shift: assert bias_attr is not False bias = helper.create_parameter( attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True) -- GitLab From b5ffe5bce205ea9ce48a329edd6f19164c8608f2 Mon Sep 17 00:00:00 2001 From: QI JUN Date: Fri, 9 Feb 2018 11:08:02 +0800 Subject: [PATCH 109/138] optimize data flow analysis (#8271) --- python/paddle/v2/fluid/memory_optimization_transpiler.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/python/paddle/v2/fluid/memory_optimization_transpiler.py b/python/paddle/v2/fluid/memory_optimization_transpiler.py index 8bb8cf7b1a5..53e0991ee8c 100644 --- a/python/paddle/v2/fluid/memory_optimization_transpiler.py +++ b/python/paddle/v2/fluid/memory_optimization_transpiler.py @@ -92,14 +92,13 @@ class ControlFlowGraph(object): live_in = defaultdict(set) live_out = defaultdict(set) while True: - for i in range(self.op_size): + for i in range(self.op_size, 0, -1): live_in[i] = set(self._live_in[i]) live_out[i] = set(self._live_out[i]) - self._live_in[i] = self._uses[i] | ( - self._live_out[i] - self._defs[i]) for s in self._successors[i]: self._live_out[i] |= self._live_in[s] - + self._live_in[i] = self._uses[i] | ( + self._live_out[i] - self._defs[i]) if self._reach_fixed_point(live_in, live_out): break -- GitLab From 04f625085d17f3ddd91aa9452617bad86cec675c Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 9 Feb 2018 12:52:28 +0800 Subject: [PATCH 110/138] Fix CI --- paddle/framework/mixed_vector.h | 37 ++++++++++++++++++++++----------- 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/paddle/framework/mixed_vector.h b/paddle/framework/mixed_vector.h index 2a80079695f..fe9d8a44a5f 100644 --- a/paddle/framework/mixed_vector.h +++ b/paddle/framework/mixed_vector.h @@ -30,26 +30,35 @@ class Vector { public: using value_type = T; - Vector() { - size_ = 0; - flag_ = kDataInCPU; - } + Vector() { InitEmpty(); } explicit Vector(size_t count, const T& value = T()) { - resize(count); - T* ptr = begin(); - for (size_t i = 0; i < count; ++i) { - ptr[i] = value; + if (count == 0) { + InitEmpty(); + } else { + resize(count); + T* ptr = begin(); + for (size_t i = 0; i < count; ++i) { + ptr[i] = value; + } } } Vector(std::initializer_list init) { - InitByIter(init.size(), init.begin(), init.end()); + if (init.size() == 0) { + InitEmpty(); + } else { + InitByIter(init.size(), init.begin(), init.end()); + } } template Vector(const std::vector& dat) { // NOLINT - InitByIter(dat.size(), dat.begin(), dat.end()); + if (dat.size() == 0) { + InitEmpty(); + } else { + InitByIter(dat.size(), dat.begin(), dat.end()); + } } Vector(const Vector& other) { this->operator=(other); } @@ -58,8 +67,7 @@ class Vector { if (other.size() != 0) { this->InitByIter(other.size(), other.begin(), other.end()); } else { - size_ = 0; - flag_ = kDataInCPU; + InitEmpty(); } return *this; } @@ -218,6 +226,11 @@ class Vector { } private: + void InitEmpty() { + size_ = 0; + flag_ = kDataInCPU; + } + template void InitByIter(size_t size, Iter begin, Iter end) { platform::Place cpu = platform::CPUPlace(); -- GitLab From 02d494c3f0999ce3f5bd0f0cae53540a75c0967e Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 9 Feb 2018 13:11:15 +0800 Subject: [PATCH 111/138] Polish code and add comments --- paddle/framework/mixed_vector.h | 70 ++++++++++++++++++++++++--------- paddle/framework/tensor.h | 5 +-- paddle/framework/tensor_impl.h | 4 +- 3 files changed, 54 insertions(+), 25 deletions(-) diff --git a/paddle/framework/mixed_vector.h b/paddle/framework/mixed_vector.h index fe9d8a44a5f..d388da4f2c7 100644 --- a/paddle/framework/mixed_vector.h +++ b/paddle/framework/mixed_vector.h @@ -25,13 +25,17 @@ namespace paddle { namespace framework { +// Vector implements the std::vector interface, and can get Data or +// MutableData from any place. The data will be synced implicitly inside. template class Vector { public: using value_type = T; + // Default ctor. Create empty Vector Vector() { InitEmpty(); } + // Fill vector with value. The vector size is `count`. explicit Vector(size_t count, const T& value = T()) { if (count == 0) { InitEmpty(); @@ -44,6 +48,7 @@ class Vector { } } + // Ctor with init_list Vector(std::initializer_list init) { if (init.size() == 0) { InitEmpty(); @@ -52,6 +57,7 @@ class Vector { } } + // implicit cast from std::vector. template Vector(const std::vector& dat) { // NOLINT if (dat.size() == 0) { @@ -61,8 +67,10 @@ class Vector { } } + // Copy ctor Vector(const Vector& other) { this->operator=(other); } + // Copy operator Vector& operator=(const Vector& other) { if (other.size() != 0) { this->InitByIter(other.size(), other.begin(), other.end()); @@ -72,27 +80,31 @@ class Vector { return *this; } + // Move ctor Vector(Vector&& other) { this->size_ = other.size_; this->flag_ = other.flag_; - if (other.cuda_vec_.capacity()) { + if (other.cuda_vec_.memory_size()) { this->cuda_vec_.ShareDataWith(other.cuda_vec_); } - if (other.cpu_vec_.capacity()) { + if (other.cpu_vec_.memory_size()) { this->cpu_vec_.ShareDataWith(other.cpu_vec_); } } + // CPU data access method. Mutable. T& operator[](size_t i) { MutableCPU(); return const_cast(cpu_vec_.data())[i]; } + // CPU data access method. Immutable. const T& operator[](size_t i) const { ImmutableCPU(); return cpu_vec_.data()[i]; } + // std::vector iterator methods. Based on CPU data access method size_t size() const { return size_; } T* begin() { return &this->operator[](0); } @@ -116,17 +128,22 @@ class Vector { return *it; } + T* data() { return begin(); } + + const T* data() const { return begin(); } + const T& front() const { return *begin(); } + // end of std::vector iterator methods + // assign this from iterator. + // NOTE: the iterator must support `end-begin` template void assign(Iter begin, Iter end) { InitByIter(end - begin, begin, end); } - T* data() { return begin(); } - - const T* data() const { return begin(); } - + // push_back. If the previous capacity is not enough, the memory will + // double. void push_back(T elem) { if (size_ + 1 > capacity()) { reserve((size_ + 1) << 1); @@ -135,6 +152,19 @@ class Vector { ++size_; } + // extend a vector by iterator. + // NOTE: the iterator must support end-begin + template + void Extend(It begin, It end) { + size_t pre_size = size_; + resize(pre_size + (end - begin)); + T* ptr = this->begin() + pre_size; + for (; begin < end; ++begin, ++ptr) { + *ptr = *begin; + } + } + + // resize the vector void resize(size_t size) { if (size + 1 < capacity()) { size_ = size; @@ -145,7 +175,7 @@ class Vector { T* ptr = cpu_tensor.mutable_data( framework::make_ddim({static_cast(size)}), cpu); const T* old_ptr = - cpu_vec_.capacity() == 0 ? nullptr : cpu_vec_.data(); + cpu_vec_.memory_size() == 0 ? nullptr : cpu_vec_.data(); if (old_ptr != nullptr) { std::copy(old_ptr, old_ptr + size_, ptr); } @@ -154,6 +184,7 @@ class Vector { } } + // get cuda ptr. immutable const T* CUDAData(platform::Place place) const { PADDLE_ENFORCE(platform::is_gpu_place(place), "CUDA Data must on CUDA place"); @@ -161,37 +192,31 @@ class Vector { return cuda_vec_.data(); } + // get cuda ptr. mutable T* CUDAMutableData(platform::Place place) { const T* ptr = CUDAData(place); flag_ = kDirty | kDataInCUDA; return const_cast(ptr); } - template - void Extend(It begin, It end) { - size_t pre_size = size_; - resize(pre_size + (end - begin)); - T* ptr = this->begin() + pre_size; - for (; begin < end; ++begin, ++ptr) { - *ptr = *begin; - } - } - + // clear void clear() { size_ = 0; flag_ = kDirty | kDataInCPU; } size_t capacity() const { - return cpu_vec_.capacity() / SizeOfType(typeid(T)); + return cpu_vec_.memory_size() / SizeOfType(typeid(T)); } + // reserve data void reserve(size_t size) { size_t pre_size = size_; resize(size); resize(pre_size); } + // the unify method to access CPU or CUDA data. immutable. const T* Data(platform::Place place) const { if (platform::is_gpu_place(place)) { return CUDAData(place); @@ -200,6 +225,7 @@ class Vector { } } + // the unify method to access CPU or CUDA data. mutable. T* MutableData(platform::Place place) { if (platform::is_gpu_place(place)) { return CUDAMutableData(place); @@ -208,6 +234,7 @@ class Vector { } } + // implicit cast operator. Vector can be cast to std::vector implicitly. operator std::vector() const { std::vector result; result.resize(size()); @@ -243,7 +270,12 @@ class Vector { size_ = size; } - enum DataFlag { kDataInCPU = 0x01, kDataInCUDA = 0x02, kDirty = 0x10 }; + enum DataFlag { + kDataInCPU = 0x01, + kDataInCUDA = 0x02, + // kDirty means the data has been changed in one device. + kDirty = 0x10 + }; void MutableCPU() { if (IsInCUDA() && IsDirty()) { diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h index a8767a75430..be09b7c9450 100644 --- a/paddle/framework/tensor.h +++ b/paddle/framework/tensor.h @@ -120,6 +120,7 @@ class Tensor { return holder_->type(); } + // memory size returns the holding memory size in byte. size_t memory_size() const; inline void check_memory_size() const; @@ -128,10 +129,6 @@ class Tensor { inline void set_layout(const DataLayout layout) { layout_ = layout; } - size_t capacity() const { - return holder_ == nullptr ? 0UL : holder_->size() - offset_; - } - private: friend class LoDTensor; diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index 6dcaa024245..f75cc31b399 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -62,14 +62,14 @@ inline void Tensor::check_memory_size() const { PADDLE_ENFORCE_NOT_NULL( holder_, "Tensor holds no memory. Call Tensor::mutable_data first."); PADDLE_ENFORCE_GE( - holder_->size(), memory_size() + offset_, + numel() * SizeOfType(type()), memory_size(), "Tensor's dims_ is out of bound. Call Tensor::mutable_data " "first to re-allocate memory.\n" "or maybe the required data-type mismatches the data already stored."); } inline size_t Tensor::memory_size() const { - return holder_ == nullptr ? 0UL : numel() * SizeOfType(type()); + return holder_ == nullptr ? 0UL : holder_->size() - offset_; } template -- GitLab From 9b743b855c6a1ccde54d8b7e359a448a48fc1afb Mon Sep 17 00:00:00 2001 From: guosheng Date: Fri, 9 Feb 2018 13:15:32 +0800 Subject: [PATCH 112/138] Small fix of fluid __init__ --- python/paddle/v2/fluid/__init__.py | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/python/paddle/v2/fluid/__init__.py b/python/paddle/v2/fluid/__init__.py index 03178ecdcbc..73acbf3e009 100644 --- a/python/paddle/v2/fluid/__init__.py +++ b/python/paddle/v2/fluid/__init__.py @@ -41,11 +41,26 @@ import profiler Tensor = LoDTensor __all__ = framework.__all__ + executor.__all__ + [ - 'io', 'initializer', 'layers', 'nets', 'optimizer', 'learning_rate_decay', - 'backward', 'regularizer', 'LoDTensor', 'CPUPlace', 'CUDAPlace', 'Tensor', - 'ParamAttr', 'WeightNormParamAttr', 'DataFeeder', 'clip', - 'SimpleDistributeTranspiler', 'DistributeTranspiler', 'memory_optimize', - 'profiler' + 'io', + 'initializer', + 'layers', + 'nets', + 'optimizer', + 'learning_rate_decay', + 'backward', + 'regularizer', + 'LoDTensor', + 'CPUPlace', + 'CUDAPlace', + 'Tensor', + 'ParamAttr', + 'WeightNormParamAttr', + 'DataFeeder', + 'clip', + 'SimpleDistributeTranspiler', + 'DistributeTranspiler', + 'memory_optimize', + 'profiler', ] -- GitLab From 1185a1b5ab897133585ffdcef8f68d0ee6caef46 Mon Sep 17 00:00:00 2001 From: Yiqun Liu Date: Fri, 9 Feb 2018 13:27:42 +0800 Subject: [PATCH 113/138] Add C++ inference unittest of recommender system (#8227) * Save the inference model in Python example of recommender_system. * Add infer() in Python unittest recommender_system. * Add C++ inference unittest of recommender_system. --- paddle/inference/tests/book/CMakeLists.txt | 1 + paddle/inference/tests/book/test_helper.h | 33 +++- .../test_inference_image_classification.cc | 14 +- .../test_inference_label_semantic_roles.cc | 6 +- .../book/test_inference_recognize_digits.cc | 18 ++- .../book/test_inference_recommender_system.cc | 87 +++++++++++ .../test_inference_rnn_encoder_decoder.cc | 6 +- python/paddle/v2/fluid/tests/book/.gitignore | 2 +- .../fluid/tests/book/test_recognize_digits.py | 3 +- .../tests/book/test_recommender_system.py | 146 +++++++++++++++--- 10 files changed, 266 insertions(+), 50 deletions(-) create mode 100644 paddle/inference/tests/book/test_inference_recommender_system.cc diff --git a/paddle/inference/tests/book/CMakeLists.txt b/paddle/inference/tests/book/CMakeLists.txt index 479f51f1df9..5c866eb1e2e 100644 --- a/paddle/inference/tests/book/CMakeLists.txt +++ b/paddle/inference/tests/book/CMakeLists.txt @@ -28,3 +28,4 @@ inference_test(recognize_digits ARGS mlp) inference_test(image_classification ARGS vgg resnet) inference_test(label_semantic_roles) inference_test(rnn_encoder_decoder) +inference_test(recommender_system) diff --git a/paddle/inference/tests/book/test_helper.h b/paddle/inference/tests/book/test_helper.h index 3e66ced94fe..22ce903c725 100644 --- a/paddle/inference/tests/book/test_helper.h +++ b/paddle/inference/tests/book/test_helper.h @@ -30,6 +30,15 @@ void SetupTensor(paddle::framework::LoDTensor& input, } } +template +void SetupTensor(paddle::framework::LoDTensor& input, + paddle::framework::DDim dims, + std::vector& data) { + CHECK_EQ(paddle::framework::product(dims), data.size()); + T* input_ptr = input.mutable_data(dims, paddle::platform::CPUPlace()); + memcpy(input_ptr, data.data(), input.numel() * sizeof(T)); +} + template void SetupLoDTensor(paddle::framework::LoDTensor& input, paddle::framework::LoD& lod, @@ -37,7 +46,18 @@ void SetupLoDTensor(paddle::framework::LoDTensor& input, T upper) { input.set_lod(lod); int dim = lod[0][lod[0].size() - 1]; - SetupTensor(input, {dim, 1}, lower, upper); + SetupTensor(input, {dim, 1}, lower, upper); +} + +template +void SetupLoDTensor(paddle::framework::LoDTensor& input, + paddle::framework::DDim dims, + paddle::framework::LoD lod, + std::vector& data) { + const size_t level = lod.size() - 1; + CHECK_EQ(dims[0], (lod[level]).back()); + input.set_lod(lod); + SetupTensor(input, dims, data); } template @@ -67,7 +87,7 @@ void CheckError(paddle::framework::LoDTensor& output1, EXPECT_EQ(count, 0) << "There are " << count << " different elements."; } -template +template void TestInference(const std::string& dirname, const std::vector& cpu_feeds, std::vector& cpu_fetchs) { @@ -75,11 +95,13 @@ void TestInference(const std::string& dirname, auto place = Place(); auto executor = paddle::framework::Executor(place); auto* scope = new paddle::framework::Scope(); - std::unique_ptr inference_program; - // 2. Initialize the inference_program and load all parameters from file + // 2. Initialize the inference_program and load parameters + std::unique_ptr inference_program; if (IsCombined) { - // Hard-coding the names for combined params case + // All parameters are saved in a single file. + // Hard-coding the file names of program and parameters in unittest. + // Users are free to specify different filename. std::string prog_filename = "__model_combined__"; std::string param_filename = "__params_combined__"; inference_program = paddle::inference::Load(executor, @@ -87,6 +109,7 @@ void TestInference(const std::string& dirname, dirname + "/" + prog_filename, dirname + "/" + param_filename); } else { + // Parameters are saved in separate files sited in the specified `dirname`. inference_program = paddle::inference::Load(executor, *scope, dirname); } diff --git a/paddle/inference/tests/book/test_inference_image_classification.cc b/paddle/inference/tests/book/test_inference_image_classification.cc index 35ff9431e97..36ea7c77a75 100644 --- a/paddle/inference/tests/book/test_inference_image_classification.cc +++ b/paddle/inference/tests/book/test_inference_image_classification.cc @@ -29,11 +29,15 @@ TEST(inference, image_classification) { // 0. Call `paddle::framework::InitDevices()` initialize all the devices // In unittests, this is done in paddle/testing/paddle_gtest_main.cc + int64_t batch_size = 1; + paddle::framework::LoDTensor input; // Use normilized image pixels as input data, // which should be in the range [0.0, 1.0]. - SetupTensor( - input, {1, 3, 32, 32}, static_cast(0), static_cast(1)); + SetupTensor(input, + {batch_size, 3, 32, 32}, + static_cast(0), + static_cast(1)); std::vector cpu_feeds; cpu_feeds.push_back(&input); @@ -42,8 +46,7 @@ TEST(inference, image_classification) { cpu_fetchs1.push_back(&output1); // Run inference on CPU - TestInference( - dirname, cpu_feeds, cpu_fetchs1); + TestInference(dirname, cpu_feeds, cpu_fetchs1); LOG(INFO) << output1.dims(); #ifdef PADDLE_WITH_CUDA @@ -52,8 +55,7 @@ TEST(inference, image_classification) { cpu_fetchs2.push_back(&output2); // Run inference on CUDA GPU - TestInference( - dirname, cpu_feeds, cpu_fetchs2); + TestInference(dirname, cpu_feeds, cpu_fetchs2); LOG(INFO) << output2.dims(); CheckError(output1, output2); diff --git a/paddle/inference/tests/book/test_inference_label_semantic_roles.cc b/paddle/inference/tests/book/test_inference_label_semantic_roles.cc index 1eaf4022a1f..922dbfd3338 100644 --- a/paddle/inference/tests/book/test_inference_label_semantic_roles.cc +++ b/paddle/inference/tests/book/test_inference_label_semantic_roles.cc @@ -58,8 +58,7 @@ TEST(inference, label_semantic_roles) { cpu_fetchs1.push_back(&output1); // Run inference on CPU - TestInference( - dirname, cpu_feeds, cpu_fetchs1); + TestInference(dirname, cpu_feeds, cpu_fetchs1); LOG(INFO) << output1.lod(); LOG(INFO) << output1.dims(); @@ -69,8 +68,7 @@ TEST(inference, label_semantic_roles) { cpu_fetchs2.push_back(&output2); // Run inference on CUDA GPU - TestInference( - dirname, cpu_feeds, cpu_fetchs2); + TestInference(dirname, cpu_feeds, cpu_fetchs2); LOG(INFO) << output2.lod(); LOG(INFO) << output2.dims(); diff --git a/paddle/inference/tests/book/test_inference_recognize_digits.cc b/paddle/inference/tests/book/test_inference_recognize_digits.cc index 3a48db7fe08..af8c2b14c3b 100644 --- a/paddle/inference/tests/book/test_inference_recognize_digits.cc +++ b/paddle/inference/tests/book/test_inference_recognize_digits.cc @@ -29,11 +29,15 @@ TEST(inference, recognize_digits) { // 0. Call `paddle::framework::InitDevices()` initialize all the devices // In unittests, this is done in paddle/testing/paddle_gtest_main.cc + int64_t batch_size = 1; + paddle::framework::LoDTensor input; // Use normilized image pixels as input data, // which should be in the range [-1.0, 1.0]. - SetupTensor( - input, {1, 28, 28}, static_cast(-1), static_cast(1)); + SetupTensor(input, + {batch_size, 1, 28, 28}, + static_cast(-1), + static_cast(1)); std::vector cpu_feeds; cpu_feeds.push_back(&input); @@ -42,8 +46,7 @@ TEST(inference, recognize_digits) { cpu_fetchs1.push_back(&output1); // Run inference on CPU - TestInference( - dirname, cpu_feeds, cpu_fetchs1); + TestInference(dirname, cpu_feeds, cpu_fetchs1); LOG(INFO) << output1.dims(); #ifdef PADDLE_WITH_CUDA @@ -52,8 +55,7 @@ TEST(inference, recognize_digits) { cpu_fetchs2.push_back(&output2); // Run inference on CUDA GPU - TestInference( - dirname, cpu_feeds, cpu_fetchs2); + TestInference(dirname, cpu_feeds, cpu_fetchs2); LOG(INFO) << output2.dims(); CheckError(output1, output2); @@ -84,7 +86,7 @@ TEST(inference, recognize_digits_combine) { cpu_fetchs1.push_back(&output1); // Run inference on CPU - TestInference( + TestInference( dirname, cpu_feeds, cpu_fetchs1); LOG(INFO) << output1.dims(); @@ -94,7 +96,7 @@ TEST(inference, recognize_digits_combine) { cpu_fetchs2.push_back(&output2); // Run inference on CUDA GPU - TestInference( + TestInference( dirname, cpu_feeds, cpu_fetchs2); LOG(INFO) << output2.dims(); diff --git a/paddle/inference/tests/book/test_inference_recommender_system.cc b/paddle/inference/tests/book/test_inference_recommender_system.cc new file mode 100644 index 00000000000..ec24c7e6ab7 --- /dev/null +++ b/paddle/inference/tests/book/test_inference_recommender_system.cc @@ -0,0 +1,87 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "gflags/gflags.h" +#include "test_helper.h" + +DEFINE_string(dirname, "", "Directory of the inference model."); + +TEST(inference, recommender_system) { + if (FLAGS_dirname.empty()) { + LOG(FATAL) << "Usage: ./example --dirname=path/to/your/model"; + } + + LOG(INFO) << "FLAGS_dirname: " << FLAGS_dirname << std::endl; + std::string dirname = FLAGS_dirname; + + // 0. Call `paddle::framework::InitDevices()` initialize all the devices + // In unittests, this is done in paddle/testing/paddle_gtest_main.cc + + int64_t batch_size = 1; + + paddle::framework::LoDTensor user_id, gender_id, age_id, job_id, movie_id, + category_id, movie_title; + + // Use the first data from paddle.dataset.movielens.test() as input + std::vector user_id_data = {1}; + SetupTensor(user_id, {batch_size, 1}, user_id_data); + + std::vector gender_id_data = {1}; + SetupTensor(gender_id, {batch_size, 1}, gender_id_data); + + std::vector age_id_data = {0}; + SetupTensor(age_id, {batch_size, 1}, age_id_data); + + std::vector job_id_data = {10}; + SetupTensor(job_id, {batch_size, 1}, job_id_data); + + std::vector movie_id_data = {783}; + SetupTensor(movie_id, {batch_size, 1}, movie_id_data); + + std::vector category_id_data = {10, 8, 9}; + SetupLoDTensor(category_id, {3, 1}, {{0, 3}}, category_id_data); + + std::vector movie_title_data = {1069, 4140, 2923, 710, 988}; + SetupLoDTensor(movie_title, {5, 1}, {{0, 5}}, movie_title_data); + + std::vector cpu_feeds; + cpu_feeds.push_back(&user_id); + cpu_feeds.push_back(&gender_id); + cpu_feeds.push_back(&age_id); + cpu_feeds.push_back(&job_id); + cpu_feeds.push_back(&movie_id); + cpu_feeds.push_back(&category_id); + cpu_feeds.push_back(&movie_title); + + paddle::framework::LoDTensor output1; + std::vector cpu_fetchs1; + cpu_fetchs1.push_back(&output1); + + // Run inference on CPU + TestInference(dirname, cpu_feeds, cpu_fetchs1); + LOG(INFO) << output1.dims(); + +#ifdef PADDLE_WITH_CUDA + paddle::framework::LoDTensor output2; + std::vector cpu_fetchs2; + cpu_fetchs2.push_back(&output2); + + // Run inference on CUDA GPU + TestInference(dirname, cpu_feeds, cpu_fetchs2); + LOG(INFO) << output2.dims(); + + CheckError(output1, output2); +#endif +} diff --git a/paddle/inference/tests/book/test_inference_rnn_encoder_decoder.cc b/paddle/inference/tests/book/test_inference_rnn_encoder_decoder.cc index 9bfc0407b7f..248b9dce217 100644 --- a/paddle/inference/tests/book/test_inference_rnn_encoder_decoder.cc +++ b/paddle/inference/tests/book/test_inference_rnn_encoder_decoder.cc @@ -46,8 +46,7 @@ TEST(inference, rnn_encoder_decoder) { cpu_fetchs1.push_back(&output1); // Run inference on CPU - TestInference( - dirname, cpu_feeds, cpu_fetchs1); + TestInference(dirname, cpu_feeds, cpu_fetchs1); LOG(INFO) << output1.lod(); LOG(INFO) << output1.dims(); @@ -57,8 +56,7 @@ TEST(inference, rnn_encoder_decoder) { cpu_fetchs2.push_back(&output2); // Run inference on CUDA GPU - TestInference( - dirname, cpu_feeds, cpu_fetchs2); + TestInference(dirname, cpu_feeds, cpu_fetchs2); LOG(INFO) << output2.lod(); LOG(INFO) << output2.dims(); diff --git a/python/paddle/v2/fluid/tests/book/.gitignore b/python/paddle/v2/fluid/tests/book/.gitignore index f0b574b9396..dd28d354f41 100644 --- a/python/paddle/v2/fluid/tests/book/.gitignore +++ b/python/paddle/v2/fluid/tests/book/.gitignore @@ -1 +1 @@ -recognize_digits_*.inference.model +*.inference.model diff --git a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py index 6f9d85faff9..244c1749cd5 100644 --- a/python/paddle/v2/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits.py @@ -174,8 +174,9 @@ def infer(use_cuda, save_dirname=None, param_filename=None): # The input's dimension of conv should be 4-D or 5-D. # Use normilized image pixels as input data, which should be in the range [-1.0, 1.0]. + batch_size = 1 tensor_img = numpy.random.uniform(-1.0, 1.0, - [1, 1, 28, 28]).astype("float32") + [batch_size, 1, 28, 28]).astype("float32") # Construct feed as a dictionary of {feed_target_name: feed_target_data} # and results will contain a list of data corresponding to fetch_targets. diff --git a/python/paddle/v2/fluid/tests/book/test_recommender_system.py b/python/paddle/v2/fluid/tests/book/test_recommender_system.py index 9c7ab7d6318..612d51e08e4 100644 --- a/python/paddle/v2/fluid/tests/book/test_recommender_system.py +++ b/python/paddle/v2/fluid/tests/book/test_recommender_system.py @@ -16,7 +16,7 @@ import math import sys import numpy as np import paddle.v2 as paddle -import paddle.v2.fluid.core as core +import paddle.v2.fluid as fluid import paddle.v2.fluid.framework as framework import paddle.v2.fluid.layers as layers import paddle.v2.fluid.nets as nets @@ -104,7 +104,8 @@ def get_mov_combined_features(): CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories()) - category_id = layers.data(name='category_id', shape=[1], dtype='int64') + category_id = layers.data( + name='category_id', shape=[1], dtype='int64', lod_level=1) mov_categories_emb = layers.embedding( input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE) @@ -114,7 +115,8 @@ def get_mov_combined_features(): MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict()) - mov_title_id = layers.data(name='movie_title', shape=[1], dtype='int64') + mov_title_id = layers.data( + name='movie_title', shape=[1], dtype='int64', lod_level=1) mov_title_emb = layers.embedding( input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=IS_SPARSE) @@ -144,23 +146,22 @@ def model(): scale_infer = layers.scale(x=inference, scale=5.0) label = layers.data(name='score', shape=[1], dtype='float32') - square_cost = layers.square_error_cost(input=scale_infer, label=label) - avg_cost = layers.mean(x=square_cost) - return avg_cost + return scale_infer, avg_cost + +def train(use_cuda, save_dirname): + scale_infer, avg_cost = model() + + # test program + test_program = fluid.default_main_program().clone() -def main(): - cost = model() sgd_optimizer = SGDOptimizer(learning_rate=0.2) - opts = sgd_optimizer.minimize(cost) + opts = sgd_optimizer.minimize(avg_cost) - if USE_GPU: - place = core.CUDAPlace(0) - else: - place = core.CPUPlace() + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = Executor(place) exe.run(framework.default_startup_program()) @@ -169,6 +170,8 @@ def main(): paddle.reader.shuffle( paddle.dataset.movielens.train(), buf_size=8192), batch_size=BATCH_SIZE) + test_reader = paddle.batch( + paddle.dataset.movielens.test(), batch_size=BATCH_SIZE) feeding = { 'user_id': 0, @@ -184,7 +187,7 @@ def main(): def func_feed(feeding, data): feed_tensors = {} for (key, idx) in feeding.iteritems(): - tensor = core.LoDTensor() + tensor = fluid.LoDTensor() if key != "category_id" and key != "movie_title": if key == "score": numpy_data = np.array(map(lambda x: x[idx], data)).astype( @@ -211,16 +214,117 @@ def main(): PASS_NUM = 100 for pass_id in range(PASS_NUM): - for data in train_reader(): - outs = exe.run(framework.default_main_program(), + for batch_id, data in enumerate(train_reader()): + # train a mini-batch + outs = exe.run(program=fluid.default_main_program(), feed=func_feed(feeding, data), - fetch_list=[cost]) + fetch_list=[avg_cost]) out = np.array(outs[0]) - if out[0] < 6.0: - # if avg cost less than 6.0, we think our code is good. - exit(0) + if (batch_id + 1) % 10 == 0: + avg_cost_set = [] + for test_data in test_reader(): + avg_cost_np = exe.run(program=test_program, + feed=func_feed(feeding, test_data), + fetch_list=[avg_cost]) + avg_cost_set.append(avg_cost_np[0]) + break # test only 1 segment for speeding up CI + + # get test avg_cost + test_avg_cost = np.array(avg_cost_set).mean() + if test_avg_cost < 6.0: + # if avg_cost less than 6.0, we think our code is good. + if save_dirname is not None: + fluid.io.save_inference_model(save_dirname, [ + "user_id", "gender_id", "age_id", "job_id", + "movie_id", "category_id", "movie_title" + ], [scale_infer], exe) + return + if math.isnan(float(out[0])): sys.exit("got NaN loss, training failed.") -main() +def infer(use_cuda, save_dirname=None): + if save_dirname is None: + return + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + + def create_lod_tensor(data, lod=None): + tensor = fluid.LoDTensor() + if lod is None: + # Tensor, the shape is [batch_size, 1] + index = 0 + lod_0 = [index] + for l in range(len(data)): + index += 1 + lod_0.append(index) + lod = [lod_0] + tensor.set_lod(lod) + + flattened_data = np.concatenate(data, axis=0).astype("int64") + flattened_data = flattened_data.reshape([len(flattened_data), 1]) + tensor.set(flattened_data, place) + return tensor + + # Use the first data from paddle.dataset.movielens.test() as input + assert feed_target_names[0] == "user_id" + user_id = create_lod_tensor([[1]]) + + assert feed_target_names[1] == "gender_id" + gender_id = create_lod_tensor([[1]]) + + assert feed_target_names[2] == "age_id" + age_id = create_lod_tensor([[0]]) + + assert feed_target_names[3] == "job_id" + job_id = create_lod_tensor([[10]]) + + assert feed_target_names[4] == "movie_id" + movie_id = create_lod_tensor([[783]]) + + assert feed_target_names[5] == "category_id" + category_id = create_lod_tensor([[10], [8], [9]], [[0, 3]]) + + assert feed_target_names[6] == "movie_title" + movie_title = create_lod_tensor([[1069], [4140], [2923], [710], [988]], + [[0, 5]]) + + # Construct feed as a dictionary of {feed_target_name: feed_target_data} + # and results will contain a list of data corresponding to fetch_targets. + results = exe.run(inference_program, + feed={ + feed_target_names[0]: user_id, + feed_target_names[1]: gender_id, + feed_target_names[2]: age_id, + feed_target_names[3]: job_id, + feed_target_names[4]: movie_id, + feed_target_names[5]: category_id, + feed_target_names[6]: movie_title + }, + fetch_list=fetch_targets, + return_numpy=False) + print("inferred score: ", np.array(results[0])) + + +def main(use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + + # Directory for saving the inference model + save_dirname = "recommender_system.inference.model" + + train(use_cuda, save_dirname) + infer(use_cuda, save_dirname) + + +if __name__ == '__main__': + main(USE_GPU) -- GitLab From 3c4117106f2bd4f86fd7ab770fb4fba45a91f692 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 9 Feb 2018 13:34:12 +0800 Subject: [PATCH 114/138] Add unittest --- paddle/framework/mixed_vector_test.cu | 78 +++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) diff --git a/paddle/framework/mixed_vector_test.cu b/paddle/framework/mixed_vector_test.cu index 6adad6c12c3..a037cc3b990 100644 --- a/paddle/framework/mixed_vector_test.cu +++ b/paddle/framework/mixed_vector_test.cu @@ -11,3 +11,81 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include + +#include "glog/logging.h" +#include "gtest/gtest.h" +#include "paddle/framework/mixed_vector.h" +#include "paddle/platform/gpu_info.h" + +template +using vec = paddle::framework::Vector; + +TEST(mixed_vector, CPU_VECTOR) { + vec tmp; + for (int i = 0; i < 10; ++i) { + tmp.push_back(i); + } + ASSERT_EQ(tmp.size(), 10); + vec tmp2; + tmp2 = tmp; + ASSERT_EQ(tmp2.size(), 10); + for (int i = 0; i < 10; ++i) { + ASSERT_EQ(tmp2[i], i); + ASSERT_EQ(tmp2[i], tmp[i]); + } + int cnt = 0; + for (auto& t : tmp2) { + ASSERT_EQ(t, cnt); + ++cnt; + } +} + +static __global__ void multiply_10(int* ptr) { + for (int i = 0; i < 10; ++i) { + ptr[i] *= 10; + } +} + +cudaStream_t GetCUDAStream(paddle::platform::CUDAPlace place) { + return reinterpret_cast( + paddle::platform::DeviceContextPool::Instance().Get(place)) + ->stream(); +} + +TEST(mixed_vector, GPU_VECTOR) { + vec tmp; + for (int i = 0; i < 10; ++i) { + tmp.push_back(i); + } + ASSERT_EQ(tmp.size(), 10); + paddle::platform::CUDAPlace gpu(0); + + multiply_10<<<1, 1, 0, GetCUDAStream(gpu)>>>(tmp.MutableData(gpu)); + + for (int i = 0; i < 10; ++i) { + ASSERT_EQ(tmp[i], i * 10); + } +} + +TEST(mixed_vector, MultiGPU) { + if (paddle::platform::GetCUDADeviceCount() < 2) { + LOG(WARNING) << "Skip mixed_vector.MultiGPU since there are not multiple " + "GPUs in your machine."; + return; + } + + vec tmp; + for (int i = 0; i < 10; ++i) { + tmp.push_back(i); + } + ASSERT_EQ(tmp.size(), 10); + paddle::platform::CUDAPlace gpu0(0); + multiply_10<<<1, 1, 0, GetCUDAStream(gpu0)>>>(tmp.MutableData(gpu0)); + paddle::platform::CUDAPlace gpu1(1); + multiply_10<<<1, 1, 0, GetCUDAStream(gpu1)>>>(tmp.MutableData(gpu1)); + + for (int i = 0; i < 10; ++i) { + ASSERT_EQ(tmp[i], i * 100); + } +} -- GitLab From 12246a9d8168978452298af20b5cc1453360c12e Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 9 Feb 2018 13:40:58 +0800 Subject: [PATCH 115/138] Fix typo --- paddle/framework/tensor_impl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/framework/tensor_impl.h b/paddle/framework/tensor_impl.h index f75cc31b399..652d6b8a90e 100644 --- a/paddle/framework/tensor_impl.h +++ b/paddle/framework/tensor_impl.h @@ -61,7 +61,7 @@ static inline size_t SizeOfType(std::type_index type) { inline void Tensor::check_memory_size() const { PADDLE_ENFORCE_NOT_NULL( holder_, "Tensor holds no memory. Call Tensor::mutable_data first."); - PADDLE_ENFORCE_GE( + PADDLE_ENFORCE_LE( numel() * SizeOfType(type()), memory_size(), "Tensor's dims_ is out of bound. Call Tensor::mutable_data " "first to re-allocate memory.\n" -- GitLab From a063fc28b6993cb59d8eb66ecce8521360314f0b Mon Sep 17 00:00:00 2001 From: guosheng Date: Fri, 9 Feb 2018 14:31:12 +0800 Subject: [PATCH 116/138] Fix the bias of fc when num_flatten_dims is not 1 in fluid layers --- python/paddle/v2/fluid/layers/nn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/v2/fluid/layers/nn.py b/python/paddle/v2/fluid/layers/nn.py index 99168ecc228..eaf02ff589d 100644 --- a/python/paddle/v2/fluid/layers/nn.py +++ b/python/paddle/v2/fluid/layers/nn.py @@ -184,7 +184,7 @@ def fc(input, helper.append_op( type="sum", inputs={"X": mul_results}, outputs={"Out": pre_bias}) # add bias - pre_activation = helper.append_bias_op(pre_bias) + pre_activation = helper.append_bias_op(pre_bias, dim_start=num_flatten_dims) # add activation return helper.append_activation(pre_activation) -- GitLab From 5135f05cf19c30af52a92637d2270938a0e01585 Mon Sep 17 00:00:00 2001 From: Yancey1989 Date: Fri, 9 Feb 2018 16:05:31 +0800 Subject: [PATCH 117/138] create optimize block in pserver program --- .../paddle/v2/fluid/distribute_transpiler.py | 43 +++++++------------ 1 file changed, 15 insertions(+), 28 deletions(-) diff --git a/python/paddle/v2/fluid/distribute_transpiler.py b/python/paddle/v2/fluid/distribute_transpiler.py index c5f1d51bd71..cd89dba72db 100644 --- a/python/paddle/v2/fluid/distribute_transpiler.py +++ b/python/paddle/v2/fluid/distribute_transpiler.py @@ -347,7 +347,8 @@ class DistributeTranspiler: j -= 1 return False - def _append_pserver_ops(self, program, pserver_program, opt_op, endpoint): + def _append_pserver_ops(self, optimize_block, opt_op, endpoint): + program = optimize_block.program new_inputs = dict() # update param/grad shape first, then other inputs like # moment can use the updated shape @@ -371,11 +372,11 @@ class DistributeTranspiler: if self.trainers > 1: vars2merge = self._create_var_for_trainers( program.global_block(), grad_block, self.trainers) - program.global_block().append_op( + optimize_block.append_op( type="sum", inputs={"X": vars2merge}, outputs={"Out": merged_var}) - program.global_block().append_op( + optimize_block.append_op( type="scale", inputs={"X": merged_var}, outputs={"Out": merged_var}, @@ -412,25 +413,18 @@ class DistributeTranspiler: dtype=var.dtype, shape=new_shape) new_inputs[key] = tmpvar - # create var in pserver program global block. - # TODO(typhoonzero): put blocks in one program to avoid create two - # variables. - pserver_program.global_block().create_var( - name=var.name, - persistable=var.persistable, - dtype=var.dtype, - shape=new_shape) # change output's ParamOut variable outputs = self._get_output_map_from_op(program.global_block(), opt_op) outputs["ParamOut"] = new_inputs["Param"] - program.global_block().append_op( + optimize_block.append_op( type=opt_op.type, inputs=new_inputs, outputs=outputs, attrs=opt_op.attrs) - def _append_pserver_non_opt_ops(self, program, pserver_program, opt_op): + def _append_pserver_non_opt_ops(self, optimize_block, opt_op): + program = optimize_block.program # Append the ops for parameters that do not need to be optimized/updated inputs = self._get_input_map_from_op(self.program.global_block().vars, opt_op) @@ -440,14 +434,8 @@ class DistributeTranspiler: else: varlist = [var] for var in varlist: - # TODO(typhoonzero): will remove below line later. - program.global_block().create_var( - name=var.name, - persistable=var.persistable, - dtype=var.dtype, - shape=var.shape) - if not pserver_program.global_block().vars.has_key(var.name): - pserver_program.global_block().create_var( + if not program.global_block().vars.has_key(var.name): + program.global_block().create_var( name=var.name, persistable=var.persistable, dtype=var.dtype, @@ -456,7 +444,7 @@ class DistributeTranspiler: outputs = self._get_output_map_from_op(self.program.global_block().vars, opt_op) - program.global_block().append_op( + optimize_block.append_op( type=opt_op.type, inputs=inputs, outputs=outputs, @@ -489,7 +477,7 @@ class DistributeTranspiler: dtype=v.dtype, shape=v.shape) # step6 - optimize_sub_program = Program() + optimize_block = pserver_program.create_block(0) # Iterate through the ops and append ops as needed for idx, opt_op in enumerate(self.optimize_ops): is_op_on_pserver = self._is_op_on_pserver(endpoint, @@ -497,18 +485,17 @@ class DistributeTranspiler: if not is_op_on_pserver: continue if "Grad" in opt_op.desc.input_arg_names(): - self._append_pserver_ops(optimize_sub_program, pserver_program, - opt_op, endpoint) + self._append_pserver_ops(optimize_block, opt_op, endpoint) else: - self._append_pserver_non_opt_ops(optimize_sub_program, - pserver_program, opt_op) + self._append_pserver_non_opt_ops(optimize_block, opt_op) + # Append the listen_and_serv op pserver_program.global_block().append_op( type="listen_and_serv", inputs={}, outputs={}, attrs={ - "OptimizeBlock": optimize_sub_program.global_block(), + "OptimizeBlock": optimize_block, "endpoint": endpoint, "ParamList": [ p.name -- GitLab From 051ba1ce1dcebb6fcd43e46fff648b323b087fca Mon Sep 17 00:00:00 2001 From: Qiao Longfei Date: Fri, 9 Feb 2018 16:44:33 +0800 Subject: [PATCH 118/138] Use force cpu in fill constant op (#8254) --- python/paddle/v2/fluid/initializer.py | 37 +++++- .../paddle/v2/fluid/layers/math_op_patch.py | 10 +- python/paddle/v2/fluid/layers/tensor.py | 27 ++++- python/paddle/v2/fluid/learning_rate_decay.py | 114 ++++++++++-------- .../tests/book/test_label_semantic_roles.py | 12 +- 5 files changed, 138 insertions(+), 62 deletions(-) diff --git a/python/paddle/v2/fluid/initializer.py b/python/paddle/v2/fluid/initializer.py index b9c0d12ad6c..8c70fd90eff 100644 --- a/python/paddle/v2/fluid/initializer.py +++ b/python/paddle/v2/fluid/initializer.py @@ -14,14 +14,37 @@ import framework import numpy as np +import contextlib __all__ = [ - 'Constant', - 'Uniform', - 'Normal', - 'Xavier', + 'Constant', 'Uniform', 'Normal', 'Xavier', 'force_init_on_cpu', + 'init_on_cpu' ] +_force_init_on_cpu_ = False + + +def force_init_on_cpu(): + return _force_init_on_cpu_ + + +@contextlib.contextmanager +def init_on_cpu(): + """ + Switch program with `with` statement + + Examples: + >>> with init_on_cpu(): + >>> step = layers.create_global_var() + + """ + global _force_init_on_cpu_ + + pre_state = force_init_on_cpu() + _force_init_on_cpu_ = True + yield + _force_init_on_cpu_ = pre_state + class Initializer(object): """Base class for variable initializers @@ -80,7 +103,7 @@ class ConstantInitializer(Initializer): """Implements the constant initializer """ - def __init__(self, value=0.0): + def __init__(self, value=0.0, force_cpu=False): """Constructor for ConstantInitializer Args: @@ -89,6 +112,7 @@ class ConstantInitializer(Initializer): assert value is not None super(ConstantInitializer, self).__init__() self._value = value + self._force_cpu = force_cpu def __call__(self, var, block): """Add constant initialization ops for a variable @@ -110,7 +134,8 @@ class ConstantInitializer(Initializer): attrs={ "shape": var.shape, "dtype": int(var.dtype), - "value": self._value + "value": float(self._value), + 'force_cpu': self._force_cpu or force_init_on_cpu() }) var.op = op return op diff --git a/python/paddle/v2/fluid/layers/math_op_patch.py b/python/paddle/v2/fluid/layers/math_op_patch.py index 79a130a3eb1..9b5f22759cf 100644 --- a/python/paddle/v2/fluid/layers/math_op_patch.py +++ b/python/paddle/v2/fluid/layers/math_op_patch.py @@ -14,6 +14,7 @@ from ..framework import Variable, unique_name from layer_function_generator import OpProtoHolder +from ..initializer import force_init_on_cpu __all__ = ['monkey_patch_variable'] @@ -36,9 +37,12 @@ def monkey_patch_variable(): block.append_op( type="fill_constant", outputs={'Out': [var]}, - attrs={'dtype': var.dtype, - 'shape': shape, - 'value': value}) + attrs={ + 'dtype': var.dtype, + 'shape': shape, + 'value': value, + 'force_cpu': force_init_on_cpu() + }) return var def create_scalar(block, value, dtype): diff --git a/python/paddle/v2/fluid/layers/tensor.py b/python/paddle/v2/fluid/layers/tensor.py index 704e040b9f4..2d4e0ab0cc6 100644 --- a/python/paddle/v2/fluid/layers/tensor.py +++ b/python/paddle/v2/fluid/layers/tensor.py @@ -16,7 +16,7 @@ from ..layer_helper import LayerHelper from ..param_attr import ParamAttr from ..framework import convert_np_dtype_to_dtype_ from ..framework import Variable -from ..initializer import Constant +from ..initializer import Constant, force_init_on_cpu from ..core import DataType import numpy @@ -69,12 +69,30 @@ def create_parameter(shape, default_initializer) -def create_global_var(shape, value, dtype, persistable=False, name=None): +def create_global_var(shape, + value, + dtype, + persistable=False, + force_cpu=False, + name=None): + """ + Create a global variable. such as global_step + Args: + shape(list[int]): shape of the variable + value(float): the value of the variable + dtype(string): element type of the parameter + persistable(bool): if this variable is persistable + force_cpu(bool): force this variable to be on CPU + + Returns: + Variable: the created Variable + """ helper = LayerHelper("global_var", **locals()) var = helper.create_global_variable( dtype=dtype, shape=shape, persistable=persistable, name=name) helper.set_variable_initializer( - var, initializer=Constant(value=float(value))) + var, initializer=Constant( + value=float(value), force_cpu=force_cpu)) return var @@ -221,6 +239,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None): dtype(np.dtype|core.DataType|str): Data type of the output tensor. value(float): The constant value used to initialize the output tensor. out(Variable): The output tensor. + force_cpu(True|False): data should be on CPU if set true. Returns: Variable: The tensor variable storing the output. @@ -242,7 +261,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None): 'shape': shape, 'dtype': out.dtype, 'value': float(value), - 'force_cpu': force_cpu + 'force_cpu': force_cpu or force_init_on_cpu() }) out.stop_gradient = True return out diff --git a/python/paddle/v2/fluid/learning_rate_decay.py b/python/paddle/v2/fluid/learning_rate_decay.py index 13dc98075f7..2a2a29fd9cb 100644 --- a/python/paddle/v2/fluid/learning_rate_decay.py +++ b/python/paddle/v2/fluid/learning_rate_decay.py @@ -14,6 +14,7 @@ import layers from framework import Variable +from initializer import init_on_cpu __all__ = [ 'exponential_decay', 'natural_exp_decay', 'inverse_time_decay', @@ -54,11 +55,14 @@ def exponential_decay(learning_rate, if not isinstance(global_step, Variable): raise ValueError("global_step is required for exponential_decay.") - # update learning_rate - div_res = global_step / decay_steps - if staircase: - div_res = layers.floor(x=div_res) - return learning_rate * (decay_rate**div_res) + with init_on_cpu(): + # update learning_rate + div_res = global_step / decay_steps + if staircase: + div_res = layers.floor(x=div_res) + decayed_lr = learning_rate * (decay_rate**div_res) + + return decayed_lr def natural_exp_decay(learning_rate, @@ -88,10 +92,13 @@ def natural_exp_decay(learning_rate, if not isinstance(global_step, Variable): raise ValueError("global_step is required for natural_exp_decay.") - div_res = global_step / decay_steps - if staircase: - div_res = layers.floor(x=div_res) - return learning_rate * layers.exp(x=(-1 * decay_rate * div_res)) + with init_on_cpu(): + div_res = global_step / decay_steps + if staircase: + div_res = layers.floor(x=div_res) + decayed_lr = learning_rate * layers.exp(x=(-1 * decay_rate * div_res)) + + return decayed_lr def inverse_time_decay(learning_rate, @@ -121,11 +128,14 @@ def inverse_time_decay(learning_rate, if not isinstance(global_step, Variable): raise ValueError("global_step is required for inverse_time_decay.") - div_res = global_step / decay_steps - if staircase: - div_res = layers.floor(x=div_res) + with init_on_cpu(): + div_res = global_step / decay_steps + if staircase: + div_res = layers.floor(x=div_res) + + decayed_lr = learning_rate / (1 + decay_rate * div_res) - return learning_rate / (1 + decay_rate * div_res) + return decayed_lr def polynomial_decay(learning_rate, @@ -160,22 +170,27 @@ def polynomial_decay(learning_rate, if not isinstance(global_step, Variable): raise ValueError("global_step is required for inverse_time_decay.") - if cycle: - div_res = layers.ceil(x=(global_step / decay_steps)) - zero_var = layers.fill_constant(shape=[1], dtype='float32', value=0.0) - one_var = layers.fill_constant(shape=[1], dtype='float32', value=1.0) - - with layers.Switch() as switch: - with switch.case(layers.equal(x=global_step, y=zero_var)): - layers.assign(input=one_var, output=div_res) - decay_steps = decay_steps * div_res - else: - decay_steps_var = layers.fill_constant( - shape=[1], dtype='float32', value=float(decay_steps)) - global_step = layers.elementwise_min(x=global_step, y=decay_steps_var) - - return (learning_rate - end_learning_rate) * \ - ((1 - global_step / decay_steps) ** power) + end_learning_rate + with init_on_cpu(): + if cycle: + div_res = layers.ceil(x=(global_step / decay_steps)) + zero_var = layers.fill_constant( + shape=[1], dtype='float32', value=0.0) + one_var = layers.fill_constant( + shape=[1], dtype='float32', value=1.0) + + with layers.Switch() as switch: + with switch.case(layers.equal(x=global_step, y=zero_var)): + layers.assign(input=one_var, output=div_res) + decay_steps = decay_steps * div_res + else: + decay_steps_var = layers.fill_constant( + shape=[1], dtype='float32', value=float(decay_steps)) + global_step = layers.elementwise_min( + x=global_step, y=decay_steps_var) + + decayed_lr = (learning_rate - end_learning_rate) * \ + ((1 - global_step / decay_steps) ** power) + end_learning_rate + return decayed_lr def piecewise_decay(global_step, boundaries, values): @@ -200,24 +215,27 @@ def piecewise_decay(global_step, boundaries, values): if not isinstance(global_step, Variable): raise ValueError("global_step is required for piecewise_decay.") - lr = layers.create_global_var( - shape=[1], - value=0.0, - dtype='float32', - persistable=True, - name="learning_rate") - - with layers.Switch() as switch: - for i in range(len(boundaries)): - boundary_val = layers.fill_constant( - shape=[1], dtype='float32', value=float(boundaries[i])) - value_var = layers.fill_constant( - shape=[1], dtype='float32', value=float(values[i])) - with switch.case(layers.less_than(global_step, boundary_val)): - layers.assign(value_var, lr) - last_value_var = layers.fill_constant( - shape=[1], dtype='float32', value=float(values[len(values) - 1])) - with switch.default(): - layers.assign(last_value_var, lr) + with init_on_cpu(): + lr = layers.create_global_var( + shape=[1], + value=0.0, + dtype='float32', + persistable=True, + name="learning_rate") + + with layers.Switch() as switch: + for i in range(len(boundaries)): + boundary_val = layers.fill_constant( + shape=[1], dtype='float32', value=float(boundaries[i])) + value_var = layers.fill_constant( + shape=[1], dtype='float32', value=float(values[i])) + with switch.case(layers.less_than(global_step, boundary_val)): + layers.assign(value_var, lr) + last_value_var = layers.fill_constant( + shape=[1], + dtype='float32', + value=float(values[len(values) - 1])) + with switch.default(): + layers.assign(last_value_var, lr) return lr diff --git a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py index 1491f7a8d54..f33e81186bd 100644 --- a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py @@ -18,6 +18,7 @@ import numpy as np import paddle.v2 as paddle import paddle.v2.dataset.conll05 as conll05 import paddle.v2.fluid as fluid +from paddle.v2.fluid.initializer import init_on_cpu import contextlib import time import unittest @@ -167,7 +168,16 @@ def train(use_cuda, save_dirname=None): # TODO(qiao) # check other optimizers and check why out will be NAN - sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.0001) + global_step = fluid.layers.create_global_var( + shape=[1], value=0, dtype='float32', force_cpu=True, persistable=True) + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=fluid.learning_rate_decay.exponential_decay( + learning_rate=0.0001, + global_step=global_step, + decay_steps=100000, + decay_rate=0.5, + staircase=True), + global_step=global_step) sgd_optimizer.minimize(avg_cost) # TODO(qiao) -- GitLab From a7e231faab8cbf77d97e838a72f26eb2c28f5c44 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Fri, 9 Feb 2018 17:15:47 +0800 Subject: [PATCH 119/138] Delete "API" section from "Documentation" --- doc/index_cn.rst | 1 - doc/index_en.rst | 1 - 2 files changed, 2 deletions(-) diff --git a/doc/index_cn.rst b/doc/index_cn.rst index 63a78428583..0f645db6fc5 100644 --- a/doc/index_cn.rst +++ b/doc/index_cn.rst @@ -8,5 +8,4 @@ PaddlePaddle 文档 build_and_install/index_cn.rst howto/index_cn.rst dev/index_cn.rst - api/index_cn.rst faq/index_cn.rst diff --git a/doc/index_en.rst b/doc/index_en.rst index 5631381be08..166f56c28f4 100644 --- a/doc/index_en.rst +++ b/doc/index_en.rst @@ -8,4 +8,3 @@ PaddlePaddle Documentation build_and_install/index_en.rst howto/index_en.rst dev/index_en.rst - api/index_en.rst -- GitLab From 660f3e25e462b97ab8e98f514c02b28e4a59d7a7 Mon Sep 17 00:00:00 2001 From: Yu Yang Date: Fri, 9 Feb 2018 15:20:01 +0800 Subject: [PATCH 120/138] Fix CI --- paddle/framework/mixed_vector.h | 15 ++++++++++----- paddle/framework/mixed_vector_test.cu | 6 ++++-- paddle/operators/math/selected_rows_functor.cu | 4 +++- 3 files changed, 17 insertions(+), 8 deletions(-) diff --git a/paddle/framework/mixed_vector.h b/paddle/framework/mixed_vector.h index d388da4f2c7..f776f0317a2 100644 --- a/paddle/framework/mixed_vector.h +++ b/paddle/framework/mixed_vector.h @@ -277,11 +277,15 @@ class Vector { kDirty = 0x10 }; + void CopyToCPU() const { + // COPY GPU Data To CPU + Copy(cuda_vec_, platform::CPUPlace(), &cpu_vec_); + WaitPlace(cuda_vec_.place()); + } + void MutableCPU() { if (IsInCUDA() && IsDirty()) { - // COPY GPU Data To CPU - Copy(cuda_vec_, platform::CPUPlace(), &cpu_vec_); - WaitPlace(cuda_vec_.place()); + CopyToCPU(); } flag_ = kDirty | kDataInCPU; } @@ -311,8 +315,10 @@ class Vector { SetFlag(kDataInCUDA); } else if (!(place == cuda_vec_.place())) { framework::Tensor tmp; + WaitPlace(cuda_vec_.place()); Copy(cuda_vec_, boost::get(place), &tmp); WaitPlace(cuda_vec_.place()); + WaitPlace(place); cuda_vec_.ShareDataWith(tmp); } else { // Not Dirty && DataInCUDA && Device is same @@ -324,8 +330,7 @@ class Vector { void ImmutableCPU() const { if (IsDirty() && !IsInCPU()) { // If data has been changed in CUDA, or CPU has no data. - Copy(cuda_vec_, platform::CPUPlace(), &cpu_vec_); - WaitPlace(cuda_vec_.place()); + CopyToCPU(); UnsetFlag(kDirty); } SetFlag(kDataInCPU); diff --git a/paddle/framework/mixed_vector_test.cu b/paddle/framework/mixed_vector_test.cu index a037cc3b990..f02db8f612c 100644 --- a/paddle/framework/mixed_vector_test.cu +++ b/paddle/framework/mixed_vector_test.cu @@ -81,10 +81,12 @@ TEST(mixed_vector, MultiGPU) { } ASSERT_EQ(tmp.size(), 10); paddle::platform::CUDAPlace gpu0(0); + paddle::platform::SetDeviceId(0); multiply_10<<<1, 1, 0, GetCUDAStream(gpu0)>>>(tmp.MutableData(gpu0)); paddle::platform::CUDAPlace gpu1(1); - multiply_10<<<1, 1, 0, GetCUDAStream(gpu1)>>>(tmp.MutableData(gpu1)); - + auto* gpu1_ptr = tmp.MutableData(gpu1); + paddle::platform::SetDeviceId(1); + multiply_10<<<1, 1, 0, GetCUDAStream(gpu1)>>>(gpu1_ptr); for (int i = 0; i < 10; ++i) { ASSERT_EQ(tmp[i], i * 100); } diff --git a/paddle/operators/math/selected_rows_functor.cu b/paddle/operators/math/selected_rows_functor.cu index 5c3a53ae1ba..54a41a67d06 100644 --- a/paddle/operators/math/selected_rows_functor.cu +++ b/paddle/operators/math/selected_rows_functor.cu @@ -154,7 +154,9 @@ struct SelectedRowsAddTo { auto* in2_value = input2->mutable_value(); // concat rows - in2_rows.Extend(in1_rows.begin(), in1_rows.end()); + if (in1_rows.size()) { + in2_rows.Extend(in1_rows.begin(), in1_rows.end()); + } auto in1_place = input1.place(); PADDLE_ENFORCE(platform::is_gpu_place(in1_place)); -- GitLab From 725e64486acb0f448ae7365a6467ad37229bab2a Mon Sep 17 00:00:00 2001 From: emailweixu Date: Fri, 9 Feb 2018 01:45:14 -0800 Subject: [PATCH 121/138] cumsum operator (#8288) --- paddle/framework/grad_op_desc_maker.h | 5 + paddle/framework/op_registry.h | 2 +- paddle/operators/cum_op.h | 111 +++++++++++++++ paddle/operators/cumsum_op.cc | 82 +++++++++++ paddle/operators/cumsum_op.cu | 22 +++ python/paddle/v2/fluid/layers/ops.py | 2 + python/paddle/v2/fluid/tests/op_test.py | 3 +- .../paddle/v2/fluid/tests/test_cumsum_op.py | 127 ++++++++++++++++++ 8 files changed, 352 insertions(+), 2 deletions(-) create mode 100644 paddle/operators/cum_op.h create mode 100644 paddle/operators/cumsum_op.cc create mode 100644 paddle/operators/cumsum_op.cu create mode 100644 python/paddle/v2/fluid/tests/test_cumsum_op.py diff --git a/paddle/framework/grad_op_desc_maker.h b/paddle/framework/grad_op_desc_maker.h index 2082f8bb76f..f51753453be 100644 --- a/paddle/framework/grad_op_desc_maker.h +++ b/paddle/framework/grad_op_desc_maker.h @@ -122,6 +122,11 @@ class GradOpDescMakerBase { return it->second; } + template + inline const T& Attr(const std::string& name) const { + return boost::get(GetAttr(name)); + } + std::string ForwardOpType() const { return this->fwd_op_.Type(); } private: diff --git a/paddle/framework/op_registry.h b/paddle/framework/op_registry.h index 5de9ae559c4..6fb8532b2a8 100644 --- a/paddle/framework/op_registry.h +++ b/paddle/framework/op_registry.h @@ -143,7 +143,7 @@ class OpKernelRegistrar : public Registrar { /** * Macro to register Operator. When the input is duplicable, you should - * use REGISTER_OP_EX with deop_empty_grad=false instead. + * use REGISTER_OP_EX with drop_empty_grad=false instead. */ #define REGISTER_OP(op_type, op_class, op_maker_class, grad_op_type, \ grad_op_class) \ diff --git a/paddle/operators/cum_op.h b/paddle/operators/cum_op.h new file mode 100644 index 00000000000..e3813ac9036 --- /dev/null +++ b/paddle/operators/cum_op.h @@ -0,0 +1,111 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/framework/eigen.h" +#include "paddle/framework/op_registry.h" +#include "paddle/framework/operator.h" +#include "paddle/operators/detail/safe_ref.h" + +namespace paddle { +namespace operators { + +template +class CumKernel : public framework::OpKernel { + public: + using T = typename Functor::ELEMENT_TYPE; + + void Compute(const framework::ExecutionContext& context) const override { + auto& X = detail::Ref(context.Input("X"), + "Cannot get input tensor X, variable name = %s", + context.op().Input("X")); + + auto& Out = detail::Ref(context.Output("Out"), + "Cannot get output tensor Out, variable name = %s", + context.op().Output("Out")); + int axis = context.Attr("axis"); + bool exclusive = context.Attr("exclusive"); + bool reverse = context.Attr("reverse"); + auto x_dims = X.dims(); + if (axis == -1) { + axis = x_dims.size() - 1; + } + PADDLE_ENFORCE_LT( + axis, x_dims.size(), + "axis should be less than the dimensiotn of the input tensor"); + Out.mutable_data(context.GetPlace()); + + int pre = 1; + int post = 1; + int mid = x_dims[axis]; + for (int i = 0; i < axis; ++i) { + pre *= x_dims[i]; + } + for (int i = axis + 1; i < x_dims.size(); ++i) { + post *= x_dims[i]; + } + + auto x = framework::EigenVector::Flatten(X); + auto out = framework::EigenVector::Flatten(Out); + auto* place = + context.template device_context().eigen_device(); + + using IndexT = Eigen::DenseIndex; + if (pre == 1) { + if (post == 1) { + ComputeImp(*place, Eigen::DSizes(mid), x, out, + /* axis= */ 0, reverse, exclusive); + } else { + ComputeImp(*place, Eigen::DSizes(mid, post), x, out, + /* axis= */ 0, reverse, exclusive); + } + } else { + if (post == 1) { + ComputeImp(*place, Eigen::DSizes(pre, mid), x, out, + /* axis= */ 1, reverse, exclusive); + } else { + ComputeImp(*place, Eigen::DSizes(pre, mid, post), x, out, + /* axis= */ 1, reverse, exclusive); + } + } + } + + private: + template + void ComputeImp(Device d, const Dim& dims, X x, Out out, int axis, + bool reverse, bool exclusive) const { + if (!reverse) { + out.reshape(dims).device(d) = Functor()(x.reshape(dims), axis, exclusive); + } else { + std::array rev; + rev.fill(false); + rev[axis] = reverse; + out.reshape(dims).device(d) = + Functor()(x.reshape(dims).reverse(rev), axis, exclusive).reverse(rev); + } + } +}; + +template +struct CumsumFunctor { + using ELEMENT_TYPE = T; + template + const typename X::TensorScanSumOp operator()(X x, int axis, + bool exclusive) const { + return x.cumsum(axis, exclusive); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/operators/cumsum_op.cc b/paddle/operators/cumsum_op.cc new file mode 100644 index 00000000000..4933cc923d4 --- /dev/null +++ b/paddle/operators/cumsum_op.cc @@ -0,0 +1,82 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/cum_op.h" + +namespace paddle { +namespace operators { + +class CumOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext *ctx) const override { + ctx->SetOutputDim("Out", ctx->GetInputDim("X")); + ctx->ShareLoD("X", /*->*/ "Out"); + } +}; + +class CumsumOpMaker : public framework::OpProtoAndCheckerMaker { + public: + CumsumOpMaker(OpProto *proto, OpAttrChecker *op_checker) + : framework::OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X", "Input of Cumsum operator"); + AddOutput("Out", "Output of Cumsum operator"); + AddAttr("axis", + "(int, default -1). The dimenstion to accumulate along. " + "-1 means the last dimenstion") + .SetDefault(-1) + .EqualGreaterThan(-1); + AddAttr("exclusive", + "bool, default false). Whether to perform exclusive cumsum") + .SetDefault(false); + AddAttr("reverse", + "bool, default false). If true, the cumsum is performed in " + "the reversed direction") + .SetDefault(false); + AddComment(R"DOC( +The cumulative sum of the elements along a given axis. +By default, the first element of the result is the same of the first element of +the input. If exlusive is true, the first element of the result is 0. +)DOC"); + } +}; + +class CumsumGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + auto *grad_op = new framework::OpDesc(); + grad_op->SetType("cumsum"); + grad_op->SetInput("X", OutputGrad("Out")); + grad_op->SetOutput("Out", InputGrad("X")); + grad_op->SetAttr("axis", Attr("axis")); + grad_op->SetAttr("reverse", !Attr("reverse")); + grad_op->SetAttr("exclusive", Attr("exclusive")); + return std::unique_ptr(grad_op); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +using CPU = paddle::platform::CPUDeviceContext; + +REGISTER_OPERATOR(cumsum, ops::CumOp, ops::CumsumOpMaker, ops::CumsumGradMaker); +REGISTER_OP_CPU_KERNEL(cumsum, ops::CumKernel>, + ops::CumKernel>, + ops::CumKernel>) diff --git a/paddle/operators/cumsum_op.cu b/paddle/operators/cumsum_op.cu new file mode 100644 index 00000000000..90661c4269a --- /dev/null +++ b/paddle/operators/cumsum_op.cu @@ -0,0 +1,22 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/operators/cum_op.h" + +namespace ops = paddle::operators; +using CUDA = paddle::platform::CUDADeviceContext; + +REGISTER_OP_CUDA_KERNEL(cumsum, ops::CumKernel>, + ops::CumKernel>, + ops::CumKernel>) diff --git a/python/paddle/v2/fluid/layers/ops.py b/python/paddle/v2/fluid/layers/ops.py index 38dea2892fc..bb3f71abbb0 100644 --- a/python/paddle/v2/fluid/layers/ops.py +++ b/python/paddle/v2/fluid/layers/ops.py @@ -65,6 +65,8 @@ __all__ = [ 'logical_or', 'logical_xor', 'logical_not', + 'uniform_random', + 'cumsum', ] + __activations__ for _OP in set(__all__): diff --git a/python/paddle/v2/fluid/tests/op_test.py b/python/paddle/v2/fluid/tests/op_test.py index 3f6d7070c29..f8475813c0c 100644 --- a/python/paddle/v2/fluid/tests/op_test.py +++ b/python/paddle/v2/fluid/tests/op_test.py @@ -326,7 +326,8 @@ class OpTest(unittest.TestCase): self.assertTrue( np.allclose( actual_t, expect_t, atol=atol), - "Output (" + out_name + ") has diff at " + str(place)) + "Output (" + out_name + ") has diff at " + str(place) + + str(actual_t) + str(expect_t)) if isinstance(expect, tuple): self.assertListEqual(actual.lod(), expect[1], "Output (" + out_name + diff --git a/python/paddle/v2/fluid/tests/test_cumsum_op.py b/python/paddle/v2/fluid/tests/test_cumsum_op.py new file mode 100644 index 00000000000..e45ef457306 --- /dev/null +++ b/python/paddle/v2/fluid/tests/test_cumsum_op.py @@ -0,0 +1,127 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +from op_test import OpTest + + +class TestSumOp1(OpTest): + def setUp(self): + self.op_type = "cumsum" + self.attrs = {'axis': 2} + self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} + self.outputs = {'Out': self.inputs['X'].cumsum(axis=2)} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out') + + +class TestSumOp2(OpTest): + def setUp(self): + self.op_type = "cumsum" + self.attrs = {'axis': -1, 'reverse': True} + self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} + self.outputs = { + 'Out': np.flip( + np.flip( + self.inputs['X'], axis=2).cumsum(axis=2), axis=2) + } + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out') + + +class TestSumOp3(OpTest): + def setUp(self): + self.op_type = "cumsum" + self.attrs = {'axis': 1} + self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} + self.outputs = {'Out': self.inputs['X'].cumsum(axis=1)} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out') + + +class TestSumOp4(OpTest): + def setUp(self): + self.op_type = "cumsum" + self.attrs = {'axis': 0} + self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} + self.outputs = {'Out': self.inputs['X'].cumsum(axis=0)} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out') + + +class TestSumOp5(OpTest): + def setUp(self): + self.op_type = "cumsum" + self.inputs = {'X': np.random.random((5, 6)).astype("float64")} + self.outputs = {'Out': self.inputs['X'].cumsum(axis=1)} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out') + + +class TestSumOp7(OpTest): + def setUp(self): + self.op_type = "cumsum" + self.inputs = {'X': np.random.random((6)).astype("float64")} + self.outputs = {'Out': self.inputs['X'].cumsum(axis=0)} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out') + + +class TestSumOp8(OpTest): + def setUp(self): + self.op_type = "cumsum" + self.attrs = {'axis': 2, "exclusive": True} + a = np.random.random((5, 6, 3)).astype("float64") + self.inputs = {'X': a} + self.outputs = { + 'Out': np.concatenate( + (np.zeros( + (5, 6, 1), dtype=np.float64), a[:, :, :-1].cumsum(axis=2)), + axis=2) + } + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out') + + +if __name__ == '__main__': + unittest.main() -- GitLab From 720994b40007c3d3fc5691d6a2f0e3257e31f88f Mon Sep 17 00:00:00 2001 From: kexinzhao Date: Fri, 9 Feb 2018 02:06:33 -0800 Subject: [PATCH 122/138] Add inference example and unit-test for fit-a-line book chapter (#8208) * initial commit * minor fix * remove redundency * address comments --- paddle/inference/tests/book/CMakeLists.txt | 1 + .../tests/book/test_inference_fit_a_line.cc | 57 +++++++++++++++++++ .../v2/fluid/tests/book/test_fit_a_line.py | 48 ++++++++++++++-- 3 files changed, 100 insertions(+), 6 deletions(-) create mode 100644 paddle/inference/tests/book/test_inference_fit_a_line.cc diff --git a/paddle/inference/tests/book/CMakeLists.txt b/paddle/inference/tests/book/CMakeLists.txt index 5c866eb1e2e..5d065e53b2d 100644 --- a/paddle/inference/tests/book/CMakeLists.txt +++ b/paddle/inference/tests/book/CMakeLists.txt @@ -24,6 +24,7 @@ function(inference_test TARGET_NAME) endforeach() endfunction(inference_test) +inference_test(fit_a_line) inference_test(recognize_digits ARGS mlp) inference_test(image_classification ARGS vgg resnet) inference_test(label_semantic_roles) diff --git a/paddle/inference/tests/book/test_inference_fit_a_line.cc b/paddle/inference/tests/book/test_inference_fit_a_line.cc new file mode 100644 index 00000000000..201a2801cd6 --- /dev/null +++ b/paddle/inference/tests/book/test_inference_fit_a_line.cc @@ -0,0 +1,57 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "gflags/gflags.h" +#include "test_helper.h" + +DEFINE_string(dirname, "", "Directory of the inference model."); + +TEST(inference, fit_a_line) { + if (FLAGS_dirname.empty()) { + LOG(FATAL) << "Usage: ./example --dirname=path/to/your/model"; + } + + LOG(INFO) << "FLAGS_dirname: " << FLAGS_dirname << std::endl; + std::string dirname = FLAGS_dirname; + + // 0. Call `paddle::framework::InitDevices()` initialize all the devices + // In unittests, this is done in paddle/testing/paddle_gtest_main.cc + + paddle::framework::LoDTensor input; + // The second dim of the input tensor should be 13 + // The input data should be >= 0 + int64_t batch_size = 10; + SetupTensor( + input, {batch_size, 13}, static_cast(0), static_cast(10)); + std::vector cpu_feeds; + cpu_feeds.push_back(&input); + + paddle::framework::LoDTensor output1; + std::vector cpu_fetchs1; + cpu_fetchs1.push_back(&output1); + + // Run inference on CPU + TestInference(dirname, cpu_feeds, cpu_fetchs1); + LOG(INFO) << output1.dims(); + +#ifdef PADDLE_WITH_CUDA + paddle::framework::LoDTensor output2; + std::vector cpu_fetchs2; + cpu_fetchs2.push_back(&output2); + + // Run inference on CUDA GPU + TestInference(dirname, cpu_feeds, cpu_fetchs2); + LOG(INFO) << output2.dims(); + + CheckError(output1, output2); +#endif +} diff --git a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py index 06860a2a465..b3332b4810b 100644 --- a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py +++ b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py @@ -15,15 +15,13 @@ import paddle.v2 as paddle import paddle.v2.fluid as fluid import contextlib +import numpy import unittest import math import sys -def main(use_cuda): - if use_cuda and not fluid.core.is_compiled_with_cuda(): - return - +def train(use_cuda, save_dirname): x = fluid.layers.data(name='x', shape=[13], dtype='float32') y_predict = fluid.layers.fc(input=x, size=1, act=None) @@ -51,14 +49,15 @@ def main(use_cuda): PASS_NUM = 100 for pass_id in range(PASS_NUM): - fluid.io.save_persistables(exe, "./fit_a_line.model/") - fluid.io.load_persistables(exe, "./fit_a_line.model/") for data in train_reader(): avg_loss_value, = exe.run(fluid.default_main_program(), feed=feeder.feed(data), fetch_list=[avg_cost]) print(avg_loss_value) if avg_loss_value[0] < 10.0: + if save_dirname is not None: + fluid.io.save_inference_model(save_dirname, ['x'], + [y_predict], exe) return if math.isnan(float(avg_loss_value)): sys.exit("got NaN loss, training failed.") @@ -66,6 +65,43 @@ def main(use_cuda): avg_loss_value[0])) +def infer(use_cuda, save_dirname=None): + if save_dirname is None: + return + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + + # The input's dimension should be 2-D and the second dim is 13 + # The input data should be >= 0 + batch_size = 10 + tensor_x = numpy.random.uniform(0, 10, [batch_size, 13]).astype("float32") + assert feed_target_names[0] == 'x' + results = exe.run(inference_program, + feed={feed_target_names[0]: tensor_x}, + fetch_list=fetch_targets) + print("infer shape: ", results[0].shape) + print("infer results: ", results[0]) + + +def main(use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + + # Directory for saving the trained model + save_dirname = "fit_a_line.inference.model" + + train(use_cuda, save_dirname) + infer(use_cuda, save_dirname) + + class TestFitALine(unittest.TestCase): def test_cpu(self): with self.program_scope_guard(): -- GitLab From 159b7722f496c83cbed44e002914062cf45f6396 Mon Sep 17 00:00:00 2001 From: Tao Luo Date: Sat, 10 Feb 2018 00:47:19 +0800 Subject: [PATCH 123/138] Delete "Operators" in API tree (#8324) --- paddle/pybind/CMakeLists.txt | 4 - paddle/pybind/print_operators_doc.cc | 148 --------------------------- paddle/scripts/docker/build.sh | 2 - paddle/scripts/travis/build_doc.sh | 2 - 4 files changed, 156 deletions(-) delete mode 100644 paddle/pybind/print_operators_doc.cc diff --git a/paddle/pybind/CMakeLists.txt b/paddle/pybind/CMakeLists.txt index de53fea0dd6..d62f3403089 100644 --- a/paddle/pybind/CMakeLists.txt +++ b/paddle/pybind/CMakeLists.txt @@ -7,7 +7,3 @@ if(WITH_PYTHON) target_link_libraries(paddle_pybind rt) endif(NOT APPLE AND NOT ANDROID) endif(WITH_PYTHON) - -if(WITH_DOC) - cc_binary(print_operators_doc SRCS print_operators_doc.cc DEPS ${GLOB_OP_LIB}) -endif(WITH_DOC) diff --git a/paddle/pybind/print_operators_doc.cc b/paddle/pybind/print_operators_doc.cc deleted file mode 100644 index b55ddee1761..00000000000 --- a/paddle/pybind/print_operators_doc.cc +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -#include -#include // std::stringstream -#include - -#include "paddle/framework/op_info.h" -#include "paddle/framework/op_registry.h" -#include "paddle/pybind/pybind.h" - -std::string Escape(const std::string& s) { - std::string r; - for (size_t i = 0; i < s.size(); i++) { - switch (s[i]) { - case '\"': - r += "\\\""; - break; - case '\\': - r += "\\\\"; - break; - case '\n': - r += "\\n"; - break; - case '\t': - r += "\\t"; - case '\r': - break; - default: - r += s[i]; - break; - } - } - return r; -} - -std::string AttrType(paddle::framework::proto::AttrType at) { - switch (at) { - case paddle::framework::proto::INT: - return "int"; - case paddle::framework::proto::FLOAT: - return "float"; - case paddle::framework::proto::STRING: - return "string"; - case paddle::framework::proto::BOOLEAN: - return "bool"; - case paddle::framework::proto::INTS: - return "int array"; - case paddle::framework::proto::FLOATS: - return "float array"; - case paddle::framework::proto::STRINGS: - return "string array"; - case paddle::framework::proto::BOOLEANS: - return "bool array"; - case paddle::framework::proto::BLOCK: - return "block id"; - case paddle::framework::proto::LONG: - return "long"; - } - return "UNKNOWN"; // not possible -} - -void PrintVar(const paddle::framework::proto::OpProto::Var& v, - std::stringstream& ss) { - ss << " { " - << "\n" - << " \"name\" : \"" << Escape(v.name()) << "\",\n" - << " \"comment\" : \"" << Escape(v.comment()) << "\",\n" - << " \"duplicable\" : " << v.duplicable() << ",\n" - << " \"intermediate\" : " << v.intermediate() << "\n" - << " },"; -} - -void PrintAttr(const paddle::framework::proto::OpProto::Attr& a, - std::stringstream& ss) { - ss << " { " - << "\n" - << " \"name\" : \"" << Escape(a.name()) << "\",\n" - << " \"type\" : \"" << AttrType(a.type()) << "\",\n" - << " \"comment\" : \"" << Escape(a.comment()) << "\",\n" - << " \"generated\" : " << a.generated() << "\n" - << " },"; -} - -void PrintOpProto(const std::string& type, - const paddle::framework::OpInfo& opinfo, - std::stringstream& ss) { - std::cerr << "Processing " << type << "\n"; - - const paddle::framework::proto::OpProto* p = opinfo.proto_; - if (p == nullptr) { - return; // It is possible that an operator doesn't have OpProto. - } - - ss << "{\n" - << " \"type\" : \"" << Escape(p->type()) << "\",\n" - << " \"comment\" : \"" << Escape(p->comment()) << "\",\n"; - - ss << " \"inputs\" : [ " - << "\n"; - for (int i = 0; i < p->inputs_size(); i++) { - PrintVar(p->inputs(i), ss); - } - ss.seekp(-1, ss.cur); // remove the trailing comma - ss << " ], " - << "\n"; - - ss << " \"outputs\" : [ " - << "\n"; - for (int i = 0; i < p->outputs_size(); i++) { - PrintVar(p->outputs(i), ss); - } - ss.seekp(-1, ss.cur); // remove the trailing comma - ss << " ], " - << "\n"; - - ss << " \"attrs\" : [ " - << "\n"; - for (int i = 0; i < p->attrs_size(); i++) { - PrintAttr(p->attrs(i), ss); - } - ss.seekp(-1, ss.cur); // remove the trailing comma - ss << " ] " - << "\n"; - - ss << "},"; -} - -int main() { - std::stringstream ss; - ss << "[\n"; - for (auto& iter : paddle::framework::OpInfoMap::Instance().map()) { - PrintOpProto(iter.first, iter.second, ss); - } - ss.seekp(-1, ss.cur); // remove the trailing comma - ss << "]\n"; - std::cout << ss.str(); -} diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh index ba496db5f83..2f8dd48efe1 100644 --- a/paddle/scripts/docker/build.sh +++ b/paddle/scripts/docker/build.sh @@ -118,8 +118,6 @@ EOF make -j `nproc` gen_proto_py make -j `nproc` paddle_python make -j `nproc` paddle_docs paddle_docs_cn paddle_api_docs - make -j `nproc` print_operators_doc - paddle/pybind/print_operators_doc > doc/en/html/operators.json popd fi diff --git a/paddle/scripts/travis/build_doc.sh b/paddle/scripts/travis/build_doc.sh index 4af4ac4f5e4..486c094a6ac 100755 --- a/paddle/scripts/travis/build_doc.sh +++ b/paddle/scripts/travis/build_doc.sh @@ -10,8 +10,6 @@ cmake .. -DCMAKE_BUILD_TYPE=Debug -DWITH_GPU=OFF -DWITH_MKL=OFF -DWITH_DOC=ON make -j `nproc` gen_proto_py make -j `nproc` paddle_python make -j `nproc` paddle_docs paddle_docs_cn paddle_api_docs -make -j `nproc` print_operators_doc -paddle/pybind/print_operators_doc > doc/en/html/operators.json # check websites for broken links linkchecker doc/en/html/index.html -- GitLab From 4b62fcd07db826442e291633c5a60d9e2a698b80 Mon Sep 17 00:00:00 2001 From: kexinzhao Date: Fri, 9 Feb 2018 11:32:05 -0800 Subject: [PATCH 124/138] Add Inference example and unit test for understand sentiment (#8251) * initial commit * fix bug * end of file fix * address comments --- paddle/inference/tests/book/CMakeLists.txt | 1 + .../test_inference_understand_sentiment.cc | 60 +++++++++++++++++ .../tests/book/test_understand_sentiment.py | 64 +++++++++++++++++-- 3 files changed, 119 insertions(+), 6 deletions(-) create mode 100644 paddle/inference/tests/book/test_inference_understand_sentiment.cc diff --git a/paddle/inference/tests/book/CMakeLists.txt b/paddle/inference/tests/book/CMakeLists.txt index 5d065e53b2d..ca3c056b097 100644 --- a/paddle/inference/tests/book/CMakeLists.txt +++ b/paddle/inference/tests/book/CMakeLists.txt @@ -30,3 +30,4 @@ inference_test(image_classification ARGS vgg resnet) inference_test(label_semantic_roles) inference_test(rnn_encoder_decoder) inference_test(recommender_system) +inference_test(understand_sentiment) diff --git a/paddle/inference/tests/book/test_inference_understand_sentiment.cc b/paddle/inference/tests/book/test_inference_understand_sentiment.cc new file mode 100644 index 00000000000..1afb6444465 --- /dev/null +++ b/paddle/inference/tests/book/test_inference_understand_sentiment.cc @@ -0,0 +1,60 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "gflags/gflags.h" +#include "test_helper.h" + +DEFINE_string(dirname, "", "Directory of the inference model."); + +TEST(inference, understand_sentiment) { + if (FLAGS_dirname.empty()) { + LOG(FATAL) << "Usage: ./example --dirname=path/to/your/model"; + } + + LOG(INFO) << "FLAGS_dirname: " << FLAGS_dirname << std::endl; + std::string dirname = FLAGS_dirname; + + // 0. Call `paddle::framework::InitDevices()` initialize all the devices + // In unittests, this is done in paddle/testing/paddle_gtest_main.cc + + paddle::framework::LoDTensor words; + paddle::framework::LoD lod{{0, 4, 10}}; + SetupLoDTensor(words, lod, static_cast(0), static_cast(10)); + + std::vector cpu_feeds; + cpu_feeds.push_back(&words); + + paddle::framework::LoDTensor output1; + std::vector cpu_fetchs1; + cpu_fetchs1.push_back(&output1); + + // Run inference on CPU + TestInference(dirname, cpu_feeds, cpu_fetchs1); + LOG(INFO) << output1.lod(); + LOG(INFO) << output1.dims(); + +#ifdef PADDLE_WITH_CUDA + paddle::framework::LoDTensor output2; + std::vector cpu_fetchs2; + cpu_fetchs2.push_back(&output2); + + // Run inference on CUDA GPU + TestInference(dirname, cpu_feeds, cpu_fetchs2); + LOG(INFO) << output2.lod(); + LOG(INFO) << output2.dims(); + + CheckError(output1, output2); +#endif +} diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py index 9c5cb667aed..6e0206d41db 100644 --- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py +++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py @@ -17,6 +17,7 @@ import paddle.v2.fluid as fluid import paddle.v2 as paddle import contextlib import math +import numpy as np import sys @@ -43,7 +44,7 @@ def convolution_net(data, label, input_dim, class_dim=2, emb_dim=32, adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002) adam_optimizer.minimize(avg_cost) accuracy = fluid.layers.accuracy(input=prediction, label=label) - return avg_cost, accuracy + return avg_cost, accuracy, prediction def stacked_lstm_net(data, @@ -81,13 +82,18 @@ def stacked_lstm_net(data, adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002) adam_optimizer.minimize(avg_cost) accuracy = fluid.layers.accuracy(input=prediction, label=label) - return avg_cost, accuracy + return avg_cost, accuracy, prediction -def main(word_dict, net_method, use_cuda): - if use_cuda and not fluid.core.is_compiled_with_cuda(): - return +def create_random_lodtensor(lod, place, low, high): + data = np.random.random_integers(low, high, [lod[-1], 1]).astype("int64") + res = fluid.LoDTensor() + res.set(data, place) + res.set_lod([lod]) + return res + +def train(word_dict, net_method, use_cuda, save_dirname=None): BATCH_SIZE = 128 PASS_NUM = 5 dict_dim = len(word_dict) @@ -96,7 +102,7 @@ def main(word_dict, net_method, use_cuda): data = fluid.layers.data( name="words", shape=[1], dtype="int64", lod_level=1) label = fluid.layers.data(name="label", shape=[1], dtype="int64") - cost, acc_out = net_method( + cost, acc_out, prediction = net_method( data, label, input_dim=dict_dim, class_dim=class_dim) train_data = paddle.batch( @@ -116,6 +122,9 @@ def main(word_dict, net_method, use_cuda): fetch_list=[cost, acc_out]) print("cost=" + str(cost_val) + " acc=" + str(acc_val)) if cost_val < 0.4 and acc_val > 0.8: + if save_dirname is not None: + fluid.io.save_inference_model(save_dirname, ["words"], + prediction, exe) return if math.isnan(float(cost_val)): sys.exit("got NaN loss, training failed.") @@ -123,6 +132,49 @@ def main(word_dict, net_method, use_cuda): net_method.__name__)) +def infer(use_cuda, save_dirname=None): + if save_dirname is None: + return + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + + lod = [0, 4, 10] + word_dict = paddle.dataset.imdb.word_dict() + tensor_words = create_random_lodtensor( + lod, place, low=0, high=len(word_dict) - 1) + + # Construct feed as a dictionary of {feed_target_name: feed_target_data} + # and results will contain a list of data corresponding to fetch_targets. + assert feed_target_names[0] == "words" + results = exe.run(inference_program, + feed={feed_target_names[0]: tensor_words}, + fetch_list=fetch_targets, + return_numpy=False) + print(results[0].lod()) + np_data = np.array(results[0]) + print("Inference Shape: ", np_data.shape) + print("Inference results: ", np_data) + + +def main(word_dict, net_method, use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + + # Directory for saving the trained model + save_dirname = "understand_sentiment.inference.model" + + train(word_dict, net_method, use_cuda, save_dirname) + infer(use_cuda, save_dirname) + + class TestUnderstandSentiment(unittest.TestCase): @classmethod def setUpClass(cls): -- GitLab From 1961470fff6df93383d1f4d7a990680ef480454c Mon Sep 17 00:00:00 2001 From: Siddharth Goyal Date: Fri, 9 Feb 2018 14:30:42 -0800 Subject: [PATCH 125/138] Add inference example and unit-test for word2vec chapter (#8206) * Add unit-test and example * Fix type error * Fix unit test cases * Fix init error for cudaplace * Change unit-test options --- paddle/inference/tests/book/CMakeLists.txt | 5 +- paddle/inference/tests/book/test_helper.h | 5 +- .../tests/book/test_inference_word2vec.cc | 68 ++++++++++++++++ .../v2/fluid/tests/book/test_word2vec.py | 81 +++++++++++++++++-- 4 files changed, 147 insertions(+), 12 deletions(-) create mode 100644 paddle/inference/tests/book/test_inference_word2vec.cc diff --git a/paddle/inference/tests/book/CMakeLists.txt b/paddle/inference/tests/book/CMakeLists.txt index ca3c056b097..9fe76afb582 100644 --- a/paddle/inference/tests/book/CMakeLists.txt +++ b/paddle/inference/tests/book/CMakeLists.txt @@ -25,9 +25,10 @@ function(inference_test TARGET_NAME) endfunction(inference_test) inference_test(fit_a_line) -inference_test(recognize_digits ARGS mlp) inference_test(image_classification ARGS vgg resnet) inference_test(label_semantic_roles) -inference_test(rnn_encoder_decoder) +inference_test(recognize_digits ARGS mlp) inference_test(recommender_system) +inference_test(rnn_encoder_decoder) inference_test(understand_sentiment) +inference_test(word2vec) diff --git a/paddle/inference/tests/book/test_helper.h b/paddle/inference/tests/book/test_helper.h index 22ce903c725..02104306e71 100644 --- a/paddle/inference/tests/book/test_helper.h +++ b/paddle/inference/tests/book/test_helper.h @@ -91,7 +91,7 @@ template void TestInference(const std::string& dirname, const std::vector& cpu_feeds, std::vector& cpu_fetchs) { - // 1. Define place, executor, scope and inference_program + // 1. Define place, executor, scope auto place = Place(); auto executor = paddle::framework::Executor(place); auto* scope = new paddle::framework::Scope(); @@ -101,7 +101,8 @@ void TestInference(const std::string& dirname, if (IsCombined) { // All parameters are saved in a single file. // Hard-coding the file names of program and parameters in unittest. - // Users are free to specify different filename. + // Users are free to specify different filename + // (provided: the filenames are changed in the python api as well: io.py) std::string prog_filename = "__model_combined__"; std::string param_filename = "__params_combined__"; inference_program = paddle::inference::Load(executor, diff --git a/paddle/inference/tests/book/test_inference_word2vec.cc b/paddle/inference/tests/book/test_inference_word2vec.cc new file mode 100644 index 00000000000..ca0c040ff62 --- /dev/null +++ b/paddle/inference/tests/book/test_inference_word2vec.cc @@ -0,0 +1,68 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "gflags/gflags.h" +#include "test_helper.h" + +DEFINE_string(dirname, "", "Directory of the inference model."); + +TEST(inference, word2vec) { + if (FLAGS_dirname.empty()) { + LOG(FATAL) << "Usage: ./example --dirname=path/to/your/model"; + } + + LOG(INFO) << "FLAGS_dirname: " << FLAGS_dirname << std::endl; + std::string dirname = FLAGS_dirname; + + // 0. Call `paddle::framework::InitDevices()` initialize all the devices + // In unittests, this is done in paddle/testing/paddle_gtest_main.cc + + paddle::framework::LoDTensor first_word, second_word, third_word, fourth_word; + paddle::framework::LoD lod{{0, 1}}; + int64_t dict_size = 2072; // Hard-coding the size of dictionary + + SetupLoDTensor(first_word, lod, static_cast(0), dict_size); + SetupLoDTensor(second_word, lod, static_cast(0), dict_size); + SetupLoDTensor(third_word, lod, static_cast(0), dict_size); + SetupLoDTensor(fourth_word, lod, static_cast(0), dict_size); + + std::vector cpu_feeds; + cpu_feeds.push_back(&first_word); + cpu_feeds.push_back(&second_word); + cpu_feeds.push_back(&third_word); + cpu_feeds.push_back(&fourth_word); + + paddle::framework::LoDTensor output1; + std::vector cpu_fetchs1; + cpu_fetchs1.push_back(&output1); + + // Run inference on CPU + TestInference(dirname, cpu_feeds, cpu_fetchs1); + LOG(INFO) << output1.lod(); + LOG(INFO) << output1.dims(); + +#ifdef PADDLE_WITH_CUDA + paddle::framework::LoDTensor output2; + std::vector cpu_fetchs2; + cpu_fetchs2.push_back(&output2); + + // Run inference on CUDA GPU + TestInference(dirname, cpu_feeds, cpu_fetchs2); + LOG(INFO) << output2.lod(); + LOG(INFO) << output2.dims(); + + CheckError(output1, output2); +#endif +} diff --git a/python/paddle/v2/fluid/tests/book/test_word2vec.py b/python/paddle/v2/fluid/tests/book/test_word2vec.py index f013d7f1551..69bfbcee69a 100644 --- a/python/paddle/v2/fluid/tests/book/test_word2vec.py +++ b/python/paddle/v2/fluid/tests/book/test_word2vec.py @@ -1,6 +1,5 @@ # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # @@ -16,14 +15,67 @@ import paddle.v2 as paddle import paddle.v2.fluid as fluid import unittest import os +import numpy as np import math import sys -def main(use_cuda, is_sparse, parallel): - if use_cuda and not fluid.core.is_compiled_with_cuda(): +def create_random_lodtensor(lod, place, low, high): + data = np.random.random_integers(low, high, [lod[-1], 1]).astype("int64") + res = fluid.LoDTensor() + res.set(data, place) + res.set_lod([lod]) + return res + + +def infer(use_cuda, save_dirname=None): + if save_dirname is None: return + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + + # Use fluid.io.load_inference_model to obtain the inference program desc, + # the feed_target_names (the names of variables that will be feeded + # data using feed operators), and the fetch_targets (variables that + # we want to obtain data from using fetch operators). + [inference_program, feed_target_names, + fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + + word_dict = paddle.dataset.imikolov.build_dict() + dict_size = len(word_dict) - 1 + + # Setup input, by creating 4 words, and setting up lod required for + # lookup_table_op + lod = [0, 1] + first_word = create_random_lodtensor(lod, place, low=0, high=dict_size) + second_word = create_random_lodtensor(lod, place, low=0, high=dict_size) + third_word = create_random_lodtensor(lod, place, low=0, high=dict_size) + fourth_word = create_random_lodtensor(lod, place, low=0, high=dict_size) + + assert feed_target_names[0] == 'firstw' + assert feed_target_names[1] == 'secondw' + assert feed_target_names[2] == 'thirdw' + assert feed_target_names[3] == 'forthw' + + # Construct feed as a dictionary of {feed_target_name: feed_target_data} + # and results will contain a list of data corresponding to fetch_targets. + results = exe.run(inference_program, + feed={ + feed_target_names[0]: first_word, + feed_target_names[1]: second_word, + feed_target_names[2]: third_word, + feed_target_names[3]: fourth_word + }, + fetch_list=fetch_targets, + return_numpy=False) + print(results[0].lod()) + np_data = np.array(results[0]) + print("Inference Shape: ", np_data.shape) + print("Inference results: ", np_data) + + +def train(use_cuda, is_sparse, parallel, save_dirname): PASS_NUM = 100 EMBED_SIZE = 32 HIDDEN_SIZE = 256 @@ -67,7 +119,7 @@ def main(use_cuda, is_sparse, parallel): act='softmax') cost = fluid.layers.cross_entropy(input=predict_word, label=words[4]) avg_cost = fluid.layers.mean(x=cost) - return avg_cost + return avg_cost, predict_word word_dict = paddle.dataset.imikolov.build_dict() dict_size = len(word_dict) @@ -79,13 +131,13 @@ def main(use_cuda, is_sparse, parallel): next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64') if not parallel: - avg_cost = __network__( + avg_cost, predict_word = __network__( [first_word, second_word, third_word, forth_word, next_word]) else: places = fluid.layers.get_places() pd = fluid.layers.ParallelDo(places) with pd.do(): - avg_cost = __network__( + avg_cost, predict_word = __network__( map(pd.read_input, [ first_word, second_word, third_word, forth_word, next_word ])) @@ -113,6 +165,10 @@ def main(use_cuda, is_sparse, parallel): feed=feeder.feed(data), fetch_list=[avg_cost]) if avg_cost_np[0] < 5.0: + if save_dirname is not None: + fluid.io.save_inference_model(save_dirname, [ + 'firstw', 'secondw', 'thirdw', 'forthw' + ], [predict_word], exe) return if math.isnan(float(avg_cost_np[0])): sys.exit("got NaN loss, training failed.") @@ -120,6 +176,14 @@ def main(use_cuda, is_sparse, parallel): raise AssertionError("Cost is too large {0:2.2}".format(avg_cost_np[0])) +def main(use_cuda, is_sparse, parallel): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + save_dirname = "word2vec.inference.model" + train(use_cuda, is_sparse, parallel, save_dirname) + infer(use_cuda, save_dirname) + + FULL_TEST = os.getenv('FULL_TEST', '0').lower() in ['true', '1', 't', 'y', 'yes', 'on'] SKIP_REASON = "Only run minimum number of tests in CI server, to make CI faster" @@ -142,7 +206,8 @@ def inject_test_method(use_cuda, is_sparse, parallel): with fluid.program_guard(prog, startup_prog): main(use_cuda=use_cuda, is_sparse=is_sparse, parallel=parallel) - if use_cuda and is_sparse and parallel: + # run only 2 cases: use_cuda is either True or False + if is_sparse == False and parallel == False: fn = __impl__ else: # skip the other test when on CI server -- GitLab From 90648f336d0a73630d0a862259a4f73ab3c9fe8c Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Fri, 9 Feb 2018 16:25:12 -0800 Subject: [PATCH 126/138] Move file to fluid/; Edit CMakeLists.txt --- paddle/CMakeLists.txt | 7 +------ paddle/fluid/CMakeLists.txt | 6 ++++++ paddle/{ => fluid}/framework/.clang-format | 0 paddle/{ => fluid}/framework/CMakeLists.txt | 0 paddle/{ => fluid}/framework/attribute.cc | 0 paddle/{ => fluid}/framework/attribute.h | 0 paddle/{ => fluid}/framework/backward.cc | 0 paddle/{ => fluid}/framework/backward.h | 0 paddle/{ => fluid}/framework/backward_test.cc | 0 paddle/{ => fluid}/framework/block_desc.cc | 0 paddle/{ => fluid}/framework/block_desc.h | 0 paddle/{ => fluid}/framework/channel.h | 0 paddle/{ => fluid}/framework/channel_test.cc | 0 .../{ => fluid}/framework/data_device_transform.cc | 0 .../{ => fluid}/framework/data_device_transform.h | 0 .../framework/data_device_transform_test.cu | 0 paddle/{ => fluid}/framework/data_layout.h | 0 .../{ => fluid}/framework/data_layout_transform.cc | 0 .../{ => fluid}/framework/data_layout_transform.h | 0 .../framework/data_layout_transform_test.cc | 0 paddle/{ => fluid}/framework/data_transform.cc | 0 paddle/{ => fluid}/framework/data_transform.h | 0 paddle/{ => fluid}/framework/data_type.h | 0 paddle/{ => fluid}/framework/data_type_transform.cc | 0 paddle/{ => fluid}/framework/data_type_transform.h | 0 .../framework/data_type_transform_test.cc | 0 paddle/{ => fluid}/framework/ddim.cc | 0 paddle/{ => fluid}/framework/ddim.h | 0 paddle/{ => fluid}/framework/ddim_test.cc | 0 .../framework/details/buffered_channel.h | 0 paddle/{ => fluid}/framework/details/cow_ptr.h | 0 .../{ => fluid}/framework/details/cow_ptr_test.cc | 0 paddle/{ => fluid}/framework/details/op_registry.h | 0 .../framework/details/unbuffered_channel.h | 0 paddle/{ => fluid}/framework/dim.h | 0 paddle/{ => fluid}/framework/dim_test.cu | 0 paddle/{ => fluid}/framework/eigen.h | 0 paddle/{ => fluid}/framework/eigen_test.cc | 0 paddle/{ => fluid}/framework/executor.cc | 0 paddle/{ => fluid}/framework/executor.h | 0 paddle/{ => fluid}/framework/feed_fetch_method.cc | 0 paddle/{ => fluid}/framework/feed_fetch_method.h | 0 paddle/{ => fluid}/framework/feed_fetch_type.h | 0 paddle/{ => fluid}/framework/framework.proto | 0 paddle/{ => fluid}/framework/grad_op_desc_maker.h | 0 paddle/{ => fluid}/framework/init.cc | 0 paddle/{ => fluid}/framework/init.h | 0 paddle/{ => fluid}/framework/init_test.cc | 0 paddle/{ => fluid}/framework/library_type.h | 0 paddle/{ => fluid}/framework/lod_rank_table.cc | 0 paddle/{ => fluid}/framework/lod_rank_table.h | 0 paddle/{ => fluid}/framework/lod_tensor.cc | 0 paddle/{ => fluid}/framework/lod_tensor.h | 0 paddle/{ => fluid}/framework/lod_tensor.md | 0 paddle/{ => fluid}/framework/lod_tensor_array.h | 0 paddle/{ => fluid}/framework/lod_tensor_test.cc | 0 paddle/{ => fluid}/framework/lod_tensor_test.cu | 0 paddle/{ => fluid}/framework/mixed_vector.h | 0 paddle/{ => fluid}/framework/mixed_vector_test.cu | 0 paddle/{ => fluid}/framework/op_desc.cc | 0 paddle/{ => fluid}/framework/op_desc.h | 0 paddle/{ => fluid}/framework/op_info.cc | 0 paddle/{ => fluid}/framework/op_info.h | 0 paddle/{ => fluid}/framework/op_kernel_type.h | 0 paddle/{ => fluid}/framework/op_kernel_type_test.cc | 0 paddle/{ => fluid}/framework/op_proto_maker.cc | 0 paddle/{ => fluid}/framework/op_proto_maker.h | 0 paddle/{ => fluid}/framework/op_proto_maker_test.cc | 0 paddle/{ => fluid}/framework/op_registry.cc | 0 paddle/{ => fluid}/framework/op_registry.h | 0 paddle/{ => fluid}/framework/op_registry_test.cc | 0 paddle/{ => fluid}/framework/operator.cc | 0 paddle/{ => fluid}/framework/operator.h | 0 paddle/{ => fluid}/framework/operator_test.cc | 0 paddle/{ => fluid}/framework/program_desc.cc | 0 paddle/{ => fluid}/framework/program_desc.h | 0 paddle/{ => fluid}/framework/program_desc_test.cc | 0 paddle/{ => fluid}/framework/proto_desc.h | 0 paddle/{ => fluid}/framework/prune.cc | 0 paddle/{ => fluid}/framework/prune.h | 0 paddle/{ => fluid}/framework/prune_test.cc | 0 paddle/{ => fluid}/framework/reader.cc | 0 paddle/{ => fluid}/framework/reader.h | 0 paddle/{ => fluid}/framework/scope.cc | 0 paddle/{ => fluid}/framework/scope.h | 0 paddle/{ => fluid}/framework/scope_test.cc | 0 paddle/{ => fluid}/framework/selected_rows.cc | 0 paddle/{ => fluid}/framework/selected_rows.h | 0 paddle/{ => fluid}/framework/selected_rows_test.cc | 0 paddle/{ => fluid}/framework/shape_inference.cc | 0 paddle/{ => fluid}/framework/shape_inference.h | 0 paddle/{ => fluid}/framework/tensor.cc | 0 paddle/{ => fluid}/framework/tensor.h | 0 paddle/{ => fluid}/framework/tensor.md | 0 paddle/{ => fluid}/framework/tensor_impl.h | 0 paddle/{ => fluid}/framework/tensor_test.cc | 0 paddle/{ => fluid}/framework/tensor_util.cc | 0 paddle/{ => fluid}/framework/tensor_util.cu | 0 paddle/{ => fluid}/framework/tensor_util.h | 0 paddle/{ => fluid}/framework/tensor_util_test.cc | 0 paddle/{ => fluid}/framework/tensor_util_test.cu | 0 paddle/{ => fluid}/framework/threadpool.cc | 0 paddle/{ => fluid}/framework/threadpool.h | 0 paddle/{ => fluid}/framework/threadpool_test.cc | 0 paddle/{ => fluid}/framework/type_defs.h | 0 paddle/{ => fluid}/framework/var_desc.cc | 0 paddle/{ => fluid}/framework/var_desc.h | 0 paddle/{ => fluid}/framework/var_type.h | 0 paddle/{ => fluid}/framework/var_type_inference.h | 0 .../framework/var_type_inference_test.cc | 0 paddle/{ => fluid}/framework/variable.h | 0 paddle/{ => fluid}/framework/variable.md | 0 paddle/{ => fluid}/framework/variable_test.cc | 0 paddle/{ => fluid}/inference/CMakeLists.txt | 0 paddle/{ => fluid}/inference/io.cc | 0 paddle/{ => fluid}/inference/io.h | 0 .../{ => fluid}/inference/tests/book/CMakeLists.txt | 0 .../{ => fluid}/inference/tests/book/test_helper.h | 0 .../tests/book/test_inference_fit_a_line.cc | 0 .../book/test_inference_image_classification.cc | 0 .../book/test_inference_label_semantic_roles.cc | 0 .../tests/book/test_inference_recognize_digits.cc | 0 .../tests/book/test_inference_recommender_system.cc | 0 .../book/test_inference_rnn_encoder_decoder.cc | 0 .../book/test_inference_understand_sentiment.cc | 0 .../inference/tests/book/test_inference_word2vec.cc | 0 paddle/{ => fluid}/memory/.clang-format | 0 paddle/{ => fluid}/memory/CMakeLists.txt | 0 paddle/{ => fluid}/memory/README.md | 0 paddle/{ => fluid}/memory/detail/CMakeLists.txt | 0 paddle/{ => fluid}/memory/detail/buddy_allocator.cc | 0 paddle/{ => fluid}/memory/detail/buddy_allocator.h | 0 paddle/{ => fluid}/memory/detail/memory_block.cc | 0 paddle/{ => fluid}/memory/detail/memory_block.h | 0 paddle/{ => fluid}/memory/detail/meta_cache.cc | 0 paddle/{ => fluid}/memory/detail/meta_cache.h | 0 paddle/{ => fluid}/memory/detail/meta_data.cc | 0 paddle/{ => fluid}/memory/detail/meta_data.h | 0 .../{ => fluid}/memory/detail/system_allocator.cc | 0 paddle/{ => fluid}/memory/detail/system_allocator.h | 0 .../memory/detail/system_allocator_test.cc | 0 paddle/{ => fluid}/memory/memcpy.cc | 0 paddle/{ => fluid}/memory/memcpy.h | 0 paddle/{ => fluid}/memory/memory.cc | 0 paddle/{ => fluid}/memory/memory.h | 0 paddle/{ => fluid}/memory/memory_test.cc | 0 paddle/{ => fluid}/operators/.clang-format | 0 paddle/{ => fluid}/operators/CMakeLists.txt | 0 paddle/{ => fluid}/operators/accuracy_op.cc | 0 paddle/{ => fluid}/operators/accuracy_op.cu | 0 paddle/{ => fluid}/operators/accuracy_op.h | 0 paddle/{ => fluid}/operators/activation_op.cc | 0 paddle/{ => fluid}/operators/activation_op.cu | 0 paddle/{ => fluid}/operators/activation_op.h | 0 paddle/{ => fluid}/operators/adadelta_op.cc | 0 paddle/{ => fluid}/operators/adadelta_op.cu | 0 paddle/{ => fluid}/operators/adadelta_op.h | 0 paddle/{ => fluid}/operators/adagrad_op.cc | 0 paddle/{ => fluid}/operators/adagrad_op.cu | 0 paddle/{ => fluid}/operators/adagrad_op.h | 0 paddle/{ => fluid}/operators/adam_op.cc | 0 paddle/{ => fluid}/operators/adam_op.cu | 0 paddle/{ => fluid}/operators/adam_op.h | 0 paddle/{ => fluid}/operators/adamax_op.cc | 0 paddle/{ => fluid}/operators/adamax_op.cu | 0 paddle/{ => fluid}/operators/adamax_op.h | 0 paddle/{ => fluid}/operators/array_operator.h | 0 .../{ => fluid}/operators/array_to_lod_tensor_op.cc | 0 paddle/{ => fluid}/operators/assign_op.cc | 0 paddle/{ => fluid}/operators/assign_value_op.cc | 0 paddle/{ => fluid}/operators/assign_value_op.cu.cc | 0 paddle/{ => fluid}/operators/assign_value_op.h | 0 paddle/{ => fluid}/operators/auc_op.cc | 0 paddle/{ => fluid}/operators/auc_op.h | 0 paddle/{ => fluid}/operators/batch_norm_op.cc | 0 paddle/{ => fluid}/operators/batch_norm_op.cu.cc | 0 paddle/{ => fluid}/operators/batch_norm_op.h | 0 .../{ => fluid}/operators/beam_search_decode_op.cc | 0 .../{ => fluid}/operators/beam_search_decode_op.h | 0 .../operators/beam_search_decode_op_test.cc | 0 paddle/{ => fluid}/operators/beam_search_op.cc | 0 paddle/{ => fluid}/operators/beam_search_op.h | 0 paddle/{ => fluid}/operators/beam_search_op_test.cc | 0 .../operators/bilinear_tensor_product_op.cc | 0 .../operators/bilinear_tensor_product_op.cu | 0 .../operators/bilinear_tensor_product_op.h | 0 paddle/{ => fluid}/operators/bipartite_match_op.cc | 0 paddle/{ => fluid}/operators/box_coder_op.cc | 0 paddle/{ => fluid}/operators/box_coder_op.cu | 0 paddle/{ => fluid}/operators/box_coder_op.h | 0 paddle/{ => fluid}/operators/cast_op.cc | 0 paddle/{ => fluid}/operators/cast_op.cu | 0 paddle/{ => fluid}/operators/cast_op.h | 0 paddle/{ => fluid}/operators/chunk_eval_op.cc | 0 paddle/{ => fluid}/operators/chunk_eval_op.h | 0 paddle/{ => fluid}/operators/clip_by_norm_op.cc | 0 paddle/{ => fluid}/operators/clip_by_norm_op.cu | 0 paddle/{ => fluid}/operators/clip_by_norm_op.h | 0 paddle/{ => fluid}/operators/clip_op.cc | 0 paddle/{ => fluid}/operators/clip_op.cu | 0 paddle/{ => fluid}/operators/clip_op.h | 0 paddle/{ => fluid}/operators/compare_op.cc | 0 paddle/{ => fluid}/operators/compare_op.cu | 0 paddle/{ => fluid}/operators/compare_op.h | 0 paddle/{ => fluid}/operators/concat_op.cc | 0 paddle/{ => fluid}/operators/concat_op.cu.cc | 0 paddle/{ => fluid}/operators/concat_op.h | 0 paddle/{ => fluid}/operators/cond_op.cc | 0 paddle/{ => fluid}/operators/cond_op.h | 0 .../{ => fluid}/operators/conditional_block_op.cc | 0 paddle/{ => fluid}/operators/conv_cudnn_op.cu.cc | 0 paddle/{ => fluid}/operators/conv_op.cc | 0 paddle/{ => fluid}/operators/conv_op.cu.cc | 0 paddle/{ => fluid}/operators/conv_op.h | 0 paddle/{ => fluid}/operators/conv_shift_op.cc | 0 paddle/{ => fluid}/operators/conv_shift_op.cu | 0 paddle/{ => fluid}/operators/conv_shift_op.h | 0 .../operators/conv_transpose_cudnn_op.cu.cc | 0 paddle/{ => fluid}/operators/conv_transpose_op.cc | 0 .../{ => fluid}/operators/conv_transpose_op.cu.cc | 0 paddle/{ => fluid}/operators/conv_transpose_op.h | 0 paddle/{ => fluid}/operators/cos_sim_op.cc | 0 paddle/{ => fluid}/operators/cos_sim_op.cu | 0 paddle/{ => fluid}/operators/cos_sim_op.h | 0 paddle/{ => fluid}/operators/create_reader_op.cc | 0 paddle/{ => fluid}/operators/crf_decoding_op.cc | 0 paddle/{ => fluid}/operators/crf_decoding_op.h | 0 paddle/{ => fluid}/operators/crop_op.cc | 0 paddle/{ => fluid}/operators/crop_op.cu | 0 paddle/{ => fluid}/operators/crop_op.h | 0 paddle/{ => fluid}/operators/cross_entropy_op.cc | 0 paddle/{ => fluid}/operators/cross_entropy_op.cu | 0 paddle/{ => fluid}/operators/cross_entropy_op.h | 0 paddle/{ => fluid}/operators/ctc_align_op.cc | 0 paddle/{ => fluid}/operators/ctc_align_op.cu | 0 paddle/{ => fluid}/operators/ctc_align_op.h | 0 paddle/{ => fluid}/operators/cum_op.h | 0 paddle/{ => fluid}/operators/cumsum_op.cc | 0 paddle/{ => fluid}/operators/cumsum_op.cu | 0 paddle/{ => fluid}/operators/decayed_adagrad_op.cc | 0 paddle/{ => fluid}/operators/decayed_adagrad_op.cu | 0 paddle/{ => fluid}/operators/decayed_adagrad_op.h | 0 paddle/{ => fluid}/operators/detail/CMakeLists.txt | 0 paddle/{ => fluid}/operators/detail/grpc_client.cc | 0 paddle/{ => fluid}/operators/detail/grpc_client.h | 0 paddle/{ => fluid}/operators/detail/grpc_server.cc | 0 paddle/{ => fluid}/operators/detail/grpc_server.h | 0 paddle/{ => fluid}/operators/detail/safe_ref.h | 0 paddle/{ => fluid}/operators/detail/send_recv.proto | 0 .../operators/detail/sendrecvop_utils.cc | 0 .../{ => fluid}/operators/detail/sendrecvop_utils.h | 0 .../operators/detail/simple_block_queue.h | 0 .../{ => fluid}/operators/detail/strided_memcpy.h | 0 paddle/{ => fluid}/operators/detection_output_op.cc | 0 .../{ => fluid}/operators/detection_output_op.cu.cc | 0 paddle/{ => fluid}/operators/detection_output_op.h | 0 paddle/{ => fluid}/operators/dropout_op.cc | 0 paddle/{ => fluid}/operators/dropout_op.cu | 0 paddle/{ => fluid}/operators/dropout_op.h | 0 paddle/{ => fluid}/operators/edit_distance_op.cc | 0 paddle/{ => fluid}/operators/edit_distance_op.cu | 0 paddle/{ => fluid}/operators/edit_distance_op.h | 0 paddle/{ => fluid}/operators/elementwise_add_op.cc | 0 paddle/{ => fluid}/operators/elementwise_add_op.cu | 0 paddle/{ => fluid}/operators/elementwise_add_op.h | 0 paddle/{ => fluid}/operators/elementwise_div_op.cc | 0 paddle/{ => fluid}/operators/elementwise_div_op.cu | 0 paddle/{ => fluid}/operators/elementwise_div_op.h | 0 paddle/{ => fluid}/operators/elementwise_max_op.cc | 0 paddle/{ => fluid}/operators/elementwise_max_op.cu | 0 paddle/{ => fluid}/operators/elementwise_max_op.h | 0 paddle/{ => fluid}/operators/elementwise_min_op.cc | 0 paddle/{ => fluid}/operators/elementwise_min_op.cu | 0 paddle/{ => fluid}/operators/elementwise_min_op.h | 0 paddle/{ => fluid}/operators/elementwise_mul_op.cc | 0 paddle/{ => fluid}/operators/elementwise_mul_op.cu | 0 paddle/{ => fluid}/operators/elementwise_mul_op.h | 0 paddle/{ => fluid}/operators/elementwise_op.h | 0 .../{ => fluid}/operators/elementwise_op_function.h | 0 paddle/{ => fluid}/operators/elementwise_pow_op.cc | 0 paddle/{ => fluid}/operators/elementwise_pow_op.cu | 0 paddle/{ => fluid}/operators/elementwise_pow_op.h | 0 paddle/{ => fluid}/operators/elementwise_sub_op.cc | 0 paddle/{ => fluid}/operators/elementwise_sub_op.cu | 0 paddle/{ => fluid}/operators/elementwise_sub_op.h | 0 paddle/{ => fluid}/operators/expand_op.cc | 0 paddle/{ => fluid}/operators/expand_op.cu | 0 paddle/{ => fluid}/operators/expand_op.h | 0 paddle/{ => fluid}/operators/feed_op.cc | 0 paddle/{ => fluid}/operators/fetch_op.cc | 0 .../operators/fill_constant_batch_size_like_op.cc | 0 .../fill_constant_batch_size_like_op.cu.cc | 0 .../operators/fill_constant_batch_size_like_op.h | 0 paddle/{ => fluid}/operators/fill_constant_op.cc | 0 paddle/{ => fluid}/operators/fill_op.cc | 0 paddle/{ => fluid}/operators/fill_zeros_like_op.cc | 0 .../{ => fluid}/operators/fill_zeros_like_op.cu.cc | 0 paddle/{ => fluid}/operators/fill_zeros_like_op.h | 0 paddle/{ => fluid}/operators/ftrl_op.cc | 0 paddle/{ => fluid}/operators/ftrl_op.cu | 0 paddle/{ => fluid}/operators/ftrl_op.h | 0 paddle/{ => fluid}/operators/gather.cu.h | 0 paddle/{ => fluid}/operators/gather.h | 0 paddle/{ => fluid}/operators/gather_op.cc | 0 paddle/{ => fluid}/operators/gather_op.cu | 0 paddle/{ => fluid}/operators/gather_op.h | 0 paddle/{ => fluid}/operators/gather_test.cc | 0 paddle/{ => fluid}/operators/gaussian_random_op.cc | 0 paddle/{ => fluid}/operators/gaussian_random_op.cu | 0 paddle/{ => fluid}/operators/get_places_op.cc | 0 paddle/{ => fluid}/operators/gru_op.cc | 0 paddle/{ => fluid}/operators/gru_op.cu.cc | 0 paddle/{ => fluid}/operators/gru_op.h | 0 paddle/{ => fluid}/operators/gru_unit_op.cc | 0 paddle/{ => fluid}/operators/gru_unit_op.cu | 0 paddle/{ => fluid}/operators/gru_unit_op.h | 0 paddle/{ => fluid}/operators/hinge_loss_op.cc | 0 paddle/{ => fluid}/operators/hinge_loss_op.cu | 0 paddle/{ => fluid}/operators/hinge_loss_op.h | 0 paddle/{ => fluid}/operators/huber_loss_op.cc | 0 paddle/{ => fluid}/operators/huber_loss_op.cu | 0 paddle/{ => fluid}/operators/huber_loss_op.h | 0 paddle/{ => fluid}/operators/im2sequence_op.cc | 0 paddle/{ => fluid}/operators/im2sequence_op.cu | 0 paddle/{ => fluid}/operators/im2sequence_op.h | 0 .../operators/images/batch_norm_fork.dot | 0 .../operators/images/batch_norm_fork.png | Bin .../operators/images/batch_norm_op_kernel.png | Bin paddle/{ => fluid}/operators/increment_op.cc | 0 paddle/{ => fluid}/operators/iou_similarity_op.cc | 0 paddle/{ => fluid}/operators/iou_similarity_op.cu | 0 paddle/{ => fluid}/operators/iou_similarity_op.h | 0 paddle/{ => fluid}/operators/is_empty_op.cc | 0 paddle/{ => fluid}/operators/l1_norm_op.cc | 0 paddle/{ => fluid}/operators/l1_norm_op.cu | 0 paddle/{ => fluid}/operators/l1_norm_op.h | 0 paddle/{ => fluid}/operators/label_smooth_op.cc | 0 paddle/{ => fluid}/operators/label_smooth_op.cu | 0 paddle/{ => fluid}/operators/label_smooth_op.h | 0 paddle/{ => fluid}/operators/layer_norm_op.cc | 0 paddle/{ => fluid}/operators/layer_norm_op.cu | 0 paddle/{ => fluid}/operators/layer_norm_op.h | 0 paddle/{ => fluid}/operators/linear_chain_crf_op.cc | 0 paddle/{ => fluid}/operators/linear_chain_crf_op.cu | 0 paddle/{ => fluid}/operators/linear_chain_crf_op.h | 0 paddle/{ => fluid}/operators/listen_and_serv_op.cc | 0 paddle/{ => fluid}/operators/load_combine_op.cc | 0 paddle/{ => fluid}/operators/load_op.cc | 0 paddle/{ => fluid}/operators/lod_array_length_op.cc | 0 paddle/{ => fluid}/operators/lod_rank_table_op.cc | 0 paddle/{ => fluid}/operators/lod_reset_op.cc | 0 paddle/{ => fluid}/operators/lod_reset_op.cu | 0 paddle/{ => fluid}/operators/lod_reset_op.h | 0 .../{ => fluid}/operators/lod_tensor_to_array_op.cc | 0 paddle/{ => fluid}/operators/log_loss_op.cc | 0 paddle/{ => fluid}/operators/log_loss_op.cu | 0 paddle/{ => fluid}/operators/log_loss_op.h | 0 paddle/{ => fluid}/operators/logical_op.cc | 0 paddle/{ => fluid}/operators/logical_op.cu | 0 paddle/{ => fluid}/operators/logical_op.h | 0 paddle/{ => fluid}/operators/lookup_table_op.cc | 0 paddle/{ => fluid}/operators/lookup_table_op.cu | 0 paddle/{ => fluid}/operators/lookup_table_op.h | 0 paddle/{ => fluid}/operators/lrn_op.cc | 0 paddle/{ => fluid}/operators/lrn_op.cu | 0 paddle/{ => fluid}/operators/lrn_op.h | 0 paddle/{ => fluid}/operators/lstm_op.cc | 0 paddle/{ => fluid}/operators/lstm_op.cu.cc | 0 paddle/{ => fluid}/operators/lstm_op.h | 0 paddle/{ => fluid}/operators/lstm_unit_op.cc | 0 paddle/{ => fluid}/operators/lstm_unit_op.cu | 0 paddle/{ => fluid}/operators/lstm_unit_op.h | 0 paddle/{ => fluid}/operators/lstmp_op.cc | 0 paddle/{ => fluid}/operators/lstmp_op.cu | 0 paddle/{ => fluid}/operators/lstmp_op.h | 0 paddle/{ => fluid}/operators/margin_rank_loss_op.cc | 0 paddle/{ => fluid}/operators/margin_rank_loss_op.cu | 0 paddle/{ => fluid}/operators/margin_rank_loss_op.h | 0 paddle/{ => fluid}/operators/math/CMakeLists.txt | 0 .../{ => fluid}/operators/math/context_project.cc | 0 .../{ => fluid}/operators/math/context_project.cu | 0 paddle/{ => fluid}/operators/math/context_project.h | 0 .../{ => fluid}/operators/math/cos_sim_functor.cc | 0 .../{ => fluid}/operators/math/cos_sim_functor.cu | 0 paddle/{ => fluid}/operators/math/cos_sim_functor.h | 0 paddle/{ => fluid}/operators/math/cross_entropy.cc | 0 paddle/{ => fluid}/operators/math/cross_entropy.cu | 0 paddle/{ => fluid}/operators/math/cross_entropy.h | 0 paddle/{ => fluid}/operators/math/depthwise_conv.cu | 0 paddle/{ => fluid}/operators/math/depthwise_conv.h | 0 .../operators/math/detail/CMakeLists.txt | 0 .../operators/math/detail/activation_functions.h | 0 .../operators/math/detail/avx_functions.cc | 0 .../operators/math/detail/gru_cpu_kernel.h | 0 .../operators/math/detail/gru_gpu_kernel.h | 0 .../{ => fluid}/operators/math/detail/gru_kernel.h | 0 .../operators/math/detail/lstm_cpu_kernel.h | 0 .../operators/math/detail/lstm_gpu_kernel.h | 0 .../{ => fluid}/operators/math/detail/lstm_kernel.h | 0 paddle/{ => fluid}/operators/math/detection_util.h | 0 paddle/{ => fluid}/operators/math/gru_compute.cc | 0 paddle/{ => fluid}/operators/math/gru_compute.cu | 0 paddle/{ => fluid}/operators/math/gru_compute.h | 0 paddle/{ => fluid}/operators/math/im2col.cc | 0 paddle/{ => fluid}/operators/math/im2col.cu | 0 paddle/{ => fluid}/operators/math/im2col.h | 0 paddle/{ => fluid}/operators/math/im2col_test.cc | 0 paddle/{ => fluid}/operators/math/lstm_compute.cc | 0 paddle/{ => fluid}/operators/math/lstm_compute.cu | 0 paddle/{ => fluid}/operators/math/lstm_compute.h | 0 paddle/{ => fluid}/operators/math/math_function.cc | 0 paddle/{ => fluid}/operators/math/math_function.cu | 0 paddle/{ => fluid}/operators/math/math_function.h | 0 .../{ => fluid}/operators/math/math_function_impl.h | 0 .../operators/math/math_function_test.cc | 0 .../operators/math/math_function_test.cu | 0 paddle/{ => fluid}/operators/math/matmul.h | 0 paddle/{ => fluid}/operators/math/maxouting.cc | 0 paddle/{ => fluid}/operators/math/maxouting.cu | 0 paddle/{ => fluid}/operators/math/maxouting.h | 0 paddle/{ => fluid}/operators/math/pooling.cc | 0 paddle/{ => fluid}/operators/math/pooling.cu | 0 paddle/{ => fluid}/operators/math/pooling.h | 0 paddle/{ => fluid}/operators/math/sampler.cc | 0 paddle/{ => fluid}/operators/math/sampler.h | 0 .../operators/math/selected_rows_functor.cc | 0 .../operators/math/selected_rows_functor.cu | 0 .../operators/math/selected_rows_functor.h | 0 .../operators/math/selected_rows_functor_test.cc | 0 .../operators/math/selected_rows_functor_test.cu | 0 paddle/{ => fluid}/operators/math/sequence2batch.cc | 0 paddle/{ => fluid}/operators/math/sequence2batch.cu | 0 paddle/{ => fluid}/operators/math/sequence2batch.h | 0 .../{ => fluid}/operators/math/sequence_padding.cc | 0 .../{ => fluid}/operators/math/sequence_padding.cu | 0 .../{ => fluid}/operators/math/sequence_padding.h | 0 .../operators/math/sequence_padding_test.cc | 0 .../{ => fluid}/operators/math/sequence_pooling.cc | 0 .../{ => fluid}/operators/math/sequence_pooling.cu | 0 .../{ => fluid}/operators/math/sequence_pooling.h | 0 paddle/{ => fluid}/operators/math/sequence_scale.cc | 0 paddle/{ => fluid}/operators/math/sequence_scale.cu | 0 paddle/{ => fluid}/operators/math/sequence_scale.h | 0 paddle/{ => fluid}/operators/math/softmax.cc | 0 paddle/{ => fluid}/operators/math/softmax.cu | 0 paddle/{ => fluid}/operators/math/softmax.h | 0 paddle/{ => fluid}/operators/math/softmax_impl.h | 0 paddle/{ => fluid}/operators/math/unpooling.cc | 0 paddle/{ => fluid}/operators/math/unpooling.cu | 0 paddle/{ => fluid}/operators/math/unpooling.h | 0 paddle/{ => fluid}/operators/math/vol2col.cc | 0 paddle/{ => fluid}/operators/math/vol2col.cu | 0 paddle/{ => fluid}/operators/math/vol2col.h | 0 paddle/{ => fluid}/operators/math/vol2col_test.cc | 0 paddle/{ => fluid}/operators/matmul_op.cc | 0 paddle/{ => fluid}/operators/matmul_op.cu.cc | 0 paddle/{ => fluid}/operators/matmul_op.h | 0 paddle/{ => fluid}/operators/max_sequence_len_op.cc | 0 paddle/{ => fluid}/operators/maxout_op.cc | 0 paddle/{ => fluid}/operators/maxout_op.cu.cc | 0 paddle/{ => fluid}/operators/maxout_op.h | 0 paddle/{ => fluid}/operators/mean_op.cc | 0 paddle/{ => fluid}/operators/mean_op.cu | 0 paddle/{ => fluid}/operators/mean_op.h | 0 paddle/{ => fluid}/operators/merge_lod_tensor_op.cc | 0 .../{ => fluid}/operators/mine_hard_examples_op.cc | 0 paddle/{ => fluid}/operators/minus_op.cc | 0 paddle/{ => fluid}/operators/minus_op.cu | 0 paddle/{ => fluid}/operators/minus_op.h | 0 .../{ => fluid}/operators/modified_huber_loss_op.cc | 0 .../{ => fluid}/operators/modified_huber_loss_op.cu | 0 .../{ => fluid}/operators/modified_huber_loss_op.h | 0 paddle/{ => fluid}/operators/momentum_op.cc | 0 paddle/{ => fluid}/operators/momentum_op.cu | 0 paddle/{ => fluid}/operators/momentum_op.h | 0 paddle/{ => fluid}/operators/mul_op.cc | 0 paddle/{ => fluid}/operators/mul_op.cu.cc | 0 paddle/{ => fluid}/operators/mul_op.h | 0 paddle/{ => fluid}/operators/multiclass_nms_op.cc | 0 paddle/{ => fluid}/operators/multiplex_op.cc | 0 paddle/{ => fluid}/operators/multiplex_op.cu | 0 paddle/{ => fluid}/operators/multiplex_op.h | 0 paddle/{ => fluid}/operators/nccl/CMakeLists.txt | 0 .../{ => fluid}/operators/nccl/nccl_gpu_common.cc | 0 paddle/{ => fluid}/operators/nccl/nccl_gpu_common.h | 0 paddle/{ => fluid}/operators/nccl_op.cc | 0 paddle/{ => fluid}/operators/nccl_op.cu.cc | 0 paddle/{ => fluid}/operators/nccl_op_test.cu.cc | 0 paddle/{ => fluid}/operators/nce_op.cc | 0 paddle/{ => fluid}/operators/nce_op.h | 0 paddle/{ => fluid}/operators/net_op.cc | 0 paddle/{ => fluid}/operators/net_op.h | 0 paddle/{ => fluid}/operators/net_op_test.cc | 0 paddle/{ => fluid}/operators/norm_op.cc | 0 paddle/{ => fluid}/operators/norm_op.cu | 0 paddle/{ => fluid}/operators/norm_op.h | 0 paddle/{ => fluid}/operators/one_hot_op.cc | 0 paddle/{ => fluid}/operators/one_hot_op.cu | 0 paddle/{ => fluid}/operators/one_hot_op.h | 0 .../operators/op_documentation/batch_norm_op.md | 0 .../operators/op_documentation/name_convention.md | 0 .../operators/op_documentation/net_op_design.md | 0 .../op_documentation/op_markdown_format.md | 0 .../operators/op_documentation/rnn_design.md | 0 paddle/{ => fluid}/operators/pad_op.cc | 0 paddle/{ => fluid}/operators/pad_op.cu | 0 paddle/{ => fluid}/operators/pad_op.h | 0 paddle/{ => fluid}/operators/parallel_do_op.cc | 0 paddle/{ => fluid}/operators/pool_cudnn_op.cu.cc | 0 paddle/{ => fluid}/operators/pool_op.cc | 0 paddle/{ => fluid}/operators/pool_op.cu.cc | 0 paddle/{ => fluid}/operators/pool_op.h | 0 paddle/{ => fluid}/operators/pool_with_index_op.cc | 0 .../{ => fluid}/operators/pool_with_index_op.cu.cc | 0 paddle/{ => fluid}/operators/pool_with_index_op.h | 0 .../operators/positive_negative_pair_op.cc | 0 .../operators/positive_negative_pair_op.h | 0 paddle/{ => fluid}/operators/precision_recall_op.cc | 0 paddle/{ => fluid}/operators/precision_recall_op.h | 0 paddle/{ => fluid}/operators/prelu_op.cc | 0 paddle/{ => fluid}/operators/prelu_op.cu | 0 paddle/{ => fluid}/operators/prelu_op.h | 0 paddle/{ => fluid}/operators/print_op.cc | 0 paddle/{ => fluid}/operators/prior_box_op.cc | 0 paddle/{ => fluid}/operators/prior_box_op.h | 0 paddle/{ => fluid}/operators/proximal_adagrad_op.cc | 0 paddle/{ => fluid}/operators/proximal_adagrad_op.cu | 0 paddle/{ => fluid}/operators/proximal_adagrad_op.h | 0 paddle/{ => fluid}/operators/proximal_gd_op.cc | 0 paddle/{ => fluid}/operators/proximal_gd_op.cu | 0 paddle/{ => fluid}/operators/proximal_gd_op.h | 0 paddle/{ => fluid}/operators/rank_loss_op.cc | 0 paddle/{ => fluid}/operators/rank_loss_op.cu | 0 paddle/{ => fluid}/operators/rank_loss_op.h | 0 paddle/{ => fluid}/operators/read_op.cc | 0 paddle/{ => fluid}/operators/recurrent_op.cc | 0 paddle/{ => fluid}/operators/recv_op.cc | 0 paddle/{ => fluid}/operators/reduce_op.cc | 0 paddle/{ => fluid}/operators/reduce_op.cu | 0 paddle/{ => fluid}/operators/reduce_op.h | 0 .../operators/reorder_lod_tensor_by_rank_op.cc | 0 paddle/{ => fluid}/operators/reshape_op.cc | 0 paddle/{ => fluid}/operators/reshape_op.cu | 0 paddle/{ => fluid}/operators/reshape_op.h | 0 paddle/{ => fluid}/operators/rmsprop_op.cc | 0 paddle/{ => fluid}/operators/rmsprop_op.cu | 0 paddle/{ => fluid}/operators/rmsprop_op.h | 0 .../{ => fluid}/operators/rnn_memory_helper_op.cc | 0 paddle/{ => fluid}/operators/roi_pool_op.cc | 0 paddle/{ => fluid}/operators/roi_pool_op.cu | 0 paddle/{ => fluid}/operators/roi_pool_op.h | 0 paddle/{ => fluid}/operators/row_conv_op.cc | 0 paddle/{ => fluid}/operators/row_conv_op.cu | 0 paddle/{ => fluid}/operators/row_conv_op.h | 0 paddle/{ => fluid}/operators/save_combine_op.cc | 0 .../operators/save_load_combine_op_test.cc | 0 paddle/{ => fluid}/operators/save_load_op_test.cc | 0 paddle/{ => fluid}/operators/save_op.cc | 0 paddle/{ => fluid}/operators/scale_op.cc | 0 paddle/{ => fluid}/operators/scale_op.cu | 0 paddle/{ => fluid}/operators/scale_op.h | 0 paddle/{ => fluid}/operators/scatter.cu.h | 0 paddle/{ => fluid}/operators/scatter.h | 0 paddle/{ => fluid}/operators/scatter_op.cc | 0 paddle/{ => fluid}/operators/scatter_op.cu | 0 paddle/{ => fluid}/operators/scatter_op.h | 0 paddle/{ => fluid}/operators/scatter_test.cc | 0 paddle/{ => fluid}/operators/send_op.cc | 0 paddle/{ => fluid}/operators/send_recv_op_test.cc | 0 paddle/{ => fluid}/operators/sequence_concat_op.cc | 0 .../{ => fluid}/operators/sequence_concat_op.cu.cc | 0 paddle/{ => fluid}/operators/sequence_concat_op.h | 0 paddle/{ => fluid}/operators/sequence_conv_op.cc | 0 paddle/{ => fluid}/operators/sequence_conv_op.cu.cc | 0 paddle/{ => fluid}/operators/sequence_conv_op.h | 0 paddle/{ => fluid}/operators/sequence_erase_op.cc | 0 paddle/{ => fluid}/operators/sequence_erase_op.cu | 0 paddle/{ => fluid}/operators/sequence_erase_op.h | 0 paddle/{ => fluid}/operators/sequence_expand_op.cc | 0 paddle/{ => fluid}/operators/sequence_expand_op.cu | 0 paddle/{ => fluid}/operators/sequence_expand_op.h | 0 paddle/{ => fluid}/operators/sequence_pool_op.cc | 0 paddle/{ => fluid}/operators/sequence_pool_op.cu | 0 paddle/{ => fluid}/operators/sequence_pool_op.h | 0 paddle/{ => fluid}/operators/sequence_reshape_op.cc | 0 paddle/{ => fluid}/operators/sequence_reshape_op.cu | 0 paddle/{ => fluid}/operators/sequence_reshape_op.h | 0 paddle/{ => fluid}/operators/sequence_slice_op.cc | 0 paddle/{ => fluid}/operators/sequence_slice_op.cu | 0 paddle/{ => fluid}/operators/sequence_slice_op.h | 0 paddle/{ => fluid}/operators/sequence_softmax_op.cc | 0 .../{ => fluid}/operators/sequence_softmax_op.cu.cc | 0 paddle/{ => fluid}/operators/sequence_softmax_op.h | 0 paddle/{ => fluid}/operators/sgd_op.cc | 0 paddle/{ => fluid}/operators/sgd_op.cu | 0 paddle/{ => fluid}/operators/sgd_op.h | 0 .../{ => fluid}/operators/shrink_rnn_memory_op.cc | 0 .../sigmoid_cross_entropy_with_logits_op.cc | 0 .../sigmoid_cross_entropy_with_logits_op.cu | 0 .../sigmoid_cross_entropy_with_logits_op.h | 0 paddle/{ => fluid}/operators/sign_op.cc | 0 paddle/{ => fluid}/operators/sign_op.cu | 0 paddle/{ => fluid}/operators/sign_op.h | 0 paddle/{ => fluid}/operators/smooth_l1_loss_op.cc | 0 paddle/{ => fluid}/operators/smooth_l1_loss_op.cu | 0 paddle/{ => fluid}/operators/smooth_l1_loss_op.h | 0 paddle/{ => fluid}/operators/softmax_op.cc | 0 paddle/{ => fluid}/operators/softmax_op.cu.cc | 0 paddle/{ => fluid}/operators/softmax_op.h | 0 .../operators/softmax_with_cross_entropy_op.cc | 0 .../operators/softmax_with_cross_entropy_op.cu | 0 .../operators/softmax_with_cross_entropy_op.h | 0 paddle/{ => fluid}/operators/split_lod_tensor_op.cc | 0 paddle/{ => fluid}/operators/split_op.cc | 0 paddle/{ => fluid}/operators/split_op.cu.cc | 0 paddle/{ => fluid}/operators/split_op.h | 0 .../{ => fluid}/operators/split_selected_rows_op.cc | 0 .../{ => fluid}/operators/split_selected_rows_op.cu | 0 .../{ => fluid}/operators/split_selected_rows_op.h | 0 paddle/{ => fluid}/operators/spp_op.cc | 0 paddle/{ => fluid}/operators/spp_op.cu.cc | 0 paddle/{ => fluid}/operators/spp_op.h | 0 .../{ => fluid}/operators/squared_l2_distance_op.cc | 0 .../{ => fluid}/operators/squared_l2_distance_op.cu | 0 .../{ => fluid}/operators/squared_l2_distance_op.h | 0 paddle/{ => fluid}/operators/squared_l2_norm_op.cc | 0 paddle/{ => fluid}/operators/squared_l2_norm_op.cu | 0 paddle/{ => fluid}/operators/squared_l2_norm_op.h | 0 paddle/{ => fluid}/operators/strided_memcpy.h | 0 paddle/{ => fluid}/operators/strided_memcpy_test.cc | 0 paddle/{ => fluid}/operators/sum_op.cc | 0 paddle/{ => fluid}/operators/sum_op.cu | 0 paddle/{ => fluid}/operators/sum_op.h | 0 paddle/{ => fluid}/operators/target_assign_op.cc | 0 paddle/{ => fluid}/operators/target_assign_op.cu | 0 paddle/{ => fluid}/operators/target_assign_op.h | 0 .../operators/tensor_array_read_write_op.cc | 0 paddle/{ => fluid}/operators/top_k_op.cc | 0 paddle/{ => fluid}/operators/top_k_op.cu | 0 paddle/{ => fluid}/operators/top_k_op.h | 0 paddle/{ => fluid}/operators/transpose_op.cc | 0 paddle/{ => fluid}/operators/transpose_op.cu.cc | 0 paddle/{ => fluid}/operators/transpose_op.h | 0 paddle/{ => fluid}/operators/uniform_random_op.cc | 0 paddle/{ => fluid}/operators/uniform_random_op.cu | 0 paddle/{ => fluid}/operators/unpool_op.cc | 0 paddle/{ => fluid}/operators/unpool_op.cu.cc | 0 paddle/{ => fluid}/operators/unpool_op.h | 0 paddle/{ => fluid}/operators/warpctc_op.cc | 0 paddle/{ => fluid}/operators/warpctc_op.cu.cc | 0 paddle/{ => fluid}/operators/warpctc_op.h | 0 paddle/{ => fluid}/operators/while_op.cc | 0 paddle/{ => fluid}/platform/.clang-format | 0 paddle/{ => fluid}/platform/CMakeLists.txt | 0 paddle/{ => fluid}/platform/assert.h | 0 paddle/{ => fluid}/platform/call_once.h | 0 paddle/{ => fluid}/platform/cpu_info.cc | 0 paddle/{ => fluid}/platform/cpu_info.h | 0 paddle/{ => fluid}/platform/cpu_info_test.cc | 0 paddle/{ => fluid}/platform/cuda_helper.h | 0 paddle/{ => fluid}/platform/cuda_profiler.h | 0 paddle/{ => fluid}/platform/cudnn_helper.h | 0 paddle/{ => fluid}/platform/cudnn_helper_test.cc | 0 .../{ => fluid}/platform/details/device_ptr_cast.h | 0 paddle/{ => fluid}/platform/device_context.cc | 0 paddle/{ => fluid}/platform/device_context.h | 0 paddle/{ => fluid}/platform/device_context_test.cu | 0 paddle/{ => fluid}/platform/dynload/CMakeLists.txt | 0 paddle/{ => fluid}/platform/dynload/cublas.cc | 0 paddle/{ => fluid}/platform/dynload/cublas.h | 0 paddle/{ => fluid}/platform/dynload/cudnn.cc | 0 paddle/{ => fluid}/platform/dynload/cudnn.h | 0 paddle/{ => fluid}/platform/dynload/curand.cc | 0 paddle/{ => fluid}/platform/dynload/curand.h | 0 .../{ => fluid}/platform/dynload/dynamic_loader.cc | 0 .../{ => fluid}/platform/dynload/dynamic_loader.h | 0 paddle/{ => fluid}/platform/dynload/nccl.cc | 0 paddle/{ => fluid}/platform/dynload/nccl.h | 0 paddle/{ => fluid}/platform/dynload/warpctc.cc | 0 paddle/{ => fluid}/platform/dynload/warpctc.h | 0 paddle/{ => fluid}/platform/enforce.cc | 0 paddle/{ => fluid}/platform/enforce.h | 0 paddle/{ => fluid}/platform/enforce_test.cc | 0 paddle/{ => fluid}/platform/for_range.h | 0 paddle/{ => fluid}/platform/gpu_info.cc | 0 paddle/{ => fluid}/platform/gpu_info.h | 0 paddle/{ => fluid}/platform/hostdevice.h | 0 paddle/{ => fluid}/platform/macros.h | 0 paddle/{ => fluid}/platform/mkldnn_helper.h | 0 paddle/{ => fluid}/platform/nccl_test.cu | 0 paddle/{ => fluid}/platform/place.cc | 0 paddle/{ => fluid}/platform/place.h | 0 paddle/{ => fluid}/platform/place_test.cc | 0 paddle/{ => fluid}/platform/profiler.cc | 0 paddle/{ => fluid}/platform/profiler.h | 0 paddle/{ => fluid}/platform/profiler_test.cc | 0 paddle/{ => fluid}/platform/transform.h | 0 paddle/{ => fluid}/platform/transform_test.cu | 0 paddle/{ => fluid}/platform/variant.h | 0 paddle/{ => fluid}/pybind/.clang-format | 0 paddle/{ => fluid}/pybind/CMakeLists.txt | 0 paddle/{ => fluid}/pybind/const_value.cc | 0 paddle/{ => fluid}/pybind/const_value.h | 0 paddle/{ => fluid}/pybind/exception.cc | 0 paddle/{ => fluid}/pybind/exception.h | 0 paddle/{ => fluid}/pybind/protobuf.cc | 0 paddle/{ => fluid}/pybind/protobuf.h | 0 paddle/{ => fluid}/pybind/pybind.cc | 0 paddle/{ => fluid}/pybind/tensor_py.h | 0 709 files changed, 7 insertions(+), 6 deletions(-) create mode 100644 paddle/fluid/CMakeLists.txt rename paddle/{ => fluid}/framework/.clang-format (100%) rename paddle/{ => fluid}/framework/CMakeLists.txt (100%) rename paddle/{ => fluid}/framework/attribute.cc (100%) rename paddle/{ => fluid}/framework/attribute.h (100%) rename paddle/{ => fluid}/framework/backward.cc (100%) rename paddle/{ => fluid}/framework/backward.h (100%) rename paddle/{ => fluid}/framework/backward_test.cc (100%) rename paddle/{ => fluid}/framework/block_desc.cc (100%) rename paddle/{ => fluid}/framework/block_desc.h (100%) rename paddle/{ => fluid}/framework/channel.h (100%) rename paddle/{ => fluid}/framework/channel_test.cc (100%) rename paddle/{ => fluid}/framework/data_device_transform.cc (100%) rename paddle/{ => fluid}/framework/data_device_transform.h (100%) rename paddle/{ => fluid}/framework/data_device_transform_test.cu (100%) rename paddle/{ => fluid}/framework/data_layout.h (100%) rename paddle/{ => fluid}/framework/data_layout_transform.cc (100%) rename paddle/{ => fluid}/framework/data_layout_transform.h (100%) rename paddle/{ => fluid}/framework/data_layout_transform_test.cc (100%) rename paddle/{ => fluid}/framework/data_transform.cc (100%) rename paddle/{ => fluid}/framework/data_transform.h (100%) rename paddle/{ => fluid}/framework/data_type.h (100%) rename paddle/{ => fluid}/framework/data_type_transform.cc (100%) rename paddle/{ => fluid}/framework/data_type_transform.h (100%) rename paddle/{ => fluid}/framework/data_type_transform_test.cc (100%) rename paddle/{ => fluid}/framework/ddim.cc (100%) rename paddle/{ => fluid}/framework/ddim.h (100%) rename paddle/{ => fluid}/framework/ddim_test.cc (100%) rename paddle/{ => fluid}/framework/details/buffered_channel.h (100%) rename paddle/{ => fluid}/framework/details/cow_ptr.h (100%) rename paddle/{ => fluid}/framework/details/cow_ptr_test.cc (100%) rename paddle/{ => fluid}/framework/details/op_registry.h (100%) rename paddle/{ => fluid}/framework/details/unbuffered_channel.h (100%) rename paddle/{ => fluid}/framework/dim.h (100%) rename paddle/{ => fluid}/framework/dim_test.cu (100%) rename paddle/{ => fluid}/framework/eigen.h (100%) rename paddle/{ => fluid}/framework/eigen_test.cc (100%) rename paddle/{ => fluid}/framework/executor.cc (100%) rename paddle/{ => fluid}/framework/executor.h (100%) rename paddle/{ => fluid}/framework/feed_fetch_method.cc (100%) rename paddle/{ => fluid}/framework/feed_fetch_method.h (100%) rename paddle/{ => fluid}/framework/feed_fetch_type.h (100%) rename paddle/{ => fluid}/framework/framework.proto (100%) rename paddle/{ => fluid}/framework/grad_op_desc_maker.h (100%) rename paddle/{ => fluid}/framework/init.cc (100%) rename paddle/{ => fluid}/framework/init.h (100%) rename paddle/{ => fluid}/framework/init_test.cc (100%) rename paddle/{ => fluid}/framework/library_type.h (100%) rename paddle/{ => fluid}/framework/lod_rank_table.cc (100%) rename paddle/{ => fluid}/framework/lod_rank_table.h (100%) rename paddle/{ => fluid}/framework/lod_tensor.cc (100%) rename paddle/{ => fluid}/framework/lod_tensor.h (100%) rename paddle/{ => fluid}/framework/lod_tensor.md (100%) rename paddle/{ => fluid}/framework/lod_tensor_array.h (100%) rename paddle/{ => fluid}/framework/lod_tensor_test.cc (100%) rename paddle/{ => fluid}/framework/lod_tensor_test.cu (100%) rename paddle/{ => fluid}/framework/mixed_vector.h (100%) rename paddle/{ => fluid}/framework/mixed_vector_test.cu (100%) rename paddle/{ => fluid}/framework/op_desc.cc (100%) rename paddle/{ => fluid}/framework/op_desc.h (100%) rename paddle/{ => fluid}/framework/op_info.cc (100%) rename paddle/{ => fluid}/framework/op_info.h (100%) rename paddle/{ => fluid}/framework/op_kernel_type.h (100%) rename paddle/{ => fluid}/framework/op_kernel_type_test.cc (100%) rename paddle/{ => fluid}/framework/op_proto_maker.cc (100%) rename paddle/{ => fluid}/framework/op_proto_maker.h (100%) rename paddle/{ => fluid}/framework/op_proto_maker_test.cc (100%) rename paddle/{ => fluid}/framework/op_registry.cc (100%) rename paddle/{ => fluid}/framework/op_registry.h (100%) rename paddle/{ => fluid}/framework/op_registry_test.cc (100%) rename paddle/{ => fluid}/framework/operator.cc (100%) rename paddle/{ => fluid}/framework/operator.h (100%) rename paddle/{ => fluid}/framework/operator_test.cc (100%) rename paddle/{ => fluid}/framework/program_desc.cc (100%) rename paddle/{ => fluid}/framework/program_desc.h (100%) rename paddle/{ => fluid}/framework/program_desc_test.cc (100%) rename paddle/{ => fluid}/framework/proto_desc.h (100%) rename paddle/{ => fluid}/framework/prune.cc (100%) rename paddle/{ => fluid}/framework/prune.h (100%) rename paddle/{ => fluid}/framework/prune_test.cc (100%) rename paddle/{ => fluid}/framework/reader.cc (100%) rename paddle/{ => fluid}/framework/reader.h (100%) rename paddle/{ => fluid}/framework/scope.cc (100%) rename paddle/{ => fluid}/framework/scope.h (100%) rename paddle/{ => fluid}/framework/scope_test.cc (100%) rename paddle/{ => fluid}/framework/selected_rows.cc (100%) rename paddle/{ => fluid}/framework/selected_rows.h (100%) rename paddle/{ => fluid}/framework/selected_rows_test.cc (100%) rename paddle/{ => fluid}/framework/shape_inference.cc (100%) rename paddle/{ => fluid}/framework/shape_inference.h (100%) rename paddle/{ => fluid}/framework/tensor.cc (100%) rename paddle/{ => fluid}/framework/tensor.h (100%) rename paddle/{ => fluid}/framework/tensor.md (100%) rename paddle/{ => fluid}/framework/tensor_impl.h (100%) rename paddle/{ => fluid}/framework/tensor_test.cc (100%) rename paddle/{ => fluid}/framework/tensor_util.cc (100%) rename paddle/{ => fluid}/framework/tensor_util.cu (100%) rename paddle/{ => fluid}/framework/tensor_util.h (100%) rename paddle/{ => fluid}/framework/tensor_util_test.cc (100%) rename paddle/{ => fluid}/framework/tensor_util_test.cu (100%) rename paddle/{ => fluid}/framework/threadpool.cc (100%) rename paddle/{ => fluid}/framework/threadpool.h (100%) rename paddle/{ => fluid}/framework/threadpool_test.cc (100%) rename paddle/{ => fluid}/framework/type_defs.h (100%) rename paddle/{ => fluid}/framework/var_desc.cc (100%) rename paddle/{ => fluid}/framework/var_desc.h (100%) rename paddle/{ => fluid}/framework/var_type.h (100%) rename paddle/{ => fluid}/framework/var_type_inference.h (100%) rename paddle/{ => fluid}/framework/var_type_inference_test.cc (100%) rename paddle/{ => fluid}/framework/variable.h (100%) rename paddle/{ => fluid}/framework/variable.md (100%) rename paddle/{ => fluid}/framework/variable_test.cc (100%) rename paddle/{ => fluid}/inference/CMakeLists.txt (100%) rename paddle/{ => fluid}/inference/io.cc (100%) rename paddle/{ => fluid}/inference/io.h (100%) rename paddle/{ => fluid}/inference/tests/book/CMakeLists.txt (100%) rename paddle/{ => fluid}/inference/tests/book/test_helper.h (100%) rename paddle/{ => fluid}/inference/tests/book/test_inference_fit_a_line.cc (100%) rename paddle/{ => fluid}/inference/tests/book/test_inference_image_classification.cc (100%) rename paddle/{ => fluid}/inference/tests/book/test_inference_label_semantic_roles.cc (100%) rename paddle/{ => fluid}/inference/tests/book/test_inference_recognize_digits.cc (100%) rename paddle/{ => fluid}/inference/tests/book/test_inference_recommender_system.cc (100%) rename paddle/{ => fluid}/inference/tests/book/test_inference_rnn_encoder_decoder.cc (100%) rename paddle/{ => fluid}/inference/tests/book/test_inference_understand_sentiment.cc (100%) rename paddle/{ => fluid}/inference/tests/book/test_inference_word2vec.cc (100%) rename paddle/{ => fluid}/memory/.clang-format (100%) rename paddle/{ => fluid}/memory/CMakeLists.txt (100%) rename paddle/{ => fluid}/memory/README.md (100%) rename paddle/{ => fluid}/memory/detail/CMakeLists.txt (100%) rename paddle/{ => fluid}/memory/detail/buddy_allocator.cc (100%) rename paddle/{ => fluid}/memory/detail/buddy_allocator.h (100%) rename paddle/{ => fluid}/memory/detail/memory_block.cc (100%) rename paddle/{ => fluid}/memory/detail/memory_block.h (100%) rename paddle/{ => fluid}/memory/detail/meta_cache.cc (100%) rename paddle/{ => fluid}/memory/detail/meta_cache.h (100%) rename paddle/{ => fluid}/memory/detail/meta_data.cc (100%) rename paddle/{ => fluid}/memory/detail/meta_data.h (100%) rename paddle/{ => fluid}/memory/detail/system_allocator.cc (100%) rename paddle/{ => fluid}/memory/detail/system_allocator.h (100%) rename paddle/{ => fluid}/memory/detail/system_allocator_test.cc (100%) rename paddle/{ => fluid}/memory/memcpy.cc (100%) rename paddle/{ => fluid}/memory/memcpy.h (100%) rename paddle/{ => fluid}/memory/memory.cc (100%) rename paddle/{ => fluid}/memory/memory.h (100%) rename paddle/{ => fluid}/memory/memory_test.cc (100%) rename paddle/{ => fluid}/operators/.clang-format (100%) rename paddle/{ => fluid}/operators/CMakeLists.txt (100%) rename paddle/{ => fluid}/operators/accuracy_op.cc (100%) rename paddle/{ => fluid}/operators/accuracy_op.cu (100%) rename paddle/{ => fluid}/operators/accuracy_op.h (100%) rename paddle/{ => fluid}/operators/activation_op.cc (100%) rename paddle/{ => fluid}/operators/activation_op.cu (100%) rename paddle/{ => fluid}/operators/activation_op.h (100%) rename paddle/{ => fluid}/operators/adadelta_op.cc (100%) rename paddle/{ => fluid}/operators/adadelta_op.cu (100%) rename paddle/{ => fluid}/operators/adadelta_op.h (100%) rename paddle/{ => fluid}/operators/adagrad_op.cc (100%) rename paddle/{ => fluid}/operators/adagrad_op.cu (100%) rename paddle/{ => fluid}/operators/adagrad_op.h (100%) rename paddle/{ => fluid}/operators/adam_op.cc (100%) rename paddle/{ => fluid}/operators/adam_op.cu (100%) rename paddle/{ => fluid}/operators/adam_op.h (100%) rename paddle/{ => fluid}/operators/adamax_op.cc (100%) rename paddle/{ => fluid}/operators/adamax_op.cu (100%) rename paddle/{ => fluid}/operators/adamax_op.h (100%) rename paddle/{ => fluid}/operators/array_operator.h (100%) rename paddle/{ => fluid}/operators/array_to_lod_tensor_op.cc (100%) rename paddle/{ => fluid}/operators/assign_op.cc (100%) rename paddle/{ => fluid}/operators/assign_value_op.cc (100%) rename paddle/{ => fluid}/operators/assign_value_op.cu.cc (100%) rename paddle/{ => fluid}/operators/assign_value_op.h (100%) rename paddle/{ => fluid}/operators/auc_op.cc (100%) rename paddle/{ => fluid}/operators/auc_op.h (100%) rename paddle/{ => fluid}/operators/batch_norm_op.cc (100%) rename paddle/{ => fluid}/operators/batch_norm_op.cu.cc (100%) rename paddle/{ => fluid}/operators/batch_norm_op.h (100%) rename paddle/{ => fluid}/operators/beam_search_decode_op.cc (100%) rename paddle/{ => fluid}/operators/beam_search_decode_op.h (100%) rename paddle/{ => fluid}/operators/beam_search_decode_op_test.cc (100%) rename paddle/{ => fluid}/operators/beam_search_op.cc (100%) rename paddle/{ => fluid}/operators/beam_search_op.h (100%) rename paddle/{ => fluid}/operators/beam_search_op_test.cc (100%) rename paddle/{ => fluid}/operators/bilinear_tensor_product_op.cc (100%) rename paddle/{ => fluid}/operators/bilinear_tensor_product_op.cu (100%) rename paddle/{ => fluid}/operators/bilinear_tensor_product_op.h (100%) rename paddle/{ => fluid}/operators/bipartite_match_op.cc (100%) rename paddle/{ => fluid}/operators/box_coder_op.cc (100%) rename paddle/{ => fluid}/operators/box_coder_op.cu (100%) rename paddle/{ => fluid}/operators/box_coder_op.h (100%) rename paddle/{ => fluid}/operators/cast_op.cc (100%) rename paddle/{ => fluid}/operators/cast_op.cu (100%) rename paddle/{ => fluid}/operators/cast_op.h (100%) rename paddle/{ => fluid}/operators/chunk_eval_op.cc (100%) rename paddle/{ => fluid}/operators/chunk_eval_op.h (100%) rename paddle/{ => fluid}/operators/clip_by_norm_op.cc (100%) rename paddle/{ => fluid}/operators/clip_by_norm_op.cu (100%) rename paddle/{ => fluid}/operators/clip_by_norm_op.h (100%) rename paddle/{ => fluid}/operators/clip_op.cc (100%) rename paddle/{ => fluid}/operators/clip_op.cu (100%) rename paddle/{ => fluid}/operators/clip_op.h (100%) rename paddle/{ => fluid}/operators/compare_op.cc (100%) rename paddle/{ => fluid}/operators/compare_op.cu (100%) rename paddle/{ => fluid}/operators/compare_op.h (100%) rename paddle/{ => fluid}/operators/concat_op.cc (100%) rename paddle/{ => fluid}/operators/concat_op.cu.cc (100%) rename paddle/{ => fluid}/operators/concat_op.h (100%) rename paddle/{ => fluid}/operators/cond_op.cc (100%) rename paddle/{ => fluid}/operators/cond_op.h (100%) rename paddle/{ => fluid}/operators/conditional_block_op.cc (100%) rename paddle/{ => fluid}/operators/conv_cudnn_op.cu.cc (100%) rename paddle/{ => fluid}/operators/conv_op.cc (100%) rename paddle/{ => fluid}/operators/conv_op.cu.cc (100%) rename paddle/{ => fluid}/operators/conv_op.h (100%) rename paddle/{ => fluid}/operators/conv_shift_op.cc (100%) rename paddle/{ => fluid}/operators/conv_shift_op.cu (100%) rename paddle/{ => fluid}/operators/conv_shift_op.h (100%) rename paddle/{ => fluid}/operators/conv_transpose_cudnn_op.cu.cc (100%) rename paddle/{ => fluid}/operators/conv_transpose_op.cc (100%) rename paddle/{ => fluid}/operators/conv_transpose_op.cu.cc (100%) rename paddle/{ => fluid}/operators/conv_transpose_op.h (100%) rename paddle/{ => fluid}/operators/cos_sim_op.cc (100%) rename paddle/{ => fluid}/operators/cos_sim_op.cu (100%) rename paddle/{ => fluid}/operators/cos_sim_op.h (100%) rename paddle/{ => fluid}/operators/create_reader_op.cc (100%) rename paddle/{ => fluid}/operators/crf_decoding_op.cc (100%) rename paddle/{ => fluid}/operators/crf_decoding_op.h (100%) rename paddle/{ => fluid}/operators/crop_op.cc (100%) rename paddle/{ => fluid}/operators/crop_op.cu (100%) rename paddle/{ => fluid}/operators/crop_op.h (100%) rename paddle/{ => fluid}/operators/cross_entropy_op.cc (100%) rename paddle/{ => fluid}/operators/cross_entropy_op.cu (100%) rename paddle/{ => fluid}/operators/cross_entropy_op.h (100%) rename paddle/{ => fluid}/operators/ctc_align_op.cc (100%) rename paddle/{ => fluid}/operators/ctc_align_op.cu (100%) rename paddle/{ => fluid}/operators/ctc_align_op.h (100%) rename paddle/{ => fluid}/operators/cum_op.h (100%) rename paddle/{ => fluid}/operators/cumsum_op.cc (100%) rename paddle/{ => fluid}/operators/cumsum_op.cu (100%) rename paddle/{ => fluid}/operators/decayed_adagrad_op.cc (100%) rename paddle/{ => fluid}/operators/decayed_adagrad_op.cu (100%) rename paddle/{ => fluid}/operators/decayed_adagrad_op.h (100%) rename paddle/{ => fluid}/operators/detail/CMakeLists.txt (100%) rename paddle/{ => fluid}/operators/detail/grpc_client.cc (100%) rename paddle/{ => fluid}/operators/detail/grpc_client.h (100%) rename paddle/{ => fluid}/operators/detail/grpc_server.cc (100%) rename paddle/{ => fluid}/operators/detail/grpc_server.h (100%) rename paddle/{ => fluid}/operators/detail/safe_ref.h (100%) rename paddle/{ => fluid}/operators/detail/send_recv.proto (100%) rename paddle/{ => fluid}/operators/detail/sendrecvop_utils.cc (100%) rename paddle/{ => fluid}/operators/detail/sendrecvop_utils.h (100%) rename paddle/{ => fluid}/operators/detail/simple_block_queue.h (100%) rename paddle/{ => fluid}/operators/detail/strided_memcpy.h (100%) rename paddle/{ => fluid}/operators/detection_output_op.cc (100%) rename paddle/{ => fluid}/operators/detection_output_op.cu.cc (100%) rename paddle/{ => fluid}/operators/detection_output_op.h (100%) rename paddle/{ => fluid}/operators/dropout_op.cc (100%) rename paddle/{ => fluid}/operators/dropout_op.cu (100%) rename paddle/{ => fluid}/operators/dropout_op.h (100%) rename paddle/{ => fluid}/operators/edit_distance_op.cc (100%) rename paddle/{ => fluid}/operators/edit_distance_op.cu (100%) rename paddle/{ => fluid}/operators/edit_distance_op.h (100%) rename paddle/{ => fluid}/operators/elementwise_add_op.cc (100%) rename paddle/{ => fluid}/operators/elementwise_add_op.cu (100%) rename paddle/{ => fluid}/operators/elementwise_add_op.h (100%) rename paddle/{ => fluid}/operators/elementwise_div_op.cc (100%) rename paddle/{ => fluid}/operators/elementwise_div_op.cu (100%) rename paddle/{ => fluid}/operators/elementwise_div_op.h (100%) rename paddle/{ => fluid}/operators/elementwise_max_op.cc (100%) rename paddle/{ => fluid}/operators/elementwise_max_op.cu (100%) rename paddle/{ => fluid}/operators/elementwise_max_op.h (100%) rename paddle/{ => fluid}/operators/elementwise_min_op.cc (100%) rename paddle/{ => fluid}/operators/elementwise_min_op.cu (100%) rename paddle/{ => fluid}/operators/elementwise_min_op.h (100%) rename paddle/{ => fluid}/operators/elementwise_mul_op.cc (100%) rename paddle/{ => fluid}/operators/elementwise_mul_op.cu (100%) rename paddle/{ => fluid}/operators/elementwise_mul_op.h (100%) rename paddle/{ => fluid}/operators/elementwise_op.h (100%) rename paddle/{ => fluid}/operators/elementwise_op_function.h (100%) rename paddle/{ => fluid}/operators/elementwise_pow_op.cc (100%) rename paddle/{ => fluid}/operators/elementwise_pow_op.cu (100%) rename paddle/{ => fluid}/operators/elementwise_pow_op.h (100%) rename paddle/{ => fluid}/operators/elementwise_sub_op.cc (100%) rename paddle/{ => fluid}/operators/elementwise_sub_op.cu (100%) rename paddle/{ => fluid}/operators/elementwise_sub_op.h (100%) rename paddle/{ => fluid}/operators/expand_op.cc (100%) rename paddle/{ => fluid}/operators/expand_op.cu (100%) rename paddle/{ => fluid}/operators/expand_op.h (100%) rename paddle/{ => fluid}/operators/feed_op.cc (100%) rename paddle/{ => fluid}/operators/fetch_op.cc (100%) rename paddle/{ => fluid}/operators/fill_constant_batch_size_like_op.cc (100%) rename paddle/{ => fluid}/operators/fill_constant_batch_size_like_op.cu.cc (100%) rename paddle/{ => fluid}/operators/fill_constant_batch_size_like_op.h (100%) rename paddle/{ => fluid}/operators/fill_constant_op.cc (100%) rename paddle/{ => fluid}/operators/fill_op.cc (100%) rename paddle/{ => fluid}/operators/fill_zeros_like_op.cc (100%) rename paddle/{ => fluid}/operators/fill_zeros_like_op.cu.cc (100%) rename paddle/{ => fluid}/operators/fill_zeros_like_op.h (100%) rename paddle/{ => fluid}/operators/ftrl_op.cc (100%) rename paddle/{ => fluid}/operators/ftrl_op.cu (100%) rename paddle/{ => fluid}/operators/ftrl_op.h (100%) rename paddle/{ => fluid}/operators/gather.cu.h (100%) rename paddle/{ => fluid}/operators/gather.h (100%) rename paddle/{ => fluid}/operators/gather_op.cc (100%) rename paddle/{ => fluid}/operators/gather_op.cu (100%) rename paddle/{ => fluid}/operators/gather_op.h (100%) rename paddle/{ => fluid}/operators/gather_test.cc (100%) rename paddle/{ => fluid}/operators/gaussian_random_op.cc (100%) rename paddle/{ => fluid}/operators/gaussian_random_op.cu (100%) rename paddle/{ => fluid}/operators/get_places_op.cc (100%) rename paddle/{ => fluid}/operators/gru_op.cc (100%) rename paddle/{ => fluid}/operators/gru_op.cu.cc (100%) rename paddle/{ => fluid}/operators/gru_op.h (100%) rename paddle/{ => fluid}/operators/gru_unit_op.cc (100%) rename paddle/{ => fluid}/operators/gru_unit_op.cu (100%) rename paddle/{ => fluid}/operators/gru_unit_op.h (100%) rename paddle/{ => fluid}/operators/hinge_loss_op.cc (100%) rename paddle/{ => fluid}/operators/hinge_loss_op.cu (100%) rename paddle/{ => fluid}/operators/hinge_loss_op.h (100%) rename paddle/{ => fluid}/operators/huber_loss_op.cc (100%) rename paddle/{ => fluid}/operators/huber_loss_op.cu (100%) rename paddle/{ => fluid}/operators/huber_loss_op.h (100%) rename paddle/{ => fluid}/operators/im2sequence_op.cc (100%) rename paddle/{ => fluid}/operators/im2sequence_op.cu (100%) rename paddle/{ => fluid}/operators/im2sequence_op.h (100%) rename paddle/{ => fluid}/operators/images/batch_norm_fork.dot (100%) rename paddle/{ => fluid}/operators/images/batch_norm_fork.png (100%) rename paddle/{ => fluid}/operators/images/batch_norm_op_kernel.png (100%) rename paddle/{ => fluid}/operators/increment_op.cc (100%) rename paddle/{ => fluid}/operators/iou_similarity_op.cc (100%) rename paddle/{ => fluid}/operators/iou_similarity_op.cu (100%) rename paddle/{ => fluid}/operators/iou_similarity_op.h (100%) rename paddle/{ => fluid}/operators/is_empty_op.cc (100%) rename paddle/{ => fluid}/operators/l1_norm_op.cc (100%) rename paddle/{ => fluid}/operators/l1_norm_op.cu (100%) rename paddle/{ => fluid}/operators/l1_norm_op.h (100%) rename paddle/{ => fluid}/operators/label_smooth_op.cc (100%) rename paddle/{ => fluid}/operators/label_smooth_op.cu (100%) rename paddle/{ => fluid}/operators/label_smooth_op.h (100%) rename paddle/{ => fluid}/operators/layer_norm_op.cc (100%) rename paddle/{ => fluid}/operators/layer_norm_op.cu (100%) rename paddle/{ => fluid}/operators/layer_norm_op.h (100%) rename paddle/{ => fluid}/operators/linear_chain_crf_op.cc (100%) rename paddle/{ => fluid}/operators/linear_chain_crf_op.cu (100%) rename paddle/{ => fluid}/operators/linear_chain_crf_op.h (100%) rename paddle/{ => fluid}/operators/listen_and_serv_op.cc (100%) rename paddle/{ => fluid}/operators/load_combine_op.cc (100%) rename paddle/{ => fluid}/operators/load_op.cc (100%) rename paddle/{ => fluid}/operators/lod_array_length_op.cc (100%) rename paddle/{ => fluid}/operators/lod_rank_table_op.cc (100%) rename paddle/{ => fluid}/operators/lod_reset_op.cc (100%) rename paddle/{ => fluid}/operators/lod_reset_op.cu (100%) rename paddle/{ => fluid}/operators/lod_reset_op.h (100%) rename paddle/{ => fluid}/operators/lod_tensor_to_array_op.cc (100%) rename paddle/{ => fluid}/operators/log_loss_op.cc (100%) rename paddle/{ => fluid}/operators/log_loss_op.cu (100%) rename paddle/{ => fluid}/operators/log_loss_op.h (100%) rename paddle/{ => fluid}/operators/logical_op.cc (100%) rename paddle/{ => fluid}/operators/logical_op.cu (100%) rename paddle/{ => fluid}/operators/logical_op.h (100%) rename paddle/{ => fluid}/operators/lookup_table_op.cc (100%) rename paddle/{ => fluid}/operators/lookup_table_op.cu (100%) rename paddle/{ => fluid}/operators/lookup_table_op.h (100%) rename paddle/{ => fluid}/operators/lrn_op.cc (100%) rename paddle/{ => fluid}/operators/lrn_op.cu (100%) rename paddle/{ => fluid}/operators/lrn_op.h (100%) rename paddle/{ => fluid}/operators/lstm_op.cc (100%) rename paddle/{ => fluid}/operators/lstm_op.cu.cc (100%) rename paddle/{ => fluid}/operators/lstm_op.h (100%) rename paddle/{ => fluid}/operators/lstm_unit_op.cc (100%) rename paddle/{ => fluid}/operators/lstm_unit_op.cu (100%) rename paddle/{ => fluid}/operators/lstm_unit_op.h (100%) rename paddle/{ => fluid}/operators/lstmp_op.cc (100%) rename paddle/{ => fluid}/operators/lstmp_op.cu (100%) rename paddle/{ => fluid}/operators/lstmp_op.h (100%) rename paddle/{ => fluid}/operators/margin_rank_loss_op.cc (100%) rename paddle/{ => fluid}/operators/margin_rank_loss_op.cu (100%) rename paddle/{ => fluid}/operators/margin_rank_loss_op.h (100%) rename paddle/{ => fluid}/operators/math/CMakeLists.txt (100%) rename paddle/{ => fluid}/operators/math/context_project.cc (100%) rename paddle/{ => fluid}/operators/math/context_project.cu (100%) rename paddle/{ => fluid}/operators/math/context_project.h (100%) rename paddle/{ => fluid}/operators/math/cos_sim_functor.cc (100%) rename paddle/{ => fluid}/operators/math/cos_sim_functor.cu (100%) rename paddle/{ => fluid}/operators/math/cos_sim_functor.h (100%) rename paddle/{ => fluid}/operators/math/cross_entropy.cc (100%) rename paddle/{ => fluid}/operators/math/cross_entropy.cu (100%) rename paddle/{ => fluid}/operators/math/cross_entropy.h (100%) rename paddle/{ => fluid}/operators/math/depthwise_conv.cu (100%) rename paddle/{ => fluid}/operators/math/depthwise_conv.h (100%) rename paddle/{ => fluid}/operators/math/detail/CMakeLists.txt (100%) rename paddle/{ => fluid}/operators/math/detail/activation_functions.h (100%) rename paddle/{ => fluid}/operators/math/detail/avx_functions.cc (100%) rename paddle/{ => fluid}/operators/math/detail/gru_cpu_kernel.h (100%) rename paddle/{ => fluid}/operators/math/detail/gru_gpu_kernel.h (100%) rename paddle/{ => fluid}/operators/math/detail/gru_kernel.h (100%) rename paddle/{ => fluid}/operators/math/detail/lstm_cpu_kernel.h (100%) rename paddle/{ => fluid}/operators/math/detail/lstm_gpu_kernel.h (100%) rename paddle/{ => fluid}/operators/math/detail/lstm_kernel.h (100%) rename paddle/{ => fluid}/operators/math/detection_util.h (100%) rename paddle/{ => fluid}/operators/math/gru_compute.cc (100%) rename paddle/{ => fluid}/operators/math/gru_compute.cu (100%) rename paddle/{ => fluid}/operators/math/gru_compute.h (100%) rename paddle/{ => fluid}/operators/math/im2col.cc (100%) rename paddle/{ => fluid}/operators/math/im2col.cu (100%) rename paddle/{ => fluid}/operators/math/im2col.h (100%) rename paddle/{ => fluid}/operators/math/im2col_test.cc (100%) rename paddle/{ => fluid}/operators/math/lstm_compute.cc (100%) rename paddle/{ => fluid}/operators/math/lstm_compute.cu (100%) rename paddle/{ => fluid}/operators/math/lstm_compute.h (100%) rename paddle/{ => fluid}/operators/math/math_function.cc (100%) rename paddle/{ => fluid}/operators/math/math_function.cu (100%) rename paddle/{ => fluid}/operators/math/math_function.h (100%) rename paddle/{ => fluid}/operators/math/math_function_impl.h (100%) rename paddle/{ => fluid}/operators/math/math_function_test.cc (100%) rename paddle/{ => fluid}/operators/math/math_function_test.cu (100%) rename paddle/{ => fluid}/operators/math/matmul.h (100%) rename paddle/{ => fluid}/operators/math/maxouting.cc (100%) rename paddle/{ => fluid}/operators/math/maxouting.cu (100%) rename paddle/{ => fluid}/operators/math/maxouting.h (100%) rename paddle/{ => fluid}/operators/math/pooling.cc (100%) rename paddle/{ => fluid}/operators/math/pooling.cu (100%) rename paddle/{ => fluid}/operators/math/pooling.h (100%) rename paddle/{ => fluid}/operators/math/sampler.cc (100%) rename paddle/{ => fluid}/operators/math/sampler.h (100%) rename paddle/{ => fluid}/operators/math/selected_rows_functor.cc (100%) rename paddle/{ => fluid}/operators/math/selected_rows_functor.cu (100%) rename paddle/{ => fluid}/operators/math/selected_rows_functor.h (100%) rename paddle/{ => fluid}/operators/math/selected_rows_functor_test.cc (100%) rename paddle/{ => fluid}/operators/math/selected_rows_functor_test.cu (100%) rename paddle/{ => fluid}/operators/math/sequence2batch.cc (100%) rename paddle/{ => fluid}/operators/math/sequence2batch.cu (100%) rename paddle/{ => fluid}/operators/math/sequence2batch.h (100%) rename paddle/{ => fluid}/operators/math/sequence_padding.cc (100%) rename paddle/{ => fluid}/operators/math/sequence_padding.cu (100%) rename paddle/{ => fluid}/operators/math/sequence_padding.h (100%) rename paddle/{ => fluid}/operators/math/sequence_padding_test.cc (100%) rename paddle/{ => fluid}/operators/math/sequence_pooling.cc (100%) rename paddle/{ => fluid}/operators/math/sequence_pooling.cu (100%) rename paddle/{ => fluid}/operators/math/sequence_pooling.h (100%) rename paddle/{ => fluid}/operators/math/sequence_scale.cc (100%) rename paddle/{ => fluid}/operators/math/sequence_scale.cu (100%) rename paddle/{ => fluid}/operators/math/sequence_scale.h (100%) rename paddle/{ => fluid}/operators/math/softmax.cc (100%) rename paddle/{ => fluid}/operators/math/softmax.cu (100%) rename paddle/{ => fluid}/operators/math/softmax.h (100%) rename paddle/{ => fluid}/operators/math/softmax_impl.h (100%) rename paddle/{ => fluid}/operators/math/unpooling.cc (100%) rename paddle/{ => fluid}/operators/math/unpooling.cu (100%) rename paddle/{ => fluid}/operators/math/unpooling.h (100%) rename paddle/{ => fluid}/operators/math/vol2col.cc (100%) rename paddle/{ => fluid}/operators/math/vol2col.cu (100%) rename paddle/{ => fluid}/operators/math/vol2col.h (100%) rename paddle/{ => fluid}/operators/math/vol2col_test.cc (100%) rename paddle/{ => fluid}/operators/matmul_op.cc (100%) rename paddle/{ => fluid}/operators/matmul_op.cu.cc (100%) rename paddle/{ => fluid}/operators/matmul_op.h (100%) rename paddle/{ => fluid}/operators/max_sequence_len_op.cc (100%) rename paddle/{ => fluid}/operators/maxout_op.cc (100%) rename paddle/{ => fluid}/operators/maxout_op.cu.cc (100%) rename paddle/{ => fluid}/operators/maxout_op.h (100%) rename paddle/{ => fluid}/operators/mean_op.cc (100%) rename paddle/{ => fluid}/operators/mean_op.cu (100%) rename paddle/{ => fluid}/operators/mean_op.h (100%) rename paddle/{ => fluid}/operators/merge_lod_tensor_op.cc (100%) rename paddle/{ => fluid}/operators/mine_hard_examples_op.cc (100%) rename paddle/{ => fluid}/operators/minus_op.cc (100%) rename paddle/{ => fluid}/operators/minus_op.cu (100%) rename paddle/{ => fluid}/operators/minus_op.h (100%) rename paddle/{ => fluid}/operators/modified_huber_loss_op.cc (100%) rename paddle/{ => fluid}/operators/modified_huber_loss_op.cu (100%) rename paddle/{ => fluid}/operators/modified_huber_loss_op.h (100%) rename paddle/{ => fluid}/operators/momentum_op.cc (100%) rename paddle/{ => fluid}/operators/momentum_op.cu (100%) rename paddle/{ => fluid}/operators/momentum_op.h (100%) rename paddle/{ => fluid}/operators/mul_op.cc (100%) rename paddle/{ => fluid}/operators/mul_op.cu.cc (100%) rename paddle/{ => fluid}/operators/mul_op.h (100%) rename paddle/{ => fluid}/operators/multiclass_nms_op.cc (100%) rename paddle/{ => fluid}/operators/multiplex_op.cc (100%) rename paddle/{ => fluid}/operators/multiplex_op.cu (100%) rename paddle/{ => fluid}/operators/multiplex_op.h (100%) rename paddle/{ => fluid}/operators/nccl/CMakeLists.txt (100%) rename paddle/{ => fluid}/operators/nccl/nccl_gpu_common.cc (100%) rename paddle/{ => fluid}/operators/nccl/nccl_gpu_common.h (100%) rename paddle/{ => fluid}/operators/nccl_op.cc (100%) rename paddle/{ => fluid}/operators/nccl_op.cu.cc (100%) rename paddle/{ => fluid}/operators/nccl_op_test.cu.cc (100%) rename paddle/{ => fluid}/operators/nce_op.cc (100%) rename paddle/{ => fluid}/operators/nce_op.h (100%) rename paddle/{ => fluid}/operators/net_op.cc (100%) rename paddle/{ => fluid}/operators/net_op.h (100%) rename paddle/{ => fluid}/operators/net_op_test.cc (100%) rename paddle/{ => fluid}/operators/norm_op.cc (100%) rename paddle/{ => fluid}/operators/norm_op.cu (100%) rename paddle/{ => fluid}/operators/norm_op.h (100%) rename paddle/{ => fluid}/operators/one_hot_op.cc (100%) rename paddle/{ => fluid}/operators/one_hot_op.cu (100%) rename paddle/{ => fluid}/operators/one_hot_op.h (100%) rename paddle/{ => fluid}/operators/op_documentation/batch_norm_op.md (100%) rename paddle/{ => fluid}/operators/op_documentation/name_convention.md (100%) rename paddle/{ => fluid}/operators/op_documentation/net_op_design.md (100%) rename paddle/{ => fluid}/operators/op_documentation/op_markdown_format.md (100%) rename paddle/{ => fluid}/operators/op_documentation/rnn_design.md (100%) rename paddle/{ => fluid}/operators/pad_op.cc (100%) rename paddle/{ => fluid}/operators/pad_op.cu (100%) rename paddle/{ => fluid}/operators/pad_op.h (100%) rename paddle/{ => fluid}/operators/parallel_do_op.cc (100%) rename paddle/{ => fluid}/operators/pool_cudnn_op.cu.cc (100%) rename paddle/{ => fluid}/operators/pool_op.cc (100%) rename paddle/{ => fluid}/operators/pool_op.cu.cc (100%) rename paddle/{ => fluid}/operators/pool_op.h (100%) rename paddle/{ => fluid}/operators/pool_with_index_op.cc (100%) rename paddle/{ => fluid}/operators/pool_with_index_op.cu.cc (100%) rename paddle/{ => fluid}/operators/pool_with_index_op.h (100%) rename paddle/{ => fluid}/operators/positive_negative_pair_op.cc (100%) rename paddle/{ => fluid}/operators/positive_negative_pair_op.h (100%) rename paddle/{ => fluid}/operators/precision_recall_op.cc (100%) rename paddle/{ => fluid}/operators/precision_recall_op.h (100%) rename paddle/{ => fluid}/operators/prelu_op.cc (100%) rename paddle/{ => fluid}/operators/prelu_op.cu (100%) rename paddle/{ => fluid}/operators/prelu_op.h (100%) rename paddle/{ => fluid}/operators/print_op.cc (100%) rename paddle/{ => fluid}/operators/prior_box_op.cc (100%) rename paddle/{ => fluid}/operators/prior_box_op.h (100%) rename paddle/{ => fluid}/operators/proximal_adagrad_op.cc (100%) rename paddle/{ => fluid}/operators/proximal_adagrad_op.cu (100%) rename paddle/{ => fluid}/operators/proximal_adagrad_op.h (100%) rename paddle/{ => fluid}/operators/proximal_gd_op.cc (100%) rename paddle/{ => fluid}/operators/proximal_gd_op.cu (100%) rename paddle/{ => fluid}/operators/proximal_gd_op.h (100%) rename paddle/{ => fluid}/operators/rank_loss_op.cc (100%) rename paddle/{ => fluid}/operators/rank_loss_op.cu (100%) rename paddle/{ => fluid}/operators/rank_loss_op.h (100%) rename paddle/{ => fluid}/operators/read_op.cc (100%) rename paddle/{ => fluid}/operators/recurrent_op.cc (100%) rename paddle/{ => fluid}/operators/recv_op.cc (100%) rename paddle/{ => fluid}/operators/reduce_op.cc (100%) rename paddle/{ => fluid}/operators/reduce_op.cu (100%) rename paddle/{ => fluid}/operators/reduce_op.h (100%) rename paddle/{ => fluid}/operators/reorder_lod_tensor_by_rank_op.cc (100%) rename paddle/{ => fluid}/operators/reshape_op.cc (100%) rename paddle/{ => fluid}/operators/reshape_op.cu (100%) rename paddle/{ => fluid}/operators/reshape_op.h (100%) rename paddle/{ => fluid}/operators/rmsprop_op.cc (100%) rename paddle/{ => fluid}/operators/rmsprop_op.cu (100%) rename paddle/{ => fluid}/operators/rmsprop_op.h (100%) rename paddle/{ => fluid}/operators/rnn_memory_helper_op.cc (100%) rename paddle/{ => fluid}/operators/roi_pool_op.cc (100%) rename paddle/{ => fluid}/operators/roi_pool_op.cu (100%) rename paddle/{ => fluid}/operators/roi_pool_op.h (100%) rename paddle/{ => fluid}/operators/row_conv_op.cc (100%) rename paddle/{ => fluid}/operators/row_conv_op.cu (100%) rename paddle/{ => fluid}/operators/row_conv_op.h (100%) rename paddle/{ => fluid}/operators/save_combine_op.cc (100%) rename paddle/{ => fluid}/operators/save_load_combine_op_test.cc (100%) rename paddle/{ => fluid}/operators/save_load_op_test.cc (100%) rename paddle/{ => fluid}/operators/save_op.cc (100%) rename paddle/{ => fluid}/operators/scale_op.cc (100%) rename paddle/{ => fluid}/operators/scale_op.cu (100%) rename paddle/{ => fluid}/operators/scale_op.h (100%) rename paddle/{ => fluid}/operators/scatter.cu.h (100%) rename paddle/{ => fluid}/operators/scatter.h (100%) rename paddle/{ => fluid}/operators/scatter_op.cc (100%) rename paddle/{ => fluid}/operators/scatter_op.cu (100%) rename paddle/{ => fluid}/operators/scatter_op.h (100%) rename paddle/{ => fluid}/operators/scatter_test.cc (100%) rename paddle/{ => fluid}/operators/send_op.cc (100%) rename paddle/{ => fluid}/operators/send_recv_op_test.cc (100%) rename paddle/{ => fluid}/operators/sequence_concat_op.cc (100%) rename paddle/{ => fluid}/operators/sequence_concat_op.cu.cc (100%) rename paddle/{ => fluid}/operators/sequence_concat_op.h (100%) rename paddle/{ => fluid}/operators/sequence_conv_op.cc (100%) rename paddle/{ => fluid}/operators/sequence_conv_op.cu.cc (100%) rename paddle/{ => fluid}/operators/sequence_conv_op.h (100%) rename paddle/{ => fluid}/operators/sequence_erase_op.cc (100%) rename paddle/{ => fluid}/operators/sequence_erase_op.cu (100%) rename paddle/{ => fluid}/operators/sequence_erase_op.h (100%) rename paddle/{ => fluid}/operators/sequence_expand_op.cc (100%) rename paddle/{ => fluid}/operators/sequence_expand_op.cu (100%) rename paddle/{ => fluid}/operators/sequence_expand_op.h (100%) rename paddle/{ => fluid}/operators/sequence_pool_op.cc (100%) rename paddle/{ => fluid}/operators/sequence_pool_op.cu (100%) rename paddle/{ => fluid}/operators/sequence_pool_op.h (100%) rename paddle/{ => fluid}/operators/sequence_reshape_op.cc (100%) rename paddle/{ => fluid}/operators/sequence_reshape_op.cu (100%) rename paddle/{ => fluid}/operators/sequence_reshape_op.h (100%) rename paddle/{ => fluid}/operators/sequence_slice_op.cc (100%) rename paddle/{ => fluid}/operators/sequence_slice_op.cu (100%) rename paddle/{ => fluid}/operators/sequence_slice_op.h (100%) rename paddle/{ => fluid}/operators/sequence_softmax_op.cc (100%) rename paddle/{ => fluid}/operators/sequence_softmax_op.cu.cc (100%) rename paddle/{ => fluid}/operators/sequence_softmax_op.h (100%) rename paddle/{ => fluid}/operators/sgd_op.cc (100%) rename paddle/{ => fluid}/operators/sgd_op.cu (100%) rename paddle/{ => fluid}/operators/sgd_op.h (100%) rename paddle/{ => fluid}/operators/shrink_rnn_memory_op.cc (100%) rename paddle/{ => fluid}/operators/sigmoid_cross_entropy_with_logits_op.cc (100%) rename paddle/{ => fluid}/operators/sigmoid_cross_entropy_with_logits_op.cu (100%) rename paddle/{ => fluid}/operators/sigmoid_cross_entropy_with_logits_op.h (100%) rename paddle/{ => fluid}/operators/sign_op.cc (100%) rename paddle/{ => fluid}/operators/sign_op.cu (100%) rename paddle/{ => fluid}/operators/sign_op.h (100%) rename paddle/{ => fluid}/operators/smooth_l1_loss_op.cc (100%) rename paddle/{ => fluid}/operators/smooth_l1_loss_op.cu (100%) rename paddle/{ => fluid}/operators/smooth_l1_loss_op.h (100%) rename paddle/{ => fluid}/operators/softmax_op.cc (100%) rename paddle/{ => fluid}/operators/softmax_op.cu.cc (100%) rename paddle/{ => fluid}/operators/softmax_op.h (100%) rename paddle/{ => fluid}/operators/softmax_with_cross_entropy_op.cc (100%) rename paddle/{ => fluid}/operators/softmax_with_cross_entropy_op.cu (100%) rename paddle/{ => fluid}/operators/softmax_with_cross_entropy_op.h (100%) rename paddle/{ => fluid}/operators/split_lod_tensor_op.cc (100%) rename paddle/{ => fluid}/operators/split_op.cc (100%) rename paddle/{ => fluid}/operators/split_op.cu.cc (100%) rename paddle/{ => fluid}/operators/split_op.h (100%) rename paddle/{ => fluid}/operators/split_selected_rows_op.cc (100%) rename paddle/{ => fluid}/operators/split_selected_rows_op.cu (100%) rename paddle/{ => fluid}/operators/split_selected_rows_op.h (100%) rename paddle/{ => fluid}/operators/spp_op.cc (100%) rename paddle/{ => fluid}/operators/spp_op.cu.cc (100%) rename paddle/{ => fluid}/operators/spp_op.h (100%) rename paddle/{ => fluid}/operators/squared_l2_distance_op.cc (100%) rename paddle/{ => fluid}/operators/squared_l2_distance_op.cu (100%) rename paddle/{ => fluid}/operators/squared_l2_distance_op.h (100%) rename paddle/{ => fluid}/operators/squared_l2_norm_op.cc (100%) rename paddle/{ => fluid}/operators/squared_l2_norm_op.cu (100%) rename paddle/{ => fluid}/operators/squared_l2_norm_op.h (100%) rename paddle/{ => fluid}/operators/strided_memcpy.h (100%) rename paddle/{ => fluid}/operators/strided_memcpy_test.cc (100%) rename paddle/{ => fluid}/operators/sum_op.cc (100%) rename paddle/{ => fluid}/operators/sum_op.cu (100%) rename paddle/{ => fluid}/operators/sum_op.h (100%) rename paddle/{ => fluid}/operators/target_assign_op.cc (100%) rename paddle/{ => fluid}/operators/target_assign_op.cu (100%) rename paddle/{ => fluid}/operators/target_assign_op.h (100%) rename paddle/{ => fluid}/operators/tensor_array_read_write_op.cc (100%) rename paddle/{ => fluid}/operators/top_k_op.cc (100%) rename paddle/{ => fluid}/operators/top_k_op.cu (100%) rename paddle/{ => fluid}/operators/top_k_op.h (100%) rename paddle/{ => fluid}/operators/transpose_op.cc (100%) rename paddle/{ => fluid}/operators/transpose_op.cu.cc (100%) rename paddle/{ => fluid}/operators/transpose_op.h (100%) rename paddle/{ => fluid}/operators/uniform_random_op.cc (100%) rename paddle/{ => fluid}/operators/uniform_random_op.cu (100%) rename paddle/{ => fluid}/operators/unpool_op.cc (100%) rename paddle/{ => fluid}/operators/unpool_op.cu.cc (100%) rename paddle/{ => fluid}/operators/unpool_op.h (100%) rename paddle/{ => fluid}/operators/warpctc_op.cc (100%) rename paddle/{ => fluid}/operators/warpctc_op.cu.cc (100%) rename paddle/{ => fluid}/operators/warpctc_op.h (100%) rename paddle/{ => fluid}/operators/while_op.cc (100%) rename paddle/{ => fluid}/platform/.clang-format (100%) rename paddle/{ => fluid}/platform/CMakeLists.txt (100%) rename paddle/{ => fluid}/platform/assert.h (100%) rename paddle/{ => fluid}/platform/call_once.h (100%) rename paddle/{ => fluid}/platform/cpu_info.cc (100%) rename paddle/{ => fluid}/platform/cpu_info.h (100%) rename paddle/{ => fluid}/platform/cpu_info_test.cc (100%) rename paddle/{ => fluid}/platform/cuda_helper.h (100%) rename paddle/{ => fluid}/platform/cuda_profiler.h (100%) rename paddle/{ => fluid}/platform/cudnn_helper.h (100%) rename paddle/{ => fluid}/platform/cudnn_helper_test.cc (100%) rename paddle/{ => fluid}/platform/details/device_ptr_cast.h (100%) rename paddle/{ => fluid}/platform/device_context.cc (100%) rename paddle/{ => fluid}/platform/device_context.h (100%) rename paddle/{ => fluid}/platform/device_context_test.cu (100%) rename paddle/{ => fluid}/platform/dynload/CMakeLists.txt (100%) rename paddle/{ => fluid}/platform/dynload/cublas.cc (100%) rename paddle/{ => fluid}/platform/dynload/cublas.h (100%) rename paddle/{ => fluid}/platform/dynload/cudnn.cc (100%) rename paddle/{ => fluid}/platform/dynload/cudnn.h (100%) rename paddle/{ => fluid}/platform/dynload/curand.cc (100%) rename paddle/{ => fluid}/platform/dynload/curand.h (100%) rename paddle/{ => fluid}/platform/dynload/dynamic_loader.cc (100%) rename paddle/{ => fluid}/platform/dynload/dynamic_loader.h (100%) rename paddle/{ => fluid}/platform/dynload/nccl.cc (100%) rename paddle/{ => fluid}/platform/dynload/nccl.h (100%) rename paddle/{ => fluid}/platform/dynload/warpctc.cc (100%) rename paddle/{ => fluid}/platform/dynload/warpctc.h (100%) rename paddle/{ => fluid}/platform/enforce.cc (100%) rename paddle/{ => fluid}/platform/enforce.h (100%) rename paddle/{ => fluid}/platform/enforce_test.cc (100%) rename paddle/{ => fluid}/platform/for_range.h (100%) rename paddle/{ => fluid}/platform/gpu_info.cc (100%) rename paddle/{ => fluid}/platform/gpu_info.h (100%) rename paddle/{ => fluid}/platform/hostdevice.h (100%) rename paddle/{ => fluid}/platform/macros.h (100%) rename paddle/{ => fluid}/platform/mkldnn_helper.h (100%) rename paddle/{ => fluid}/platform/nccl_test.cu (100%) rename paddle/{ => fluid}/platform/place.cc (100%) rename paddle/{ => fluid}/platform/place.h (100%) rename paddle/{ => fluid}/platform/place_test.cc (100%) rename paddle/{ => fluid}/platform/profiler.cc (100%) rename paddle/{ => fluid}/platform/profiler.h (100%) rename paddle/{ => fluid}/platform/profiler_test.cc (100%) rename paddle/{ => fluid}/platform/transform.h (100%) rename paddle/{ => fluid}/platform/transform_test.cu (100%) rename paddle/{ => fluid}/platform/variant.h (100%) rename paddle/{ => fluid}/pybind/.clang-format (100%) rename paddle/{ => fluid}/pybind/CMakeLists.txt (100%) rename paddle/{ => fluid}/pybind/const_value.cc (100%) rename paddle/{ => fluid}/pybind/const_value.h (100%) rename paddle/{ => fluid}/pybind/exception.cc (100%) rename paddle/{ => fluid}/pybind/exception.h (100%) rename paddle/{ => fluid}/pybind/protobuf.cc (100%) rename paddle/{ => fluid}/pybind/protobuf.h (100%) rename paddle/{ => fluid}/pybind/pybind.cc (100%) rename paddle/{ => fluid}/pybind/tensor_py.h (100%) diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt index 3f9c132ef6a..c7deba2ab47 100644 --- a/paddle/CMakeLists.txt +++ b/paddle/CMakeLists.txt @@ -19,12 +19,7 @@ else() endif() if(NOT ANDROID AND NOT IOS) - add_subdirectory(memory) - add_subdirectory(platform) - add_subdirectory(framework) - add_subdirectory(operators) - add_subdirectory(pybind) - add_subdirectory(inference) + add_subdirectory(fluid) endif() if(WITH_SWIG_PY) diff --git a/paddle/fluid/CMakeLists.txt b/paddle/fluid/CMakeLists.txt new file mode 100644 index 00000000000..a6b4191518c --- /dev/null +++ b/paddle/fluid/CMakeLists.txt @@ -0,0 +1,6 @@ +add_subdirectory(memory) +add_subdirectory(platform) +add_subdirectory(framework) +add_subdirectory(operators) +add_subdirectory(pybind) +add_subdirectory(inference) diff --git a/paddle/framework/.clang-format b/paddle/fluid/framework/.clang-format similarity index 100% rename from paddle/framework/.clang-format rename to paddle/fluid/framework/.clang-format diff --git a/paddle/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt similarity index 100% rename from paddle/framework/CMakeLists.txt rename to paddle/fluid/framework/CMakeLists.txt diff --git a/paddle/framework/attribute.cc b/paddle/fluid/framework/attribute.cc similarity index 100% rename from paddle/framework/attribute.cc rename to paddle/fluid/framework/attribute.cc diff --git a/paddle/framework/attribute.h b/paddle/fluid/framework/attribute.h similarity index 100% rename from paddle/framework/attribute.h rename to paddle/fluid/framework/attribute.h diff --git a/paddle/framework/backward.cc b/paddle/fluid/framework/backward.cc similarity index 100% rename from paddle/framework/backward.cc rename to paddle/fluid/framework/backward.cc diff --git a/paddle/framework/backward.h b/paddle/fluid/framework/backward.h similarity index 100% rename from paddle/framework/backward.h rename to paddle/fluid/framework/backward.h diff --git a/paddle/framework/backward_test.cc b/paddle/fluid/framework/backward_test.cc similarity index 100% rename from paddle/framework/backward_test.cc rename to paddle/fluid/framework/backward_test.cc diff --git a/paddle/framework/block_desc.cc b/paddle/fluid/framework/block_desc.cc similarity index 100% rename from paddle/framework/block_desc.cc rename to paddle/fluid/framework/block_desc.cc diff --git a/paddle/framework/block_desc.h b/paddle/fluid/framework/block_desc.h similarity index 100% rename from paddle/framework/block_desc.h rename to paddle/fluid/framework/block_desc.h diff --git a/paddle/framework/channel.h b/paddle/fluid/framework/channel.h similarity index 100% rename from paddle/framework/channel.h rename to paddle/fluid/framework/channel.h diff --git a/paddle/framework/channel_test.cc b/paddle/fluid/framework/channel_test.cc similarity index 100% rename from paddle/framework/channel_test.cc rename to paddle/fluid/framework/channel_test.cc diff --git a/paddle/framework/data_device_transform.cc b/paddle/fluid/framework/data_device_transform.cc similarity index 100% rename from paddle/framework/data_device_transform.cc rename to paddle/fluid/framework/data_device_transform.cc diff --git a/paddle/framework/data_device_transform.h b/paddle/fluid/framework/data_device_transform.h similarity index 100% rename from paddle/framework/data_device_transform.h rename to paddle/fluid/framework/data_device_transform.h diff --git a/paddle/framework/data_device_transform_test.cu b/paddle/fluid/framework/data_device_transform_test.cu similarity index 100% rename from paddle/framework/data_device_transform_test.cu rename to paddle/fluid/framework/data_device_transform_test.cu diff --git a/paddle/framework/data_layout.h b/paddle/fluid/framework/data_layout.h similarity index 100% rename from paddle/framework/data_layout.h rename to paddle/fluid/framework/data_layout.h diff --git a/paddle/framework/data_layout_transform.cc b/paddle/fluid/framework/data_layout_transform.cc similarity index 100% rename from paddle/framework/data_layout_transform.cc rename to paddle/fluid/framework/data_layout_transform.cc diff --git a/paddle/framework/data_layout_transform.h b/paddle/fluid/framework/data_layout_transform.h similarity index 100% rename from paddle/framework/data_layout_transform.h rename to paddle/fluid/framework/data_layout_transform.h diff --git a/paddle/framework/data_layout_transform_test.cc b/paddle/fluid/framework/data_layout_transform_test.cc similarity index 100% rename from paddle/framework/data_layout_transform_test.cc rename to paddle/fluid/framework/data_layout_transform_test.cc diff --git a/paddle/framework/data_transform.cc b/paddle/fluid/framework/data_transform.cc similarity index 100% rename from paddle/framework/data_transform.cc rename to paddle/fluid/framework/data_transform.cc diff --git a/paddle/framework/data_transform.h b/paddle/fluid/framework/data_transform.h similarity index 100% rename from paddle/framework/data_transform.h rename to paddle/fluid/framework/data_transform.h diff --git a/paddle/framework/data_type.h b/paddle/fluid/framework/data_type.h similarity index 100% rename from paddle/framework/data_type.h rename to paddle/fluid/framework/data_type.h diff --git a/paddle/framework/data_type_transform.cc b/paddle/fluid/framework/data_type_transform.cc similarity index 100% rename from paddle/framework/data_type_transform.cc rename to paddle/fluid/framework/data_type_transform.cc diff --git a/paddle/framework/data_type_transform.h b/paddle/fluid/framework/data_type_transform.h similarity index 100% rename from paddle/framework/data_type_transform.h rename to paddle/fluid/framework/data_type_transform.h diff --git a/paddle/framework/data_type_transform_test.cc b/paddle/fluid/framework/data_type_transform_test.cc similarity index 100% rename from paddle/framework/data_type_transform_test.cc rename to paddle/fluid/framework/data_type_transform_test.cc diff --git a/paddle/framework/ddim.cc b/paddle/fluid/framework/ddim.cc similarity index 100% rename from paddle/framework/ddim.cc rename to paddle/fluid/framework/ddim.cc diff --git a/paddle/framework/ddim.h b/paddle/fluid/framework/ddim.h similarity index 100% rename from paddle/framework/ddim.h rename to paddle/fluid/framework/ddim.h diff --git a/paddle/framework/ddim_test.cc b/paddle/fluid/framework/ddim_test.cc similarity index 100% rename from paddle/framework/ddim_test.cc rename to paddle/fluid/framework/ddim_test.cc diff --git a/paddle/framework/details/buffered_channel.h b/paddle/fluid/framework/details/buffered_channel.h similarity index 100% rename from paddle/framework/details/buffered_channel.h rename to paddle/fluid/framework/details/buffered_channel.h diff --git a/paddle/framework/details/cow_ptr.h b/paddle/fluid/framework/details/cow_ptr.h similarity index 100% rename from paddle/framework/details/cow_ptr.h rename to paddle/fluid/framework/details/cow_ptr.h diff --git a/paddle/framework/details/cow_ptr_test.cc b/paddle/fluid/framework/details/cow_ptr_test.cc similarity index 100% rename from paddle/framework/details/cow_ptr_test.cc rename to paddle/fluid/framework/details/cow_ptr_test.cc diff --git a/paddle/framework/details/op_registry.h b/paddle/fluid/framework/details/op_registry.h similarity index 100% rename from paddle/framework/details/op_registry.h rename to paddle/fluid/framework/details/op_registry.h diff --git a/paddle/framework/details/unbuffered_channel.h b/paddle/fluid/framework/details/unbuffered_channel.h similarity index 100% rename from paddle/framework/details/unbuffered_channel.h rename to paddle/fluid/framework/details/unbuffered_channel.h diff --git a/paddle/framework/dim.h b/paddle/fluid/framework/dim.h similarity index 100% rename from paddle/framework/dim.h rename to paddle/fluid/framework/dim.h diff --git a/paddle/framework/dim_test.cu b/paddle/fluid/framework/dim_test.cu similarity index 100% rename from paddle/framework/dim_test.cu rename to paddle/fluid/framework/dim_test.cu diff --git a/paddle/framework/eigen.h b/paddle/fluid/framework/eigen.h similarity index 100% rename from paddle/framework/eigen.h rename to paddle/fluid/framework/eigen.h diff --git a/paddle/framework/eigen_test.cc b/paddle/fluid/framework/eigen_test.cc similarity index 100% rename from paddle/framework/eigen_test.cc rename to paddle/fluid/framework/eigen_test.cc diff --git a/paddle/framework/executor.cc b/paddle/fluid/framework/executor.cc similarity index 100% rename from paddle/framework/executor.cc rename to paddle/fluid/framework/executor.cc diff --git a/paddle/framework/executor.h b/paddle/fluid/framework/executor.h similarity index 100% rename from paddle/framework/executor.h rename to paddle/fluid/framework/executor.h diff --git a/paddle/framework/feed_fetch_method.cc b/paddle/fluid/framework/feed_fetch_method.cc similarity index 100% rename from paddle/framework/feed_fetch_method.cc rename to paddle/fluid/framework/feed_fetch_method.cc diff --git a/paddle/framework/feed_fetch_method.h b/paddle/fluid/framework/feed_fetch_method.h similarity index 100% rename from paddle/framework/feed_fetch_method.h rename to paddle/fluid/framework/feed_fetch_method.h diff --git a/paddle/framework/feed_fetch_type.h b/paddle/fluid/framework/feed_fetch_type.h similarity index 100% rename from paddle/framework/feed_fetch_type.h rename to paddle/fluid/framework/feed_fetch_type.h diff --git a/paddle/framework/framework.proto b/paddle/fluid/framework/framework.proto similarity index 100% rename from paddle/framework/framework.proto rename to paddle/fluid/framework/framework.proto diff --git a/paddle/framework/grad_op_desc_maker.h b/paddle/fluid/framework/grad_op_desc_maker.h similarity index 100% rename from paddle/framework/grad_op_desc_maker.h rename to paddle/fluid/framework/grad_op_desc_maker.h diff --git a/paddle/framework/init.cc b/paddle/fluid/framework/init.cc similarity index 100% rename from paddle/framework/init.cc rename to paddle/fluid/framework/init.cc diff --git a/paddle/framework/init.h b/paddle/fluid/framework/init.h similarity index 100% rename from paddle/framework/init.h rename to paddle/fluid/framework/init.h diff --git a/paddle/framework/init_test.cc b/paddle/fluid/framework/init_test.cc similarity index 100% rename from paddle/framework/init_test.cc rename to paddle/fluid/framework/init_test.cc diff --git a/paddle/framework/library_type.h b/paddle/fluid/framework/library_type.h similarity index 100% rename from paddle/framework/library_type.h rename to paddle/fluid/framework/library_type.h diff --git a/paddle/framework/lod_rank_table.cc b/paddle/fluid/framework/lod_rank_table.cc similarity index 100% rename from paddle/framework/lod_rank_table.cc rename to paddle/fluid/framework/lod_rank_table.cc diff --git a/paddle/framework/lod_rank_table.h b/paddle/fluid/framework/lod_rank_table.h similarity index 100% rename from paddle/framework/lod_rank_table.h rename to paddle/fluid/framework/lod_rank_table.h diff --git a/paddle/framework/lod_tensor.cc b/paddle/fluid/framework/lod_tensor.cc similarity index 100% rename from paddle/framework/lod_tensor.cc rename to paddle/fluid/framework/lod_tensor.cc diff --git a/paddle/framework/lod_tensor.h b/paddle/fluid/framework/lod_tensor.h similarity index 100% rename from paddle/framework/lod_tensor.h rename to paddle/fluid/framework/lod_tensor.h diff --git a/paddle/framework/lod_tensor.md b/paddle/fluid/framework/lod_tensor.md similarity index 100% rename from paddle/framework/lod_tensor.md rename to paddle/fluid/framework/lod_tensor.md diff --git a/paddle/framework/lod_tensor_array.h b/paddle/fluid/framework/lod_tensor_array.h similarity index 100% rename from paddle/framework/lod_tensor_array.h rename to paddle/fluid/framework/lod_tensor_array.h diff --git a/paddle/framework/lod_tensor_test.cc b/paddle/fluid/framework/lod_tensor_test.cc similarity index 100% rename from paddle/framework/lod_tensor_test.cc rename to paddle/fluid/framework/lod_tensor_test.cc diff --git a/paddle/framework/lod_tensor_test.cu b/paddle/fluid/framework/lod_tensor_test.cu similarity index 100% rename from paddle/framework/lod_tensor_test.cu rename to paddle/fluid/framework/lod_tensor_test.cu diff --git a/paddle/framework/mixed_vector.h b/paddle/fluid/framework/mixed_vector.h similarity index 100% rename from paddle/framework/mixed_vector.h rename to paddle/fluid/framework/mixed_vector.h diff --git a/paddle/framework/mixed_vector_test.cu b/paddle/fluid/framework/mixed_vector_test.cu similarity index 100% rename from paddle/framework/mixed_vector_test.cu rename to paddle/fluid/framework/mixed_vector_test.cu diff --git a/paddle/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc similarity index 100% rename from paddle/framework/op_desc.cc rename to paddle/fluid/framework/op_desc.cc diff --git a/paddle/framework/op_desc.h b/paddle/fluid/framework/op_desc.h similarity index 100% rename from paddle/framework/op_desc.h rename to paddle/fluid/framework/op_desc.h diff --git a/paddle/framework/op_info.cc b/paddle/fluid/framework/op_info.cc similarity index 100% rename from paddle/framework/op_info.cc rename to paddle/fluid/framework/op_info.cc diff --git a/paddle/framework/op_info.h b/paddle/fluid/framework/op_info.h similarity index 100% rename from paddle/framework/op_info.h rename to paddle/fluid/framework/op_info.h diff --git a/paddle/framework/op_kernel_type.h b/paddle/fluid/framework/op_kernel_type.h similarity index 100% rename from paddle/framework/op_kernel_type.h rename to paddle/fluid/framework/op_kernel_type.h diff --git a/paddle/framework/op_kernel_type_test.cc b/paddle/fluid/framework/op_kernel_type_test.cc similarity index 100% rename from paddle/framework/op_kernel_type_test.cc rename to paddle/fluid/framework/op_kernel_type_test.cc diff --git a/paddle/framework/op_proto_maker.cc b/paddle/fluid/framework/op_proto_maker.cc similarity index 100% rename from paddle/framework/op_proto_maker.cc rename to paddle/fluid/framework/op_proto_maker.cc diff --git a/paddle/framework/op_proto_maker.h b/paddle/fluid/framework/op_proto_maker.h similarity index 100% rename from paddle/framework/op_proto_maker.h rename to paddle/fluid/framework/op_proto_maker.h diff --git a/paddle/framework/op_proto_maker_test.cc b/paddle/fluid/framework/op_proto_maker_test.cc similarity index 100% rename from paddle/framework/op_proto_maker_test.cc rename to paddle/fluid/framework/op_proto_maker_test.cc diff --git a/paddle/framework/op_registry.cc b/paddle/fluid/framework/op_registry.cc similarity index 100% rename from paddle/framework/op_registry.cc rename to paddle/fluid/framework/op_registry.cc diff --git a/paddle/framework/op_registry.h b/paddle/fluid/framework/op_registry.h similarity index 100% rename from paddle/framework/op_registry.h rename to paddle/fluid/framework/op_registry.h diff --git a/paddle/framework/op_registry_test.cc b/paddle/fluid/framework/op_registry_test.cc similarity index 100% rename from paddle/framework/op_registry_test.cc rename to paddle/fluid/framework/op_registry_test.cc diff --git a/paddle/framework/operator.cc b/paddle/fluid/framework/operator.cc similarity index 100% rename from paddle/framework/operator.cc rename to paddle/fluid/framework/operator.cc diff --git a/paddle/framework/operator.h b/paddle/fluid/framework/operator.h similarity index 100% rename from paddle/framework/operator.h rename to paddle/fluid/framework/operator.h diff --git a/paddle/framework/operator_test.cc b/paddle/fluid/framework/operator_test.cc similarity index 100% rename from paddle/framework/operator_test.cc rename to paddle/fluid/framework/operator_test.cc diff --git a/paddle/framework/program_desc.cc b/paddle/fluid/framework/program_desc.cc similarity index 100% rename from paddle/framework/program_desc.cc rename to paddle/fluid/framework/program_desc.cc diff --git a/paddle/framework/program_desc.h b/paddle/fluid/framework/program_desc.h similarity index 100% rename from paddle/framework/program_desc.h rename to paddle/fluid/framework/program_desc.h diff --git a/paddle/framework/program_desc_test.cc b/paddle/fluid/framework/program_desc_test.cc similarity index 100% rename from paddle/framework/program_desc_test.cc rename to paddle/fluid/framework/program_desc_test.cc diff --git a/paddle/framework/proto_desc.h b/paddle/fluid/framework/proto_desc.h similarity index 100% rename from paddle/framework/proto_desc.h rename to paddle/fluid/framework/proto_desc.h diff --git a/paddle/framework/prune.cc b/paddle/fluid/framework/prune.cc similarity index 100% rename from paddle/framework/prune.cc rename to paddle/fluid/framework/prune.cc diff --git a/paddle/framework/prune.h b/paddle/fluid/framework/prune.h similarity index 100% rename from paddle/framework/prune.h rename to paddle/fluid/framework/prune.h diff --git a/paddle/framework/prune_test.cc b/paddle/fluid/framework/prune_test.cc similarity index 100% rename from paddle/framework/prune_test.cc rename to paddle/fluid/framework/prune_test.cc diff --git a/paddle/framework/reader.cc b/paddle/fluid/framework/reader.cc similarity index 100% rename from paddle/framework/reader.cc rename to paddle/fluid/framework/reader.cc diff --git a/paddle/framework/reader.h b/paddle/fluid/framework/reader.h similarity index 100% rename from paddle/framework/reader.h rename to paddle/fluid/framework/reader.h diff --git a/paddle/framework/scope.cc b/paddle/fluid/framework/scope.cc similarity index 100% rename from paddle/framework/scope.cc rename to paddle/fluid/framework/scope.cc diff --git a/paddle/framework/scope.h b/paddle/fluid/framework/scope.h similarity index 100% rename from paddle/framework/scope.h rename to paddle/fluid/framework/scope.h diff --git a/paddle/framework/scope_test.cc b/paddle/fluid/framework/scope_test.cc similarity index 100% rename from paddle/framework/scope_test.cc rename to paddle/fluid/framework/scope_test.cc diff --git a/paddle/framework/selected_rows.cc b/paddle/fluid/framework/selected_rows.cc similarity index 100% rename from paddle/framework/selected_rows.cc rename to paddle/fluid/framework/selected_rows.cc diff --git a/paddle/framework/selected_rows.h b/paddle/fluid/framework/selected_rows.h similarity index 100% rename from paddle/framework/selected_rows.h rename to paddle/fluid/framework/selected_rows.h diff --git a/paddle/framework/selected_rows_test.cc b/paddle/fluid/framework/selected_rows_test.cc similarity index 100% rename from paddle/framework/selected_rows_test.cc rename to paddle/fluid/framework/selected_rows_test.cc diff --git a/paddle/framework/shape_inference.cc b/paddle/fluid/framework/shape_inference.cc similarity index 100% rename from paddle/framework/shape_inference.cc rename to paddle/fluid/framework/shape_inference.cc diff --git a/paddle/framework/shape_inference.h b/paddle/fluid/framework/shape_inference.h similarity index 100% rename from paddle/framework/shape_inference.h rename to paddle/fluid/framework/shape_inference.h diff --git a/paddle/framework/tensor.cc b/paddle/fluid/framework/tensor.cc similarity index 100% rename from paddle/framework/tensor.cc rename to paddle/fluid/framework/tensor.cc diff --git a/paddle/framework/tensor.h b/paddle/fluid/framework/tensor.h similarity index 100% rename from paddle/framework/tensor.h rename to paddle/fluid/framework/tensor.h diff --git a/paddle/framework/tensor.md b/paddle/fluid/framework/tensor.md similarity index 100% rename from paddle/framework/tensor.md rename to paddle/fluid/framework/tensor.md diff --git a/paddle/framework/tensor_impl.h b/paddle/fluid/framework/tensor_impl.h similarity index 100% rename from paddle/framework/tensor_impl.h rename to paddle/fluid/framework/tensor_impl.h diff --git a/paddle/framework/tensor_test.cc b/paddle/fluid/framework/tensor_test.cc similarity index 100% rename from paddle/framework/tensor_test.cc rename to paddle/fluid/framework/tensor_test.cc diff --git a/paddle/framework/tensor_util.cc b/paddle/fluid/framework/tensor_util.cc similarity index 100% rename from paddle/framework/tensor_util.cc rename to paddle/fluid/framework/tensor_util.cc diff --git a/paddle/framework/tensor_util.cu b/paddle/fluid/framework/tensor_util.cu similarity index 100% rename from paddle/framework/tensor_util.cu rename to paddle/fluid/framework/tensor_util.cu diff --git a/paddle/framework/tensor_util.h b/paddle/fluid/framework/tensor_util.h similarity index 100% rename from paddle/framework/tensor_util.h rename to paddle/fluid/framework/tensor_util.h diff --git a/paddle/framework/tensor_util_test.cc b/paddle/fluid/framework/tensor_util_test.cc similarity index 100% rename from paddle/framework/tensor_util_test.cc rename to paddle/fluid/framework/tensor_util_test.cc diff --git a/paddle/framework/tensor_util_test.cu b/paddle/fluid/framework/tensor_util_test.cu similarity index 100% rename from paddle/framework/tensor_util_test.cu rename to paddle/fluid/framework/tensor_util_test.cu diff --git a/paddle/framework/threadpool.cc b/paddle/fluid/framework/threadpool.cc similarity index 100% rename from paddle/framework/threadpool.cc rename to paddle/fluid/framework/threadpool.cc diff --git a/paddle/framework/threadpool.h b/paddle/fluid/framework/threadpool.h similarity index 100% rename from paddle/framework/threadpool.h rename to paddle/fluid/framework/threadpool.h diff --git a/paddle/framework/threadpool_test.cc b/paddle/fluid/framework/threadpool_test.cc similarity index 100% rename from paddle/framework/threadpool_test.cc rename to paddle/fluid/framework/threadpool_test.cc diff --git a/paddle/framework/type_defs.h b/paddle/fluid/framework/type_defs.h similarity index 100% rename from paddle/framework/type_defs.h rename to paddle/fluid/framework/type_defs.h diff --git a/paddle/framework/var_desc.cc b/paddle/fluid/framework/var_desc.cc similarity index 100% rename from paddle/framework/var_desc.cc rename to paddle/fluid/framework/var_desc.cc diff --git a/paddle/framework/var_desc.h b/paddle/fluid/framework/var_desc.h similarity index 100% rename from paddle/framework/var_desc.h rename to paddle/fluid/framework/var_desc.h diff --git a/paddle/framework/var_type.h b/paddle/fluid/framework/var_type.h similarity index 100% rename from paddle/framework/var_type.h rename to paddle/fluid/framework/var_type.h diff --git a/paddle/framework/var_type_inference.h b/paddle/fluid/framework/var_type_inference.h similarity index 100% rename from paddle/framework/var_type_inference.h rename to paddle/fluid/framework/var_type_inference.h diff --git a/paddle/framework/var_type_inference_test.cc b/paddle/fluid/framework/var_type_inference_test.cc similarity index 100% rename from paddle/framework/var_type_inference_test.cc rename to paddle/fluid/framework/var_type_inference_test.cc diff --git a/paddle/framework/variable.h b/paddle/fluid/framework/variable.h similarity index 100% rename from paddle/framework/variable.h rename to paddle/fluid/framework/variable.h diff --git a/paddle/framework/variable.md b/paddle/fluid/framework/variable.md similarity index 100% rename from paddle/framework/variable.md rename to paddle/fluid/framework/variable.md diff --git a/paddle/framework/variable_test.cc b/paddle/fluid/framework/variable_test.cc similarity index 100% rename from paddle/framework/variable_test.cc rename to paddle/fluid/framework/variable_test.cc diff --git a/paddle/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt similarity index 100% rename from paddle/inference/CMakeLists.txt rename to paddle/fluid/inference/CMakeLists.txt diff --git a/paddle/inference/io.cc b/paddle/fluid/inference/io.cc similarity index 100% rename from paddle/inference/io.cc rename to paddle/fluid/inference/io.cc diff --git a/paddle/inference/io.h b/paddle/fluid/inference/io.h similarity index 100% rename from paddle/inference/io.h rename to paddle/fluid/inference/io.h diff --git a/paddle/inference/tests/book/CMakeLists.txt b/paddle/fluid/inference/tests/book/CMakeLists.txt similarity index 100% rename from paddle/inference/tests/book/CMakeLists.txt rename to paddle/fluid/inference/tests/book/CMakeLists.txt diff --git a/paddle/inference/tests/book/test_helper.h b/paddle/fluid/inference/tests/book/test_helper.h similarity index 100% rename from paddle/inference/tests/book/test_helper.h rename to paddle/fluid/inference/tests/book/test_helper.h diff --git a/paddle/inference/tests/book/test_inference_fit_a_line.cc b/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc similarity index 100% rename from paddle/inference/tests/book/test_inference_fit_a_line.cc rename to paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc diff --git a/paddle/inference/tests/book/test_inference_image_classification.cc b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc similarity index 100% rename from paddle/inference/tests/book/test_inference_image_classification.cc rename to paddle/fluid/inference/tests/book/test_inference_image_classification.cc diff --git a/paddle/inference/tests/book/test_inference_label_semantic_roles.cc b/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc similarity index 100% rename from paddle/inference/tests/book/test_inference_label_semantic_roles.cc rename to paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc diff --git a/paddle/inference/tests/book/test_inference_recognize_digits.cc b/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc similarity index 100% rename from paddle/inference/tests/book/test_inference_recognize_digits.cc rename to paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc diff --git a/paddle/inference/tests/book/test_inference_recommender_system.cc b/paddle/fluid/inference/tests/book/test_inference_recommender_system.cc similarity index 100% rename from paddle/inference/tests/book/test_inference_recommender_system.cc rename to paddle/fluid/inference/tests/book/test_inference_recommender_system.cc diff --git a/paddle/inference/tests/book/test_inference_rnn_encoder_decoder.cc b/paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc similarity index 100% rename from paddle/inference/tests/book/test_inference_rnn_encoder_decoder.cc rename to paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc diff --git a/paddle/inference/tests/book/test_inference_understand_sentiment.cc b/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc similarity index 100% rename from paddle/inference/tests/book/test_inference_understand_sentiment.cc rename to paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc diff --git a/paddle/inference/tests/book/test_inference_word2vec.cc b/paddle/fluid/inference/tests/book/test_inference_word2vec.cc similarity index 100% rename from paddle/inference/tests/book/test_inference_word2vec.cc rename to paddle/fluid/inference/tests/book/test_inference_word2vec.cc diff --git a/paddle/memory/.clang-format b/paddle/fluid/memory/.clang-format similarity index 100% rename from paddle/memory/.clang-format rename to paddle/fluid/memory/.clang-format diff --git a/paddle/memory/CMakeLists.txt b/paddle/fluid/memory/CMakeLists.txt similarity index 100% rename from paddle/memory/CMakeLists.txt rename to paddle/fluid/memory/CMakeLists.txt diff --git a/paddle/memory/README.md b/paddle/fluid/memory/README.md similarity index 100% rename from paddle/memory/README.md rename to paddle/fluid/memory/README.md diff --git a/paddle/memory/detail/CMakeLists.txt b/paddle/fluid/memory/detail/CMakeLists.txt similarity index 100% rename from paddle/memory/detail/CMakeLists.txt rename to paddle/fluid/memory/detail/CMakeLists.txt diff --git a/paddle/memory/detail/buddy_allocator.cc b/paddle/fluid/memory/detail/buddy_allocator.cc similarity index 100% rename from paddle/memory/detail/buddy_allocator.cc rename to paddle/fluid/memory/detail/buddy_allocator.cc diff --git a/paddle/memory/detail/buddy_allocator.h b/paddle/fluid/memory/detail/buddy_allocator.h similarity index 100% rename from paddle/memory/detail/buddy_allocator.h rename to paddle/fluid/memory/detail/buddy_allocator.h diff --git a/paddle/memory/detail/memory_block.cc b/paddle/fluid/memory/detail/memory_block.cc similarity index 100% rename from paddle/memory/detail/memory_block.cc rename to paddle/fluid/memory/detail/memory_block.cc diff --git a/paddle/memory/detail/memory_block.h b/paddle/fluid/memory/detail/memory_block.h similarity index 100% rename from paddle/memory/detail/memory_block.h rename to paddle/fluid/memory/detail/memory_block.h diff --git a/paddle/memory/detail/meta_cache.cc b/paddle/fluid/memory/detail/meta_cache.cc similarity index 100% rename from paddle/memory/detail/meta_cache.cc rename to paddle/fluid/memory/detail/meta_cache.cc diff --git a/paddle/memory/detail/meta_cache.h b/paddle/fluid/memory/detail/meta_cache.h similarity index 100% rename from paddle/memory/detail/meta_cache.h rename to paddle/fluid/memory/detail/meta_cache.h diff --git a/paddle/memory/detail/meta_data.cc b/paddle/fluid/memory/detail/meta_data.cc similarity index 100% rename from paddle/memory/detail/meta_data.cc rename to paddle/fluid/memory/detail/meta_data.cc diff --git a/paddle/memory/detail/meta_data.h b/paddle/fluid/memory/detail/meta_data.h similarity index 100% rename from paddle/memory/detail/meta_data.h rename to paddle/fluid/memory/detail/meta_data.h diff --git a/paddle/memory/detail/system_allocator.cc b/paddle/fluid/memory/detail/system_allocator.cc similarity index 100% rename from paddle/memory/detail/system_allocator.cc rename to paddle/fluid/memory/detail/system_allocator.cc diff --git a/paddle/memory/detail/system_allocator.h b/paddle/fluid/memory/detail/system_allocator.h similarity index 100% rename from paddle/memory/detail/system_allocator.h rename to paddle/fluid/memory/detail/system_allocator.h diff --git a/paddle/memory/detail/system_allocator_test.cc b/paddle/fluid/memory/detail/system_allocator_test.cc similarity index 100% rename from paddle/memory/detail/system_allocator_test.cc rename to paddle/fluid/memory/detail/system_allocator_test.cc diff --git a/paddle/memory/memcpy.cc b/paddle/fluid/memory/memcpy.cc similarity index 100% rename from paddle/memory/memcpy.cc rename to paddle/fluid/memory/memcpy.cc diff --git a/paddle/memory/memcpy.h b/paddle/fluid/memory/memcpy.h similarity index 100% rename from paddle/memory/memcpy.h rename to paddle/fluid/memory/memcpy.h diff --git a/paddle/memory/memory.cc b/paddle/fluid/memory/memory.cc similarity index 100% rename from paddle/memory/memory.cc rename to paddle/fluid/memory/memory.cc diff --git a/paddle/memory/memory.h b/paddle/fluid/memory/memory.h similarity index 100% rename from paddle/memory/memory.h rename to paddle/fluid/memory/memory.h diff --git a/paddle/memory/memory_test.cc b/paddle/fluid/memory/memory_test.cc similarity index 100% rename from paddle/memory/memory_test.cc rename to paddle/fluid/memory/memory_test.cc diff --git a/paddle/operators/.clang-format b/paddle/fluid/operators/.clang-format similarity index 100% rename from paddle/operators/.clang-format rename to paddle/fluid/operators/.clang-format diff --git a/paddle/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt similarity index 100% rename from paddle/operators/CMakeLists.txt rename to paddle/fluid/operators/CMakeLists.txt diff --git a/paddle/operators/accuracy_op.cc b/paddle/fluid/operators/accuracy_op.cc similarity index 100% rename from paddle/operators/accuracy_op.cc rename to paddle/fluid/operators/accuracy_op.cc diff --git a/paddle/operators/accuracy_op.cu b/paddle/fluid/operators/accuracy_op.cu similarity index 100% rename from paddle/operators/accuracy_op.cu rename to paddle/fluid/operators/accuracy_op.cu diff --git a/paddle/operators/accuracy_op.h b/paddle/fluid/operators/accuracy_op.h similarity index 100% rename from paddle/operators/accuracy_op.h rename to paddle/fluid/operators/accuracy_op.h diff --git a/paddle/operators/activation_op.cc b/paddle/fluid/operators/activation_op.cc similarity index 100% rename from paddle/operators/activation_op.cc rename to paddle/fluid/operators/activation_op.cc diff --git a/paddle/operators/activation_op.cu b/paddle/fluid/operators/activation_op.cu similarity index 100% rename from paddle/operators/activation_op.cu rename to paddle/fluid/operators/activation_op.cu diff --git a/paddle/operators/activation_op.h b/paddle/fluid/operators/activation_op.h similarity index 100% rename from paddle/operators/activation_op.h rename to paddle/fluid/operators/activation_op.h diff --git a/paddle/operators/adadelta_op.cc b/paddle/fluid/operators/adadelta_op.cc similarity index 100% rename from paddle/operators/adadelta_op.cc rename to paddle/fluid/operators/adadelta_op.cc diff --git a/paddle/operators/adadelta_op.cu b/paddle/fluid/operators/adadelta_op.cu similarity index 100% rename from paddle/operators/adadelta_op.cu rename to paddle/fluid/operators/adadelta_op.cu diff --git a/paddle/operators/adadelta_op.h b/paddle/fluid/operators/adadelta_op.h similarity index 100% rename from paddle/operators/adadelta_op.h rename to paddle/fluid/operators/adadelta_op.h diff --git a/paddle/operators/adagrad_op.cc b/paddle/fluid/operators/adagrad_op.cc similarity index 100% rename from paddle/operators/adagrad_op.cc rename to paddle/fluid/operators/adagrad_op.cc diff --git a/paddle/operators/adagrad_op.cu b/paddle/fluid/operators/adagrad_op.cu similarity index 100% rename from paddle/operators/adagrad_op.cu rename to paddle/fluid/operators/adagrad_op.cu diff --git a/paddle/operators/adagrad_op.h b/paddle/fluid/operators/adagrad_op.h similarity index 100% rename from paddle/operators/adagrad_op.h rename to paddle/fluid/operators/adagrad_op.h diff --git a/paddle/operators/adam_op.cc b/paddle/fluid/operators/adam_op.cc similarity index 100% rename from paddle/operators/adam_op.cc rename to paddle/fluid/operators/adam_op.cc diff --git a/paddle/operators/adam_op.cu b/paddle/fluid/operators/adam_op.cu similarity index 100% rename from paddle/operators/adam_op.cu rename to paddle/fluid/operators/adam_op.cu diff --git a/paddle/operators/adam_op.h b/paddle/fluid/operators/adam_op.h similarity index 100% rename from paddle/operators/adam_op.h rename to paddle/fluid/operators/adam_op.h diff --git a/paddle/operators/adamax_op.cc b/paddle/fluid/operators/adamax_op.cc similarity index 100% rename from paddle/operators/adamax_op.cc rename to paddle/fluid/operators/adamax_op.cc diff --git a/paddle/operators/adamax_op.cu b/paddle/fluid/operators/adamax_op.cu similarity index 100% rename from paddle/operators/adamax_op.cu rename to paddle/fluid/operators/adamax_op.cu diff --git a/paddle/operators/adamax_op.h b/paddle/fluid/operators/adamax_op.h similarity index 100% rename from paddle/operators/adamax_op.h rename to paddle/fluid/operators/adamax_op.h diff --git a/paddle/operators/array_operator.h b/paddle/fluid/operators/array_operator.h similarity index 100% rename from paddle/operators/array_operator.h rename to paddle/fluid/operators/array_operator.h diff --git a/paddle/operators/array_to_lod_tensor_op.cc b/paddle/fluid/operators/array_to_lod_tensor_op.cc similarity index 100% rename from paddle/operators/array_to_lod_tensor_op.cc rename to paddle/fluid/operators/array_to_lod_tensor_op.cc diff --git a/paddle/operators/assign_op.cc b/paddle/fluid/operators/assign_op.cc similarity index 100% rename from paddle/operators/assign_op.cc rename to paddle/fluid/operators/assign_op.cc diff --git a/paddle/operators/assign_value_op.cc b/paddle/fluid/operators/assign_value_op.cc similarity index 100% rename from paddle/operators/assign_value_op.cc rename to paddle/fluid/operators/assign_value_op.cc diff --git a/paddle/operators/assign_value_op.cu.cc b/paddle/fluid/operators/assign_value_op.cu.cc similarity index 100% rename from paddle/operators/assign_value_op.cu.cc rename to paddle/fluid/operators/assign_value_op.cu.cc diff --git a/paddle/operators/assign_value_op.h b/paddle/fluid/operators/assign_value_op.h similarity index 100% rename from paddle/operators/assign_value_op.h rename to paddle/fluid/operators/assign_value_op.h diff --git a/paddle/operators/auc_op.cc b/paddle/fluid/operators/auc_op.cc similarity index 100% rename from paddle/operators/auc_op.cc rename to paddle/fluid/operators/auc_op.cc diff --git a/paddle/operators/auc_op.h b/paddle/fluid/operators/auc_op.h similarity index 100% rename from paddle/operators/auc_op.h rename to paddle/fluid/operators/auc_op.h diff --git a/paddle/operators/batch_norm_op.cc b/paddle/fluid/operators/batch_norm_op.cc similarity index 100% rename from paddle/operators/batch_norm_op.cc rename to paddle/fluid/operators/batch_norm_op.cc diff --git a/paddle/operators/batch_norm_op.cu.cc b/paddle/fluid/operators/batch_norm_op.cu.cc similarity index 100% rename from paddle/operators/batch_norm_op.cu.cc rename to paddle/fluid/operators/batch_norm_op.cu.cc diff --git a/paddle/operators/batch_norm_op.h b/paddle/fluid/operators/batch_norm_op.h similarity index 100% rename from paddle/operators/batch_norm_op.h rename to paddle/fluid/operators/batch_norm_op.h diff --git a/paddle/operators/beam_search_decode_op.cc b/paddle/fluid/operators/beam_search_decode_op.cc similarity index 100% rename from paddle/operators/beam_search_decode_op.cc rename to paddle/fluid/operators/beam_search_decode_op.cc diff --git a/paddle/operators/beam_search_decode_op.h b/paddle/fluid/operators/beam_search_decode_op.h similarity index 100% rename from paddle/operators/beam_search_decode_op.h rename to paddle/fluid/operators/beam_search_decode_op.h diff --git a/paddle/operators/beam_search_decode_op_test.cc b/paddle/fluid/operators/beam_search_decode_op_test.cc similarity index 100% rename from paddle/operators/beam_search_decode_op_test.cc rename to paddle/fluid/operators/beam_search_decode_op_test.cc diff --git a/paddle/operators/beam_search_op.cc b/paddle/fluid/operators/beam_search_op.cc similarity index 100% rename from paddle/operators/beam_search_op.cc rename to paddle/fluid/operators/beam_search_op.cc diff --git a/paddle/operators/beam_search_op.h b/paddle/fluid/operators/beam_search_op.h similarity index 100% rename from paddle/operators/beam_search_op.h rename to paddle/fluid/operators/beam_search_op.h diff --git a/paddle/operators/beam_search_op_test.cc b/paddle/fluid/operators/beam_search_op_test.cc similarity index 100% rename from paddle/operators/beam_search_op_test.cc rename to paddle/fluid/operators/beam_search_op_test.cc diff --git a/paddle/operators/bilinear_tensor_product_op.cc b/paddle/fluid/operators/bilinear_tensor_product_op.cc similarity index 100% rename from paddle/operators/bilinear_tensor_product_op.cc rename to paddle/fluid/operators/bilinear_tensor_product_op.cc diff --git a/paddle/operators/bilinear_tensor_product_op.cu b/paddle/fluid/operators/bilinear_tensor_product_op.cu similarity index 100% rename from paddle/operators/bilinear_tensor_product_op.cu rename to paddle/fluid/operators/bilinear_tensor_product_op.cu diff --git a/paddle/operators/bilinear_tensor_product_op.h b/paddle/fluid/operators/bilinear_tensor_product_op.h similarity index 100% rename from paddle/operators/bilinear_tensor_product_op.h rename to paddle/fluid/operators/bilinear_tensor_product_op.h diff --git a/paddle/operators/bipartite_match_op.cc b/paddle/fluid/operators/bipartite_match_op.cc similarity index 100% rename from paddle/operators/bipartite_match_op.cc rename to paddle/fluid/operators/bipartite_match_op.cc diff --git a/paddle/operators/box_coder_op.cc b/paddle/fluid/operators/box_coder_op.cc similarity index 100% rename from paddle/operators/box_coder_op.cc rename to paddle/fluid/operators/box_coder_op.cc diff --git a/paddle/operators/box_coder_op.cu b/paddle/fluid/operators/box_coder_op.cu similarity index 100% rename from paddle/operators/box_coder_op.cu rename to paddle/fluid/operators/box_coder_op.cu diff --git a/paddle/operators/box_coder_op.h b/paddle/fluid/operators/box_coder_op.h similarity index 100% rename from paddle/operators/box_coder_op.h rename to paddle/fluid/operators/box_coder_op.h diff --git a/paddle/operators/cast_op.cc b/paddle/fluid/operators/cast_op.cc similarity index 100% rename from paddle/operators/cast_op.cc rename to paddle/fluid/operators/cast_op.cc diff --git a/paddle/operators/cast_op.cu b/paddle/fluid/operators/cast_op.cu similarity index 100% rename from paddle/operators/cast_op.cu rename to paddle/fluid/operators/cast_op.cu diff --git a/paddle/operators/cast_op.h b/paddle/fluid/operators/cast_op.h similarity index 100% rename from paddle/operators/cast_op.h rename to paddle/fluid/operators/cast_op.h diff --git a/paddle/operators/chunk_eval_op.cc b/paddle/fluid/operators/chunk_eval_op.cc similarity index 100% rename from paddle/operators/chunk_eval_op.cc rename to paddle/fluid/operators/chunk_eval_op.cc diff --git a/paddle/operators/chunk_eval_op.h b/paddle/fluid/operators/chunk_eval_op.h similarity index 100% rename from paddle/operators/chunk_eval_op.h rename to paddle/fluid/operators/chunk_eval_op.h diff --git a/paddle/operators/clip_by_norm_op.cc b/paddle/fluid/operators/clip_by_norm_op.cc similarity index 100% rename from paddle/operators/clip_by_norm_op.cc rename to paddle/fluid/operators/clip_by_norm_op.cc diff --git a/paddle/operators/clip_by_norm_op.cu b/paddle/fluid/operators/clip_by_norm_op.cu similarity index 100% rename from paddle/operators/clip_by_norm_op.cu rename to paddle/fluid/operators/clip_by_norm_op.cu diff --git a/paddle/operators/clip_by_norm_op.h b/paddle/fluid/operators/clip_by_norm_op.h similarity index 100% rename from paddle/operators/clip_by_norm_op.h rename to paddle/fluid/operators/clip_by_norm_op.h diff --git a/paddle/operators/clip_op.cc b/paddle/fluid/operators/clip_op.cc similarity index 100% rename from paddle/operators/clip_op.cc rename to paddle/fluid/operators/clip_op.cc diff --git a/paddle/operators/clip_op.cu b/paddle/fluid/operators/clip_op.cu similarity index 100% rename from paddle/operators/clip_op.cu rename to paddle/fluid/operators/clip_op.cu diff --git a/paddle/operators/clip_op.h b/paddle/fluid/operators/clip_op.h similarity index 100% rename from paddle/operators/clip_op.h rename to paddle/fluid/operators/clip_op.h diff --git a/paddle/operators/compare_op.cc b/paddle/fluid/operators/compare_op.cc similarity index 100% rename from paddle/operators/compare_op.cc rename to paddle/fluid/operators/compare_op.cc diff --git a/paddle/operators/compare_op.cu b/paddle/fluid/operators/compare_op.cu similarity index 100% rename from paddle/operators/compare_op.cu rename to paddle/fluid/operators/compare_op.cu diff --git a/paddle/operators/compare_op.h b/paddle/fluid/operators/compare_op.h similarity index 100% rename from paddle/operators/compare_op.h rename to paddle/fluid/operators/compare_op.h diff --git a/paddle/operators/concat_op.cc b/paddle/fluid/operators/concat_op.cc similarity index 100% rename from paddle/operators/concat_op.cc rename to paddle/fluid/operators/concat_op.cc diff --git a/paddle/operators/concat_op.cu.cc b/paddle/fluid/operators/concat_op.cu.cc similarity index 100% rename from paddle/operators/concat_op.cu.cc rename to paddle/fluid/operators/concat_op.cu.cc diff --git a/paddle/operators/concat_op.h b/paddle/fluid/operators/concat_op.h similarity index 100% rename from paddle/operators/concat_op.h rename to paddle/fluid/operators/concat_op.h diff --git a/paddle/operators/cond_op.cc b/paddle/fluid/operators/cond_op.cc similarity index 100% rename from paddle/operators/cond_op.cc rename to paddle/fluid/operators/cond_op.cc diff --git a/paddle/operators/cond_op.h b/paddle/fluid/operators/cond_op.h similarity index 100% rename from paddle/operators/cond_op.h rename to paddle/fluid/operators/cond_op.h diff --git a/paddle/operators/conditional_block_op.cc b/paddle/fluid/operators/conditional_block_op.cc similarity index 100% rename from paddle/operators/conditional_block_op.cc rename to paddle/fluid/operators/conditional_block_op.cc diff --git a/paddle/operators/conv_cudnn_op.cu.cc b/paddle/fluid/operators/conv_cudnn_op.cu.cc similarity index 100% rename from paddle/operators/conv_cudnn_op.cu.cc rename to paddle/fluid/operators/conv_cudnn_op.cu.cc diff --git a/paddle/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc similarity index 100% rename from paddle/operators/conv_op.cc rename to paddle/fluid/operators/conv_op.cc diff --git a/paddle/operators/conv_op.cu.cc b/paddle/fluid/operators/conv_op.cu.cc similarity index 100% rename from paddle/operators/conv_op.cu.cc rename to paddle/fluid/operators/conv_op.cu.cc diff --git a/paddle/operators/conv_op.h b/paddle/fluid/operators/conv_op.h similarity index 100% rename from paddle/operators/conv_op.h rename to paddle/fluid/operators/conv_op.h diff --git a/paddle/operators/conv_shift_op.cc b/paddle/fluid/operators/conv_shift_op.cc similarity index 100% rename from paddle/operators/conv_shift_op.cc rename to paddle/fluid/operators/conv_shift_op.cc diff --git a/paddle/operators/conv_shift_op.cu b/paddle/fluid/operators/conv_shift_op.cu similarity index 100% rename from paddle/operators/conv_shift_op.cu rename to paddle/fluid/operators/conv_shift_op.cu diff --git a/paddle/operators/conv_shift_op.h b/paddle/fluid/operators/conv_shift_op.h similarity index 100% rename from paddle/operators/conv_shift_op.h rename to paddle/fluid/operators/conv_shift_op.h diff --git a/paddle/operators/conv_transpose_cudnn_op.cu.cc b/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc similarity index 100% rename from paddle/operators/conv_transpose_cudnn_op.cu.cc rename to paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc diff --git a/paddle/operators/conv_transpose_op.cc b/paddle/fluid/operators/conv_transpose_op.cc similarity index 100% rename from paddle/operators/conv_transpose_op.cc rename to paddle/fluid/operators/conv_transpose_op.cc diff --git a/paddle/operators/conv_transpose_op.cu.cc b/paddle/fluid/operators/conv_transpose_op.cu.cc similarity index 100% rename from paddle/operators/conv_transpose_op.cu.cc rename to paddle/fluid/operators/conv_transpose_op.cu.cc diff --git a/paddle/operators/conv_transpose_op.h b/paddle/fluid/operators/conv_transpose_op.h similarity index 100% rename from paddle/operators/conv_transpose_op.h rename to paddle/fluid/operators/conv_transpose_op.h diff --git a/paddle/operators/cos_sim_op.cc b/paddle/fluid/operators/cos_sim_op.cc similarity index 100% rename from paddle/operators/cos_sim_op.cc rename to paddle/fluid/operators/cos_sim_op.cc diff --git a/paddle/operators/cos_sim_op.cu b/paddle/fluid/operators/cos_sim_op.cu similarity index 100% rename from paddle/operators/cos_sim_op.cu rename to paddle/fluid/operators/cos_sim_op.cu diff --git a/paddle/operators/cos_sim_op.h b/paddle/fluid/operators/cos_sim_op.h similarity index 100% rename from paddle/operators/cos_sim_op.h rename to paddle/fluid/operators/cos_sim_op.h diff --git a/paddle/operators/create_reader_op.cc b/paddle/fluid/operators/create_reader_op.cc similarity index 100% rename from paddle/operators/create_reader_op.cc rename to paddle/fluid/operators/create_reader_op.cc diff --git a/paddle/operators/crf_decoding_op.cc b/paddle/fluid/operators/crf_decoding_op.cc similarity index 100% rename from paddle/operators/crf_decoding_op.cc rename to paddle/fluid/operators/crf_decoding_op.cc diff --git a/paddle/operators/crf_decoding_op.h b/paddle/fluid/operators/crf_decoding_op.h similarity index 100% rename from paddle/operators/crf_decoding_op.h rename to paddle/fluid/operators/crf_decoding_op.h diff --git a/paddle/operators/crop_op.cc b/paddle/fluid/operators/crop_op.cc similarity index 100% rename from paddle/operators/crop_op.cc rename to paddle/fluid/operators/crop_op.cc diff --git a/paddle/operators/crop_op.cu b/paddle/fluid/operators/crop_op.cu similarity index 100% rename from paddle/operators/crop_op.cu rename to paddle/fluid/operators/crop_op.cu diff --git a/paddle/operators/crop_op.h b/paddle/fluid/operators/crop_op.h similarity index 100% rename from paddle/operators/crop_op.h rename to paddle/fluid/operators/crop_op.h diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/fluid/operators/cross_entropy_op.cc similarity index 100% rename from paddle/operators/cross_entropy_op.cc rename to paddle/fluid/operators/cross_entropy_op.cc diff --git a/paddle/operators/cross_entropy_op.cu b/paddle/fluid/operators/cross_entropy_op.cu similarity index 100% rename from paddle/operators/cross_entropy_op.cu rename to paddle/fluid/operators/cross_entropy_op.cu diff --git a/paddle/operators/cross_entropy_op.h b/paddle/fluid/operators/cross_entropy_op.h similarity index 100% rename from paddle/operators/cross_entropy_op.h rename to paddle/fluid/operators/cross_entropy_op.h diff --git a/paddle/operators/ctc_align_op.cc b/paddle/fluid/operators/ctc_align_op.cc similarity index 100% rename from paddle/operators/ctc_align_op.cc rename to paddle/fluid/operators/ctc_align_op.cc diff --git a/paddle/operators/ctc_align_op.cu b/paddle/fluid/operators/ctc_align_op.cu similarity index 100% rename from paddle/operators/ctc_align_op.cu rename to paddle/fluid/operators/ctc_align_op.cu diff --git a/paddle/operators/ctc_align_op.h b/paddle/fluid/operators/ctc_align_op.h similarity index 100% rename from paddle/operators/ctc_align_op.h rename to paddle/fluid/operators/ctc_align_op.h diff --git a/paddle/operators/cum_op.h b/paddle/fluid/operators/cum_op.h similarity index 100% rename from paddle/operators/cum_op.h rename to paddle/fluid/operators/cum_op.h diff --git a/paddle/operators/cumsum_op.cc b/paddle/fluid/operators/cumsum_op.cc similarity index 100% rename from paddle/operators/cumsum_op.cc rename to paddle/fluid/operators/cumsum_op.cc diff --git a/paddle/operators/cumsum_op.cu b/paddle/fluid/operators/cumsum_op.cu similarity index 100% rename from paddle/operators/cumsum_op.cu rename to paddle/fluid/operators/cumsum_op.cu diff --git a/paddle/operators/decayed_adagrad_op.cc b/paddle/fluid/operators/decayed_adagrad_op.cc similarity index 100% rename from paddle/operators/decayed_adagrad_op.cc rename to paddle/fluid/operators/decayed_adagrad_op.cc diff --git a/paddle/operators/decayed_adagrad_op.cu b/paddle/fluid/operators/decayed_adagrad_op.cu similarity index 100% rename from paddle/operators/decayed_adagrad_op.cu rename to paddle/fluid/operators/decayed_adagrad_op.cu diff --git a/paddle/operators/decayed_adagrad_op.h b/paddle/fluid/operators/decayed_adagrad_op.h similarity index 100% rename from paddle/operators/decayed_adagrad_op.h rename to paddle/fluid/operators/decayed_adagrad_op.h diff --git a/paddle/operators/detail/CMakeLists.txt b/paddle/fluid/operators/detail/CMakeLists.txt similarity index 100% rename from paddle/operators/detail/CMakeLists.txt rename to paddle/fluid/operators/detail/CMakeLists.txt diff --git a/paddle/operators/detail/grpc_client.cc b/paddle/fluid/operators/detail/grpc_client.cc similarity index 100% rename from paddle/operators/detail/grpc_client.cc rename to paddle/fluid/operators/detail/grpc_client.cc diff --git a/paddle/operators/detail/grpc_client.h b/paddle/fluid/operators/detail/grpc_client.h similarity index 100% rename from paddle/operators/detail/grpc_client.h rename to paddle/fluid/operators/detail/grpc_client.h diff --git a/paddle/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc similarity index 100% rename from paddle/operators/detail/grpc_server.cc rename to paddle/fluid/operators/detail/grpc_server.cc diff --git a/paddle/operators/detail/grpc_server.h b/paddle/fluid/operators/detail/grpc_server.h similarity index 100% rename from paddle/operators/detail/grpc_server.h rename to paddle/fluid/operators/detail/grpc_server.h diff --git a/paddle/operators/detail/safe_ref.h b/paddle/fluid/operators/detail/safe_ref.h similarity index 100% rename from paddle/operators/detail/safe_ref.h rename to paddle/fluid/operators/detail/safe_ref.h diff --git a/paddle/operators/detail/send_recv.proto b/paddle/fluid/operators/detail/send_recv.proto similarity index 100% rename from paddle/operators/detail/send_recv.proto rename to paddle/fluid/operators/detail/send_recv.proto diff --git a/paddle/operators/detail/sendrecvop_utils.cc b/paddle/fluid/operators/detail/sendrecvop_utils.cc similarity index 100% rename from paddle/operators/detail/sendrecvop_utils.cc rename to paddle/fluid/operators/detail/sendrecvop_utils.cc diff --git a/paddle/operators/detail/sendrecvop_utils.h b/paddle/fluid/operators/detail/sendrecvop_utils.h similarity index 100% rename from paddle/operators/detail/sendrecvop_utils.h rename to paddle/fluid/operators/detail/sendrecvop_utils.h diff --git a/paddle/operators/detail/simple_block_queue.h b/paddle/fluid/operators/detail/simple_block_queue.h similarity index 100% rename from paddle/operators/detail/simple_block_queue.h rename to paddle/fluid/operators/detail/simple_block_queue.h diff --git a/paddle/operators/detail/strided_memcpy.h b/paddle/fluid/operators/detail/strided_memcpy.h similarity index 100% rename from paddle/operators/detail/strided_memcpy.h rename to paddle/fluid/operators/detail/strided_memcpy.h diff --git a/paddle/operators/detection_output_op.cc b/paddle/fluid/operators/detection_output_op.cc similarity index 100% rename from paddle/operators/detection_output_op.cc rename to paddle/fluid/operators/detection_output_op.cc diff --git a/paddle/operators/detection_output_op.cu.cc b/paddle/fluid/operators/detection_output_op.cu.cc similarity index 100% rename from paddle/operators/detection_output_op.cu.cc rename to paddle/fluid/operators/detection_output_op.cu.cc diff --git a/paddle/operators/detection_output_op.h b/paddle/fluid/operators/detection_output_op.h similarity index 100% rename from paddle/operators/detection_output_op.h rename to paddle/fluid/operators/detection_output_op.h diff --git a/paddle/operators/dropout_op.cc b/paddle/fluid/operators/dropout_op.cc similarity index 100% rename from paddle/operators/dropout_op.cc rename to paddle/fluid/operators/dropout_op.cc diff --git a/paddle/operators/dropout_op.cu b/paddle/fluid/operators/dropout_op.cu similarity index 100% rename from paddle/operators/dropout_op.cu rename to paddle/fluid/operators/dropout_op.cu diff --git a/paddle/operators/dropout_op.h b/paddle/fluid/operators/dropout_op.h similarity index 100% rename from paddle/operators/dropout_op.h rename to paddle/fluid/operators/dropout_op.h diff --git a/paddle/operators/edit_distance_op.cc b/paddle/fluid/operators/edit_distance_op.cc similarity index 100% rename from paddle/operators/edit_distance_op.cc rename to paddle/fluid/operators/edit_distance_op.cc diff --git a/paddle/operators/edit_distance_op.cu b/paddle/fluid/operators/edit_distance_op.cu similarity index 100% rename from paddle/operators/edit_distance_op.cu rename to paddle/fluid/operators/edit_distance_op.cu diff --git a/paddle/operators/edit_distance_op.h b/paddle/fluid/operators/edit_distance_op.h similarity index 100% rename from paddle/operators/edit_distance_op.h rename to paddle/fluid/operators/edit_distance_op.h diff --git a/paddle/operators/elementwise_add_op.cc b/paddle/fluid/operators/elementwise_add_op.cc similarity index 100% rename from paddle/operators/elementwise_add_op.cc rename to paddle/fluid/operators/elementwise_add_op.cc diff --git a/paddle/operators/elementwise_add_op.cu b/paddle/fluid/operators/elementwise_add_op.cu similarity index 100% rename from paddle/operators/elementwise_add_op.cu rename to paddle/fluid/operators/elementwise_add_op.cu diff --git a/paddle/operators/elementwise_add_op.h b/paddle/fluid/operators/elementwise_add_op.h similarity index 100% rename from paddle/operators/elementwise_add_op.h rename to paddle/fluid/operators/elementwise_add_op.h diff --git a/paddle/operators/elementwise_div_op.cc b/paddle/fluid/operators/elementwise_div_op.cc similarity index 100% rename from paddle/operators/elementwise_div_op.cc rename to paddle/fluid/operators/elementwise_div_op.cc diff --git a/paddle/operators/elementwise_div_op.cu b/paddle/fluid/operators/elementwise_div_op.cu similarity index 100% rename from paddle/operators/elementwise_div_op.cu rename to paddle/fluid/operators/elementwise_div_op.cu diff --git a/paddle/operators/elementwise_div_op.h b/paddle/fluid/operators/elementwise_div_op.h similarity index 100% rename from paddle/operators/elementwise_div_op.h rename to paddle/fluid/operators/elementwise_div_op.h diff --git a/paddle/operators/elementwise_max_op.cc b/paddle/fluid/operators/elementwise_max_op.cc similarity index 100% rename from paddle/operators/elementwise_max_op.cc rename to paddle/fluid/operators/elementwise_max_op.cc diff --git a/paddle/operators/elementwise_max_op.cu b/paddle/fluid/operators/elementwise_max_op.cu similarity index 100% rename from paddle/operators/elementwise_max_op.cu rename to paddle/fluid/operators/elementwise_max_op.cu diff --git a/paddle/operators/elementwise_max_op.h b/paddle/fluid/operators/elementwise_max_op.h similarity index 100% rename from paddle/operators/elementwise_max_op.h rename to paddle/fluid/operators/elementwise_max_op.h diff --git a/paddle/operators/elementwise_min_op.cc b/paddle/fluid/operators/elementwise_min_op.cc similarity index 100% rename from paddle/operators/elementwise_min_op.cc rename to paddle/fluid/operators/elementwise_min_op.cc diff --git a/paddle/operators/elementwise_min_op.cu b/paddle/fluid/operators/elementwise_min_op.cu similarity index 100% rename from paddle/operators/elementwise_min_op.cu rename to paddle/fluid/operators/elementwise_min_op.cu diff --git a/paddle/operators/elementwise_min_op.h b/paddle/fluid/operators/elementwise_min_op.h similarity index 100% rename from paddle/operators/elementwise_min_op.h rename to paddle/fluid/operators/elementwise_min_op.h diff --git a/paddle/operators/elementwise_mul_op.cc b/paddle/fluid/operators/elementwise_mul_op.cc similarity index 100% rename from paddle/operators/elementwise_mul_op.cc rename to paddle/fluid/operators/elementwise_mul_op.cc diff --git a/paddle/operators/elementwise_mul_op.cu b/paddle/fluid/operators/elementwise_mul_op.cu similarity index 100% rename from paddle/operators/elementwise_mul_op.cu rename to paddle/fluid/operators/elementwise_mul_op.cu diff --git a/paddle/operators/elementwise_mul_op.h b/paddle/fluid/operators/elementwise_mul_op.h similarity index 100% rename from paddle/operators/elementwise_mul_op.h rename to paddle/fluid/operators/elementwise_mul_op.h diff --git a/paddle/operators/elementwise_op.h b/paddle/fluid/operators/elementwise_op.h similarity index 100% rename from paddle/operators/elementwise_op.h rename to paddle/fluid/operators/elementwise_op.h diff --git a/paddle/operators/elementwise_op_function.h b/paddle/fluid/operators/elementwise_op_function.h similarity index 100% rename from paddle/operators/elementwise_op_function.h rename to paddle/fluid/operators/elementwise_op_function.h diff --git a/paddle/operators/elementwise_pow_op.cc b/paddle/fluid/operators/elementwise_pow_op.cc similarity index 100% rename from paddle/operators/elementwise_pow_op.cc rename to paddle/fluid/operators/elementwise_pow_op.cc diff --git a/paddle/operators/elementwise_pow_op.cu b/paddle/fluid/operators/elementwise_pow_op.cu similarity index 100% rename from paddle/operators/elementwise_pow_op.cu rename to paddle/fluid/operators/elementwise_pow_op.cu diff --git a/paddle/operators/elementwise_pow_op.h b/paddle/fluid/operators/elementwise_pow_op.h similarity index 100% rename from paddle/operators/elementwise_pow_op.h rename to paddle/fluid/operators/elementwise_pow_op.h diff --git a/paddle/operators/elementwise_sub_op.cc b/paddle/fluid/operators/elementwise_sub_op.cc similarity index 100% rename from paddle/operators/elementwise_sub_op.cc rename to paddle/fluid/operators/elementwise_sub_op.cc diff --git a/paddle/operators/elementwise_sub_op.cu b/paddle/fluid/operators/elementwise_sub_op.cu similarity index 100% rename from paddle/operators/elementwise_sub_op.cu rename to paddle/fluid/operators/elementwise_sub_op.cu diff --git a/paddle/operators/elementwise_sub_op.h b/paddle/fluid/operators/elementwise_sub_op.h similarity index 100% rename from paddle/operators/elementwise_sub_op.h rename to paddle/fluid/operators/elementwise_sub_op.h diff --git a/paddle/operators/expand_op.cc b/paddle/fluid/operators/expand_op.cc similarity index 100% rename from paddle/operators/expand_op.cc rename to paddle/fluid/operators/expand_op.cc diff --git a/paddle/operators/expand_op.cu b/paddle/fluid/operators/expand_op.cu similarity index 100% rename from paddle/operators/expand_op.cu rename to paddle/fluid/operators/expand_op.cu diff --git a/paddle/operators/expand_op.h b/paddle/fluid/operators/expand_op.h similarity index 100% rename from paddle/operators/expand_op.h rename to paddle/fluid/operators/expand_op.h diff --git a/paddle/operators/feed_op.cc b/paddle/fluid/operators/feed_op.cc similarity index 100% rename from paddle/operators/feed_op.cc rename to paddle/fluid/operators/feed_op.cc diff --git a/paddle/operators/fetch_op.cc b/paddle/fluid/operators/fetch_op.cc similarity index 100% rename from paddle/operators/fetch_op.cc rename to paddle/fluid/operators/fetch_op.cc diff --git a/paddle/operators/fill_constant_batch_size_like_op.cc b/paddle/fluid/operators/fill_constant_batch_size_like_op.cc similarity index 100% rename from paddle/operators/fill_constant_batch_size_like_op.cc rename to paddle/fluid/operators/fill_constant_batch_size_like_op.cc diff --git a/paddle/operators/fill_constant_batch_size_like_op.cu.cc b/paddle/fluid/operators/fill_constant_batch_size_like_op.cu.cc similarity index 100% rename from paddle/operators/fill_constant_batch_size_like_op.cu.cc rename to paddle/fluid/operators/fill_constant_batch_size_like_op.cu.cc diff --git a/paddle/operators/fill_constant_batch_size_like_op.h b/paddle/fluid/operators/fill_constant_batch_size_like_op.h similarity index 100% rename from paddle/operators/fill_constant_batch_size_like_op.h rename to paddle/fluid/operators/fill_constant_batch_size_like_op.h diff --git a/paddle/operators/fill_constant_op.cc b/paddle/fluid/operators/fill_constant_op.cc similarity index 100% rename from paddle/operators/fill_constant_op.cc rename to paddle/fluid/operators/fill_constant_op.cc diff --git a/paddle/operators/fill_op.cc b/paddle/fluid/operators/fill_op.cc similarity index 100% rename from paddle/operators/fill_op.cc rename to paddle/fluid/operators/fill_op.cc diff --git a/paddle/operators/fill_zeros_like_op.cc b/paddle/fluid/operators/fill_zeros_like_op.cc similarity index 100% rename from paddle/operators/fill_zeros_like_op.cc rename to paddle/fluid/operators/fill_zeros_like_op.cc diff --git a/paddle/operators/fill_zeros_like_op.cu.cc b/paddle/fluid/operators/fill_zeros_like_op.cu.cc similarity index 100% rename from paddle/operators/fill_zeros_like_op.cu.cc rename to paddle/fluid/operators/fill_zeros_like_op.cu.cc diff --git a/paddle/operators/fill_zeros_like_op.h b/paddle/fluid/operators/fill_zeros_like_op.h similarity index 100% rename from paddle/operators/fill_zeros_like_op.h rename to paddle/fluid/operators/fill_zeros_like_op.h diff --git a/paddle/operators/ftrl_op.cc b/paddle/fluid/operators/ftrl_op.cc similarity index 100% rename from paddle/operators/ftrl_op.cc rename to paddle/fluid/operators/ftrl_op.cc diff --git a/paddle/operators/ftrl_op.cu b/paddle/fluid/operators/ftrl_op.cu similarity index 100% rename from paddle/operators/ftrl_op.cu rename to paddle/fluid/operators/ftrl_op.cu diff --git a/paddle/operators/ftrl_op.h b/paddle/fluid/operators/ftrl_op.h similarity index 100% rename from paddle/operators/ftrl_op.h rename to paddle/fluid/operators/ftrl_op.h diff --git a/paddle/operators/gather.cu.h b/paddle/fluid/operators/gather.cu.h similarity index 100% rename from paddle/operators/gather.cu.h rename to paddle/fluid/operators/gather.cu.h diff --git a/paddle/operators/gather.h b/paddle/fluid/operators/gather.h similarity index 100% rename from paddle/operators/gather.h rename to paddle/fluid/operators/gather.h diff --git a/paddle/operators/gather_op.cc b/paddle/fluid/operators/gather_op.cc similarity index 100% rename from paddle/operators/gather_op.cc rename to paddle/fluid/operators/gather_op.cc diff --git a/paddle/operators/gather_op.cu b/paddle/fluid/operators/gather_op.cu similarity index 100% rename from paddle/operators/gather_op.cu rename to paddle/fluid/operators/gather_op.cu diff --git a/paddle/operators/gather_op.h b/paddle/fluid/operators/gather_op.h similarity index 100% rename from paddle/operators/gather_op.h rename to paddle/fluid/operators/gather_op.h diff --git a/paddle/operators/gather_test.cc b/paddle/fluid/operators/gather_test.cc similarity index 100% rename from paddle/operators/gather_test.cc rename to paddle/fluid/operators/gather_test.cc diff --git a/paddle/operators/gaussian_random_op.cc b/paddle/fluid/operators/gaussian_random_op.cc similarity index 100% rename from paddle/operators/gaussian_random_op.cc rename to paddle/fluid/operators/gaussian_random_op.cc diff --git a/paddle/operators/gaussian_random_op.cu b/paddle/fluid/operators/gaussian_random_op.cu similarity index 100% rename from paddle/operators/gaussian_random_op.cu rename to paddle/fluid/operators/gaussian_random_op.cu diff --git a/paddle/operators/get_places_op.cc b/paddle/fluid/operators/get_places_op.cc similarity index 100% rename from paddle/operators/get_places_op.cc rename to paddle/fluid/operators/get_places_op.cc diff --git a/paddle/operators/gru_op.cc b/paddle/fluid/operators/gru_op.cc similarity index 100% rename from paddle/operators/gru_op.cc rename to paddle/fluid/operators/gru_op.cc diff --git a/paddle/operators/gru_op.cu.cc b/paddle/fluid/operators/gru_op.cu.cc similarity index 100% rename from paddle/operators/gru_op.cu.cc rename to paddle/fluid/operators/gru_op.cu.cc diff --git a/paddle/operators/gru_op.h b/paddle/fluid/operators/gru_op.h similarity index 100% rename from paddle/operators/gru_op.h rename to paddle/fluid/operators/gru_op.h diff --git a/paddle/operators/gru_unit_op.cc b/paddle/fluid/operators/gru_unit_op.cc similarity index 100% rename from paddle/operators/gru_unit_op.cc rename to paddle/fluid/operators/gru_unit_op.cc diff --git a/paddle/operators/gru_unit_op.cu b/paddle/fluid/operators/gru_unit_op.cu similarity index 100% rename from paddle/operators/gru_unit_op.cu rename to paddle/fluid/operators/gru_unit_op.cu diff --git a/paddle/operators/gru_unit_op.h b/paddle/fluid/operators/gru_unit_op.h similarity index 100% rename from paddle/operators/gru_unit_op.h rename to paddle/fluid/operators/gru_unit_op.h diff --git a/paddle/operators/hinge_loss_op.cc b/paddle/fluid/operators/hinge_loss_op.cc similarity index 100% rename from paddle/operators/hinge_loss_op.cc rename to paddle/fluid/operators/hinge_loss_op.cc diff --git a/paddle/operators/hinge_loss_op.cu b/paddle/fluid/operators/hinge_loss_op.cu similarity index 100% rename from paddle/operators/hinge_loss_op.cu rename to paddle/fluid/operators/hinge_loss_op.cu diff --git a/paddle/operators/hinge_loss_op.h b/paddle/fluid/operators/hinge_loss_op.h similarity index 100% rename from paddle/operators/hinge_loss_op.h rename to paddle/fluid/operators/hinge_loss_op.h diff --git a/paddle/operators/huber_loss_op.cc b/paddle/fluid/operators/huber_loss_op.cc similarity index 100% rename from paddle/operators/huber_loss_op.cc rename to paddle/fluid/operators/huber_loss_op.cc diff --git a/paddle/operators/huber_loss_op.cu b/paddle/fluid/operators/huber_loss_op.cu similarity index 100% rename from paddle/operators/huber_loss_op.cu rename to paddle/fluid/operators/huber_loss_op.cu diff --git a/paddle/operators/huber_loss_op.h b/paddle/fluid/operators/huber_loss_op.h similarity index 100% rename from paddle/operators/huber_loss_op.h rename to paddle/fluid/operators/huber_loss_op.h diff --git a/paddle/operators/im2sequence_op.cc b/paddle/fluid/operators/im2sequence_op.cc similarity index 100% rename from paddle/operators/im2sequence_op.cc rename to paddle/fluid/operators/im2sequence_op.cc diff --git a/paddle/operators/im2sequence_op.cu b/paddle/fluid/operators/im2sequence_op.cu similarity index 100% rename from paddle/operators/im2sequence_op.cu rename to paddle/fluid/operators/im2sequence_op.cu diff --git a/paddle/operators/im2sequence_op.h b/paddle/fluid/operators/im2sequence_op.h similarity index 100% rename from paddle/operators/im2sequence_op.h rename to paddle/fluid/operators/im2sequence_op.h diff --git a/paddle/operators/images/batch_norm_fork.dot b/paddle/fluid/operators/images/batch_norm_fork.dot similarity index 100% rename from paddle/operators/images/batch_norm_fork.dot rename to paddle/fluid/operators/images/batch_norm_fork.dot diff --git a/paddle/operators/images/batch_norm_fork.png b/paddle/fluid/operators/images/batch_norm_fork.png similarity index 100% rename from paddle/operators/images/batch_norm_fork.png rename to paddle/fluid/operators/images/batch_norm_fork.png diff --git a/paddle/operators/images/batch_norm_op_kernel.png b/paddle/fluid/operators/images/batch_norm_op_kernel.png similarity index 100% rename from paddle/operators/images/batch_norm_op_kernel.png rename to paddle/fluid/operators/images/batch_norm_op_kernel.png diff --git a/paddle/operators/increment_op.cc b/paddle/fluid/operators/increment_op.cc similarity index 100% rename from paddle/operators/increment_op.cc rename to paddle/fluid/operators/increment_op.cc diff --git a/paddle/operators/iou_similarity_op.cc b/paddle/fluid/operators/iou_similarity_op.cc similarity index 100% rename from paddle/operators/iou_similarity_op.cc rename to paddle/fluid/operators/iou_similarity_op.cc diff --git a/paddle/operators/iou_similarity_op.cu b/paddle/fluid/operators/iou_similarity_op.cu similarity index 100% rename from paddle/operators/iou_similarity_op.cu rename to paddle/fluid/operators/iou_similarity_op.cu diff --git a/paddle/operators/iou_similarity_op.h b/paddle/fluid/operators/iou_similarity_op.h similarity index 100% rename from paddle/operators/iou_similarity_op.h rename to paddle/fluid/operators/iou_similarity_op.h diff --git a/paddle/operators/is_empty_op.cc b/paddle/fluid/operators/is_empty_op.cc similarity index 100% rename from paddle/operators/is_empty_op.cc rename to paddle/fluid/operators/is_empty_op.cc diff --git a/paddle/operators/l1_norm_op.cc b/paddle/fluid/operators/l1_norm_op.cc similarity index 100% rename from paddle/operators/l1_norm_op.cc rename to paddle/fluid/operators/l1_norm_op.cc diff --git a/paddle/operators/l1_norm_op.cu b/paddle/fluid/operators/l1_norm_op.cu similarity index 100% rename from paddle/operators/l1_norm_op.cu rename to paddle/fluid/operators/l1_norm_op.cu diff --git a/paddle/operators/l1_norm_op.h b/paddle/fluid/operators/l1_norm_op.h similarity index 100% rename from paddle/operators/l1_norm_op.h rename to paddle/fluid/operators/l1_norm_op.h diff --git a/paddle/operators/label_smooth_op.cc b/paddle/fluid/operators/label_smooth_op.cc similarity index 100% rename from paddle/operators/label_smooth_op.cc rename to paddle/fluid/operators/label_smooth_op.cc diff --git a/paddle/operators/label_smooth_op.cu b/paddle/fluid/operators/label_smooth_op.cu similarity index 100% rename from paddle/operators/label_smooth_op.cu rename to paddle/fluid/operators/label_smooth_op.cu diff --git a/paddle/operators/label_smooth_op.h b/paddle/fluid/operators/label_smooth_op.h similarity index 100% rename from paddle/operators/label_smooth_op.h rename to paddle/fluid/operators/label_smooth_op.h diff --git a/paddle/operators/layer_norm_op.cc b/paddle/fluid/operators/layer_norm_op.cc similarity index 100% rename from paddle/operators/layer_norm_op.cc rename to paddle/fluid/operators/layer_norm_op.cc diff --git a/paddle/operators/layer_norm_op.cu b/paddle/fluid/operators/layer_norm_op.cu similarity index 100% rename from paddle/operators/layer_norm_op.cu rename to paddle/fluid/operators/layer_norm_op.cu diff --git a/paddle/operators/layer_norm_op.h b/paddle/fluid/operators/layer_norm_op.h similarity index 100% rename from paddle/operators/layer_norm_op.h rename to paddle/fluid/operators/layer_norm_op.h diff --git a/paddle/operators/linear_chain_crf_op.cc b/paddle/fluid/operators/linear_chain_crf_op.cc similarity index 100% rename from paddle/operators/linear_chain_crf_op.cc rename to paddle/fluid/operators/linear_chain_crf_op.cc diff --git a/paddle/operators/linear_chain_crf_op.cu b/paddle/fluid/operators/linear_chain_crf_op.cu similarity index 100% rename from paddle/operators/linear_chain_crf_op.cu rename to paddle/fluid/operators/linear_chain_crf_op.cu diff --git a/paddle/operators/linear_chain_crf_op.h b/paddle/fluid/operators/linear_chain_crf_op.h similarity index 100% rename from paddle/operators/linear_chain_crf_op.h rename to paddle/fluid/operators/linear_chain_crf_op.h diff --git a/paddle/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc similarity index 100% rename from paddle/operators/listen_and_serv_op.cc rename to paddle/fluid/operators/listen_and_serv_op.cc diff --git a/paddle/operators/load_combine_op.cc b/paddle/fluid/operators/load_combine_op.cc similarity index 100% rename from paddle/operators/load_combine_op.cc rename to paddle/fluid/operators/load_combine_op.cc diff --git a/paddle/operators/load_op.cc b/paddle/fluid/operators/load_op.cc similarity index 100% rename from paddle/operators/load_op.cc rename to paddle/fluid/operators/load_op.cc diff --git a/paddle/operators/lod_array_length_op.cc b/paddle/fluid/operators/lod_array_length_op.cc similarity index 100% rename from paddle/operators/lod_array_length_op.cc rename to paddle/fluid/operators/lod_array_length_op.cc diff --git a/paddle/operators/lod_rank_table_op.cc b/paddle/fluid/operators/lod_rank_table_op.cc similarity index 100% rename from paddle/operators/lod_rank_table_op.cc rename to paddle/fluid/operators/lod_rank_table_op.cc diff --git a/paddle/operators/lod_reset_op.cc b/paddle/fluid/operators/lod_reset_op.cc similarity index 100% rename from paddle/operators/lod_reset_op.cc rename to paddle/fluid/operators/lod_reset_op.cc diff --git a/paddle/operators/lod_reset_op.cu b/paddle/fluid/operators/lod_reset_op.cu similarity index 100% rename from paddle/operators/lod_reset_op.cu rename to paddle/fluid/operators/lod_reset_op.cu diff --git a/paddle/operators/lod_reset_op.h b/paddle/fluid/operators/lod_reset_op.h similarity index 100% rename from paddle/operators/lod_reset_op.h rename to paddle/fluid/operators/lod_reset_op.h diff --git a/paddle/operators/lod_tensor_to_array_op.cc b/paddle/fluid/operators/lod_tensor_to_array_op.cc similarity index 100% rename from paddle/operators/lod_tensor_to_array_op.cc rename to paddle/fluid/operators/lod_tensor_to_array_op.cc diff --git a/paddle/operators/log_loss_op.cc b/paddle/fluid/operators/log_loss_op.cc similarity index 100% rename from paddle/operators/log_loss_op.cc rename to paddle/fluid/operators/log_loss_op.cc diff --git a/paddle/operators/log_loss_op.cu b/paddle/fluid/operators/log_loss_op.cu similarity index 100% rename from paddle/operators/log_loss_op.cu rename to paddle/fluid/operators/log_loss_op.cu diff --git a/paddle/operators/log_loss_op.h b/paddle/fluid/operators/log_loss_op.h similarity index 100% rename from paddle/operators/log_loss_op.h rename to paddle/fluid/operators/log_loss_op.h diff --git a/paddle/operators/logical_op.cc b/paddle/fluid/operators/logical_op.cc similarity index 100% rename from paddle/operators/logical_op.cc rename to paddle/fluid/operators/logical_op.cc diff --git a/paddle/operators/logical_op.cu b/paddle/fluid/operators/logical_op.cu similarity index 100% rename from paddle/operators/logical_op.cu rename to paddle/fluid/operators/logical_op.cu diff --git a/paddle/operators/logical_op.h b/paddle/fluid/operators/logical_op.h similarity index 100% rename from paddle/operators/logical_op.h rename to paddle/fluid/operators/logical_op.h diff --git a/paddle/operators/lookup_table_op.cc b/paddle/fluid/operators/lookup_table_op.cc similarity index 100% rename from paddle/operators/lookup_table_op.cc rename to paddle/fluid/operators/lookup_table_op.cc diff --git a/paddle/operators/lookup_table_op.cu b/paddle/fluid/operators/lookup_table_op.cu similarity index 100% rename from paddle/operators/lookup_table_op.cu rename to paddle/fluid/operators/lookup_table_op.cu diff --git a/paddle/operators/lookup_table_op.h b/paddle/fluid/operators/lookup_table_op.h similarity index 100% rename from paddle/operators/lookup_table_op.h rename to paddle/fluid/operators/lookup_table_op.h diff --git a/paddle/operators/lrn_op.cc b/paddle/fluid/operators/lrn_op.cc similarity index 100% rename from paddle/operators/lrn_op.cc rename to paddle/fluid/operators/lrn_op.cc diff --git a/paddle/operators/lrn_op.cu b/paddle/fluid/operators/lrn_op.cu similarity index 100% rename from paddle/operators/lrn_op.cu rename to paddle/fluid/operators/lrn_op.cu diff --git a/paddle/operators/lrn_op.h b/paddle/fluid/operators/lrn_op.h similarity index 100% rename from paddle/operators/lrn_op.h rename to paddle/fluid/operators/lrn_op.h diff --git a/paddle/operators/lstm_op.cc b/paddle/fluid/operators/lstm_op.cc similarity index 100% rename from paddle/operators/lstm_op.cc rename to paddle/fluid/operators/lstm_op.cc diff --git a/paddle/operators/lstm_op.cu.cc b/paddle/fluid/operators/lstm_op.cu.cc similarity index 100% rename from paddle/operators/lstm_op.cu.cc rename to paddle/fluid/operators/lstm_op.cu.cc diff --git a/paddle/operators/lstm_op.h b/paddle/fluid/operators/lstm_op.h similarity index 100% rename from paddle/operators/lstm_op.h rename to paddle/fluid/operators/lstm_op.h diff --git a/paddle/operators/lstm_unit_op.cc b/paddle/fluid/operators/lstm_unit_op.cc similarity index 100% rename from paddle/operators/lstm_unit_op.cc rename to paddle/fluid/operators/lstm_unit_op.cc diff --git a/paddle/operators/lstm_unit_op.cu b/paddle/fluid/operators/lstm_unit_op.cu similarity index 100% rename from paddle/operators/lstm_unit_op.cu rename to paddle/fluid/operators/lstm_unit_op.cu diff --git a/paddle/operators/lstm_unit_op.h b/paddle/fluid/operators/lstm_unit_op.h similarity index 100% rename from paddle/operators/lstm_unit_op.h rename to paddle/fluid/operators/lstm_unit_op.h diff --git a/paddle/operators/lstmp_op.cc b/paddle/fluid/operators/lstmp_op.cc similarity index 100% rename from paddle/operators/lstmp_op.cc rename to paddle/fluid/operators/lstmp_op.cc diff --git a/paddle/operators/lstmp_op.cu b/paddle/fluid/operators/lstmp_op.cu similarity index 100% rename from paddle/operators/lstmp_op.cu rename to paddle/fluid/operators/lstmp_op.cu diff --git a/paddle/operators/lstmp_op.h b/paddle/fluid/operators/lstmp_op.h similarity index 100% rename from paddle/operators/lstmp_op.h rename to paddle/fluid/operators/lstmp_op.h diff --git a/paddle/operators/margin_rank_loss_op.cc b/paddle/fluid/operators/margin_rank_loss_op.cc similarity index 100% rename from paddle/operators/margin_rank_loss_op.cc rename to paddle/fluid/operators/margin_rank_loss_op.cc diff --git a/paddle/operators/margin_rank_loss_op.cu b/paddle/fluid/operators/margin_rank_loss_op.cu similarity index 100% rename from paddle/operators/margin_rank_loss_op.cu rename to paddle/fluid/operators/margin_rank_loss_op.cu diff --git a/paddle/operators/margin_rank_loss_op.h b/paddle/fluid/operators/margin_rank_loss_op.h similarity index 100% rename from paddle/operators/margin_rank_loss_op.h rename to paddle/fluid/operators/margin_rank_loss_op.h diff --git a/paddle/operators/math/CMakeLists.txt b/paddle/fluid/operators/math/CMakeLists.txt similarity index 100% rename from paddle/operators/math/CMakeLists.txt rename to paddle/fluid/operators/math/CMakeLists.txt diff --git a/paddle/operators/math/context_project.cc b/paddle/fluid/operators/math/context_project.cc similarity index 100% rename from paddle/operators/math/context_project.cc rename to paddle/fluid/operators/math/context_project.cc diff --git a/paddle/operators/math/context_project.cu b/paddle/fluid/operators/math/context_project.cu similarity index 100% rename from paddle/operators/math/context_project.cu rename to paddle/fluid/operators/math/context_project.cu diff --git a/paddle/operators/math/context_project.h b/paddle/fluid/operators/math/context_project.h similarity index 100% rename from paddle/operators/math/context_project.h rename to paddle/fluid/operators/math/context_project.h diff --git a/paddle/operators/math/cos_sim_functor.cc b/paddle/fluid/operators/math/cos_sim_functor.cc similarity index 100% rename from paddle/operators/math/cos_sim_functor.cc rename to paddle/fluid/operators/math/cos_sim_functor.cc diff --git a/paddle/operators/math/cos_sim_functor.cu b/paddle/fluid/operators/math/cos_sim_functor.cu similarity index 100% rename from paddle/operators/math/cos_sim_functor.cu rename to paddle/fluid/operators/math/cos_sim_functor.cu diff --git a/paddle/operators/math/cos_sim_functor.h b/paddle/fluid/operators/math/cos_sim_functor.h similarity index 100% rename from paddle/operators/math/cos_sim_functor.h rename to paddle/fluid/operators/math/cos_sim_functor.h diff --git a/paddle/operators/math/cross_entropy.cc b/paddle/fluid/operators/math/cross_entropy.cc similarity index 100% rename from paddle/operators/math/cross_entropy.cc rename to paddle/fluid/operators/math/cross_entropy.cc diff --git a/paddle/operators/math/cross_entropy.cu b/paddle/fluid/operators/math/cross_entropy.cu similarity index 100% rename from paddle/operators/math/cross_entropy.cu rename to paddle/fluid/operators/math/cross_entropy.cu diff --git a/paddle/operators/math/cross_entropy.h b/paddle/fluid/operators/math/cross_entropy.h similarity index 100% rename from paddle/operators/math/cross_entropy.h rename to paddle/fluid/operators/math/cross_entropy.h diff --git a/paddle/operators/math/depthwise_conv.cu b/paddle/fluid/operators/math/depthwise_conv.cu similarity index 100% rename from paddle/operators/math/depthwise_conv.cu rename to paddle/fluid/operators/math/depthwise_conv.cu diff --git a/paddle/operators/math/depthwise_conv.h b/paddle/fluid/operators/math/depthwise_conv.h similarity index 100% rename from paddle/operators/math/depthwise_conv.h rename to paddle/fluid/operators/math/depthwise_conv.h diff --git a/paddle/operators/math/detail/CMakeLists.txt b/paddle/fluid/operators/math/detail/CMakeLists.txt similarity index 100% rename from paddle/operators/math/detail/CMakeLists.txt rename to paddle/fluid/operators/math/detail/CMakeLists.txt diff --git a/paddle/operators/math/detail/activation_functions.h b/paddle/fluid/operators/math/detail/activation_functions.h similarity index 100% rename from paddle/operators/math/detail/activation_functions.h rename to paddle/fluid/operators/math/detail/activation_functions.h diff --git a/paddle/operators/math/detail/avx_functions.cc b/paddle/fluid/operators/math/detail/avx_functions.cc similarity index 100% rename from paddle/operators/math/detail/avx_functions.cc rename to paddle/fluid/operators/math/detail/avx_functions.cc diff --git a/paddle/operators/math/detail/gru_cpu_kernel.h b/paddle/fluid/operators/math/detail/gru_cpu_kernel.h similarity index 100% rename from paddle/operators/math/detail/gru_cpu_kernel.h rename to paddle/fluid/operators/math/detail/gru_cpu_kernel.h diff --git a/paddle/operators/math/detail/gru_gpu_kernel.h b/paddle/fluid/operators/math/detail/gru_gpu_kernel.h similarity index 100% rename from paddle/operators/math/detail/gru_gpu_kernel.h rename to paddle/fluid/operators/math/detail/gru_gpu_kernel.h diff --git a/paddle/operators/math/detail/gru_kernel.h b/paddle/fluid/operators/math/detail/gru_kernel.h similarity index 100% rename from paddle/operators/math/detail/gru_kernel.h rename to paddle/fluid/operators/math/detail/gru_kernel.h diff --git a/paddle/operators/math/detail/lstm_cpu_kernel.h b/paddle/fluid/operators/math/detail/lstm_cpu_kernel.h similarity index 100% rename from paddle/operators/math/detail/lstm_cpu_kernel.h rename to paddle/fluid/operators/math/detail/lstm_cpu_kernel.h diff --git a/paddle/operators/math/detail/lstm_gpu_kernel.h b/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h similarity index 100% rename from paddle/operators/math/detail/lstm_gpu_kernel.h rename to paddle/fluid/operators/math/detail/lstm_gpu_kernel.h diff --git a/paddle/operators/math/detail/lstm_kernel.h b/paddle/fluid/operators/math/detail/lstm_kernel.h similarity index 100% rename from paddle/operators/math/detail/lstm_kernel.h rename to paddle/fluid/operators/math/detail/lstm_kernel.h diff --git a/paddle/operators/math/detection_util.h b/paddle/fluid/operators/math/detection_util.h similarity index 100% rename from paddle/operators/math/detection_util.h rename to paddle/fluid/operators/math/detection_util.h diff --git a/paddle/operators/math/gru_compute.cc b/paddle/fluid/operators/math/gru_compute.cc similarity index 100% rename from paddle/operators/math/gru_compute.cc rename to paddle/fluid/operators/math/gru_compute.cc diff --git a/paddle/operators/math/gru_compute.cu b/paddle/fluid/operators/math/gru_compute.cu similarity index 100% rename from paddle/operators/math/gru_compute.cu rename to paddle/fluid/operators/math/gru_compute.cu diff --git a/paddle/operators/math/gru_compute.h b/paddle/fluid/operators/math/gru_compute.h similarity index 100% rename from paddle/operators/math/gru_compute.h rename to paddle/fluid/operators/math/gru_compute.h diff --git a/paddle/operators/math/im2col.cc b/paddle/fluid/operators/math/im2col.cc similarity index 100% rename from paddle/operators/math/im2col.cc rename to paddle/fluid/operators/math/im2col.cc diff --git a/paddle/operators/math/im2col.cu b/paddle/fluid/operators/math/im2col.cu similarity index 100% rename from paddle/operators/math/im2col.cu rename to paddle/fluid/operators/math/im2col.cu diff --git a/paddle/operators/math/im2col.h b/paddle/fluid/operators/math/im2col.h similarity index 100% rename from paddle/operators/math/im2col.h rename to paddle/fluid/operators/math/im2col.h diff --git a/paddle/operators/math/im2col_test.cc b/paddle/fluid/operators/math/im2col_test.cc similarity index 100% rename from paddle/operators/math/im2col_test.cc rename to paddle/fluid/operators/math/im2col_test.cc diff --git a/paddle/operators/math/lstm_compute.cc b/paddle/fluid/operators/math/lstm_compute.cc similarity index 100% rename from paddle/operators/math/lstm_compute.cc rename to paddle/fluid/operators/math/lstm_compute.cc diff --git a/paddle/operators/math/lstm_compute.cu b/paddle/fluid/operators/math/lstm_compute.cu similarity index 100% rename from paddle/operators/math/lstm_compute.cu rename to paddle/fluid/operators/math/lstm_compute.cu diff --git a/paddle/operators/math/lstm_compute.h b/paddle/fluid/operators/math/lstm_compute.h similarity index 100% rename from paddle/operators/math/lstm_compute.h rename to paddle/fluid/operators/math/lstm_compute.h diff --git a/paddle/operators/math/math_function.cc b/paddle/fluid/operators/math/math_function.cc similarity index 100% rename from paddle/operators/math/math_function.cc rename to paddle/fluid/operators/math/math_function.cc diff --git a/paddle/operators/math/math_function.cu b/paddle/fluid/operators/math/math_function.cu similarity index 100% rename from paddle/operators/math/math_function.cu rename to paddle/fluid/operators/math/math_function.cu diff --git a/paddle/operators/math/math_function.h b/paddle/fluid/operators/math/math_function.h similarity index 100% rename from paddle/operators/math/math_function.h rename to paddle/fluid/operators/math/math_function.h diff --git a/paddle/operators/math/math_function_impl.h b/paddle/fluid/operators/math/math_function_impl.h similarity index 100% rename from paddle/operators/math/math_function_impl.h rename to paddle/fluid/operators/math/math_function_impl.h diff --git a/paddle/operators/math/math_function_test.cc b/paddle/fluid/operators/math/math_function_test.cc similarity index 100% rename from paddle/operators/math/math_function_test.cc rename to paddle/fluid/operators/math/math_function_test.cc diff --git a/paddle/operators/math/math_function_test.cu b/paddle/fluid/operators/math/math_function_test.cu similarity index 100% rename from paddle/operators/math/math_function_test.cu rename to paddle/fluid/operators/math/math_function_test.cu diff --git a/paddle/operators/math/matmul.h b/paddle/fluid/operators/math/matmul.h similarity index 100% rename from paddle/operators/math/matmul.h rename to paddle/fluid/operators/math/matmul.h diff --git a/paddle/operators/math/maxouting.cc b/paddle/fluid/operators/math/maxouting.cc similarity index 100% rename from paddle/operators/math/maxouting.cc rename to paddle/fluid/operators/math/maxouting.cc diff --git a/paddle/operators/math/maxouting.cu b/paddle/fluid/operators/math/maxouting.cu similarity index 100% rename from paddle/operators/math/maxouting.cu rename to paddle/fluid/operators/math/maxouting.cu diff --git a/paddle/operators/math/maxouting.h b/paddle/fluid/operators/math/maxouting.h similarity index 100% rename from paddle/operators/math/maxouting.h rename to paddle/fluid/operators/math/maxouting.h diff --git a/paddle/operators/math/pooling.cc b/paddle/fluid/operators/math/pooling.cc similarity index 100% rename from paddle/operators/math/pooling.cc rename to paddle/fluid/operators/math/pooling.cc diff --git a/paddle/operators/math/pooling.cu b/paddle/fluid/operators/math/pooling.cu similarity index 100% rename from paddle/operators/math/pooling.cu rename to paddle/fluid/operators/math/pooling.cu diff --git a/paddle/operators/math/pooling.h b/paddle/fluid/operators/math/pooling.h similarity index 100% rename from paddle/operators/math/pooling.h rename to paddle/fluid/operators/math/pooling.h diff --git a/paddle/operators/math/sampler.cc b/paddle/fluid/operators/math/sampler.cc similarity index 100% rename from paddle/operators/math/sampler.cc rename to paddle/fluid/operators/math/sampler.cc diff --git a/paddle/operators/math/sampler.h b/paddle/fluid/operators/math/sampler.h similarity index 100% rename from paddle/operators/math/sampler.h rename to paddle/fluid/operators/math/sampler.h diff --git a/paddle/operators/math/selected_rows_functor.cc b/paddle/fluid/operators/math/selected_rows_functor.cc similarity index 100% rename from paddle/operators/math/selected_rows_functor.cc rename to paddle/fluid/operators/math/selected_rows_functor.cc diff --git a/paddle/operators/math/selected_rows_functor.cu b/paddle/fluid/operators/math/selected_rows_functor.cu similarity index 100% rename from paddle/operators/math/selected_rows_functor.cu rename to paddle/fluid/operators/math/selected_rows_functor.cu diff --git a/paddle/operators/math/selected_rows_functor.h b/paddle/fluid/operators/math/selected_rows_functor.h similarity index 100% rename from paddle/operators/math/selected_rows_functor.h rename to paddle/fluid/operators/math/selected_rows_functor.h diff --git a/paddle/operators/math/selected_rows_functor_test.cc b/paddle/fluid/operators/math/selected_rows_functor_test.cc similarity index 100% rename from paddle/operators/math/selected_rows_functor_test.cc rename to paddle/fluid/operators/math/selected_rows_functor_test.cc diff --git a/paddle/operators/math/selected_rows_functor_test.cu b/paddle/fluid/operators/math/selected_rows_functor_test.cu similarity index 100% rename from paddle/operators/math/selected_rows_functor_test.cu rename to paddle/fluid/operators/math/selected_rows_functor_test.cu diff --git a/paddle/operators/math/sequence2batch.cc b/paddle/fluid/operators/math/sequence2batch.cc similarity index 100% rename from paddle/operators/math/sequence2batch.cc rename to paddle/fluid/operators/math/sequence2batch.cc diff --git a/paddle/operators/math/sequence2batch.cu b/paddle/fluid/operators/math/sequence2batch.cu similarity index 100% rename from paddle/operators/math/sequence2batch.cu rename to paddle/fluid/operators/math/sequence2batch.cu diff --git a/paddle/operators/math/sequence2batch.h b/paddle/fluid/operators/math/sequence2batch.h similarity index 100% rename from paddle/operators/math/sequence2batch.h rename to paddle/fluid/operators/math/sequence2batch.h diff --git a/paddle/operators/math/sequence_padding.cc b/paddle/fluid/operators/math/sequence_padding.cc similarity index 100% rename from paddle/operators/math/sequence_padding.cc rename to paddle/fluid/operators/math/sequence_padding.cc diff --git a/paddle/operators/math/sequence_padding.cu b/paddle/fluid/operators/math/sequence_padding.cu similarity index 100% rename from paddle/operators/math/sequence_padding.cu rename to paddle/fluid/operators/math/sequence_padding.cu diff --git a/paddle/operators/math/sequence_padding.h b/paddle/fluid/operators/math/sequence_padding.h similarity index 100% rename from paddle/operators/math/sequence_padding.h rename to paddle/fluid/operators/math/sequence_padding.h diff --git a/paddle/operators/math/sequence_padding_test.cc b/paddle/fluid/operators/math/sequence_padding_test.cc similarity index 100% rename from paddle/operators/math/sequence_padding_test.cc rename to paddle/fluid/operators/math/sequence_padding_test.cc diff --git a/paddle/operators/math/sequence_pooling.cc b/paddle/fluid/operators/math/sequence_pooling.cc similarity index 100% rename from paddle/operators/math/sequence_pooling.cc rename to paddle/fluid/operators/math/sequence_pooling.cc diff --git a/paddle/operators/math/sequence_pooling.cu b/paddle/fluid/operators/math/sequence_pooling.cu similarity index 100% rename from paddle/operators/math/sequence_pooling.cu rename to paddle/fluid/operators/math/sequence_pooling.cu diff --git a/paddle/operators/math/sequence_pooling.h b/paddle/fluid/operators/math/sequence_pooling.h similarity index 100% rename from paddle/operators/math/sequence_pooling.h rename to paddle/fluid/operators/math/sequence_pooling.h diff --git a/paddle/operators/math/sequence_scale.cc b/paddle/fluid/operators/math/sequence_scale.cc similarity index 100% rename from paddle/operators/math/sequence_scale.cc rename to paddle/fluid/operators/math/sequence_scale.cc diff --git a/paddle/operators/math/sequence_scale.cu b/paddle/fluid/operators/math/sequence_scale.cu similarity index 100% rename from paddle/operators/math/sequence_scale.cu rename to paddle/fluid/operators/math/sequence_scale.cu diff --git a/paddle/operators/math/sequence_scale.h b/paddle/fluid/operators/math/sequence_scale.h similarity index 100% rename from paddle/operators/math/sequence_scale.h rename to paddle/fluid/operators/math/sequence_scale.h diff --git a/paddle/operators/math/softmax.cc b/paddle/fluid/operators/math/softmax.cc similarity index 100% rename from paddle/operators/math/softmax.cc rename to paddle/fluid/operators/math/softmax.cc diff --git a/paddle/operators/math/softmax.cu b/paddle/fluid/operators/math/softmax.cu similarity index 100% rename from paddle/operators/math/softmax.cu rename to paddle/fluid/operators/math/softmax.cu diff --git a/paddle/operators/math/softmax.h b/paddle/fluid/operators/math/softmax.h similarity index 100% rename from paddle/operators/math/softmax.h rename to paddle/fluid/operators/math/softmax.h diff --git a/paddle/operators/math/softmax_impl.h b/paddle/fluid/operators/math/softmax_impl.h similarity index 100% rename from paddle/operators/math/softmax_impl.h rename to paddle/fluid/operators/math/softmax_impl.h diff --git a/paddle/operators/math/unpooling.cc b/paddle/fluid/operators/math/unpooling.cc similarity index 100% rename from paddle/operators/math/unpooling.cc rename to paddle/fluid/operators/math/unpooling.cc diff --git a/paddle/operators/math/unpooling.cu b/paddle/fluid/operators/math/unpooling.cu similarity index 100% rename from paddle/operators/math/unpooling.cu rename to paddle/fluid/operators/math/unpooling.cu diff --git a/paddle/operators/math/unpooling.h b/paddle/fluid/operators/math/unpooling.h similarity index 100% rename from paddle/operators/math/unpooling.h rename to paddle/fluid/operators/math/unpooling.h diff --git a/paddle/operators/math/vol2col.cc b/paddle/fluid/operators/math/vol2col.cc similarity index 100% rename from paddle/operators/math/vol2col.cc rename to paddle/fluid/operators/math/vol2col.cc diff --git a/paddle/operators/math/vol2col.cu b/paddle/fluid/operators/math/vol2col.cu similarity index 100% rename from paddle/operators/math/vol2col.cu rename to paddle/fluid/operators/math/vol2col.cu diff --git a/paddle/operators/math/vol2col.h b/paddle/fluid/operators/math/vol2col.h similarity index 100% rename from paddle/operators/math/vol2col.h rename to paddle/fluid/operators/math/vol2col.h diff --git a/paddle/operators/math/vol2col_test.cc b/paddle/fluid/operators/math/vol2col_test.cc similarity index 100% rename from paddle/operators/math/vol2col_test.cc rename to paddle/fluid/operators/math/vol2col_test.cc diff --git a/paddle/operators/matmul_op.cc b/paddle/fluid/operators/matmul_op.cc similarity index 100% rename from paddle/operators/matmul_op.cc rename to paddle/fluid/operators/matmul_op.cc diff --git a/paddle/operators/matmul_op.cu.cc b/paddle/fluid/operators/matmul_op.cu.cc similarity index 100% rename from paddle/operators/matmul_op.cu.cc rename to paddle/fluid/operators/matmul_op.cu.cc diff --git a/paddle/operators/matmul_op.h b/paddle/fluid/operators/matmul_op.h similarity index 100% rename from paddle/operators/matmul_op.h rename to paddle/fluid/operators/matmul_op.h diff --git a/paddle/operators/max_sequence_len_op.cc b/paddle/fluid/operators/max_sequence_len_op.cc similarity index 100% rename from paddle/operators/max_sequence_len_op.cc rename to paddle/fluid/operators/max_sequence_len_op.cc diff --git a/paddle/operators/maxout_op.cc b/paddle/fluid/operators/maxout_op.cc similarity index 100% rename from paddle/operators/maxout_op.cc rename to paddle/fluid/operators/maxout_op.cc diff --git a/paddle/operators/maxout_op.cu.cc b/paddle/fluid/operators/maxout_op.cu.cc similarity index 100% rename from paddle/operators/maxout_op.cu.cc rename to paddle/fluid/operators/maxout_op.cu.cc diff --git a/paddle/operators/maxout_op.h b/paddle/fluid/operators/maxout_op.h similarity index 100% rename from paddle/operators/maxout_op.h rename to paddle/fluid/operators/maxout_op.h diff --git a/paddle/operators/mean_op.cc b/paddle/fluid/operators/mean_op.cc similarity index 100% rename from paddle/operators/mean_op.cc rename to paddle/fluid/operators/mean_op.cc diff --git a/paddle/operators/mean_op.cu b/paddle/fluid/operators/mean_op.cu similarity index 100% rename from paddle/operators/mean_op.cu rename to paddle/fluid/operators/mean_op.cu diff --git a/paddle/operators/mean_op.h b/paddle/fluid/operators/mean_op.h similarity index 100% rename from paddle/operators/mean_op.h rename to paddle/fluid/operators/mean_op.h diff --git a/paddle/operators/merge_lod_tensor_op.cc b/paddle/fluid/operators/merge_lod_tensor_op.cc similarity index 100% rename from paddle/operators/merge_lod_tensor_op.cc rename to paddle/fluid/operators/merge_lod_tensor_op.cc diff --git a/paddle/operators/mine_hard_examples_op.cc b/paddle/fluid/operators/mine_hard_examples_op.cc similarity index 100% rename from paddle/operators/mine_hard_examples_op.cc rename to paddle/fluid/operators/mine_hard_examples_op.cc diff --git a/paddle/operators/minus_op.cc b/paddle/fluid/operators/minus_op.cc similarity index 100% rename from paddle/operators/minus_op.cc rename to paddle/fluid/operators/minus_op.cc diff --git a/paddle/operators/minus_op.cu b/paddle/fluid/operators/minus_op.cu similarity index 100% rename from paddle/operators/minus_op.cu rename to paddle/fluid/operators/minus_op.cu diff --git a/paddle/operators/minus_op.h b/paddle/fluid/operators/minus_op.h similarity index 100% rename from paddle/operators/minus_op.h rename to paddle/fluid/operators/minus_op.h diff --git a/paddle/operators/modified_huber_loss_op.cc b/paddle/fluid/operators/modified_huber_loss_op.cc similarity index 100% rename from paddle/operators/modified_huber_loss_op.cc rename to paddle/fluid/operators/modified_huber_loss_op.cc diff --git a/paddle/operators/modified_huber_loss_op.cu b/paddle/fluid/operators/modified_huber_loss_op.cu similarity index 100% rename from paddle/operators/modified_huber_loss_op.cu rename to paddle/fluid/operators/modified_huber_loss_op.cu diff --git a/paddle/operators/modified_huber_loss_op.h b/paddle/fluid/operators/modified_huber_loss_op.h similarity index 100% rename from paddle/operators/modified_huber_loss_op.h rename to paddle/fluid/operators/modified_huber_loss_op.h diff --git a/paddle/operators/momentum_op.cc b/paddle/fluid/operators/momentum_op.cc similarity index 100% rename from paddle/operators/momentum_op.cc rename to paddle/fluid/operators/momentum_op.cc diff --git a/paddle/operators/momentum_op.cu b/paddle/fluid/operators/momentum_op.cu similarity index 100% rename from paddle/operators/momentum_op.cu rename to paddle/fluid/operators/momentum_op.cu diff --git a/paddle/operators/momentum_op.h b/paddle/fluid/operators/momentum_op.h similarity index 100% rename from paddle/operators/momentum_op.h rename to paddle/fluid/operators/momentum_op.h diff --git a/paddle/operators/mul_op.cc b/paddle/fluid/operators/mul_op.cc similarity index 100% rename from paddle/operators/mul_op.cc rename to paddle/fluid/operators/mul_op.cc diff --git a/paddle/operators/mul_op.cu.cc b/paddle/fluid/operators/mul_op.cu.cc similarity index 100% rename from paddle/operators/mul_op.cu.cc rename to paddle/fluid/operators/mul_op.cu.cc diff --git a/paddle/operators/mul_op.h b/paddle/fluid/operators/mul_op.h similarity index 100% rename from paddle/operators/mul_op.h rename to paddle/fluid/operators/mul_op.h diff --git a/paddle/operators/multiclass_nms_op.cc b/paddle/fluid/operators/multiclass_nms_op.cc similarity index 100% rename from paddle/operators/multiclass_nms_op.cc rename to paddle/fluid/operators/multiclass_nms_op.cc diff --git a/paddle/operators/multiplex_op.cc b/paddle/fluid/operators/multiplex_op.cc similarity index 100% rename from paddle/operators/multiplex_op.cc rename to paddle/fluid/operators/multiplex_op.cc diff --git a/paddle/operators/multiplex_op.cu b/paddle/fluid/operators/multiplex_op.cu similarity index 100% rename from paddle/operators/multiplex_op.cu rename to paddle/fluid/operators/multiplex_op.cu diff --git a/paddle/operators/multiplex_op.h b/paddle/fluid/operators/multiplex_op.h similarity index 100% rename from paddle/operators/multiplex_op.h rename to paddle/fluid/operators/multiplex_op.h diff --git a/paddle/operators/nccl/CMakeLists.txt b/paddle/fluid/operators/nccl/CMakeLists.txt similarity index 100% rename from paddle/operators/nccl/CMakeLists.txt rename to paddle/fluid/operators/nccl/CMakeLists.txt diff --git a/paddle/operators/nccl/nccl_gpu_common.cc b/paddle/fluid/operators/nccl/nccl_gpu_common.cc similarity index 100% rename from paddle/operators/nccl/nccl_gpu_common.cc rename to paddle/fluid/operators/nccl/nccl_gpu_common.cc diff --git a/paddle/operators/nccl/nccl_gpu_common.h b/paddle/fluid/operators/nccl/nccl_gpu_common.h similarity index 100% rename from paddle/operators/nccl/nccl_gpu_common.h rename to paddle/fluid/operators/nccl/nccl_gpu_common.h diff --git a/paddle/operators/nccl_op.cc b/paddle/fluid/operators/nccl_op.cc similarity index 100% rename from paddle/operators/nccl_op.cc rename to paddle/fluid/operators/nccl_op.cc diff --git a/paddle/operators/nccl_op.cu.cc b/paddle/fluid/operators/nccl_op.cu.cc similarity index 100% rename from paddle/operators/nccl_op.cu.cc rename to paddle/fluid/operators/nccl_op.cu.cc diff --git a/paddle/operators/nccl_op_test.cu.cc b/paddle/fluid/operators/nccl_op_test.cu.cc similarity index 100% rename from paddle/operators/nccl_op_test.cu.cc rename to paddle/fluid/operators/nccl_op_test.cu.cc diff --git a/paddle/operators/nce_op.cc b/paddle/fluid/operators/nce_op.cc similarity index 100% rename from paddle/operators/nce_op.cc rename to paddle/fluid/operators/nce_op.cc diff --git a/paddle/operators/nce_op.h b/paddle/fluid/operators/nce_op.h similarity index 100% rename from paddle/operators/nce_op.h rename to paddle/fluid/operators/nce_op.h diff --git a/paddle/operators/net_op.cc b/paddle/fluid/operators/net_op.cc similarity index 100% rename from paddle/operators/net_op.cc rename to paddle/fluid/operators/net_op.cc diff --git a/paddle/operators/net_op.h b/paddle/fluid/operators/net_op.h similarity index 100% rename from paddle/operators/net_op.h rename to paddle/fluid/operators/net_op.h diff --git a/paddle/operators/net_op_test.cc b/paddle/fluid/operators/net_op_test.cc similarity index 100% rename from paddle/operators/net_op_test.cc rename to paddle/fluid/operators/net_op_test.cc diff --git a/paddle/operators/norm_op.cc b/paddle/fluid/operators/norm_op.cc similarity index 100% rename from paddle/operators/norm_op.cc rename to paddle/fluid/operators/norm_op.cc diff --git a/paddle/operators/norm_op.cu b/paddle/fluid/operators/norm_op.cu similarity index 100% rename from paddle/operators/norm_op.cu rename to paddle/fluid/operators/norm_op.cu diff --git a/paddle/operators/norm_op.h b/paddle/fluid/operators/norm_op.h similarity index 100% rename from paddle/operators/norm_op.h rename to paddle/fluid/operators/norm_op.h diff --git a/paddle/operators/one_hot_op.cc b/paddle/fluid/operators/one_hot_op.cc similarity index 100% rename from paddle/operators/one_hot_op.cc rename to paddle/fluid/operators/one_hot_op.cc diff --git a/paddle/operators/one_hot_op.cu b/paddle/fluid/operators/one_hot_op.cu similarity index 100% rename from paddle/operators/one_hot_op.cu rename to paddle/fluid/operators/one_hot_op.cu diff --git a/paddle/operators/one_hot_op.h b/paddle/fluid/operators/one_hot_op.h similarity index 100% rename from paddle/operators/one_hot_op.h rename to paddle/fluid/operators/one_hot_op.h diff --git a/paddle/operators/op_documentation/batch_norm_op.md b/paddle/fluid/operators/op_documentation/batch_norm_op.md similarity index 100% rename from paddle/operators/op_documentation/batch_norm_op.md rename to paddle/fluid/operators/op_documentation/batch_norm_op.md diff --git a/paddle/operators/op_documentation/name_convention.md b/paddle/fluid/operators/op_documentation/name_convention.md similarity index 100% rename from paddle/operators/op_documentation/name_convention.md rename to paddle/fluid/operators/op_documentation/name_convention.md diff --git a/paddle/operators/op_documentation/net_op_design.md b/paddle/fluid/operators/op_documentation/net_op_design.md similarity index 100% rename from paddle/operators/op_documentation/net_op_design.md rename to paddle/fluid/operators/op_documentation/net_op_design.md diff --git a/paddle/operators/op_documentation/op_markdown_format.md b/paddle/fluid/operators/op_documentation/op_markdown_format.md similarity index 100% rename from paddle/operators/op_documentation/op_markdown_format.md rename to paddle/fluid/operators/op_documentation/op_markdown_format.md diff --git a/paddle/operators/op_documentation/rnn_design.md b/paddle/fluid/operators/op_documentation/rnn_design.md similarity index 100% rename from paddle/operators/op_documentation/rnn_design.md rename to paddle/fluid/operators/op_documentation/rnn_design.md diff --git a/paddle/operators/pad_op.cc b/paddle/fluid/operators/pad_op.cc similarity index 100% rename from paddle/operators/pad_op.cc rename to paddle/fluid/operators/pad_op.cc diff --git a/paddle/operators/pad_op.cu b/paddle/fluid/operators/pad_op.cu similarity index 100% rename from paddle/operators/pad_op.cu rename to paddle/fluid/operators/pad_op.cu diff --git a/paddle/operators/pad_op.h b/paddle/fluid/operators/pad_op.h similarity index 100% rename from paddle/operators/pad_op.h rename to paddle/fluid/operators/pad_op.h diff --git a/paddle/operators/parallel_do_op.cc b/paddle/fluid/operators/parallel_do_op.cc similarity index 100% rename from paddle/operators/parallel_do_op.cc rename to paddle/fluid/operators/parallel_do_op.cc diff --git a/paddle/operators/pool_cudnn_op.cu.cc b/paddle/fluid/operators/pool_cudnn_op.cu.cc similarity index 100% rename from paddle/operators/pool_cudnn_op.cu.cc rename to paddle/fluid/operators/pool_cudnn_op.cu.cc diff --git a/paddle/operators/pool_op.cc b/paddle/fluid/operators/pool_op.cc similarity index 100% rename from paddle/operators/pool_op.cc rename to paddle/fluid/operators/pool_op.cc diff --git a/paddle/operators/pool_op.cu.cc b/paddle/fluid/operators/pool_op.cu.cc similarity index 100% rename from paddle/operators/pool_op.cu.cc rename to paddle/fluid/operators/pool_op.cu.cc diff --git a/paddle/operators/pool_op.h b/paddle/fluid/operators/pool_op.h similarity index 100% rename from paddle/operators/pool_op.h rename to paddle/fluid/operators/pool_op.h diff --git a/paddle/operators/pool_with_index_op.cc b/paddle/fluid/operators/pool_with_index_op.cc similarity index 100% rename from paddle/operators/pool_with_index_op.cc rename to paddle/fluid/operators/pool_with_index_op.cc diff --git a/paddle/operators/pool_with_index_op.cu.cc b/paddle/fluid/operators/pool_with_index_op.cu.cc similarity index 100% rename from paddle/operators/pool_with_index_op.cu.cc rename to paddle/fluid/operators/pool_with_index_op.cu.cc diff --git a/paddle/operators/pool_with_index_op.h b/paddle/fluid/operators/pool_with_index_op.h similarity index 100% rename from paddle/operators/pool_with_index_op.h rename to paddle/fluid/operators/pool_with_index_op.h diff --git a/paddle/operators/positive_negative_pair_op.cc b/paddle/fluid/operators/positive_negative_pair_op.cc similarity index 100% rename from paddle/operators/positive_negative_pair_op.cc rename to paddle/fluid/operators/positive_negative_pair_op.cc diff --git a/paddle/operators/positive_negative_pair_op.h b/paddle/fluid/operators/positive_negative_pair_op.h similarity index 100% rename from paddle/operators/positive_negative_pair_op.h rename to paddle/fluid/operators/positive_negative_pair_op.h diff --git a/paddle/operators/precision_recall_op.cc b/paddle/fluid/operators/precision_recall_op.cc similarity index 100% rename from paddle/operators/precision_recall_op.cc rename to paddle/fluid/operators/precision_recall_op.cc diff --git a/paddle/operators/precision_recall_op.h b/paddle/fluid/operators/precision_recall_op.h similarity index 100% rename from paddle/operators/precision_recall_op.h rename to paddle/fluid/operators/precision_recall_op.h diff --git a/paddle/operators/prelu_op.cc b/paddle/fluid/operators/prelu_op.cc similarity index 100% rename from paddle/operators/prelu_op.cc rename to paddle/fluid/operators/prelu_op.cc diff --git a/paddle/operators/prelu_op.cu b/paddle/fluid/operators/prelu_op.cu similarity index 100% rename from paddle/operators/prelu_op.cu rename to paddle/fluid/operators/prelu_op.cu diff --git a/paddle/operators/prelu_op.h b/paddle/fluid/operators/prelu_op.h similarity index 100% rename from paddle/operators/prelu_op.h rename to paddle/fluid/operators/prelu_op.h diff --git a/paddle/operators/print_op.cc b/paddle/fluid/operators/print_op.cc similarity index 100% rename from paddle/operators/print_op.cc rename to paddle/fluid/operators/print_op.cc diff --git a/paddle/operators/prior_box_op.cc b/paddle/fluid/operators/prior_box_op.cc similarity index 100% rename from paddle/operators/prior_box_op.cc rename to paddle/fluid/operators/prior_box_op.cc diff --git a/paddle/operators/prior_box_op.h b/paddle/fluid/operators/prior_box_op.h similarity index 100% rename from paddle/operators/prior_box_op.h rename to paddle/fluid/operators/prior_box_op.h diff --git a/paddle/operators/proximal_adagrad_op.cc b/paddle/fluid/operators/proximal_adagrad_op.cc similarity index 100% rename from paddle/operators/proximal_adagrad_op.cc rename to paddle/fluid/operators/proximal_adagrad_op.cc diff --git a/paddle/operators/proximal_adagrad_op.cu b/paddle/fluid/operators/proximal_adagrad_op.cu similarity index 100% rename from paddle/operators/proximal_adagrad_op.cu rename to paddle/fluid/operators/proximal_adagrad_op.cu diff --git a/paddle/operators/proximal_adagrad_op.h b/paddle/fluid/operators/proximal_adagrad_op.h similarity index 100% rename from paddle/operators/proximal_adagrad_op.h rename to paddle/fluid/operators/proximal_adagrad_op.h diff --git a/paddle/operators/proximal_gd_op.cc b/paddle/fluid/operators/proximal_gd_op.cc similarity index 100% rename from paddle/operators/proximal_gd_op.cc rename to paddle/fluid/operators/proximal_gd_op.cc diff --git a/paddle/operators/proximal_gd_op.cu b/paddle/fluid/operators/proximal_gd_op.cu similarity index 100% rename from paddle/operators/proximal_gd_op.cu rename to paddle/fluid/operators/proximal_gd_op.cu diff --git a/paddle/operators/proximal_gd_op.h b/paddle/fluid/operators/proximal_gd_op.h similarity index 100% rename from paddle/operators/proximal_gd_op.h rename to paddle/fluid/operators/proximal_gd_op.h diff --git a/paddle/operators/rank_loss_op.cc b/paddle/fluid/operators/rank_loss_op.cc similarity index 100% rename from paddle/operators/rank_loss_op.cc rename to paddle/fluid/operators/rank_loss_op.cc diff --git a/paddle/operators/rank_loss_op.cu b/paddle/fluid/operators/rank_loss_op.cu similarity index 100% rename from paddle/operators/rank_loss_op.cu rename to paddle/fluid/operators/rank_loss_op.cu diff --git a/paddle/operators/rank_loss_op.h b/paddle/fluid/operators/rank_loss_op.h similarity index 100% rename from paddle/operators/rank_loss_op.h rename to paddle/fluid/operators/rank_loss_op.h diff --git a/paddle/operators/read_op.cc b/paddle/fluid/operators/read_op.cc similarity index 100% rename from paddle/operators/read_op.cc rename to paddle/fluid/operators/read_op.cc diff --git a/paddle/operators/recurrent_op.cc b/paddle/fluid/operators/recurrent_op.cc similarity index 100% rename from paddle/operators/recurrent_op.cc rename to paddle/fluid/operators/recurrent_op.cc diff --git a/paddle/operators/recv_op.cc b/paddle/fluid/operators/recv_op.cc similarity index 100% rename from paddle/operators/recv_op.cc rename to paddle/fluid/operators/recv_op.cc diff --git a/paddle/operators/reduce_op.cc b/paddle/fluid/operators/reduce_op.cc similarity index 100% rename from paddle/operators/reduce_op.cc rename to paddle/fluid/operators/reduce_op.cc diff --git a/paddle/operators/reduce_op.cu b/paddle/fluid/operators/reduce_op.cu similarity index 100% rename from paddle/operators/reduce_op.cu rename to paddle/fluid/operators/reduce_op.cu diff --git a/paddle/operators/reduce_op.h b/paddle/fluid/operators/reduce_op.h similarity index 100% rename from paddle/operators/reduce_op.h rename to paddle/fluid/operators/reduce_op.h diff --git a/paddle/operators/reorder_lod_tensor_by_rank_op.cc b/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc similarity index 100% rename from paddle/operators/reorder_lod_tensor_by_rank_op.cc rename to paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc diff --git a/paddle/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc similarity index 100% rename from paddle/operators/reshape_op.cc rename to paddle/fluid/operators/reshape_op.cc diff --git a/paddle/operators/reshape_op.cu b/paddle/fluid/operators/reshape_op.cu similarity index 100% rename from paddle/operators/reshape_op.cu rename to paddle/fluid/operators/reshape_op.cu diff --git a/paddle/operators/reshape_op.h b/paddle/fluid/operators/reshape_op.h similarity index 100% rename from paddle/operators/reshape_op.h rename to paddle/fluid/operators/reshape_op.h diff --git a/paddle/operators/rmsprop_op.cc b/paddle/fluid/operators/rmsprop_op.cc similarity index 100% rename from paddle/operators/rmsprop_op.cc rename to paddle/fluid/operators/rmsprop_op.cc diff --git a/paddle/operators/rmsprop_op.cu b/paddle/fluid/operators/rmsprop_op.cu similarity index 100% rename from paddle/operators/rmsprop_op.cu rename to paddle/fluid/operators/rmsprop_op.cu diff --git a/paddle/operators/rmsprop_op.h b/paddle/fluid/operators/rmsprop_op.h similarity index 100% rename from paddle/operators/rmsprop_op.h rename to paddle/fluid/operators/rmsprop_op.h diff --git a/paddle/operators/rnn_memory_helper_op.cc b/paddle/fluid/operators/rnn_memory_helper_op.cc similarity index 100% rename from paddle/operators/rnn_memory_helper_op.cc rename to paddle/fluid/operators/rnn_memory_helper_op.cc diff --git a/paddle/operators/roi_pool_op.cc b/paddle/fluid/operators/roi_pool_op.cc similarity index 100% rename from paddle/operators/roi_pool_op.cc rename to paddle/fluid/operators/roi_pool_op.cc diff --git a/paddle/operators/roi_pool_op.cu b/paddle/fluid/operators/roi_pool_op.cu similarity index 100% rename from paddle/operators/roi_pool_op.cu rename to paddle/fluid/operators/roi_pool_op.cu diff --git a/paddle/operators/roi_pool_op.h b/paddle/fluid/operators/roi_pool_op.h similarity index 100% rename from paddle/operators/roi_pool_op.h rename to paddle/fluid/operators/roi_pool_op.h diff --git a/paddle/operators/row_conv_op.cc b/paddle/fluid/operators/row_conv_op.cc similarity index 100% rename from paddle/operators/row_conv_op.cc rename to paddle/fluid/operators/row_conv_op.cc diff --git a/paddle/operators/row_conv_op.cu b/paddle/fluid/operators/row_conv_op.cu similarity index 100% rename from paddle/operators/row_conv_op.cu rename to paddle/fluid/operators/row_conv_op.cu diff --git a/paddle/operators/row_conv_op.h b/paddle/fluid/operators/row_conv_op.h similarity index 100% rename from paddle/operators/row_conv_op.h rename to paddle/fluid/operators/row_conv_op.h diff --git a/paddle/operators/save_combine_op.cc b/paddle/fluid/operators/save_combine_op.cc similarity index 100% rename from paddle/operators/save_combine_op.cc rename to paddle/fluid/operators/save_combine_op.cc diff --git a/paddle/operators/save_load_combine_op_test.cc b/paddle/fluid/operators/save_load_combine_op_test.cc similarity index 100% rename from paddle/operators/save_load_combine_op_test.cc rename to paddle/fluid/operators/save_load_combine_op_test.cc diff --git a/paddle/operators/save_load_op_test.cc b/paddle/fluid/operators/save_load_op_test.cc similarity index 100% rename from paddle/operators/save_load_op_test.cc rename to paddle/fluid/operators/save_load_op_test.cc diff --git a/paddle/operators/save_op.cc b/paddle/fluid/operators/save_op.cc similarity index 100% rename from paddle/operators/save_op.cc rename to paddle/fluid/operators/save_op.cc diff --git a/paddle/operators/scale_op.cc b/paddle/fluid/operators/scale_op.cc similarity index 100% rename from paddle/operators/scale_op.cc rename to paddle/fluid/operators/scale_op.cc diff --git a/paddle/operators/scale_op.cu b/paddle/fluid/operators/scale_op.cu similarity index 100% rename from paddle/operators/scale_op.cu rename to paddle/fluid/operators/scale_op.cu diff --git a/paddle/operators/scale_op.h b/paddle/fluid/operators/scale_op.h similarity index 100% rename from paddle/operators/scale_op.h rename to paddle/fluid/operators/scale_op.h diff --git a/paddle/operators/scatter.cu.h b/paddle/fluid/operators/scatter.cu.h similarity index 100% rename from paddle/operators/scatter.cu.h rename to paddle/fluid/operators/scatter.cu.h diff --git a/paddle/operators/scatter.h b/paddle/fluid/operators/scatter.h similarity index 100% rename from paddle/operators/scatter.h rename to paddle/fluid/operators/scatter.h diff --git a/paddle/operators/scatter_op.cc b/paddle/fluid/operators/scatter_op.cc similarity index 100% rename from paddle/operators/scatter_op.cc rename to paddle/fluid/operators/scatter_op.cc diff --git a/paddle/operators/scatter_op.cu b/paddle/fluid/operators/scatter_op.cu similarity index 100% rename from paddle/operators/scatter_op.cu rename to paddle/fluid/operators/scatter_op.cu diff --git a/paddle/operators/scatter_op.h b/paddle/fluid/operators/scatter_op.h similarity index 100% rename from paddle/operators/scatter_op.h rename to paddle/fluid/operators/scatter_op.h diff --git a/paddle/operators/scatter_test.cc b/paddle/fluid/operators/scatter_test.cc similarity index 100% rename from paddle/operators/scatter_test.cc rename to paddle/fluid/operators/scatter_test.cc diff --git a/paddle/operators/send_op.cc b/paddle/fluid/operators/send_op.cc similarity index 100% rename from paddle/operators/send_op.cc rename to paddle/fluid/operators/send_op.cc diff --git a/paddle/operators/send_recv_op_test.cc b/paddle/fluid/operators/send_recv_op_test.cc similarity index 100% rename from paddle/operators/send_recv_op_test.cc rename to paddle/fluid/operators/send_recv_op_test.cc diff --git a/paddle/operators/sequence_concat_op.cc b/paddle/fluid/operators/sequence_concat_op.cc similarity index 100% rename from paddle/operators/sequence_concat_op.cc rename to paddle/fluid/operators/sequence_concat_op.cc diff --git a/paddle/operators/sequence_concat_op.cu.cc b/paddle/fluid/operators/sequence_concat_op.cu.cc similarity index 100% rename from paddle/operators/sequence_concat_op.cu.cc rename to paddle/fluid/operators/sequence_concat_op.cu.cc diff --git a/paddle/operators/sequence_concat_op.h b/paddle/fluid/operators/sequence_concat_op.h similarity index 100% rename from paddle/operators/sequence_concat_op.h rename to paddle/fluid/operators/sequence_concat_op.h diff --git a/paddle/operators/sequence_conv_op.cc b/paddle/fluid/operators/sequence_conv_op.cc similarity index 100% rename from paddle/operators/sequence_conv_op.cc rename to paddle/fluid/operators/sequence_conv_op.cc diff --git a/paddle/operators/sequence_conv_op.cu.cc b/paddle/fluid/operators/sequence_conv_op.cu.cc similarity index 100% rename from paddle/operators/sequence_conv_op.cu.cc rename to paddle/fluid/operators/sequence_conv_op.cu.cc diff --git a/paddle/operators/sequence_conv_op.h b/paddle/fluid/operators/sequence_conv_op.h similarity index 100% rename from paddle/operators/sequence_conv_op.h rename to paddle/fluid/operators/sequence_conv_op.h diff --git a/paddle/operators/sequence_erase_op.cc b/paddle/fluid/operators/sequence_erase_op.cc similarity index 100% rename from paddle/operators/sequence_erase_op.cc rename to paddle/fluid/operators/sequence_erase_op.cc diff --git a/paddle/operators/sequence_erase_op.cu b/paddle/fluid/operators/sequence_erase_op.cu similarity index 100% rename from paddle/operators/sequence_erase_op.cu rename to paddle/fluid/operators/sequence_erase_op.cu diff --git a/paddle/operators/sequence_erase_op.h b/paddle/fluid/operators/sequence_erase_op.h similarity index 100% rename from paddle/operators/sequence_erase_op.h rename to paddle/fluid/operators/sequence_erase_op.h diff --git a/paddle/operators/sequence_expand_op.cc b/paddle/fluid/operators/sequence_expand_op.cc similarity index 100% rename from paddle/operators/sequence_expand_op.cc rename to paddle/fluid/operators/sequence_expand_op.cc diff --git a/paddle/operators/sequence_expand_op.cu b/paddle/fluid/operators/sequence_expand_op.cu similarity index 100% rename from paddle/operators/sequence_expand_op.cu rename to paddle/fluid/operators/sequence_expand_op.cu diff --git a/paddle/operators/sequence_expand_op.h b/paddle/fluid/operators/sequence_expand_op.h similarity index 100% rename from paddle/operators/sequence_expand_op.h rename to paddle/fluid/operators/sequence_expand_op.h diff --git a/paddle/operators/sequence_pool_op.cc b/paddle/fluid/operators/sequence_pool_op.cc similarity index 100% rename from paddle/operators/sequence_pool_op.cc rename to paddle/fluid/operators/sequence_pool_op.cc diff --git a/paddle/operators/sequence_pool_op.cu b/paddle/fluid/operators/sequence_pool_op.cu similarity index 100% rename from paddle/operators/sequence_pool_op.cu rename to paddle/fluid/operators/sequence_pool_op.cu diff --git a/paddle/operators/sequence_pool_op.h b/paddle/fluid/operators/sequence_pool_op.h similarity index 100% rename from paddle/operators/sequence_pool_op.h rename to paddle/fluid/operators/sequence_pool_op.h diff --git a/paddle/operators/sequence_reshape_op.cc b/paddle/fluid/operators/sequence_reshape_op.cc similarity index 100% rename from paddle/operators/sequence_reshape_op.cc rename to paddle/fluid/operators/sequence_reshape_op.cc diff --git a/paddle/operators/sequence_reshape_op.cu b/paddle/fluid/operators/sequence_reshape_op.cu similarity index 100% rename from paddle/operators/sequence_reshape_op.cu rename to paddle/fluid/operators/sequence_reshape_op.cu diff --git a/paddle/operators/sequence_reshape_op.h b/paddle/fluid/operators/sequence_reshape_op.h similarity index 100% rename from paddle/operators/sequence_reshape_op.h rename to paddle/fluid/operators/sequence_reshape_op.h diff --git a/paddle/operators/sequence_slice_op.cc b/paddle/fluid/operators/sequence_slice_op.cc similarity index 100% rename from paddle/operators/sequence_slice_op.cc rename to paddle/fluid/operators/sequence_slice_op.cc diff --git a/paddle/operators/sequence_slice_op.cu b/paddle/fluid/operators/sequence_slice_op.cu similarity index 100% rename from paddle/operators/sequence_slice_op.cu rename to paddle/fluid/operators/sequence_slice_op.cu diff --git a/paddle/operators/sequence_slice_op.h b/paddle/fluid/operators/sequence_slice_op.h similarity index 100% rename from paddle/operators/sequence_slice_op.h rename to paddle/fluid/operators/sequence_slice_op.h diff --git a/paddle/operators/sequence_softmax_op.cc b/paddle/fluid/operators/sequence_softmax_op.cc similarity index 100% rename from paddle/operators/sequence_softmax_op.cc rename to paddle/fluid/operators/sequence_softmax_op.cc diff --git a/paddle/operators/sequence_softmax_op.cu.cc b/paddle/fluid/operators/sequence_softmax_op.cu.cc similarity index 100% rename from paddle/operators/sequence_softmax_op.cu.cc rename to paddle/fluid/operators/sequence_softmax_op.cu.cc diff --git a/paddle/operators/sequence_softmax_op.h b/paddle/fluid/operators/sequence_softmax_op.h similarity index 100% rename from paddle/operators/sequence_softmax_op.h rename to paddle/fluid/operators/sequence_softmax_op.h diff --git a/paddle/operators/sgd_op.cc b/paddle/fluid/operators/sgd_op.cc similarity index 100% rename from paddle/operators/sgd_op.cc rename to paddle/fluid/operators/sgd_op.cc diff --git a/paddle/operators/sgd_op.cu b/paddle/fluid/operators/sgd_op.cu similarity index 100% rename from paddle/operators/sgd_op.cu rename to paddle/fluid/operators/sgd_op.cu diff --git a/paddle/operators/sgd_op.h b/paddle/fluid/operators/sgd_op.h similarity index 100% rename from paddle/operators/sgd_op.h rename to paddle/fluid/operators/sgd_op.h diff --git a/paddle/operators/shrink_rnn_memory_op.cc b/paddle/fluid/operators/shrink_rnn_memory_op.cc similarity index 100% rename from paddle/operators/shrink_rnn_memory_op.cc rename to paddle/fluid/operators/shrink_rnn_memory_op.cc diff --git a/paddle/operators/sigmoid_cross_entropy_with_logits_op.cc b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc similarity index 100% rename from paddle/operators/sigmoid_cross_entropy_with_logits_op.cc rename to paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc diff --git a/paddle/operators/sigmoid_cross_entropy_with_logits_op.cu b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cu similarity index 100% rename from paddle/operators/sigmoid_cross_entropy_with_logits_op.cu rename to paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cu diff --git a/paddle/operators/sigmoid_cross_entropy_with_logits_op.h b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.h similarity index 100% rename from paddle/operators/sigmoid_cross_entropy_with_logits_op.h rename to paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.h diff --git a/paddle/operators/sign_op.cc b/paddle/fluid/operators/sign_op.cc similarity index 100% rename from paddle/operators/sign_op.cc rename to paddle/fluid/operators/sign_op.cc diff --git a/paddle/operators/sign_op.cu b/paddle/fluid/operators/sign_op.cu similarity index 100% rename from paddle/operators/sign_op.cu rename to paddle/fluid/operators/sign_op.cu diff --git a/paddle/operators/sign_op.h b/paddle/fluid/operators/sign_op.h similarity index 100% rename from paddle/operators/sign_op.h rename to paddle/fluid/operators/sign_op.h diff --git a/paddle/operators/smooth_l1_loss_op.cc b/paddle/fluid/operators/smooth_l1_loss_op.cc similarity index 100% rename from paddle/operators/smooth_l1_loss_op.cc rename to paddle/fluid/operators/smooth_l1_loss_op.cc diff --git a/paddle/operators/smooth_l1_loss_op.cu b/paddle/fluid/operators/smooth_l1_loss_op.cu similarity index 100% rename from paddle/operators/smooth_l1_loss_op.cu rename to paddle/fluid/operators/smooth_l1_loss_op.cu diff --git a/paddle/operators/smooth_l1_loss_op.h b/paddle/fluid/operators/smooth_l1_loss_op.h similarity index 100% rename from paddle/operators/smooth_l1_loss_op.h rename to paddle/fluid/operators/smooth_l1_loss_op.h diff --git a/paddle/operators/softmax_op.cc b/paddle/fluid/operators/softmax_op.cc similarity index 100% rename from paddle/operators/softmax_op.cc rename to paddle/fluid/operators/softmax_op.cc diff --git a/paddle/operators/softmax_op.cu.cc b/paddle/fluid/operators/softmax_op.cu.cc similarity index 100% rename from paddle/operators/softmax_op.cu.cc rename to paddle/fluid/operators/softmax_op.cu.cc diff --git a/paddle/operators/softmax_op.h b/paddle/fluid/operators/softmax_op.h similarity index 100% rename from paddle/operators/softmax_op.h rename to paddle/fluid/operators/softmax_op.h diff --git a/paddle/operators/softmax_with_cross_entropy_op.cc b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc similarity index 100% rename from paddle/operators/softmax_with_cross_entropy_op.cc rename to paddle/fluid/operators/softmax_with_cross_entropy_op.cc diff --git a/paddle/operators/softmax_with_cross_entropy_op.cu b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu similarity index 100% rename from paddle/operators/softmax_with_cross_entropy_op.cu rename to paddle/fluid/operators/softmax_with_cross_entropy_op.cu diff --git a/paddle/operators/softmax_with_cross_entropy_op.h b/paddle/fluid/operators/softmax_with_cross_entropy_op.h similarity index 100% rename from paddle/operators/softmax_with_cross_entropy_op.h rename to paddle/fluid/operators/softmax_with_cross_entropy_op.h diff --git a/paddle/operators/split_lod_tensor_op.cc b/paddle/fluid/operators/split_lod_tensor_op.cc similarity index 100% rename from paddle/operators/split_lod_tensor_op.cc rename to paddle/fluid/operators/split_lod_tensor_op.cc diff --git a/paddle/operators/split_op.cc b/paddle/fluid/operators/split_op.cc similarity index 100% rename from paddle/operators/split_op.cc rename to paddle/fluid/operators/split_op.cc diff --git a/paddle/operators/split_op.cu.cc b/paddle/fluid/operators/split_op.cu.cc similarity index 100% rename from paddle/operators/split_op.cu.cc rename to paddle/fluid/operators/split_op.cu.cc diff --git a/paddle/operators/split_op.h b/paddle/fluid/operators/split_op.h similarity index 100% rename from paddle/operators/split_op.h rename to paddle/fluid/operators/split_op.h diff --git a/paddle/operators/split_selected_rows_op.cc b/paddle/fluid/operators/split_selected_rows_op.cc similarity index 100% rename from paddle/operators/split_selected_rows_op.cc rename to paddle/fluid/operators/split_selected_rows_op.cc diff --git a/paddle/operators/split_selected_rows_op.cu b/paddle/fluid/operators/split_selected_rows_op.cu similarity index 100% rename from paddle/operators/split_selected_rows_op.cu rename to paddle/fluid/operators/split_selected_rows_op.cu diff --git a/paddle/operators/split_selected_rows_op.h b/paddle/fluid/operators/split_selected_rows_op.h similarity index 100% rename from paddle/operators/split_selected_rows_op.h rename to paddle/fluid/operators/split_selected_rows_op.h diff --git a/paddle/operators/spp_op.cc b/paddle/fluid/operators/spp_op.cc similarity index 100% rename from paddle/operators/spp_op.cc rename to paddle/fluid/operators/spp_op.cc diff --git a/paddle/operators/spp_op.cu.cc b/paddle/fluid/operators/spp_op.cu.cc similarity index 100% rename from paddle/operators/spp_op.cu.cc rename to paddle/fluid/operators/spp_op.cu.cc diff --git a/paddle/operators/spp_op.h b/paddle/fluid/operators/spp_op.h similarity index 100% rename from paddle/operators/spp_op.h rename to paddle/fluid/operators/spp_op.h diff --git a/paddle/operators/squared_l2_distance_op.cc b/paddle/fluid/operators/squared_l2_distance_op.cc similarity index 100% rename from paddle/operators/squared_l2_distance_op.cc rename to paddle/fluid/operators/squared_l2_distance_op.cc diff --git a/paddle/operators/squared_l2_distance_op.cu b/paddle/fluid/operators/squared_l2_distance_op.cu similarity index 100% rename from paddle/operators/squared_l2_distance_op.cu rename to paddle/fluid/operators/squared_l2_distance_op.cu diff --git a/paddle/operators/squared_l2_distance_op.h b/paddle/fluid/operators/squared_l2_distance_op.h similarity index 100% rename from paddle/operators/squared_l2_distance_op.h rename to paddle/fluid/operators/squared_l2_distance_op.h diff --git a/paddle/operators/squared_l2_norm_op.cc b/paddle/fluid/operators/squared_l2_norm_op.cc similarity index 100% rename from paddle/operators/squared_l2_norm_op.cc rename to paddle/fluid/operators/squared_l2_norm_op.cc diff --git a/paddle/operators/squared_l2_norm_op.cu b/paddle/fluid/operators/squared_l2_norm_op.cu similarity index 100% rename from paddle/operators/squared_l2_norm_op.cu rename to paddle/fluid/operators/squared_l2_norm_op.cu diff --git a/paddle/operators/squared_l2_norm_op.h b/paddle/fluid/operators/squared_l2_norm_op.h similarity index 100% rename from paddle/operators/squared_l2_norm_op.h rename to paddle/fluid/operators/squared_l2_norm_op.h diff --git a/paddle/operators/strided_memcpy.h b/paddle/fluid/operators/strided_memcpy.h similarity index 100% rename from paddle/operators/strided_memcpy.h rename to paddle/fluid/operators/strided_memcpy.h diff --git a/paddle/operators/strided_memcpy_test.cc b/paddle/fluid/operators/strided_memcpy_test.cc similarity index 100% rename from paddle/operators/strided_memcpy_test.cc rename to paddle/fluid/operators/strided_memcpy_test.cc diff --git a/paddle/operators/sum_op.cc b/paddle/fluid/operators/sum_op.cc similarity index 100% rename from paddle/operators/sum_op.cc rename to paddle/fluid/operators/sum_op.cc diff --git a/paddle/operators/sum_op.cu b/paddle/fluid/operators/sum_op.cu similarity index 100% rename from paddle/operators/sum_op.cu rename to paddle/fluid/operators/sum_op.cu diff --git a/paddle/operators/sum_op.h b/paddle/fluid/operators/sum_op.h similarity index 100% rename from paddle/operators/sum_op.h rename to paddle/fluid/operators/sum_op.h diff --git a/paddle/operators/target_assign_op.cc b/paddle/fluid/operators/target_assign_op.cc similarity index 100% rename from paddle/operators/target_assign_op.cc rename to paddle/fluid/operators/target_assign_op.cc diff --git a/paddle/operators/target_assign_op.cu b/paddle/fluid/operators/target_assign_op.cu similarity index 100% rename from paddle/operators/target_assign_op.cu rename to paddle/fluid/operators/target_assign_op.cu diff --git a/paddle/operators/target_assign_op.h b/paddle/fluid/operators/target_assign_op.h similarity index 100% rename from paddle/operators/target_assign_op.h rename to paddle/fluid/operators/target_assign_op.h diff --git a/paddle/operators/tensor_array_read_write_op.cc b/paddle/fluid/operators/tensor_array_read_write_op.cc similarity index 100% rename from paddle/operators/tensor_array_read_write_op.cc rename to paddle/fluid/operators/tensor_array_read_write_op.cc diff --git a/paddle/operators/top_k_op.cc b/paddle/fluid/operators/top_k_op.cc similarity index 100% rename from paddle/operators/top_k_op.cc rename to paddle/fluid/operators/top_k_op.cc diff --git a/paddle/operators/top_k_op.cu b/paddle/fluid/operators/top_k_op.cu similarity index 100% rename from paddle/operators/top_k_op.cu rename to paddle/fluid/operators/top_k_op.cu diff --git a/paddle/operators/top_k_op.h b/paddle/fluid/operators/top_k_op.h similarity index 100% rename from paddle/operators/top_k_op.h rename to paddle/fluid/operators/top_k_op.h diff --git a/paddle/operators/transpose_op.cc b/paddle/fluid/operators/transpose_op.cc similarity index 100% rename from paddle/operators/transpose_op.cc rename to paddle/fluid/operators/transpose_op.cc diff --git a/paddle/operators/transpose_op.cu.cc b/paddle/fluid/operators/transpose_op.cu.cc similarity index 100% rename from paddle/operators/transpose_op.cu.cc rename to paddle/fluid/operators/transpose_op.cu.cc diff --git a/paddle/operators/transpose_op.h b/paddle/fluid/operators/transpose_op.h similarity index 100% rename from paddle/operators/transpose_op.h rename to paddle/fluid/operators/transpose_op.h diff --git a/paddle/operators/uniform_random_op.cc b/paddle/fluid/operators/uniform_random_op.cc similarity index 100% rename from paddle/operators/uniform_random_op.cc rename to paddle/fluid/operators/uniform_random_op.cc diff --git a/paddle/operators/uniform_random_op.cu b/paddle/fluid/operators/uniform_random_op.cu similarity index 100% rename from paddle/operators/uniform_random_op.cu rename to paddle/fluid/operators/uniform_random_op.cu diff --git a/paddle/operators/unpool_op.cc b/paddle/fluid/operators/unpool_op.cc similarity index 100% rename from paddle/operators/unpool_op.cc rename to paddle/fluid/operators/unpool_op.cc diff --git a/paddle/operators/unpool_op.cu.cc b/paddle/fluid/operators/unpool_op.cu.cc similarity index 100% rename from paddle/operators/unpool_op.cu.cc rename to paddle/fluid/operators/unpool_op.cu.cc diff --git a/paddle/operators/unpool_op.h b/paddle/fluid/operators/unpool_op.h similarity index 100% rename from paddle/operators/unpool_op.h rename to paddle/fluid/operators/unpool_op.h diff --git a/paddle/operators/warpctc_op.cc b/paddle/fluid/operators/warpctc_op.cc similarity index 100% rename from paddle/operators/warpctc_op.cc rename to paddle/fluid/operators/warpctc_op.cc diff --git a/paddle/operators/warpctc_op.cu.cc b/paddle/fluid/operators/warpctc_op.cu.cc similarity index 100% rename from paddle/operators/warpctc_op.cu.cc rename to paddle/fluid/operators/warpctc_op.cu.cc diff --git a/paddle/operators/warpctc_op.h b/paddle/fluid/operators/warpctc_op.h similarity index 100% rename from paddle/operators/warpctc_op.h rename to paddle/fluid/operators/warpctc_op.h diff --git a/paddle/operators/while_op.cc b/paddle/fluid/operators/while_op.cc similarity index 100% rename from paddle/operators/while_op.cc rename to paddle/fluid/operators/while_op.cc diff --git a/paddle/platform/.clang-format b/paddle/fluid/platform/.clang-format similarity index 100% rename from paddle/platform/.clang-format rename to paddle/fluid/platform/.clang-format diff --git a/paddle/platform/CMakeLists.txt b/paddle/fluid/platform/CMakeLists.txt similarity index 100% rename from paddle/platform/CMakeLists.txt rename to paddle/fluid/platform/CMakeLists.txt diff --git a/paddle/platform/assert.h b/paddle/fluid/platform/assert.h similarity index 100% rename from paddle/platform/assert.h rename to paddle/fluid/platform/assert.h diff --git a/paddle/platform/call_once.h b/paddle/fluid/platform/call_once.h similarity index 100% rename from paddle/platform/call_once.h rename to paddle/fluid/platform/call_once.h diff --git a/paddle/platform/cpu_info.cc b/paddle/fluid/platform/cpu_info.cc similarity index 100% rename from paddle/platform/cpu_info.cc rename to paddle/fluid/platform/cpu_info.cc diff --git a/paddle/platform/cpu_info.h b/paddle/fluid/platform/cpu_info.h similarity index 100% rename from paddle/platform/cpu_info.h rename to paddle/fluid/platform/cpu_info.h diff --git a/paddle/platform/cpu_info_test.cc b/paddle/fluid/platform/cpu_info_test.cc similarity index 100% rename from paddle/platform/cpu_info_test.cc rename to paddle/fluid/platform/cpu_info_test.cc diff --git a/paddle/platform/cuda_helper.h b/paddle/fluid/platform/cuda_helper.h similarity index 100% rename from paddle/platform/cuda_helper.h rename to paddle/fluid/platform/cuda_helper.h diff --git a/paddle/platform/cuda_profiler.h b/paddle/fluid/platform/cuda_profiler.h similarity index 100% rename from paddle/platform/cuda_profiler.h rename to paddle/fluid/platform/cuda_profiler.h diff --git a/paddle/platform/cudnn_helper.h b/paddle/fluid/platform/cudnn_helper.h similarity index 100% rename from paddle/platform/cudnn_helper.h rename to paddle/fluid/platform/cudnn_helper.h diff --git a/paddle/platform/cudnn_helper_test.cc b/paddle/fluid/platform/cudnn_helper_test.cc similarity index 100% rename from paddle/platform/cudnn_helper_test.cc rename to paddle/fluid/platform/cudnn_helper_test.cc diff --git a/paddle/platform/details/device_ptr_cast.h b/paddle/fluid/platform/details/device_ptr_cast.h similarity index 100% rename from paddle/platform/details/device_ptr_cast.h rename to paddle/fluid/platform/details/device_ptr_cast.h diff --git a/paddle/platform/device_context.cc b/paddle/fluid/platform/device_context.cc similarity index 100% rename from paddle/platform/device_context.cc rename to paddle/fluid/platform/device_context.cc diff --git a/paddle/platform/device_context.h b/paddle/fluid/platform/device_context.h similarity index 100% rename from paddle/platform/device_context.h rename to paddle/fluid/platform/device_context.h diff --git a/paddle/platform/device_context_test.cu b/paddle/fluid/platform/device_context_test.cu similarity index 100% rename from paddle/platform/device_context_test.cu rename to paddle/fluid/platform/device_context_test.cu diff --git a/paddle/platform/dynload/CMakeLists.txt b/paddle/fluid/platform/dynload/CMakeLists.txt similarity index 100% rename from paddle/platform/dynload/CMakeLists.txt rename to paddle/fluid/platform/dynload/CMakeLists.txt diff --git a/paddle/platform/dynload/cublas.cc b/paddle/fluid/platform/dynload/cublas.cc similarity index 100% rename from paddle/platform/dynload/cublas.cc rename to paddle/fluid/platform/dynload/cublas.cc diff --git a/paddle/platform/dynload/cublas.h b/paddle/fluid/platform/dynload/cublas.h similarity index 100% rename from paddle/platform/dynload/cublas.h rename to paddle/fluid/platform/dynload/cublas.h diff --git a/paddle/platform/dynload/cudnn.cc b/paddle/fluid/platform/dynload/cudnn.cc similarity index 100% rename from paddle/platform/dynload/cudnn.cc rename to paddle/fluid/platform/dynload/cudnn.cc diff --git a/paddle/platform/dynload/cudnn.h b/paddle/fluid/platform/dynload/cudnn.h similarity index 100% rename from paddle/platform/dynload/cudnn.h rename to paddle/fluid/platform/dynload/cudnn.h diff --git a/paddle/platform/dynload/curand.cc b/paddle/fluid/platform/dynload/curand.cc similarity index 100% rename from paddle/platform/dynload/curand.cc rename to paddle/fluid/platform/dynload/curand.cc diff --git a/paddle/platform/dynload/curand.h b/paddle/fluid/platform/dynload/curand.h similarity index 100% rename from paddle/platform/dynload/curand.h rename to paddle/fluid/platform/dynload/curand.h diff --git a/paddle/platform/dynload/dynamic_loader.cc b/paddle/fluid/platform/dynload/dynamic_loader.cc similarity index 100% rename from paddle/platform/dynload/dynamic_loader.cc rename to paddle/fluid/platform/dynload/dynamic_loader.cc diff --git a/paddle/platform/dynload/dynamic_loader.h b/paddle/fluid/platform/dynload/dynamic_loader.h similarity index 100% rename from paddle/platform/dynload/dynamic_loader.h rename to paddle/fluid/platform/dynload/dynamic_loader.h diff --git a/paddle/platform/dynload/nccl.cc b/paddle/fluid/platform/dynload/nccl.cc similarity index 100% rename from paddle/platform/dynload/nccl.cc rename to paddle/fluid/platform/dynload/nccl.cc diff --git a/paddle/platform/dynload/nccl.h b/paddle/fluid/platform/dynload/nccl.h similarity index 100% rename from paddle/platform/dynload/nccl.h rename to paddle/fluid/platform/dynload/nccl.h diff --git a/paddle/platform/dynload/warpctc.cc b/paddle/fluid/platform/dynload/warpctc.cc similarity index 100% rename from paddle/platform/dynload/warpctc.cc rename to paddle/fluid/platform/dynload/warpctc.cc diff --git a/paddle/platform/dynload/warpctc.h b/paddle/fluid/platform/dynload/warpctc.h similarity index 100% rename from paddle/platform/dynload/warpctc.h rename to paddle/fluid/platform/dynload/warpctc.h diff --git a/paddle/platform/enforce.cc b/paddle/fluid/platform/enforce.cc similarity index 100% rename from paddle/platform/enforce.cc rename to paddle/fluid/platform/enforce.cc diff --git a/paddle/platform/enforce.h b/paddle/fluid/platform/enforce.h similarity index 100% rename from paddle/platform/enforce.h rename to paddle/fluid/platform/enforce.h diff --git a/paddle/platform/enforce_test.cc b/paddle/fluid/platform/enforce_test.cc similarity index 100% rename from paddle/platform/enforce_test.cc rename to paddle/fluid/platform/enforce_test.cc diff --git a/paddle/platform/for_range.h b/paddle/fluid/platform/for_range.h similarity index 100% rename from paddle/platform/for_range.h rename to paddle/fluid/platform/for_range.h diff --git a/paddle/platform/gpu_info.cc b/paddle/fluid/platform/gpu_info.cc similarity index 100% rename from paddle/platform/gpu_info.cc rename to paddle/fluid/platform/gpu_info.cc diff --git a/paddle/platform/gpu_info.h b/paddle/fluid/platform/gpu_info.h similarity index 100% rename from paddle/platform/gpu_info.h rename to paddle/fluid/platform/gpu_info.h diff --git a/paddle/platform/hostdevice.h b/paddle/fluid/platform/hostdevice.h similarity index 100% rename from paddle/platform/hostdevice.h rename to paddle/fluid/platform/hostdevice.h diff --git a/paddle/platform/macros.h b/paddle/fluid/platform/macros.h similarity index 100% rename from paddle/platform/macros.h rename to paddle/fluid/platform/macros.h diff --git a/paddle/platform/mkldnn_helper.h b/paddle/fluid/platform/mkldnn_helper.h similarity index 100% rename from paddle/platform/mkldnn_helper.h rename to paddle/fluid/platform/mkldnn_helper.h diff --git a/paddle/platform/nccl_test.cu b/paddle/fluid/platform/nccl_test.cu similarity index 100% rename from paddle/platform/nccl_test.cu rename to paddle/fluid/platform/nccl_test.cu diff --git a/paddle/platform/place.cc b/paddle/fluid/platform/place.cc similarity index 100% rename from paddle/platform/place.cc rename to paddle/fluid/platform/place.cc diff --git a/paddle/platform/place.h b/paddle/fluid/platform/place.h similarity index 100% rename from paddle/platform/place.h rename to paddle/fluid/platform/place.h diff --git a/paddle/platform/place_test.cc b/paddle/fluid/platform/place_test.cc similarity index 100% rename from paddle/platform/place_test.cc rename to paddle/fluid/platform/place_test.cc diff --git a/paddle/platform/profiler.cc b/paddle/fluid/platform/profiler.cc similarity index 100% rename from paddle/platform/profiler.cc rename to paddle/fluid/platform/profiler.cc diff --git a/paddle/platform/profiler.h b/paddle/fluid/platform/profiler.h similarity index 100% rename from paddle/platform/profiler.h rename to paddle/fluid/platform/profiler.h diff --git a/paddle/platform/profiler_test.cc b/paddle/fluid/platform/profiler_test.cc similarity index 100% rename from paddle/platform/profiler_test.cc rename to paddle/fluid/platform/profiler_test.cc diff --git a/paddle/platform/transform.h b/paddle/fluid/platform/transform.h similarity index 100% rename from paddle/platform/transform.h rename to paddle/fluid/platform/transform.h diff --git a/paddle/platform/transform_test.cu b/paddle/fluid/platform/transform_test.cu similarity index 100% rename from paddle/platform/transform_test.cu rename to paddle/fluid/platform/transform_test.cu diff --git a/paddle/platform/variant.h b/paddle/fluid/platform/variant.h similarity index 100% rename from paddle/platform/variant.h rename to paddle/fluid/platform/variant.h diff --git a/paddle/pybind/.clang-format b/paddle/fluid/pybind/.clang-format similarity index 100% rename from paddle/pybind/.clang-format rename to paddle/fluid/pybind/.clang-format diff --git a/paddle/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt similarity index 100% rename from paddle/pybind/CMakeLists.txt rename to paddle/fluid/pybind/CMakeLists.txt diff --git a/paddle/pybind/const_value.cc b/paddle/fluid/pybind/const_value.cc similarity index 100% rename from paddle/pybind/const_value.cc rename to paddle/fluid/pybind/const_value.cc diff --git a/paddle/pybind/const_value.h b/paddle/fluid/pybind/const_value.h similarity index 100% rename from paddle/pybind/const_value.h rename to paddle/fluid/pybind/const_value.h diff --git a/paddle/pybind/exception.cc b/paddle/fluid/pybind/exception.cc similarity index 100% rename from paddle/pybind/exception.cc rename to paddle/fluid/pybind/exception.cc diff --git a/paddle/pybind/exception.h b/paddle/fluid/pybind/exception.h similarity index 100% rename from paddle/pybind/exception.h rename to paddle/fluid/pybind/exception.h diff --git a/paddle/pybind/protobuf.cc b/paddle/fluid/pybind/protobuf.cc similarity index 100% rename from paddle/pybind/protobuf.cc rename to paddle/fluid/pybind/protobuf.cc diff --git a/paddle/pybind/protobuf.h b/paddle/fluid/pybind/protobuf.h similarity index 100% rename from paddle/pybind/protobuf.h rename to paddle/fluid/pybind/protobuf.h diff --git a/paddle/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc similarity index 100% rename from paddle/pybind/pybind.cc rename to paddle/fluid/pybind/pybind.cc diff --git a/paddle/pybind/tensor_py.h b/paddle/fluid/pybind/tensor_py.h similarity index 100% rename from paddle/pybind/tensor_py.h rename to paddle/fluid/pybind/tensor_py.h -- GitLab From fc374821ddb9d40daaaf443c3d78ac2d3643ce03 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Fri, 9 Feb 2018 17:18:35 -0800 Subject: [PATCH 127/138] Correct #include path --- paddle/fluid/framework/attribute.cc | 2 +- paddle/fluid/framework/attribute.h | 6 +- paddle/fluid/framework/backward.cc | 10 +- paddle/fluid/framework/backward.h | 4 +- paddle/fluid/framework/backward_test.cc | 12 +- paddle/fluid/framework/block_desc.cc | 6 +- paddle/fluid/framework/block_desc.h | 8 +- paddle/fluid/framework/channel.h | 4 +- paddle/fluid/framework/channel_test.cc | 2 +- .../fluid/framework/data_device_transform.cc | 2 +- .../fluid/framework/data_device_transform.h | 8 +- .../framework/data_device_transform_test.cu | 14 +- paddle/fluid/framework/data_layout.h | 2 +- .../fluid/framework/data_layout_transform.cc | 4 +- .../fluid/framework/data_layout_transform.h | 6 +- .../framework/data_layout_transform_test.cc | 4 +- paddle/fluid/framework/data_transform.cc | 8 +- paddle/fluid/framework/data_transform.h | 16 +-- paddle/fluid/framework/data_type.h | 4 +- paddle/fluid/framework/data_type_transform.cc | 6 +- paddle/fluid/framework/data_type_transform.h | 8 +- .../framework/data_type_transform_test.cc | 2 +- paddle/fluid/framework/ddim.cc | 4 +- paddle/fluid/framework/ddim.h | 6 +- paddle/fluid/framework/ddim_test.cc | 2 +- .../framework/details/buffered_channel.h | 4 +- .../fluid/framework/details/cow_ptr_test.cc | 2 +- paddle/fluid/framework/details/op_registry.h | 10 +- .../framework/details/unbuffered_channel.h | 2 +- paddle/fluid/framework/dim.h | 4 +- paddle/fluid/framework/dim_test.cu | 2 +- paddle/fluid/framework/eigen.h | 2 +- paddle/fluid/framework/eigen_test.cc | 2 +- paddle/fluid/framework/executor.cc | 18 +-- paddle/fluid/framework/executor.h | 10 +- paddle/fluid/framework/feed_fetch_method.cc | 4 +- paddle/fluid/framework/feed_fetch_method.h | 4 +- paddle/fluid/framework/feed_fetch_type.h | 2 +- paddle/fluid/framework/grad_op_desc_maker.h | 4 +- paddle/fluid/framework/init.cc | 8 +- paddle/fluid/framework/init_test.cc | 4 +- paddle/fluid/framework/lod_rank_table.cc | 2 +- paddle/fluid/framework/lod_rank_table.h | 2 +- paddle/fluid/framework/lod_tensor.cc | 10 +- paddle/fluid/framework/lod_tensor.h | 12 +- paddle/fluid/framework/lod_tensor_array.h | 2 +- paddle/fluid/framework/lod_tensor_test.cc | 2 +- paddle/fluid/framework/lod_tensor_test.cu | 10 +- paddle/fluid/framework/mixed_vector.h | 4 +- paddle/fluid/framework/mixed_vector_test.cu | 4 +- paddle/fluid/framework/op_desc.cc | 10 +- paddle/fluid/framework/op_desc.h | 6 +- paddle/fluid/framework/op_info.cc | 2 +- paddle/fluid/framework/op_info.h | 6 +- paddle/fluid/framework/op_kernel_type.h | 10 +- paddle/fluid/framework/op_kernel_type_test.cc | 2 +- paddle/fluid/framework/op_proto_maker.cc | 2 +- paddle/fluid/framework/op_proto_maker.h | 4 +- paddle/fluid/framework/op_proto_maker_test.cc | 2 +- paddle/fluid/framework/op_registry.cc | 2 +- paddle/fluid/framework/op_registry.h | 16 +-- paddle/fluid/framework/op_registry_test.cc | 2 +- paddle/fluid/framework/operator.cc | 10 +- paddle/fluid/framework/operator.h | 22 ++-- paddle/fluid/framework/operator_test.cc | 8 +- paddle/fluid/framework/program_desc.cc | 6 +- paddle/fluid/framework/program_desc.h | 8 +- paddle/fluid/framework/program_desc_test.cc | 4 +- paddle/fluid/framework/prune.cc | 2 +- paddle/fluid/framework/prune.h | 4 +- paddle/fluid/framework/prune_test.cc | 14 +- paddle/fluid/framework/reader.cc | 2 +- paddle/fluid/framework/reader.h | 4 +- paddle/fluid/framework/scope.cc | 4 +- paddle/fluid/framework/scope.h | 4 +- paddle/fluid/framework/scope_test.cc | 2 +- paddle/fluid/framework/selected_rows.cc | 2 +- paddle/fluid/framework/selected_rows.h | 4 +- paddle/fluid/framework/selected_rows_test.cc | 2 +- paddle/fluid/framework/shape_inference.cc | 4 +- paddle/fluid/framework/shape_inference.h | 6 +- paddle/fluid/framework/tensor.cc | 2 +- paddle/fluid/framework/tensor.h | 14 +- paddle/fluid/framework/tensor_impl.h | 4 +- paddle/fluid/framework/tensor_test.cc | 2 +- paddle/fluid/framework/tensor_util.cc | 2 +- paddle/fluid/framework/tensor_util.cu | 120 +++++++++++++++++- paddle/fluid/framework/tensor_util.h | 10 +- paddle/fluid/framework/tensor_util_test.cc | 2 +- paddle/fluid/framework/tensor_util_test.cu | 6 +- paddle/fluid/framework/threadpool.cc | 4 +- paddle/fluid/framework/threadpool.h | 4 +- paddle/fluid/framework/type_defs.h | 2 +- paddle/fluid/framework/var_desc.cc | 4 +- paddle/fluid/framework/var_desc.h | 2 +- paddle/fluid/framework/var_type.h | 14 +- paddle/fluid/framework/var_type_inference.h | 2 +- .../framework/var_type_inference_test.cc | 8 +- paddle/fluid/framework/variable.h | 2 +- paddle/fluid/framework/variable_test.cc | 2 +- paddle/fluid/inference/io.cc | 6 +- paddle/fluid/inference/io.h | 6 +- .../fluid/inference/tests/book/test_helper.h | 4 +- paddle/fluid/memory/.clang-format | 6 +- paddle/fluid/memory/detail/buddy_allocator.cc | 2 +- paddle/fluid/memory/detail/buddy_allocator.h | 12 +- paddle/fluid/memory/detail/memory_block.cc | 8 +- paddle/fluid/memory/detail/meta_cache.cc | 6 +- paddle/fluid/memory/detail/meta_cache.h | 4 +- paddle/fluid/memory/detail/meta_data.cc | 2 +- paddle/fluid/memory/detail/meta_data.h | 2 +- .../fluid/memory/detail/system_allocator.cc | 8 +- .../memory/detail/system_allocator_test.cc | 2 +- paddle/fluid/memory/memcpy.cc | 2 +- paddle/fluid/memory/memcpy.h | 4 +- paddle/fluid/memory/memory.cc | 8 +- paddle/fluid/memory/memory.h | 2 +- paddle/fluid/memory/memory_test.cc | 12 +- paddle/fluid/operators/.clang-format | 6 +- paddle/fluid/operators/accuracy_op.cc | 2 +- paddle/fluid/operators/accuracy_op.cu | 6 +- paddle/fluid/operators/accuracy_op.h | 2 +- paddle/fluid/operators/activation_op.cc | 2 +- paddle/fluid/operators/activation_op.cu | 2 +- paddle/fluid/operators/activation_op.h | 6 +- paddle/fluid/operators/adadelta_op.cc | 2 +- paddle/fluid/operators/adadelta_op.cu | 2 +- paddle/fluid/operators/adadelta_op.h | 4 +- paddle/fluid/operators/adagrad_op.cc | 6 +- paddle/fluid/operators/adagrad_op.cu | 8 +- paddle/fluid/operators/adagrad_op.h | 4 +- paddle/fluid/operators/adam_op.cc | 2 +- paddle/fluid/operators/adam_op.cu | 2 +- paddle/fluid/operators/adam_op.h | 8 +- paddle/fluid/operators/adamax_op.cc | 2 +- paddle/fluid/operators/adamax_op.cu | 2 +- paddle/fluid/operators/adamax_op.h | 4 +- paddle/fluid/operators/array_operator.h | 6 +- .../fluid/operators/array_to_lod_tensor_op.cc | 10 +- paddle/fluid/operators/assign_op.cc | 8 +- paddle/fluid/operators/assign_value_op.cc | 2 +- paddle/fluid/operators/assign_value_op.cu.cc | 2 +- paddle/fluid/operators/assign_value_op.h | 6 +- paddle/fluid/operators/auc_op.cc | 2 +- paddle/fluid/operators/auc_op.h | 4 +- paddle/fluid/operators/batch_norm_op.cc | 4 +- paddle/fluid/operators/batch_norm_op.cu.cc | 8 +- paddle/fluid/operators/batch_norm_op.h | 4 +- .../fluid/operators/beam_search_decode_op.cc | 4 +- .../fluid/operators/beam_search_decode_op.h | 4 +- .../operators/beam_search_decode_op_test.cc | 2 +- paddle/fluid/operators/beam_search_op.cc | 6 +- paddle/fluid/operators/beam_search_op.h | 4 +- paddle/fluid/operators/beam_search_op_test.cc | 2 +- .../operators/bilinear_tensor_product_op.cc | 2 +- .../operators/bilinear_tensor_product_op.cu | 2 +- .../operators/bilinear_tensor_product_op.h | 6 +- paddle/fluid/operators/bipartite_match_op.cc | 4 +- paddle/fluid/operators/box_coder_op.cc | 2 +- paddle/fluid/operators/box_coder_op.cu | 4 +- paddle/fluid/operators/box_coder_op.h | 4 +- paddle/fluid/operators/cast_op.cc | 4 +- paddle/fluid/operators/cast_op.cu | 2 +- paddle/fluid/operators/cast_op.h | 8 +- paddle/fluid/operators/chunk_eval_op.cc | 2 +- paddle/fluid/operators/chunk_eval_op.h | 4 +- paddle/fluid/operators/clip_by_norm_op.cc | 2 +- paddle/fluid/operators/clip_by_norm_op.cu | 2 +- paddle/fluid/operators/clip_by_norm_op.h | 6 +- paddle/fluid/operators/clip_op.cc | 2 +- paddle/fluid/operators/clip_op.cu | 2 +- paddle/fluid/operators/clip_op.h | 6 +- paddle/fluid/operators/compare_op.cc | 4 +- paddle/fluid/operators/compare_op.cu | 2 +- paddle/fluid/operators/compare_op.h | 6 +- paddle/fluid/operators/concat_op.cc | 2 +- paddle/fluid/operators/concat_op.cu.cc | 2 +- paddle/fluid/operators/concat_op.h | 4 +- paddle/fluid/operators/cond_op.cc | 8 +- paddle/fluid/operators/cond_op.h | 10 +- .../fluid/operators/conditional_block_op.cc | 4 +- paddle/fluid/operators/conv_cudnn_op.cu.cc | 12 +- paddle/fluid/operators/conv_op.cc | 2 +- paddle/fluid/operators/conv_op.cu.cc | 2 +- paddle/fluid/operators/conv_op.h | 12 +- paddle/fluid/operators/conv_shift_op.cc | 4 +- paddle/fluid/operators/conv_shift_op.cu | 6 +- paddle/fluid/operators/conv_shift_op.h | 2 +- .../operators/conv_transpose_cudnn_op.cu.cc | 12 +- paddle/fluid/operators/conv_transpose_op.cc | 2 +- .../fluid/operators/conv_transpose_op.cu.cc | 2 +- paddle/fluid/operators/conv_transpose_op.h | 10 +- paddle/fluid/operators/cos_sim_op.cc | 2 +- paddle/fluid/operators/cos_sim_op.cu | 2 +- paddle/fluid/operators/cos_sim_op.h | 8 +- paddle/fluid/operators/create_reader_op.cc | 4 +- paddle/fluid/operators/crf_decoding_op.cc | 2 +- paddle/fluid/operators/crf_decoding_op.h | 6 +- paddle/fluid/operators/crop_op.cc | 2 +- paddle/fluid/operators/crop_op.cu | 2 +- paddle/fluid/operators/crop_op.h | 6 +- paddle/fluid/operators/cross_entropy_op.cc | 2 +- paddle/fluid/operators/cross_entropy_op.cu | 2 +- paddle/fluid/operators/cross_entropy_op.h | 8 +- paddle/fluid/operators/ctc_align_op.cc | 2 +- paddle/fluid/operators/ctc_align_op.cu | 2 +- paddle/fluid/operators/ctc_align_op.h | 4 +- paddle/fluid/operators/cum_op.h | 8 +- paddle/fluid/operators/cumsum_op.cc | 2 +- paddle/fluid/operators/cumsum_op.cu | 2 +- paddle/fluid/operators/decayed_adagrad_op.cc | 2 +- paddle/fluid/operators/decayed_adagrad_op.cu | 2 +- paddle/fluid/operators/decayed_adagrad_op.h | 4 +- paddle/fluid/operators/detail/grpc_client.cc | 2 +- paddle/fluid/operators/detail/grpc_client.h | 12 +- paddle/fluid/operators/detail/grpc_server.cc | 2 +- paddle/fluid/operators/detail/grpc_server.h | 16 +-- .../operators/detail/sendrecvop_utils.cc | 2 +- .../fluid/operators/detail/sendrecvop_utils.h | 16 +-- .../fluid/operators/detail/strided_memcpy.h | 6 +- paddle/fluid/operators/detection_output_op.cc | 2 +- .../fluid/operators/detection_output_op.cu.cc | 2 +- paddle/fluid/operators/detection_output_op.h | 12 +- paddle/fluid/operators/dropout_op.cc | 2 +- paddle/fluid/operators/dropout_op.cu | 2 +- paddle/fluid/operators/dropout_op.h | 4 +- paddle/fluid/operators/edit_distance_op.cc | 2 +- paddle/fluid/operators/edit_distance_op.cu | 8 +- paddle/fluid/operators/edit_distance_op.h | 4 +- paddle/fluid/operators/elementwise_add_op.cc | 4 +- paddle/fluid/operators/elementwise_add_op.cu | 2 +- paddle/fluid/operators/elementwise_add_op.h | 2 +- paddle/fluid/operators/elementwise_div_op.cc | 4 +- paddle/fluid/operators/elementwise_div_op.cu | 2 +- paddle/fluid/operators/elementwise_div_op.h | 2 +- paddle/fluid/operators/elementwise_max_op.cc | 4 +- paddle/fluid/operators/elementwise_max_op.cu | 2 +- paddle/fluid/operators/elementwise_max_op.h | 2 +- paddle/fluid/operators/elementwise_min_op.cc | 4 +- paddle/fluid/operators/elementwise_min_op.cu | 2 +- paddle/fluid/operators/elementwise_min_op.h | 2 +- paddle/fluid/operators/elementwise_mul_op.cc | 4 +- paddle/fluid/operators/elementwise_mul_op.cu | 2 +- paddle/fluid/operators/elementwise_mul_op.h | 2 +- paddle/fluid/operators/elementwise_op.h | 4 +- .../fluid/operators/elementwise_op_function.h | 10 +- paddle/fluid/operators/elementwise_pow_op.cc | 4 +- paddle/fluid/operators/elementwise_pow_op.cu | 2 +- paddle/fluid/operators/elementwise_pow_op.h | 2 +- paddle/fluid/operators/elementwise_sub_op.cc | 4 +- paddle/fluid/operators/elementwise_sub_op.cu | 2 +- paddle/fluid/operators/elementwise_sub_op.h | 2 +- paddle/fluid/operators/expand_op.cc | 2 +- paddle/fluid/operators/expand_op.cu | 2 +- paddle/fluid/operators/expand_op.h | 6 +- paddle/fluid/operators/feed_op.cc | 6 +- paddle/fluid/operators/fetch_op.cc | 6 +- .../fill_constant_batch_size_like_op.cc | 2 +- .../fill_constant_batch_size_like_op.cu.cc | 4 +- .../fill_constant_batch_size_like_op.h | 4 +- paddle/fluid/operators/fill_constant_op.cc | 8 +- paddle/fluid/operators/fill_op.cc | 8 +- paddle/fluid/operators/fill_zeros_like_op.cc | 2 +- .../fluid/operators/fill_zeros_like_op.cu.cc | 4 +- paddle/fluid/operators/fill_zeros_like_op.h | 4 +- paddle/fluid/operators/ftrl_op.cc | 2 +- paddle/fluid/operators/ftrl_op.cu | 2 +- paddle/fluid/operators/ftrl_op.h | 4 +- paddle/fluid/operators/gather.cu.h | 4 +- paddle/fluid/operators/gather.h | 8 +- paddle/fluid/operators/gather_op.cc | 4 +- paddle/fluid/operators/gather_op.cu | 4 +- paddle/fluid/operators/gather_op.h | 4 +- paddle/fluid/operators/gather_test.cc | 8 +- paddle/fluid/operators/gaussian_random_op.cc | 2 +- paddle/fluid/operators/gaussian_random_op.cu | 4 +- paddle/fluid/operators/get_places_op.cc | 8 +- paddle/fluid/operators/gru_op.cc | 2 +- paddle/fluid/operators/gru_op.cu.cc | 2 +- paddle/fluid/operators/gru_op.h | 12 +- paddle/fluid/operators/gru_unit_op.cc | 2 +- paddle/fluid/operators/gru_unit_op.cu | 2 +- paddle/fluid/operators/gru_unit_op.h | 8 +- paddle/fluid/operators/hinge_loss_op.cc | 2 +- paddle/fluid/operators/hinge_loss_op.cu | 2 +- paddle/fluid/operators/hinge_loss_op.h | 4 +- paddle/fluid/operators/huber_loss_op.cc | 2 +- paddle/fluid/operators/huber_loss_op.cu | 2 +- paddle/fluid/operators/huber_loss_op.h | 6 +- paddle/fluid/operators/im2sequence_op.cc | 2 +- paddle/fluid/operators/im2sequence_op.cu | 2 +- paddle/fluid/operators/im2sequence_op.h | 10 +- paddle/fluid/operators/increment_op.cc | 2 +- paddle/fluid/operators/iou_similarity_op.cc | 2 +- paddle/fluid/operators/iou_similarity_op.cu | 2 +- paddle/fluid/operators/iou_similarity_op.h | 4 +- paddle/fluid/operators/is_empty_op.cc | 4 +- paddle/fluid/operators/l1_norm_op.cc | 2 +- paddle/fluid/operators/l1_norm_op.cu | 2 +- paddle/fluid/operators/l1_norm_op.h | 4 +- paddle/fluid/operators/label_smooth_op.cc | 2 +- paddle/fluid/operators/label_smooth_op.cu | 2 +- paddle/fluid/operators/label_smooth_op.h | 4 +- paddle/fluid/operators/layer_norm_op.cc | 2 +- paddle/fluid/operators/layer_norm_op.cu | 2 +- paddle/fluid/operators/layer_norm_op.h | 8 +- paddle/fluid/operators/linear_chain_crf_op.cc | 2 +- paddle/fluid/operators/linear_chain_crf_op.cu | 2 +- paddle/fluid/operators/linear_chain_crf_op.h | 6 +- paddle/fluid/operators/listen_and_serv_op.cc | 16 +-- paddle/fluid/operators/load_combine_op.cc | 4 +- paddle/fluid/operators/load_op.cc | 4 +- paddle/fluid/operators/lod_array_length_op.cc | 4 +- paddle/fluid/operators/lod_rank_table_op.cc | 4 +- paddle/fluid/operators/lod_reset_op.cc | 2 +- paddle/fluid/operators/lod_reset_op.cu | 2 +- paddle/fluid/operators/lod_reset_op.h | 4 +- .../fluid/operators/lod_tensor_to_array_op.cc | 10 +- paddle/fluid/operators/log_loss_op.cc | 2 +- paddle/fluid/operators/log_loss_op.cu | 2 +- paddle/fluid/operators/log_loss_op.h | 4 +- paddle/fluid/operators/logical_op.cc | 4 +- paddle/fluid/operators/logical_op.cu | 2 +- paddle/fluid/operators/logical_op.h | 4 +- paddle/fluid/operators/lookup_table_op.cc | 4 +- paddle/fluid/operators/lookup_table_op.cu | 10 +- paddle/fluid/operators/lookup_table_op.h | 8 +- paddle/fluid/operators/lrn_op.cc | 2 +- paddle/fluid/operators/lrn_op.cu | 2 +- paddle/fluid/operators/lrn_op.h | 6 +- paddle/fluid/operators/lstm_op.cc | 2 +- paddle/fluid/operators/lstm_op.cu.cc | 2 +- paddle/fluid/operators/lstm_op.h | 10 +- paddle/fluid/operators/lstm_unit_op.cc | 2 +- paddle/fluid/operators/lstm_unit_op.cu | 8 +- paddle/fluid/operators/lstm_unit_op.h | 2 +- paddle/fluid/operators/lstmp_op.cc | 2 +- paddle/fluid/operators/lstmp_op.cu | 2 +- paddle/fluid/operators/lstmp_op.h | 16 +-- paddle/fluid/operators/margin_rank_loss_op.cc | 2 +- paddle/fluid/operators/margin_rank_loss_op.cu | 2 +- paddle/fluid/operators/margin_rank_loss_op.h | 4 +- .../fluid/operators/math/context_project.cc | 2 +- .../fluid/operators/math/context_project.cu | 2 +- paddle/fluid/operators/math/context_project.h | 6 +- .../fluid/operators/math/cos_sim_functor.cc | 2 +- .../fluid/operators/math/cos_sim_functor.cu | 4 +- paddle/fluid/operators/math/cos_sim_functor.h | 4 +- paddle/fluid/operators/math/cross_entropy.cc | 2 +- paddle/fluid/operators/math/cross_entropy.cu | 2 +- paddle/fluid/operators/math/cross_entropy.h | 6 +- paddle/fluid/operators/math/depthwise_conv.cu | 4 +- paddle/fluid/operators/math/depthwise_conv.h | 6 +- .../math/detail/activation_functions.h | 4 +- .../operators/math/detail/avx_functions.cc | 2 +- .../operators/math/detail/gru_cpu_kernel.h | 4 +- .../operators/math/detail/gru_gpu_kernel.h | 8 +- .../fluid/operators/math/detail/gru_kernel.h | 4 +- .../operators/math/detail/lstm_cpu_kernel.h | 4 +- .../operators/math/detail/lstm_gpu_kernel.h | 8 +- .../fluid/operators/math/detail/lstm_kernel.h | 4 +- paddle/fluid/operators/math/detection_util.h | 4 +- paddle/fluid/operators/math/gru_compute.cc | 8 +- paddle/fluid/operators/math/gru_compute.cu | 8 +- paddle/fluid/operators/math/gru_compute.h | 6 +- paddle/fluid/operators/math/im2col.cc | 2 +- paddle/fluid/operators/math/im2col.cu | 4 +- paddle/fluid/operators/math/im2col.h | 6 +- paddle/fluid/operators/math/im2col_test.cc | 2 +- paddle/fluid/operators/math/lstm_compute.cc | 6 +- paddle/fluid/operators/math/lstm_compute.cu | 6 +- paddle/fluid/operators/math/lstm_compute.h | 6 +- paddle/fluid/operators/math/math_function.cc | 6 +- paddle/fluid/operators/math/math_function.cu | 6 +- paddle/fluid/operators/math/math_function.h | 10 +- .../fluid/operators/math/math_function_impl.h | 4 +- .../operators/math/math_function_test.cc | 2 +- .../operators/math/math_function_test.cu | 2 +- paddle/fluid/operators/math/matmul.h | 2 +- paddle/fluid/operators/math/maxouting.cc | 2 +- paddle/fluid/operators/math/maxouting.cu | 4 +- paddle/fluid/operators/math/maxouting.h | 6 +- paddle/fluid/operators/math/pooling.cc | 2 +- paddle/fluid/operators/math/pooling.cu | 4 +- paddle/fluid/operators/math/pooling.h | 8 +- .../operators/math/selected_rows_functor.cc | 4 +- .../operators/math/selected_rows_functor.cu | 6 +- .../operators/math/selected_rows_functor.h | 6 +- .../math/selected_rows_functor_test.cc | 4 +- .../math/selected_rows_functor_test.cu | 4 +- paddle/fluid/operators/math/sequence2batch.cc | 4 +- paddle/fluid/operators/math/sequence2batch.cu | 2 +- paddle/fluid/operators/math/sequence2batch.h | 8 +- .../fluid/operators/math/sequence_padding.cc | 2 +- .../fluid/operators/math/sequence_padding.cu | 2 +- .../fluid/operators/math/sequence_padding.h | 4 +- .../operators/math/sequence_padding_test.cc | 2 +- .../fluid/operators/math/sequence_pooling.cc | 4 +- .../fluid/operators/math/sequence_pooling.cu | 4 +- .../fluid/operators/math/sequence_pooling.h | 6 +- paddle/fluid/operators/math/sequence_scale.cc | 2 +- paddle/fluid/operators/math/sequence_scale.cu | 4 +- paddle/fluid/operators/math/sequence_scale.h | 4 +- paddle/fluid/operators/math/softmax.cc | 4 +- paddle/fluid/operators/math/softmax.cu | 4 +- paddle/fluid/operators/math/softmax.h | 2 +- paddle/fluid/operators/math/softmax_impl.h | 4 +- paddle/fluid/operators/math/unpooling.cc | 2 +- paddle/fluid/operators/math/unpooling.cu | 4 +- paddle/fluid/operators/math/unpooling.h | 2 +- paddle/fluid/operators/math/vol2col.cc | 2 +- paddle/fluid/operators/math/vol2col.cu | 4 +- paddle/fluid/operators/math/vol2col.h | 6 +- paddle/fluid/operators/math/vol2col_test.cc | 2 +- paddle/fluid/operators/matmul_op.cc | 2 +- paddle/fluid/operators/matmul_op.cu.cc | 2 +- paddle/fluid/operators/matmul_op.h | 6 +- paddle/fluid/operators/max_sequence_len_op.cc | 6 +- paddle/fluid/operators/maxout_op.cc | 2 +- paddle/fluid/operators/maxout_op.cu.cc | 2 +- paddle/fluid/operators/maxout_op.h | 6 +- paddle/fluid/operators/mean_op.cc | 2 +- paddle/fluid/operators/mean_op.cu | 2 +- paddle/fluid/operators/mean_op.h | 4 +- paddle/fluid/operators/merge_lod_tensor_op.cc | 4 +- .../fluid/operators/mine_hard_examples_op.cc | 4 +- paddle/fluid/operators/minus_op.cc | 4 +- paddle/fluid/operators/minus_op.cu | 2 +- paddle/fluid/operators/minus_op.h | 4 +- .../fluid/operators/modified_huber_loss_op.cc | 2 +- .../fluid/operators/modified_huber_loss_op.cu | 6 +- .../fluid/operators/modified_huber_loss_op.h | 6 +- paddle/fluid/operators/momentum_op.cc | 2 +- paddle/fluid/operators/momentum_op.cu | 2 +- paddle/fluid/operators/momentum_op.h | 4 +- paddle/fluid/operators/mul_op.cc | 2 +- paddle/fluid/operators/mul_op.cu.cc | 2 +- paddle/fluid/operators/mul_op.h | 4 +- paddle/fluid/operators/multiclass_nms_op.cc | 2 +- paddle/fluid/operators/multiplex_op.cc | 2 +- paddle/fluid/operators/multiplex_op.cu | 4 +- paddle/fluid/operators/multiplex_op.h | 6 +- .../fluid/operators/nccl/nccl_gpu_common.cc | 4 +- paddle/fluid/operators/nccl/nccl_gpu_common.h | 8 +- paddle/fluid/operators/nccl_op.cc | 4 +- paddle/fluid/operators/nccl_op.cu.cc | 6 +- paddle/fluid/operators/nccl_op_test.cu.cc | 22 ++-- paddle/fluid/operators/nce_op.cc | 2 +- paddle/fluid/operators/nce_op.h | 4 +- paddle/fluid/operators/net_op.cc | 4 +- paddle/fluid/operators/net_op.h | 4 +- paddle/fluid/operators/net_op_test.cc | 2 +- paddle/fluid/operators/norm_op.cc | 2 +- paddle/fluid/operators/norm_op.cu | 2 +- paddle/fluid/operators/norm_op.h | 4 +- paddle/fluid/operators/one_hot_op.cc | 4 +- paddle/fluid/operators/one_hot_op.cu | 6 +- paddle/fluid/operators/one_hot_op.h | 4 +- paddle/fluid/operators/pad_op.cc | 2 +- paddle/fluid/operators/pad_op.cu | 2 +- paddle/fluid/operators/pad_op.h | 4 +- paddle/fluid/operators/parallel_do_op.cc | 8 +- paddle/fluid/operators/pool_cudnn_op.cu.cc | 6 +- paddle/fluid/operators/pool_op.cc | 2 +- paddle/fluid/operators/pool_op.cu.cc | 2 +- paddle/fluid/operators/pool_op.h | 8 +- paddle/fluid/operators/pool_with_index_op.cc | 2 +- .../fluid/operators/pool_with_index_op.cu.cc | 2 +- paddle/fluid/operators/pool_with_index_op.h | 8 +- .../operators/positive_negative_pair_op.cc | 2 +- .../operators/positive_negative_pair_op.h | 4 +- paddle/fluid/operators/precision_recall_op.cc | 2 +- paddle/fluid/operators/precision_recall_op.h | 4 +- paddle/fluid/operators/prelu_op.cc | 4 +- paddle/fluid/operators/prelu_op.cu | 2 +- paddle/fluid/operators/prelu_op.h | 6 +- paddle/fluid/operators/print_op.cc | 4 +- paddle/fluid/operators/prior_box_op.cc | 2 +- paddle/fluid/operators/prior_box_op.h | 6 +- paddle/fluid/operators/proximal_adagrad_op.cc | 2 +- paddle/fluid/operators/proximal_adagrad_op.cu | 2 +- paddle/fluid/operators/proximal_adagrad_op.h | 4 +- paddle/fluid/operators/proximal_gd_op.cc | 2 +- paddle/fluid/operators/proximal_gd_op.cu | 2 +- paddle/fluid/operators/proximal_gd_op.h | 4 +- paddle/fluid/operators/rank_loss_op.cc | 2 +- paddle/fluid/operators/rank_loss_op.cu | 2 +- paddle/fluid/operators/rank_loss_op.h | 4 +- paddle/fluid/operators/read_op.cc | 4 +- paddle/fluid/operators/recurrent_op.cc | 4 +- paddle/fluid/operators/recv_op.cc | 10 +- paddle/fluid/operators/reduce_op.cc | 2 +- paddle/fluid/operators/reduce_op.cu | 2 +- paddle/fluid/operators/reduce_op.h | 4 +- .../reorder_lod_tensor_by_rank_op.cc | 8 +- paddle/fluid/operators/reshape_op.cc | 2 +- paddle/fluid/operators/reshape_op.cu | 2 +- paddle/fluid/operators/reshape_op.h | 4 +- paddle/fluid/operators/rmsprop_op.cc | 2 +- paddle/fluid/operators/rmsprop_op.cu | 2 +- paddle/fluid/operators/rmsprop_op.h | 4 +- .../fluid/operators/rnn_memory_helper_op.cc | 4 +- paddle/fluid/operators/roi_pool_op.cc | 2 +- paddle/fluid/operators/roi_pool_op.cu | 4 +- paddle/fluid/operators/roi_pool_op.h | 4 +- paddle/fluid/operators/row_conv_op.cc | 4 +- paddle/fluid/operators/row_conv_op.cu | 6 +- paddle/fluid/operators/row_conv_op.h | 2 +- paddle/fluid/operators/save_combine_op.cc | 10 +- .../operators/save_load_combine_op_test.cc | 2 +- paddle/fluid/operators/save_load_op_test.cc | 2 +- paddle/fluid/operators/save_op.cc | 10 +- paddle/fluid/operators/scale_op.cc | 4 +- paddle/fluid/operators/scale_op.cu | 2 +- paddle/fluid/operators/scale_op.h | 4 +- paddle/fluid/operators/scatter.cu.h | 4 +- paddle/fluid/operators/scatter.h | 8 +- paddle/fluid/operators/scatter_op.cc | 4 +- paddle/fluid/operators/scatter_op.cu | 2 +- paddle/fluid/operators/scatter_op.h | 4 +- paddle/fluid/operators/scatter_test.cc | 8 +- paddle/fluid/operators/send_op.cc | 10 +- paddle/fluid/operators/send_recv_op_test.cc | 10 +- paddle/fluid/operators/sequence_concat_op.cc | 2 +- .../fluid/operators/sequence_concat_op.cu.cc | 2 +- paddle/fluid/operators/sequence_concat_op.h | 4 +- paddle/fluid/operators/sequence_conv_op.cc | 2 +- paddle/fluid/operators/sequence_conv_op.cu.cc | 2 +- paddle/fluid/operators/sequence_conv_op.h | 6 +- paddle/fluid/operators/sequence_erase_op.cc | 2 +- paddle/fluid/operators/sequence_erase_op.cu | 4 +- paddle/fluid/operators/sequence_erase_op.h | 2 +- paddle/fluid/operators/sequence_expand_op.cc | 2 +- paddle/fluid/operators/sequence_expand_op.cu | 2 +- paddle/fluid/operators/sequence_expand_op.h | 4 +- paddle/fluid/operators/sequence_pool_op.cc | 2 +- paddle/fluid/operators/sequence_pool_op.cu | 2 +- paddle/fluid/operators/sequence_pool_op.h | 8 +- paddle/fluid/operators/sequence_reshape_op.cc | 4 +- paddle/fluid/operators/sequence_reshape_op.cu | 2 +- paddle/fluid/operators/sequence_reshape_op.h | 4 +- paddle/fluid/operators/sequence_slice_op.cc | 2 +- paddle/fluid/operators/sequence_slice_op.cu | 2 +- paddle/fluid/operators/sequence_slice_op.h | 6 +- paddle/fluid/operators/sequence_softmax_op.cc | 2 +- .../fluid/operators/sequence_softmax_op.cu.cc | 2 +- paddle/fluid/operators/sequence_softmax_op.h | 4 +- paddle/fluid/operators/sgd_op.cc | 2 +- paddle/fluid/operators/sgd_op.cu | 4 +- paddle/fluid/operators/sgd_op.h | 6 +- .../fluid/operators/shrink_rnn_memory_op.cc | 8 +- .../sigmoid_cross_entropy_with_logits_op.cc | 2 +- .../sigmoid_cross_entropy_with_logits_op.cu | 2 +- .../sigmoid_cross_entropy_with_logits_op.h | 4 +- paddle/fluid/operators/sign_op.cc | 2 +- paddle/fluid/operators/sign_op.cu | 2 +- paddle/fluid/operators/sign_op.h | 4 +- paddle/fluid/operators/smooth_l1_loss_op.cc | 2 +- paddle/fluid/operators/smooth_l1_loss_op.cu | 2 +- paddle/fluid/operators/smooth_l1_loss_op.h | 6 +- paddle/fluid/operators/softmax_op.cc | 2 +- paddle/fluid/operators/softmax_op.cu.cc | 2 +- paddle/fluid/operators/softmax_op.h | 4 +- .../softmax_with_cross_entropy_op.cc | 2 +- .../softmax_with_cross_entropy_op.cu | 2 +- .../operators/softmax_with_cross_entropy_op.h | 8 +- paddle/fluid/operators/split_lod_tensor_op.cc | 6 +- paddle/fluid/operators/split_op.cc | 4 +- paddle/fluid/operators/split_op.cu.cc | 2 +- paddle/fluid/operators/split_op.h | 4 +- .../fluid/operators/split_selected_rows_op.cc | 2 +- .../fluid/operators/split_selected_rows_op.cu | 2 +- .../fluid/operators/split_selected_rows_op.h | 4 +- paddle/fluid/operators/spp_op.cc | 2 +- paddle/fluid/operators/spp_op.cu.cc | 2 +- paddle/fluid/operators/spp_op.h | 8 +- .../fluid/operators/squared_l2_distance_op.cc | 2 +- .../fluid/operators/squared_l2_distance_op.cu | 2 +- .../fluid/operators/squared_l2_distance_op.h | 4 +- paddle/fluid/operators/squared_l2_norm_op.cc | 2 +- paddle/fluid/operators/squared_l2_norm_op.cu | 2 +- paddle/fluid/operators/squared_l2_norm_op.h | 4 +- paddle/fluid/operators/strided_memcpy.h | 2 +- paddle/fluid/operators/strided_memcpy_test.cc | 4 +- paddle/fluid/operators/sum_op.cc | 6 +- paddle/fluid/operators/sum_op.cu | 2 +- paddle/fluid/operators/sum_op.h | 10 +- paddle/fluid/operators/target_assign_op.cc | 2 +- paddle/fluid/operators/target_assign_op.cu | 2 +- paddle/fluid/operators/target_assign_op.h | 6 +- .../operators/tensor_array_read_write_op.cc | 4 +- paddle/fluid/operators/top_k_op.cc | 2 +- paddle/fluid/operators/top_k_op.cu | 4 +- paddle/fluid/operators/top_k_op.h | 4 +- paddle/fluid/operators/transpose_op.cc | 2 +- paddle/fluid/operators/transpose_op.cu.cc | 2 +- paddle/fluid/operators/transpose_op.h | 4 +- paddle/fluid/operators/uniform_random_op.cc | 4 +- paddle/fluid/operators/uniform_random_op.cu | 4 +- paddle/fluid/operators/unpool_op.cc | 2 +- paddle/fluid/operators/unpool_op.cu.cc | 2 +- paddle/fluid/operators/unpool_op.h | 6 +- paddle/fluid/operators/warpctc_op.cc | 2 +- paddle/fluid/operators/warpctc_op.cu.cc | 2 +- paddle/fluid/operators/warpctc_op.h | 10 +- paddle/fluid/operators/while_op.cc | 10 +- paddle/fluid/platform/cpu_info.cc | 2 +- paddle/fluid/platform/cpu_info_test.cc | 2 +- paddle/fluid/platform/cudnn_helper.h | 6 +- paddle/fluid/platform/cudnn_helper_test.cc | 2 +- paddle/fluid/platform/device_context.cc | 4 +- paddle/fluid/platform/device_context.h | 12 +- paddle/fluid/platform/device_context_test.cu | 2 +- paddle/fluid/platform/dynload/cublas.cc | 2 +- paddle/fluid/platform/dynload/cublas.h | 2 +- paddle/fluid/platform/dynload/cudnn.cc | 4 +- paddle/fluid/platform/dynload/cudnn.h | 2 +- paddle/fluid/platform/dynload/curand.cc | 2 +- paddle/fluid/platform/dynload/curand.h | 2 +- .../fluid/platform/dynload/dynamic_loader.cc | 4 +- paddle/fluid/platform/dynload/nccl.cc | 2 +- paddle/fluid/platform/dynload/nccl.h | 4 +- paddle/fluid/platform/dynload/warpctc.cc | 2 +- paddle/fluid/platform/dynload/warpctc.h | 2 +- paddle/fluid/platform/enforce.cc | 2 +- paddle/fluid/platform/enforce.h | 10 +- paddle/fluid/platform/enforce_test.cc | 2 +- paddle/fluid/platform/for_range.h | 2 +- paddle/fluid/platform/gpu_info.cc | 4 +- paddle/fluid/platform/nccl_test.cu | 10 +- paddle/fluid/platform/place.cc | 2 +- paddle/fluid/platform/place.h | 4 +- paddle/fluid/platform/place_test.cc | 2 +- paddle/fluid/platform/profiler.cc | 2 +- paddle/fluid/platform/profiler.h | 2 +- paddle/fluid/platform/profiler_test.cc | 2 +- paddle/fluid/platform/transform.h | 10 +- paddle/fluid/platform/transform_test.cu | 8 +- paddle/fluid/pybind/.clang-format | 6 +- paddle/fluid/pybind/const_value.cc | 2 +- paddle/fluid/pybind/const_value.h | 2 +- paddle/fluid/pybind/exception.cc | 2 +- paddle/fluid/pybind/exception.h | 2 +- paddle/fluid/pybind/protobuf.cc | 12 +- paddle/fluid/pybind/protobuf.h | 2 +- paddle/fluid/pybind/pybind.cc | 46 +++---- paddle/fluid/pybind/tensor_py.h | 6 +- paddle/math/float16.h | 2 +- paddle/testing/paddle_gtest_main.cc | 4 +- 649 files changed, 1534 insertions(+), 1404 deletions(-) mode change 120000 => 100644 paddle/fluid/framework/tensor_util.cu mode change 120000 => 100644 paddle/fluid/memory/.clang-format mode change 120000 => 100644 paddle/fluid/operators/.clang-format mode change 120000 => 100644 paddle/fluid/pybind/.clang-format diff --git a/paddle/fluid/framework/attribute.cc b/paddle/fluid/framework/attribute.cc index 5074e8f5a05..1d7e7366b07 100644 --- a/paddle/fluid/framework/attribute.cc +++ b/paddle/fluid/framework/attribute.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/attribute.h" +#include "paddle/fluid/framework/attribute.h" #include diff --git a/paddle/fluid/framework/attribute.h b/paddle/fluid/framework/attribute.h index bcff9bc4c48..16be42ae714 100644 --- a/paddle/fluid/framework/attribute.h +++ b/paddle/fluid/framework/attribute.h @@ -20,9 +20,9 @@ limitations under the License. */ #include #include -#include "paddle/framework/framework.pb.h" -#include "paddle/framework/type_defs.h" -#include "paddle/platform/enforce.h" +#include "paddle/fluid/framework/framework.pb.h" +#include "paddle/fluid/framework/type_defs.h" +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/backward.cc b/paddle/fluid/framework/backward.cc index f52a51519fc..c4795f4fc5c 100644 --- a/paddle/fluid/framework/backward.cc +++ b/paddle/fluid/framework/backward.cc @@ -12,17 +12,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/backward.h" -#include "paddle/operators/net_op.h" +#include "paddle/fluid/framework/backward.h" +#include "paddle/fluid/operators/net_op.h" #include #include #include #include -#include "paddle/framework/block_desc.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/net_op.h" +#include "paddle/fluid/framework/block_desc.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/net_op.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/backward.h b/paddle/fluid/framework/backward.h index 69ee3802369..2ea6922426e 100644 --- a/paddle/fluid/framework/backward.h +++ b/paddle/fluid/framework/backward.h @@ -18,8 +18,8 @@ limitations under the License. */ #include #include -#include "paddle/framework/operator.h" -#include "paddle/framework/program_desc.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/framework/program_desc.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/backward_test.cc b/paddle/fluid/framework/backward_test.cc index 72743b5fd0b..f9604c68913 100644 --- a/paddle/fluid/framework/backward_test.cc +++ b/paddle/fluid/framework/backward_test.cc @@ -12,14 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/framework/backward.h" +#include "paddle/fluid/framework/backward.h" #include -#include "paddle/framework/block_desc.h" -#include "paddle/framework/op_desc.h" -#include "paddle/framework/op_registry.h" -#include "paddle/framework/var_desc.h" -#include "paddle/operators/net_op.h" +#include "paddle/fluid/framework/block_desc.h" +#include "paddle/fluid/framework/op_desc.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/var_desc.h" +#include "paddle/fluid/operators/net_op.h" USE_NO_KERNEL_OP(fill_constant); diff --git a/paddle/fluid/framework/block_desc.cc b/paddle/fluid/framework/block_desc.cc index 3e344ea3790..9550159155c 100644 --- a/paddle/fluid/framework/block_desc.cc +++ b/paddle/fluid/framework/block_desc.cc @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/block_desc.h" -#include "paddle/framework/operator.h" -#include "paddle/framework/program_desc.h" +#include "paddle/fluid/framework/block_desc.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/framework/program_desc.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/block_desc.h b/paddle/fluid/framework/block_desc.h index 4b609e4bcb6..5f7eca3878f 100644 --- a/paddle/fluid/framework/block_desc.h +++ b/paddle/fluid/framework/block_desc.h @@ -20,10 +20,10 @@ limitations under the License. */ #include #include -#include "paddle/framework/op_desc.h" -#include "paddle/framework/proto_desc.h" -#include "paddle/framework/var_desc.h" -#include "paddle/platform/macros.h" +#include "paddle/fluid/framework/op_desc.h" +#include "paddle/fluid/framework/proto_desc.h" +#include "paddle/fluid/framework/var_desc.h" +#include "paddle/fluid/platform/macros.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/channel.h b/paddle/fluid/framework/channel.h index 146f0e9e71e..5acf4fb39bb 100644 --- a/paddle/fluid/framework/channel.h +++ b/paddle/fluid/framework/channel.h @@ -54,5 +54,5 @@ void CloseChannel(Channel* ch) { } // namespace framework } // namespace paddle -#include "paddle/framework/details/buffered_channel.h" -#include "paddle/framework/details/unbuffered_channel.h" +#include "paddle/fluid/framework/details/buffered_channel.h" +#include "paddle/fluid/framework/details/unbuffered_channel.h" diff --git a/paddle/fluid/framework/channel_test.cc b/paddle/fluid/framework/channel_test.cc index d7140dd1066..953fa40fec8 100644 --- a/paddle/fluid/framework/channel_test.cc +++ b/paddle/fluid/framework/channel_test.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/channel.h" +#include "paddle/fluid/framework/channel.h" #include #include diff --git a/paddle/fluid/framework/data_device_transform.cc b/paddle/fluid/framework/data_device_transform.cc index 5daf5a4e0ab..3c6dd28455b 100644 --- a/paddle/fluid/framework/data_device_transform.cc +++ b/paddle/fluid/framework/data_device_transform.cc @@ -11,7 +11,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/data_device_transform.h" +#include "paddle/fluid/framework/data_device_transform.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/data_device_transform.h b/paddle/fluid/framework/data_device_transform.h index 39750a85f27..0c4559f586a 100644 --- a/paddle/fluid/framework/data_device_transform.h +++ b/paddle/fluid/framework/data_device_transform.h @@ -13,10 +13,10 @@ limitations under the License. */ #pragma once -#include "paddle/framework/lod_tensor.h" -#include "paddle/framework/tensor.h" -#include "paddle/framework/tensor_util.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/framework/tensor_util.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/data_device_transform_test.cu b/paddle/fluid/framework/data_device_transform_test.cu index efc05b3106b..f740f9b3268 100644 --- a/paddle/fluid/framework/data_device_transform_test.cu +++ b/paddle/fluid/framework/data_device_transform_test.cu @@ -14,13 +14,13 @@ limitations under the License. */ #include "gtest/gtest.h" -#include "paddle/framework/init.h" -#include "paddle/framework/lod_tensor.h" -#include "paddle/framework/op_info.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/elementwise_op_function.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/init.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_info.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/elementwise_op_function.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/data_layout.h b/paddle/fluid/framework/data_layout.h index 31817251ed0..b72f13f2e8f 100644 --- a/paddle/fluid/framework/data_layout.h +++ b/paddle/fluid/framework/data_layout.h @@ -17,7 +17,7 @@ limitations under the License. */ #include #include -#include "paddle/platform/enforce.h" +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/data_layout_transform.cc b/paddle/fluid/framework/data_layout_transform.cc index 9d0a6d5ea3e..c546a508fe1 100644 --- a/paddle/fluid/framework/data_layout_transform.cc +++ b/paddle/fluid/framework/data_layout_transform.cc @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/framework/data_layout_transform.h" +#include "paddle/fluid/framework/data_layout_transform.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/data_layout_transform.h b/paddle/fluid/framework/data_layout_transform.h index 368f7fc9898..862405fbf46 100644 --- a/paddle/fluid/framework/data_layout_transform.h +++ b/paddle/fluid/framework/data_layout_transform.h @@ -14,9 +14,9 @@ #pragma once -#include "paddle/framework/op_kernel_type.h" -#include "paddle/framework/tensor.h" -#include "paddle/framework/variable.h" +#include "paddle/fluid/framework/op_kernel_type.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/framework/variable.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/data_layout_transform_test.cc b/paddle/fluid/framework/data_layout_transform_test.cc index 093e8d4d345..99eb46bde34 100644 --- a/paddle/fluid/framework/data_layout_transform_test.cc +++ b/paddle/fluid/framework/data_layout_transform_test.cc @@ -12,10 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/framework/data_layout_transform.h" +#include "paddle/fluid/framework/data_layout_transform.h" #include "gtest/gtest.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/platform/device_context.h" TEST(DataTransform, DataLayoutFunction) { using namespace paddle::framework; diff --git a/paddle/fluid/framework/data_transform.cc b/paddle/fluid/framework/data_transform.cc index b6fd46401ff..9575d01af88 100644 --- a/paddle/fluid/framework/data_transform.cc +++ b/paddle/fluid/framework/data_transform.cc @@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/data_transform.h" +#include "paddle/fluid/framework/data_transform.h" -#include "paddle/framework/data_device_transform.h" -#include "paddle/framework/data_layout_transform.h" -#include "paddle/framework/data_type_transform.h" +#include "paddle/fluid/framework/data_device_transform.h" +#include "paddle/fluid/framework/data_layout_transform.h" +#include "paddle/fluid/framework/data_type_transform.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/data_transform.h b/paddle/fluid/framework/data_transform.h index a4b78902379..70d3a174acc 100644 --- a/paddle/fluid/framework/data_transform.h +++ b/paddle/fluid/framework/data_transform.h @@ -18,14 +18,14 @@ limitations under the License. */ #include #include -#include "paddle/framework/op_kernel_type.h" -#include "paddle/framework/selected_rows.h" -#include "paddle/framework/tensor.h" -#include "paddle/framework/variable.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/platform/device_context.h" -#include "paddle/platform/macros.h" -#include "paddle/platform/transform.h" +#include "paddle/fluid/framework/op_kernel_type.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/framework/variable.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/macros.h" +#include "paddle/fluid/platform/transform.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/data_type.h b/paddle/fluid/framework/data_type.h index 98eb3e857d1..7a527f0d0c1 100644 --- a/paddle/fluid/framework/data_type.h +++ b/paddle/fluid/framework/data_type.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once #include -#include "paddle/framework/framework.pb.h" -#include "paddle/platform/enforce.h" +#include "paddle/fluid/framework/framework.pb.h" +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/data_type_transform.cc b/paddle/fluid/framework/data_type_transform.cc index 7df1cc6b75b..6921927305a 100644 --- a/paddle/fluid/framework/data_type_transform.cc +++ b/paddle/fluid/framework/data_type_transform.cc @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/data_type_transform.h" +#include "paddle/fluid/framework/data_type_transform.h" -#include "paddle/framework/selected_rows.h" -#include "paddle/platform/transform.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/platform/transform.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/data_type_transform.h b/paddle/fluid/framework/data_type_transform.h index 067c0c2a5b1..830cced0939 100644 --- a/paddle/fluid/framework/data_type_transform.h +++ b/paddle/fluid/framework/data_type_transform.h @@ -14,10 +14,10 @@ limitations under the License. */ #pragma once -#include "paddle/framework/op_kernel_type.h" -#include "paddle/framework/tensor.h" -#include "paddle/framework/variable.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/op_kernel_type.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/framework/variable.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/data_type_transform_test.cc b/paddle/fluid/framework/data_type_transform_test.cc index 89d32f52833..88dbc51b217 100644 --- a/paddle/fluid/framework/data_type_transform_test.cc +++ b/paddle/fluid/framework/data_type_transform_test.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/data_type_transform.h" +#include "paddle/fluid/framework/data_type_transform.h" #include "gtest/gtest.h" diff --git a/paddle/fluid/framework/ddim.cc b/paddle/fluid/framework/ddim.cc index 8b6f42b82df..f063ee2e6dd 100644 --- a/paddle/fluid/framework/ddim.cc +++ b/paddle/fluid/framework/ddim.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/ddim.h" -#include "paddle/platform/enforce.h" +#include "paddle/fluid/framework/ddim.h" +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/ddim.h b/paddle/fluid/framework/ddim.h index 4ca5e49566b..750ab787abb 100644 --- a/paddle/fluid/framework/ddim.h +++ b/paddle/fluid/framework/ddim.h @@ -17,9 +17,9 @@ limitations under the License. */ #include #include #include -#include "paddle/framework/dim.h" -#include "paddle/platform/enforce.h" -#include "paddle/platform/variant.h" +#include "paddle/fluid/framework/dim.h" +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/variant.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/ddim_test.cc b/paddle/fluid/framework/ddim_test.cc index bc259d1f603..18d305a4036 100644 --- a/paddle/fluid/framework/ddim_test.cc +++ b/paddle/fluid/framework/ddim_test.cc @@ -15,7 +15,7 @@ limitations under the License. */ #include #include "gtest/gtest.h" -#include "paddle/framework/ddim.h" +#include "paddle/fluid/framework/ddim.h" TEST(DDim, Equality) { // construct a DDim from an initialization list diff --git a/paddle/fluid/framework/details/buffered_channel.h b/paddle/fluid/framework/details/buffered_channel.h index 227a4e4811f..88faf3acf7c 100644 --- a/paddle/fluid/framework/details/buffered_channel.h +++ b/paddle/fluid/framework/details/buffered_channel.h @@ -18,8 +18,8 @@ limitations under the License. */ #include #include -#include "paddle/framework/channel.h" -#include "paddle/platform/enforce.h" +#include "paddle/fluid/framework/channel.h" +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/details/cow_ptr_test.cc b/paddle/fluid/framework/details/cow_ptr_test.cc index 1f4a12bca0d..d2142af277c 100644 --- a/paddle/fluid/framework/details/cow_ptr_test.cc +++ b/paddle/fluid/framework/details/cow_ptr_test.cc @@ -12,7 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/details/cow_ptr.h" +#include "paddle/fluid/framework/details/cow_ptr.h" #include "gtest/gtest.h" namespace paddle { diff --git a/paddle/fluid/framework/details/op_registry.h b/paddle/fluid/framework/details/op_registry.h index 31a40bcbcb3..d73604ad185 100644 --- a/paddle/fluid/framework/details/op_registry.h +++ b/paddle/fluid/framework/details/op_registry.h @@ -14,11 +14,11 @@ limitations under the License. */ #pragma once -#include "paddle/framework/grad_op_desc_maker.h" -#include "paddle/framework/op_info.h" -#include "paddle/framework/op_proto_maker.h" -#include "paddle/framework/operator.h" -#include "paddle/framework/var_type_inference.h" +#include "paddle/fluid/framework/grad_op_desc_maker.h" +#include "paddle/fluid/framework/op_info.h" +#include "paddle/fluid/framework/op_proto_maker.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/framework/var_type_inference.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/details/unbuffered_channel.h b/paddle/fluid/framework/details/unbuffered_channel.h index 6b5c2196cb2..5c9424928cb 100644 --- a/paddle/fluid/framework/details/unbuffered_channel.h +++ b/paddle/fluid/framework/details/unbuffered_channel.h @@ -17,7 +17,7 @@ limitations under the License. */ #include #include -#include "paddle/framework/channel.h" +#include "paddle/fluid/framework/channel.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/dim.h b/paddle/fluid/framework/dim.h index ec17d7c6156..3938fd3df5b 100644 --- a/paddle/fluid/framework/dim.h +++ b/paddle/fluid/framework/dim.h @@ -18,8 +18,8 @@ #include #include -#include "paddle/platform/assert.h" -#include "paddle/platform/hostdevice.h" +#include "paddle/fluid/platform/assert.h" +#include "paddle/fluid/platform/hostdevice.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/dim_test.cu b/paddle/fluid/framework/dim_test.cu index 2bcab7c5c2e..0f1969d7977 100644 --- a/paddle/fluid/framework/dim_test.cu +++ b/paddle/fluid/framework/dim_test.cu @@ -15,7 +15,7 @@ #include #include "gtest/gtest.h" -#include "paddle/framework/dim.h" +#include "paddle/fluid/framework/dim.h" __global__ void test(paddle::framework::Dim<2>* o) { o[0] = paddle::framework::make_dim(5, 6); diff --git a/paddle/fluid/framework/eigen.h b/paddle/fluid/framework/eigen.h index 54bbeafcabd..d1b8c701a79 100644 --- a/paddle/fluid/framework/eigen.h +++ b/paddle/fluid/framework/eigen.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#include "paddle/framework/tensor.h" +#include "paddle/fluid/framework/tensor.h" #include "unsupported/Eigen/CXX11/Tensor" namespace paddle { diff --git a/paddle/fluid/framework/eigen_test.cc b/paddle/fluid/framework/eigen_test.cc index 9e368a522ce..f9e3abeccb3 100644 --- a/paddle/fluid/framework/eigen_test.cc +++ b/paddle/fluid/framework/eigen_test.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/framework/eigen.h" +#include "paddle/fluid/framework/eigen.h" #include namespace paddle { diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index 2a88e5a9298..816ad8d6590 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -12,19 +12,19 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/executor.h" +#include "paddle/fluid/framework/executor.h" #include #include "gflags/gflags.h" -#include "paddle/framework/feed_fetch_method.h" -#include "paddle/framework/feed_fetch_type.h" -#include "paddle/framework/lod_rank_table.h" -#include "paddle/framework/lod_tensor_array.h" -#include "paddle/framework/op_registry.h" -#include "paddle/framework/reader.h" -#include "paddle/platform/place.h" -#include "paddle/platform/profiler.h" +#include "paddle/fluid/framework/feed_fetch_method.h" +#include "paddle/fluid/framework/feed_fetch_type.h" +#include "paddle/fluid/framework/lod_rank_table.h" +#include "paddle/fluid/framework/lod_tensor_array.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/reader.h" +#include "paddle/fluid/platform/place.h" +#include "paddle/fluid/platform/profiler.h" DECLARE_bool(benchmark); DEFINE_bool(check_nan_inf, false, diff --git a/paddle/fluid/framework/executor.h b/paddle/fluid/framework/executor.h index 035ff48a52b..893c949939e 100644 --- a/paddle/fluid/framework/executor.h +++ b/paddle/fluid/framework/executor.h @@ -14,11 +14,11 @@ limitations under the License. */ #pragma once -#include "paddle/framework/op_info.h" -#include "paddle/framework/program_desc.h" -#include "paddle/framework/scope.h" -#include "paddle/framework/tensor.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/op_info.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/feed_fetch_method.cc b/paddle/fluid/framework/feed_fetch_method.cc index 21201b67551..a9bb17355d9 100644 --- a/paddle/fluid/framework/feed_fetch_method.cc +++ b/paddle/fluid/framework/feed_fetch_method.cc @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/feed_fetch_method.h" +#include "paddle/fluid/framework/feed_fetch_method.h" #include "glog/logging.h" -#include "paddle/framework/variable.h" +#include "paddle/fluid/framework/variable.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/feed_fetch_method.h b/paddle/fluid/framework/feed_fetch_method.h index b71945fcc88..5355c29047e 100644 --- a/paddle/fluid/framework/feed_fetch_method.h +++ b/paddle/fluid/framework/feed_fetch_method.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/framework/feed_fetch_type.h" -#include "paddle/framework/scope.h" +#include "paddle/fluid/framework/feed_fetch_type.h" +#include "paddle/fluid/framework/scope.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/feed_fetch_type.h b/paddle/fluid/framework/feed_fetch_type.h index 168f456675a..4281e36b138 100644 --- a/paddle/fluid/framework/feed_fetch_type.h +++ b/paddle/fluid/framework/feed_fetch_type.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include #include -#include "paddle/framework/lod_tensor.h" +#include "paddle/fluid/framework/lod_tensor.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/grad_op_desc_maker.h b/paddle/fluid/framework/grad_op_desc_maker.h index f51753453be..21dd4e88548 100644 --- a/paddle/fluid/framework/grad_op_desc_maker.h +++ b/paddle/fluid/framework/grad_op_desc_maker.h @@ -16,8 +16,8 @@ limitations under the License. */ #include #include #include -#include "paddle/framework/op_desc.h" -#include "paddle/framework/operator.h" +#include "paddle/fluid/framework/op_desc.h" +#include "paddle/fluid/framework/operator.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/init.cc b/paddle/fluid/framework/init.cc index 3f6ea121b39..cb2d740d860 100644 --- a/paddle/fluid/framework/init.cc +++ b/paddle/fluid/framework/init.cc @@ -16,10 +16,10 @@ limitations under the License. */ #include #include -#include "paddle/framework/init.h" -#include "paddle/framework/operator.h" -#include "paddle/platform/device_context.h" -#include "paddle/platform/place.h" +#include "paddle/fluid/framework/init.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/place.h" #include "paddle/string/piece.h" namespace paddle { diff --git a/paddle/fluid/framework/init_test.cc b/paddle/fluid/framework/init_test.cc index 01e076dd8ea..f3018541e27 100644 --- a/paddle/fluid/framework/init_test.cc +++ b/paddle/fluid/framework/init_test.cc @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "gtest/gtest.h" -#include "paddle/framework/init.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/init.h" +#include "paddle/fluid/platform/device_context.h" TEST(InitDevices, CPU) { using paddle::framework::InitDevices; diff --git a/paddle/fluid/framework/lod_rank_table.cc b/paddle/fluid/framework/lod_rank_table.cc index 704bce2a0eb..31c87492349 100644 --- a/paddle/fluid/framework/lod_rank_table.cc +++ b/paddle/fluid/framework/lod_rank_table.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/lod_rank_table.h" +#include "paddle/fluid/framework/lod_rank_table.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/lod_rank_table.h b/paddle/fluid/framework/lod_rank_table.h index df188709e91..0eaaf49e4c4 100644 --- a/paddle/fluid/framework/lod_rank_table.h +++ b/paddle/fluid/framework/lod_rank_table.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once #include -#include "paddle/framework/lod_tensor.h" +#include "paddle/fluid/framework/lod_tensor.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/lod_tensor.cc b/paddle/fluid/framework/lod_tensor.cc index cb27de69916..05c67e453d0 100644 --- a/paddle/fluid/framework/lod_tensor.cc +++ b/paddle/fluid/framework/lod_tensor.cc @@ -12,12 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/lod_tensor.h" -#include "paddle/framework/data_type.h" -#include "paddle/framework/framework.pb.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/framework.pb.h" -#include "paddle/memory/memcpy.h" -#include "paddle/memory/memory.h" +#include "paddle/fluid/memory/memcpy.h" +#include "paddle/fluid/memory/memory.h" #include #include diff --git a/paddle/fluid/framework/lod_tensor.h b/paddle/fluid/framework/lod_tensor.h index 9de454428d9..1509a9fb134 100644 --- a/paddle/fluid/framework/lod_tensor.h +++ b/paddle/fluid/framework/lod_tensor.h @@ -21,12 +21,12 @@ limitations under the License. */ #endif #include -#include "paddle/framework/ddim.h" -#include "paddle/framework/mixed_vector.h" -#include "paddle/framework/tensor.h" -#include "paddle/framework/tensor_util.h" -#include "paddle/platform/enforce.h" -#include "paddle/platform/place.h" +#include "paddle/fluid/framework/ddim.h" +#include "paddle/fluid/framework/mixed_vector.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/framework/tensor_util.h" +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/place.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/lod_tensor_array.h b/paddle/fluid/framework/lod_tensor_array.h index 4a8e7f4fa54..652513bd225 100644 --- a/paddle/fluid/framework/lod_tensor_array.h +++ b/paddle/fluid/framework/lod_tensor_array.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once #include -#include "paddle/framework/lod_tensor.h" +#include "paddle/fluid/framework/lod_tensor.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/lod_tensor_test.cc b/paddle/fluid/framework/lod_tensor_test.cc index 3b63020e685..7e0ed2495d6 100644 --- a/paddle/fluid/framework/lod_tensor_test.cc +++ b/paddle/fluid/framework/lod_tensor_test.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/framework/lod_tensor.h" +#include "paddle/fluid/framework/lod_tensor.h" #include #include diff --git a/paddle/fluid/framework/lod_tensor_test.cu b/paddle/fluid/framework/lod_tensor_test.cu index a28b7caf86c..4dd7810c1b2 100644 --- a/paddle/fluid/framework/lod_tensor_test.cu +++ b/paddle/fluid/framework/lod_tensor_test.cu @@ -15,12 +15,12 @@ #include #include #include -#include "paddle/framework/init.h" -#include "paddle/framework/lod_tensor.h" -#include "paddle/platform/assert.h" -#include -#include +#include "gtest/gtest.h" +#include "paddle/fluid/framework/init.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/platform/assert.h" +#include "paddle/fluid/platform/place.h" __global__ void test(size_t* a, int size) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; diff --git a/paddle/fluid/framework/mixed_vector.h b/paddle/fluid/framework/mixed_vector.h index f776f0317a2..9756754260d 100644 --- a/paddle/fluid/framework/mixed_vector.h +++ b/paddle/fluid/framework/mixed_vector.h @@ -17,8 +17,8 @@ #include #include -#include "paddle/framework/tensor.h" -#include "paddle/framework/tensor_util.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/framework/tensor_util.h" #include "glog/logging.h" diff --git a/paddle/fluid/framework/mixed_vector_test.cu b/paddle/fluid/framework/mixed_vector_test.cu index f02db8f612c..a8906452566 100644 --- a/paddle/fluid/framework/mixed_vector_test.cu +++ b/paddle/fluid/framework/mixed_vector_test.cu @@ -15,8 +15,8 @@ #include "glog/logging.h" #include "gtest/gtest.h" -#include "paddle/framework/mixed_vector.h" -#include "paddle/platform/gpu_info.h" +#include "paddle/fluid/framework/mixed_vector.h" +#include "paddle/fluid/platform/gpu_info.h" template using vec = paddle::framework::Vector; diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc index b51afe499bb..b7847326005 100644 --- a/paddle/fluid/framework/op_desc.cc +++ b/paddle/fluid/framework/op_desc.cc @@ -12,15 +12,15 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/op_desc.h" +#include "paddle/fluid/framework/op_desc.h" #include #include #include #include "glog/logging.h" -#include "paddle/framework/block_desc.h" -#include "paddle/framework/operator.h" -#include "paddle/framework/program_desc.h" -#include "paddle/framework/shape_inference.h" +#include "paddle/fluid/framework/block_desc.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/framework/shape_inference.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/op_desc.h b/paddle/fluid/framework/op_desc.h index 13695cff59f..698df829e56 100644 --- a/paddle/fluid/framework/op_desc.h +++ b/paddle/fluid/framework/op_desc.h @@ -16,9 +16,9 @@ limitations under the License. */ #include #include -#include "paddle/framework/attribute.h" -#include "paddle/framework/type_defs.h" -#include "paddle/framework/var_desc.h" +#include "paddle/fluid/framework/attribute.h" +#include "paddle/fluid/framework/type_defs.h" +#include "paddle/fluid/framework/var_desc.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/op_info.cc b/paddle/fluid/framework/op_info.cc index b520108109b..703c9c3234b 100644 --- a/paddle/fluid/framework/op_info.cc +++ b/paddle/fluid/framework/op_info.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/op_info.h" +#include "paddle/fluid/framework/op_info.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/op_info.h b/paddle/fluid/framework/op_info.h index d9b89f9cac9..e6b3ff9e653 100644 --- a/paddle/fluid/framework/op_info.h +++ b/paddle/fluid/framework/op_info.h @@ -18,9 +18,9 @@ limitations under the License. */ #include #include -#include "paddle/framework/attribute.h" -#include "paddle/framework/type_defs.h" -#include "paddle/platform/macros.h" +#include "paddle/fluid/framework/attribute.h" +#include "paddle/fluid/framework/type_defs.h" +#include "paddle/fluid/platform/macros.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/op_kernel_type.h b/paddle/fluid/framework/op_kernel_type.h index 44adb94d2a8..b5dbff26d7e 100644 --- a/paddle/fluid/framework/op_kernel_type.h +++ b/paddle/fluid/framework/op_kernel_type.h @@ -14,11 +14,11 @@ limitations under the License. */ #pragma once -#include "paddle/framework/data_layout.h" -#include "paddle/framework/data_type.h" -#include "paddle/framework/library_type.h" -#include "paddle/platform/device_context.h" -#include "paddle/platform/place.h" +#include "paddle/fluid/framework/data_layout.h" +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/library_type.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/place.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/op_kernel_type_test.cc b/paddle/fluid/framework/op_kernel_type_test.cc index cb23bbde014..64096907df5 100644 --- a/paddle/fluid/framework/op_kernel_type_test.cc +++ b/paddle/fluid/framework/op_kernel_type_test.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/op_kernel_type.h" +#include "paddle/fluid/framework/op_kernel_type.h" #include #include diff --git a/paddle/fluid/framework/op_proto_maker.cc b/paddle/fluid/framework/op_proto_maker.cc index 151d61d5b17..0a779b10b49 100644 --- a/paddle/fluid/framework/op_proto_maker.cc +++ b/paddle/fluid/framework/op_proto_maker.cc @@ -11,7 +11,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/op_proto_maker.h" +#include "paddle/fluid/framework/op_proto_maker.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/op_proto_maker.h b/paddle/fluid/framework/op_proto_maker.h index efd3a5ca535..1dbfc7d37be 100644 --- a/paddle/fluid/framework/op_proto_maker.h +++ b/paddle/fluid/framework/op_proto_maker.h @@ -13,8 +13,8 @@ limitations under the License. */ #pragma once -#include "paddle/framework/attribute.h" -#include "paddle/framework/framework.pb.h" +#include "paddle/fluid/framework/attribute.h" +#include "paddle/fluid/framework/framework.pb.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/op_proto_maker_test.cc b/paddle/fluid/framework/op_proto_maker_test.cc index f16cb6fa3aa..cfefee8dbde 100644 --- a/paddle/fluid/framework/op_proto_maker_test.cc +++ b/paddle/fluid/framework/op_proto_maker_test.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/op_proto_maker.h" +#include "paddle/fluid/framework/op_proto_maker.h" #include "gtest/gtest.h" diff --git a/paddle/fluid/framework/op_registry.cc b/paddle/fluid/framework/op_registry.cc index dfa151316da..739ec72ebc1 100644 --- a/paddle/fluid/framework/op_registry.cc +++ b/paddle/fluid/framework/op_registry.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include +#include "paddle/fluid/framework/op_registry.h" #include diff --git a/paddle/fluid/framework/op_registry.h b/paddle/fluid/framework/op_registry.h index 6fb8532b2a8..73faa99668a 100644 --- a/paddle/fluid/framework/op_registry.h +++ b/paddle/fluid/framework/op_registry.h @@ -22,14 +22,14 @@ limitations under the License. */ #include #include "glog/logging.h" // For VLOG() -#include "paddle/framework/attribute.h" -#include "paddle/framework/details/op_registry.h" -#include "paddle/framework/framework.pb.h" -#include "paddle/framework/grad_op_desc_maker.h" -#include "paddle/framework/op_desc.h" -#include "paddle/framework/operator.h" -#include "paddle/framework/scope.h" -#include "paddle/framework/shape_inference.h" +#include "paddle/fluid/framework/attribute.h" +#include "paddle/fluid/framework/details/op_registry.h" +#include "paddle/fluid/framework/framework.pb.h" +#include "paddle/fluid/framework/grad_op_desc_maker.h" +#include "paddle/fluid/framework/op_desc.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/shape_inference.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/op_registry_test.cc b/paddle/fluid/framework/op_registry_test.cc index 341da8befd4..bfbb2cfc2c5 100644 --- a/paddle/fluid/framework/op_registry_test.cc +++ b/paddle/fluid/framework/op_registry_test.cc @@ -15,7 +15,7 @@ #include #include -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/op_registry.h" namespace pd = paddle::framework; diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index 52387aabd9d..07c65ae926f 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -16,11 +16,11 @@ limitations under the License. */ #include -#include "paddle/framework/data_transform.h" -#include "paddle/framework/executor.h" -#include "paddle/framework/operator.h" -#include "paddle/framework/shape_inference.h" -#include "paddle/framework/var_type.h" +#include "paddle/fluid/framework/data_transform.h" +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/framework/shape_inference.h" +#include "paddle/fluid/framework/var_type.h" DECLARE_bool(benchmark); diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index c9140f304c8..52300abeb7d 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -22,17 +22,17 @@ limitations under the License. */ #include #include "glog/logging.h" // For VLOG -#include "paddle/framework/attribute.h" -#include "paddle/framework/block_desc.h" -#include "paddle/framework/framework.pb.h" -#include "paddle/framework/lod_tensor.h" -#include "paddle/framework/op_info.h" -#include "paddle/framework/op_kernel_type.h" -#include "paddle/framework/scope.h" -#include "paddle/framework/selected_rows.h" -#include "paddle/framework/tensor.h" -#include "paddle/platform/device_context.h" -#include "paddle/platform/variant.h" +#include "paddle/fluid/framework/attribute.h" +#include "paddle/fluid/framework/block_desc.h" +#include "paddle/fluid/framework/framework.pb.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_info.h" +#include "paddle/fluid/framework/op_kernel_type.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/variant.h" #include "paddle/utils/Error.h" namespace paddle { diff --git a/paddle/fluid/framework/operator_test.cc b/paddle/fluid/framework/operator_test.cc index b69d7c7a740..b90f5538bb6 100644 --- a/paddle/fluid/framework/operator_test.cc +++ b/paddle/fluid/framework/operator_test.cc @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "gtest/gtest.h" -#include "paddle/framework/init.h" -#include "paddle/framework/op_info.h" -#include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" +#include "paddle/fluid/framework/init.h" +#include "paddle/fluid/framework/op_info.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/program_desc.cc b/paddle/fluid/framework/program_desc.cc index 0e937dda4e1..b3f2e97cd95 100644 --- a/paddle/fluid/framework/program_desc.cc +++ b/paddle/fluid/framework/program_desc.cc @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/program_desc.h" -#include "paddle/framework/block_desc.h" -#include "paddle/framework/feed_fetch_type.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/framework/block_desc.h" +#include "paddle/fluid/framework/feed_fetch_type.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/program_desc.h b/paddle/fluid/framework/program_desc.h index 8e958eab6ee..937de6ba927 100644 --- a/paddle/fluid/framework/program_desc.h +++ b/paddle/fluid/framework/program_desc.h @@ -16,10 +16,10 @@ limitations under the License. */ #include #include -#include "paddle/framework/block_desc.h" -#include "paddle/framework/framework.pb.h" -#include "paddle/framework/proto_desc.h" -#include "paddle/platform/macros.h" +#include "paddle/fluid/framework/block_desc.h" +#include "paddle/fluid/framework/framework.pb.h" +#include "paddle/fluid/framework/proto_desc.h" +#include "paddle/fluid/platform/macros.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/program_desc_test.cc b/paddle/fluid/framework/program_desc_test.cc index 9945aee31b6..afd5c9dabfb 100644 --- a/paddle/fluid/framework/program_desc_test.cc +++ b/paddle/fluid/framework/program_desc_test.cc @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/program_desc.h" +#include "paddle/fluid/framework/program_desc.h" #include "gtest/gtest.h" -#include "paddle/framework/block_desc.h" +#include "paddle/fluid/framework/block_desc.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/prune.cc b/paddle/fluid/framework/prune.cc index ddd6b993d40..79dbd3bcab4 100644 --- a/paddle/fluid/framework/prune.cc +++ b/paddle/fluid/framework/prune.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/prune.h" +#include "paddle/fluid/framework/prune.h" #include #include diff --git a/paddle/fluid/framework/prune.h b/paddle/fluid/framework/prune.h index 593292523d0..601e66b67a7 100644 --- a/paddle/fluid/framework/prune.h +++ b/paddle/fluid/framework/prune.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/framework/framework.pb.h" -#include "paddle/platform/enforce.h" +#include "paddle/fluid/framework/framework.pb.h" +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/prune_test.cc b/paddle/fluid/framework/prune_test.cc index d76c5abca94..36b76f0763e 100644 --- a/paddle/fluid/framework/prune_test.cc +++ b/paddle/fluid/framework/prune_test.cc @@ -12,15 +12,15 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/prune.h" +#include "paddle/fluid/framework/prune.h" -#include "paddle/framework/attribute.h" -#include "paddle/framework/operator.h" -#include "paddle/operators/net_op.h" +#include "paddle/fluid/framework/attribute.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/operators/net_op.h" -#include "paddle/framework/block_desc.h" -#include "paddle/framework/op_desc.h" -#include "paddle/framework/program_desc.h" +#include "paddle/fluid/framework/block_desc.h" +#include "paddle/fluid/framework/op_desc.h" +#include "paddle/fluid/framework/program_desc.h" #include diff --git a/paddle/fluid/framework/reader.cc b/paddle/fluid/framework/reader.cc index 928b661aaad..96c563814c3 100644 --- a/paddle/fluid/framework/reader.cc +++ b/paddle/fluid/framework/reader.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/framework/reader.h" +#include "paddle/fluid/framework/reader.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/reader.h b/paddle/fluid/framework/reader.h index 534894cfbd6..4a5eba5fb73 100644 --- a/paddle/fluid/framework/reader.h +++ b/paddle/fluid/framework/reader.h @@ -14,8 +14,8 @@ #pragma once -#include "paddle/framework/ddim.h" -#include "paddle/framework/lod_tensor_array.h" +#include "paddle/fluid/framework/ddim.h" +#include "paddle/fluid/framework/lod_tensor_array.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/scope.cc b/paddle/fluid/framework/scope.cc index af08b2ab816..6006ed16bd4 100644 --- a/paddle/fluid/framework/scope.cc +++ b/paddle/fluid/framework/scope.cc @@ -12,12 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/scope.h" +#include "paddle/fluid/framework/scope.h" #include // for unique_ptr #include // for call_once #include "glog/logging.h" -#include "paddle/framework/threadpool.h" +#include "paddle/fluid/framework/threadpool.h" #include "paddle/string/printf.h" DEFINE_bool(benchmark, false, diff --git a/paddle/fluid/framework/scope.h b/paddle/fluid/framework/scope.h index a1da81cc797..2da9e0716e7 100644 --- a/paddle/fluid/framework/scope.h +++ b/paddle/fluid/framework/scope.h @@ -19,8 +19,8 @@ limitations under the License. */ #include #include -#include "paddle/framework/variable.h" -#include "paddle/platform/macros.h" +#include "paddle/fluid/framework/variable.h" +#include "paddle/fluid/platform/macros.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/scope_test.cc b/paddle/fluid/framework/scope_test.cc index 0f5b86061db..d64acb130cb 100644 --- a/paddle/fluid/framework/scope_test.cc +++ b/paddle/fluid/framework/scope_test.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/scope.h" +#include "paddle/fluid/framework/scope.h" #include "glog/logging.h" #include "gtest/gtest.h" diff --git a/paddle/fluid/framework/selected_rows.cc b/paddle/fluid/framework/selected_rows.cc index 3b3e60177a4..f5d9e9a4951 100644 --- a/paddle/fluid/framework/selected_rows.cc +++ b/paddle/fluid/framework/selected_rows.cc @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/selected_rows.h" +#include "paddle/fluid/framework/selected_rows.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/selected_rows.h b/paddle/fluid/framework/selected_rows.h index 30d3dfc1e89..f1a263962b2 100644 --- a/paddle/fluid/framework/selected_rows.h +++ b/paddle/fluid/framework/selected_rows.h @@ -10,8 +10,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/lod_tensor.h" -#include "paddle/framework/tensor.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/tensor.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/selected_rows_test.cc b/paddle/fluid/framework/selected_rows_test.cc index 8ff3fb6a971..d414f2a5934 100644 --- a/paddle/fluid/framework/selected_rows_test.cc +++ b/paddle/fluid/framework/selected_rows_test.cc @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/selected_rows.h" +#include "paddle/fluid/framework/selected_rows.h" #include "gtest/gtest.h" namespace paddle { diff --git a/paddle/fluid/framework/shape_inference.cc b/paddle/fluid/framework/shape_inference.cc index 2f4d4505771..ac7fc029737 100644 --- a/paddle/fluid/framework/shape_inference.cc +++ b/paddle/fluid/framework/shape_inference.cc @@ -11,9 +11,9 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/shape_inference.h" +#include "paddle/fluid/framework/shape_inference.h" #include "grad_op_desc_maker.h" -#include "paddle/framework/operator.h" +#include "paddle/fluid/framework/operator.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/shape_inference.h b/paddle/fluid/framework/shape_inference.h index 7bee8698523..a923463ce37 100644 --- a/paddle/fluid/framework/shape_inference.h +++ b/paddle/fluid/framework/shape_inference.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once -#include "paddle/framework/attribute.h" -#include "paddle/framework/ddim.h" -#include "paddle/framework/framework.pb.h" +#include "paddle/fluid/framework/attribute.h" +#include "paddle/fluid/framework/ddim.h" +#include "paddle/fluid/framework/framework.pb.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/tensor.cc b/paddle/fluid/framework/tensor.cc index f922e606249..a56091d3c62 100644 --- a/paddle/fluid/framework/tensor.cc +++ b/paddle/fluid/framework/tensor.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/tensor.h" +#include "paddle/fluid/framework/tensor.h" namespace paddle { namespace framework {} diff --git a/paddle/fluid/framework/tensor.h b/paddle/fluid/framework/tensor.h index be09b7c9450..44d2c7dae94 100644 --- a/paddle/fluid/framework/tensor.h +++ b/paddle/fluid/framework/tensor.h @@ -20,12 +20,12 @@ limitations under the License. */ #include #include -#include "paddle/framework/data_layout.h" -#include "paddle/framework/ddim.h" -#include "paddle/memory/memory.h" -#include "paddle/platform/device_context.h" -#include "paddle/platform/enforce.h" -#include "paddle/platform/place.h" +#include "paddle/fluid/framework/data_layout.h" +#include "paddle/fluid/framework/ddim.h" +#include "paddle/fluid/memory/memory.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/place.h" namespace paddle { @@ -224,4 +224,4 @@ inline void Tensor::switch_place(platform::Place new_place) { } // namespace framework } // namespace paddle -#include "paddle/framework/tensor_impl.h" +#include "paddle/fluid/framework/tensor_impl.h" diff --git a/paddle/fluid/framework/tensor_impl.h b/paddle/fluid/framework/tensor_impl.h index 652d6b8a90e..e69836292cd 100644 --- a/paddle/fluid/framework/tensor_impl.h +++ b/paddle/fluid/framework/tensor_impl.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/memory/memcpy.h" -#include "paddle/platform/enforce.h" +#include "paddle/fluid/memory/memcpy.h" +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/tensor_test.cc b/paddle/fluid/framework/tensor_test.cc index 9a387526ac2..6ed416e46f9 100644 --- a/paddle/fluid/framework/tensor_test.cc +++ b/paddle/fluid/framework/tensor_test.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/framework/tensor.h" +#include "paddle/fluid/framework/tensor.h" #include #include diff --git a/paddle/fluid/framework/tensor_util.cc b/paddle/fluid/framework/tensor_util.cc index a5b83eaa07a..537fb4614ca 100644 --- a/paddle/fluid/framework/tensor_util.cc +++ b/paddle/fluid/framework/tensor_util.cc @@ -12,7 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/tensor_util.h" +#include "paddle/fluid/framework/tensor_util.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/tensor_util.cu b/paddle/fluid/framework/tensor_util.cu deleted file mode 120000 index b00e6e59d93..00000000000 --- a/paddle/fluid/framework/tensor_util.cu +++ /dev/null @@ -1 +0,0 @@ -./tensor_util.cc \ No newline at end of file diff --git a/paddle/fluid/framework/tensor_util.cu b/paddle/fluid/framework/tensor_util.cu new file mode 100644 index 00000000000..537fb4614ca --- /dev/null +++ b/paddle/fluid/framework/tensor_util.cu @@ -0,0 +1,119 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ + +#include "paddle/fluid/framework/tensor_util.h" + +namespace paddle { +namespace framework { +template +struct AnyDTypeVisitor { + Predicate predicate_; + const Tensor& tensor_; + const DevCtx& ctx_; + Tensor* out_; + + AnyDTypeVisitor(Predicate predicate, const Tensor& tensor, const DevCtx& ctx, + Tensor* out) + : predicate_(predicate), tensor_(tensor), ctx_(ctx), out_(out) {} + + template + void operator()() const { + auto t = EigenVector::Flatten(tensor_); + auto o = EigenScalar::From(*out_); + // return any of predicate_(t) is true. + o.device(*ctx_.eigen_device()) = predicate_(t).any(); + } +}; + +template +inline void AnyImpl(Predicate predicate, const framework::Tensor& tensor, + const DevCtx& ctx, framework::Tensor* out) { + VisitDataType(ToDataType(tensor.type()), AnyDTypeVisitor( + predicate, tensor, ctx, out)); +} + +template +struct AnyVisitor : public boost::static_visitor { + const framework::Tensor& tensor_; + Predicate predicate_; + + AnyVisitor(const framework::Tensor& tensor, Predicate predicate) + : tensor_(tensor), predicate_(std::move(predicate)) {} + + template + bool operator()(const Place& place) const { + framework::Tensor out; + out.Resize({1}); + out.mutable_data(place); + auto* ctx = platform::DeviceContextPool::Instance().GetByPlace(place); + AnyImpl(predicate_, tensor_, *ctx, &out); + return this->GetResult(out, place); + } + + bool GetResult(const framework::Tensor& out, + const platform::CUDAPlace& gpu) const { + platform::CPUPlace cpu; + framework::Tensor tmp; + tmp.Resize({1}); + tmp.mutable_data(cpu); + auto gpuctx = platform::DeviceContextPool::Instance().Get(gpu); + gpuctx->Wait(); + Copy(out, cpu, *gpuctx, &tmp); + gpuctx->Wait(); + return GetResult(tmp, cpu); + } + + bool GetResult(const framework::Tensor& out, + const platform::CPUPlace& cpu) const { + return *out.data(); + } +}; + +template +inline bool Any(const framework::Tensor& tensor, Predicate predicate) { + AnyVisitor visitor(tensor, predicate); + auto place = tensor.place(); + return platform::VisitPlace(place, visitor); +} + +struct HasNANPredicate { + template + auto operator()(const T& eigen_vec) const + -> decltype(std::declval().isnan()) { + // Cast eigen_vector to vector of bool. true if is inf. + return eigen_vec.isnan(); + } +}; + +bool HasNAN(const framework::Tensor& tensor) { + HasNANPredicate predicate; + return Any(tensor, predicate); +} + +struct HasInfPredicate { + template + auto operator()(const T& eigen_vec) const + -> decltype(std::declval().isinf()) { + // Cast eigen_vector to vector of bool. true if is inf. + return eigen_vec.isinf(); + } +}; + +bool HasInf(const framework::Tensor& tensor) { + HasInfPredicate predicate; + return Any(tensor, predicate); +} + +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/tensor_util.h b/paddle/fluid/framework/tensor_util.h index b49c6144998..b7e772b6daa 100644 --- a/paddle/fluid/framework/tensor_util.h +++ b/paddle/fluid/framework/tensor_util.h @@ -13,11 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/data_type.h" -#include "paddle/framework/eigen.h" -#include "paddle/framework/framework.pb.h" -#include "paddle/framework/tensor.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/framework.pb.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/tensor_util_test.cc b/paddle/fluid/framework/tensor_util_test.cc index 906b0b56563..8764c692e87 100644 --- a/paddle/fluid/framework/tensor_util_test.cc +++ b/paddle/fluid/framework/tensor_util_test.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/framework/tensor_util.h" +#include "paddle/fluid/framework/tensor_util.h" #include #include #include diff --git a/paddle/fluid/framework/tensor_util_test.cu b/paddle/fluid/framework/tensor_util_test.cu index ebd35fdf6c2..1982b642bcd 100644 --- a/paddle/fluid/framework/tensor_util_test.cu +++ b/paddle/fluid/framework/tensor_util_test.cu @@ -13,9 +13,9 @@ limitations under the License. */ #include "gtest/gtest.h" -#include "paddle/framework/tensor_util.h" -#include "paddle/platform/device_context.h" -#include "paddle/platform/place.h" +#include "paddle/fluid/framework/tensor_util.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/place.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/threadpool.cc b/paddle/fluid/framework/threadpool.cc index b7d7c00bcf9..2c4de41b0c4 100644 --- a/paddle/fluid/framework/threadpool.cc +++ b/paddle/fluid/framework/threadpool.cc @@ -12,9 +12,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/threadpool.h" +#include "paddle/fluid/framework/threadpool.h" -#include "paddle/platform/enforce.h" +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/threadpool.h b/paddle/fluid/framework/threadpool.h index 77d31a1176d..e88e6c01f02 100644 --- a/paddle/fluid/framework/threadpool.h +++ b/paddle/fluid/framework/threadpool.h @@ -22,8 +22,8 @@ limitations under the License. */ #include #include #include "glog/logging.h" -#include "paddle/platform/enforce.h" -#include "paddle/platform/macros.h" // for DISABLE_COPY_AND_ASSIGN +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/macros.h" // for DISABLE_COPY_AND_ASSIGN namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/type_defs.h b/paddle/fluid/framework/type_defs.h index 1eedbbc419a..786d78a6440 100644 --- a/paddle/fluid/framework/type_defs.h +++ b/paddle/fluid/framework/type_defs.h @@ -20,7 +20,7 @@ limitations under the License. */ #include #include #include -#include "paddle/platform/variant.h" +#include "paddle/fluid/platform/variant.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/var_desc.cc b/paddle/fluid/framework/var_desc.cc index 11a4daf2c99..7ec9b2ced94 100644 --- a/paddle/fluid/framework/var_desc.cc +++ b/paddle/fluid/framework/var_desc.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/var_desc.h" -#include "paddle/platform/enforce.h" +#include "paddle/fluid/framework/var_desc.h" +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/var_desc.h b/paddle/fluid/framework/var_desc.h index 72da2fbb0a6..cdb1bc3ec09 100644 --- a/paddle/fluid/framework/var_desc.h +++ b/paddle/fluid/framework/var_desc.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include "glog/logging.h" -#include "paddle/framework/framework.pb.h" +#include "paddle/fluid/framework/framework.pb.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/var_type.h b/paddle/fluid/framework/var_type.h index 599d4514902..2dc4de52981 100644 --- a/paddle/fluid/framework/var_type.h +++ b/paddle/fluid/framework/var_type.h @@ -13,13 +13,13 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/framework.pb.h" -#include "paddle/framework/lod_rank_table.h" -#include "paddle/framework/lod_tensor.h" -#include "paddle/framework/lod_tensor_array.h" -#include "paddle/framework/reader.h" -#include "paddle/framework/selected_rows.h" -#include "paddle/framework/variable.h" +#include "paddle/fluid/framework/framework.pb.h" +#include "paddle/fluid/framework/lod_rank_table.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/lod_tensor_array.h" +#include "paddle/fluid/framework/reader.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/framework/variable.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/var_type_inference.h b/paddle/fluid/framework/var_type_inference.h index 6c11f2fee7f..44fd4cd622c 100644 --- a/paddle/fluid/framework/var_type_inference.h +++ b/paddle/fluid/framework/var_type_inference.h @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/type_defs.h" +#include "paddle/fluid/framework/type_defs.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/var_type_inference_test.cc b/paddle/fluid/framework/var_type_inference_test.cc index fa6018b1c58..0ee589c821a 100644 --- a/paddle/fluid/framework/var_type_inference_test.cc +++ b/paddle/fluid/framework/var_type_inference_test.cc @@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/var_type_inference.h" +#include "paddle/fluid/framework/var_type_inference.h" #include "gtest/gtest.h" -#include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" -#include "paddle/framework/program_desc.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/framework/program_desc.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/variable.h b/paddle/fluid/framework/variable.h index 3b7ec0a2a90..9fb8ca92d68 100644 --- a/paddle/fluid/framework/variable.h +++ b/paddle/fluid/framework/variable.h @@ -17,7 +17,7 @@ #include #include -#include "paddle/platform/enforce.h" +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/variable_test.cc b/paddle/fluid/framework/variable_test.cc index e5585c8724d..8c14e506fd7 100644 --- a/paddle/fluid/framework/variable_test.cc +++ b/paddle/fluid/framework/variable_test.cc @@ -16,7 +16,7 @@ #include #include "gtest/gtest.h" -#include "paddle/framework/variable.h" +#include "paddle/fluid/framework/variable.h" TEST(Variable, GetMutable) { using paddle::framework::Variable; diff --git a/paddle/fluid/inference/io.cc b/paddle/fluid/inference/io.cc index 784e87970f7..58d7ab40bfa 100644 --- a/paddle/fluid/inference/io.cc +++ b/paddle/fluid/inference/io.cc @@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/inference/io.h" +#include "paddle/fluid/inference/io.h" #include -#include "paddle/framework/block_desc.h" -#include "paddle/framework/feed_fetch_type.h" +#include "paddle/fluid/framework/block_desc.h" +#include "paddle/fluid/framework/feed_fetch_type.h" namespace paddle { namespace inference { diff --git a/paddle/fluid/inference/io.h b/paddle/fluid/inference/io.h index a7d7c499690..9d786406064 100644 --- a/paddle/fluid/inference/io.h +++ b/paddle/fluid/inference/io.h @@ -17,9 +17,9 @@ limitations under the License. */ #include #include #include -#include "paddle/framework/executor.h" -#include "paddle/framework/program_desc.h" -#include "paddle/framework/scope.h" +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/framework/scope.h" namespace paddle { namespace inference { diff --git a/paddle/fluid/inference/tests/book/test_helper.h b/paddle/fluid/inference/tests/book/test_helper.h index 02104306e71..d7401709516 100644 --- a/paddle/fluid/inference/tests/book/test_helper.h +++ b/paddle/fluid/inference/tests/book/test_helper.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/framework/lod_tensor.h" -#include "paddle/inference/io.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/inference/io.h" template void SetupTensor(paddle::framework::LoDTensor& input, diff --git a/paddle/fluid/memory/.clang-format b/paddle/fluid/memory/.clang-format deleted file mode 120000 index 7d28cb39247..00000000000 --- a/paddle/fluid/memory/.clang-format +++ /dev/null @@ -1 +0,0 @@ -../framework/.clang-format \ No newline at end of file diff --git a/paddle/fluid/memory/.clang-format b/paddle/fluid/memory/.clang-format new file mode 100644 index 00000000000..29282dc87e2 --- /dev/null +++ b/paddle/fluid/memory/.clang-format @@ -0,0 +1,5 @@ +--- +Language: Cpp +BasedOnStyle: Google +Standard: Cpp11 +... diff --git a/paddle/fluid/memory/detail/buddy_allocator.cc b/paddle/fluid/memory/detail/buddy_allocator.cc index 2bc2c06a157..2cee8271d27 100644 --- a/paddle/fluid/memory/detail/buddy_allocator.cc +++ b/paddle/fluid/memory/detail/buddy_allocator.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/memory/detail/buddy_allocator.h" +#include "paddle/fluid/memory/detail/buddy_allocator.h" #include "glog/logging.h" namespace paddle { diff --git a/paddle/fluid/memory/detail/buddy_allocator.h b/paddle/fluid/memory/detail/buddy_allocator.h index 4e0135dd655..644d7933068 100644 --- a/paddle/fluid/memory/detail/buddy_allocator.h +++ b/paddle/fluid/memory/detail/buddy_allocator.h @@ -14,12 +14,12 @@ limitations under the License. */ #pragma once -#include "paddle/memory/detail/meta_cache.h" -#include "paddle/memory/detail/meta_data.h" -#include "paddle/memory/detail/system_allocator.h" -#include "paddle/platform/assert.h" -#include "paddle/platform/cpu_info.h" -#include "paddle/platform/gpu_info.h" +#include "paddle/fluid/memory/detail/meta_cache.h" +#include "paddle/fluid/memory/detail/meta_data.h" +#include "paddle/fluid/memory/detail/system_allocator.h" +#include "paddle/fluid/platform/assert.h" +#include "paddle/fluid/platform/cpu_info.h" +#include "paddle/fluid/platform/gpu_info.h" #include #include diff --git a/paddle/fluid/memory/detail/memory_block.cc b/paddle/fluid/memory/detail/memory_block.cc index f50eceba096..23388cdd5b7 100644 --- a/paddle/fluid/memory/detail/memory_block.cc +++ b/paddle/fluid/memory/detail/memory_block.cc @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/memory/detail/memory_block.h" -#include "paddle/memory/detail/meta_cache.h" -#include "paddle/memory/detail/meta_data.h" -#include "paddle/platform/assert.h" +#include "paddle/fluid/memory/detail/memory_block.h" +#include "paddle/fluid/memory/detail/meta_cache.h" +#include "paddle/fluid/memory/detail/meta_data.h" +#include "paddle/fluid/platform/assert.h" namespace paddle { namespace memory { diff --git a/paddle/fluid/memory/detail/meta_cache.cc b/paddle/fluid/memory/detail/meta_cache.cc index 2bacca75108..7d78811c771 100644 --- a/paddle/fluid/memory/detail/meta_cache.cc +++ b/paddle/fluid/memory/detail/meta_cache.cc @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/memory/detail/meta_cache.h" +#include "paddle/fluid/memory/detail/meta_cache.h" #include "glog/logging.h" -#include "paddle/memory/detail/memory_block.h" -#include "paddle/platform/assert.h" +#include "paddle/fluid/memory/detail/memory_block.h" +#include "paddle/fluid/platform/assert.h" namespace paddle { namespace memory { diff --git a/paddle/fluid/memory/detail/meta_cache.h b/paddle/fluid/memory/detail/meta_cache.h index db8ffd49ae3..635d6398e69 100644 --- a/paddle/fluid/memory/detail/meta_cache.h +++ b/paddle/fluid/memory/detail/meta_cache.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/memory/detail/memory_block.h" -#include "paddle/memory/detail/meta_data.h" +#include "paddle/fluid/memory/detail/memory_block.h" +#include "paddle/fluid/memory/detail/meta_data.h" #include diff --git a/paddle/fluid/memory/detail/meta_data.cc b/paddle/fluid/memory/detail/meta_data.cc index dc57d4d2376..eae49ebdcff 100644 --- a/paddle/fluid/memory/detail/meta_data.cc +++ b/paddle/fluid/memory/detail/meta_data.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/memory/detail/meta_data.h" +#include "paddle/fluid/memory/detail/meta_data.h" #include diff --git a/paddle/fluid/memory/detail/meta_data.h b/paddle/fluid/memory/detail/meta_data.h index 6b83c42eb85..368523701ef 100644 --- a/paddle/fluid/memory/detail/meta_data.h +++ b/paddle/fluid/memory/detail/meta_data.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#include "paddle/memory/detail/memory_block.h" +#include "paddle/fluid/memory/detail/memory_block.h" #include diff --git a/paddle/fluid/memory/detail/system_allocator.cc b/paddle/fluid/memory/detail/system_allocator.cc index 509250debc2..1f07c5e789c 100644 --- a/paddle/fluid/memory/detail/system_allocator.cc +++ b/paddle/fluid/memory/detail/system_allocator.cc @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/memory/detail/system_allocator.h" -#include "paddle/platform/assert.h" -#include "paddle/platform/enforce.h" -#include "paddle/platform/gpu_info.h" +#include "paddle/fluid/memory/detail/system_allocator.h" +#include "paddle/fluid/platform/assert.h" +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/gpu_info.h" #include // for malloc and free #include // for mlock and munlock diff --git a/paddle/fluid/memory/detail/system_allocator_test.cc b/paddle/fluid/memory/detail/system_allocator_test.cc index 6a8558937bf..a850e480ec9 100644 --- a/paddle/fluid/memory/detail/system_allocator_test.cc +++ b/paddle/fluid/memory/detail/system_allocator_test.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/memory/detail/system_allocator.h" +#include "paddle/fluid/memory/detail/system_allocator.h" #include #include diff --git a/paddle/fluid/memory/memcpy.cc b/paddle/fluid/memory/memcpy.cc index b46141aafd7..8938b361337 100644 --- a/paddle/fluid/memory/memcpy.cc +++ b/paddle/fluid/memory/memcpy.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/memory/memcpy.h" +#include "paddle/fluid/memory/memcpy.h" #include // for memcpy diff --git a/paddle/fluid/memory/memcpy.h b/paddle/fluid/memory/memcpy.h index 29c20e18601..77d209c3fbe 100644 --- a/paddle/fluid/memory/memcpy.h +++ b/paddle/fluid/memory/memcpy.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/platform/gpu_info.h" -#include "paddle/platform/place.h" +#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/place.h" namespace paddle { namespace memory { diff --git a/paddle/fluid/memory/memory.cc b/paddle/fluid/memory/memory.cc index 1a73a94567e..6eedab5d034 100644 --- a/paddle/fluid/memory/memory.cc +++ b/paddle/fluid/memory/memory.cc @@ -12,13 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/memory/memory.h" +#include "paddle/fluid/memory/memory.h" #include "glog/logging.h" -#include "paddle/memory/detail/buddy_allocator.h" -#include "paddle/memory/detail/system_allocator.h" -#include "paddle/platform/gpu_info.h" +#include "paddle/fluid/memory/detail/buddy_allocator.h" +#include "paddle/fluid/memory/detail/system_allocator.h" +#include "paddle/fluid/platform/gpu_info.h" DECLARE_double(fraction_of_gpu_memory_to_use); diff --git a/paddle/fluid/memory/memory.h b/paddle/fluid/memory/memory.h index 30ed68c6e0e..a9166a6746e 100644 --- a/paddle/fluid/memory/memory.h +++ b/paddle/fluid/memory/memory.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#include "paddle/platform/place.h" +#include "paddle/fluid/platform/place.h" namespace paddle { namespace memory { diff --git a/paddle/fluid/memory/memory_test.cc b/paddle/fluid/memory/memory_test.cc index b3f699f9b7e..d7505ef0f36 100644 --- a/paddle/fluid/memory/memory_test.cc +++ b/paddle/fluid/memory/memory_test.cc @@ -12,13 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/memory/memory.h" -#include "paddle/memory/detail/memory_block.h" -#include "paddle/memory/detail/meta_data.h" +#include "paddle/fluid/memory/memory.h" +#include "paddle/fluid/memory/detail/memory_block.h" +#include "paddle/fluid/memory/detail/meta_data.h" -#include "paddle/platform/cpu_info.h" -#include "paddle/platform/gpu_info.h" -#include "paddle/platform/place.h" +#include "paddle/fluid/platform/cpu_info.h" +#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/place.h" #include #include diff --git a/paddle/fluid/operators/.clang-format b/paddle/fluid/operators/.clang-format deleted file mode 120000 index 7d28cb39247..00000000000 --- a/paddle/fluid/operators/.clang-format +++ /dev/null @@ -1 +0,0 @@ -../framework/.clang-format \ No newline at end of file diff --git a/paddle/fluid/operators/.clang-format b/paddle/fluid/operators/.clang-format new file mode 100644 index 00000000000..29282dc87e2 --- /dev/null +++ b/paddle/fluid/operators/.clang-format @@ -0,0 +1,5 @@ +--- +Language: Cpp +BasedOnStyle: Google +Standard: Cpp11 +... diff --git a/paddle/fluid/operators/accuracy_op.cc b/paddle/fluid/operators/accuracy_op.cc index 8e8a3c7dd30..43689b3b7da 100644 --- a/paddle/fluid/operators/accuracy_op.cc +++ b/paddle/fluid/operators/accuracy_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/accuracy_op.h" +#include "paddle/fluid/operators/accuracy_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/accuracy_op.cu b/paddle/fluid/operators/accuracy_op.cu index 0aadd5af415..4462b9ba5c0 100644 --- a/paddle/fluid/operators/accuracy_op.cu +++ b/paddle/fluid/operators/accuracy_op.cu @@ -14,9 +14,9 @@ limitations under the License. */ #include #include -#include "paddle/operators/accuracy_op.h" -#include "paddle/platform/cuda_helper.h" -#include "paddle/platform/gpu_info.h" +#include "paddle/fluid/operators/accuracy_op.h" +#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/gpu_info.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/accuracy_op.h b/paddle/fluid/operators/accuracy_op.h index 04104a695fa..b3ed1d3fe09 100644 --- a/paddle/fluid/operators/accuracy_op.h +++ b/paddle/fluid/operators/accuracy_op.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once #include -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/activation_op.cc b/paddle/fluid/operators/activation_op.cc index 4188858a90d..c04dd8cb916 100644 --- a/paddle/fluid/operators/activation_op.cc +++ b/paddle/fluid/operators/activation_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/activation_op.h" +#include "paddle/fluid/operators/activation_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/activation_op.cu b/paddle/fluid/operators/activation_op.cu index b9ccdf639cf..b86a7926a97 100644 --- a/paddle/fluid/operators/activation_op.cu +++ b/paddle/fluid/operators/activation_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/activation_op.h" +#include "paddle/fluid/operators/activation_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/activation_op.h b/paddle/fluid/operators/activation_op.h index c0809abc051..7a6ae2224c8 100644 --- a/paddle/fluid/operators/activation_op.h +++ b/paddle/fluid/operators/activation_op.h @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/detail/safe_ref.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/detail/safe_ref.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/adadelta_op.cc b/paddle/fluid/operators/adadelta_op.cc index d8a9491c824..ececd47e6a6 100644 --- a/paddle/fluid/operators/adadelta_op.cc +++ b/paddle/fluid/operators/adadelta_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/adadelta_op.h" +#include "paddle/fluid/operators/adadelta_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/adadelta_op.cu b/paddle/fluid/operators/adadelta_op.cu index 91294a0d5d1..733482f788d 100644 --- a/paddle/fluid/operators/adadelta_op.cu +++ b/paddle/fluid/operators/adadelta_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/adadelta_op.h" +#include "paddle/fluid/operators/adadelta_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/adadelta_op.h b/paddle/fluid/operators/adadelta_op.h index 819d0845dbd..82ced087104 100644 --- a/paddle/fluid/operators/adadelta_op.h +++ b/paddle/fluid/operators/adadelta_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/adagrad_op.cc b/paddle/fluid/operators/adagrad_op.cc index c83318a2723..61c0ecd019b 100644 --- a/paddle/fluid/operators/adagrad_op.cc +++ b/paddle/fluid/operators/adagrad_op.cc @@ -12,12 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/adagrad_op.h" +#include "paddle/fluid/operators/adagrad_op.h" #include -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/selected_rows_functor.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/selected_rows_functor.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/adagrad_op.cu b/paddle/fluid/operators/adagrad_op.cu index 9a21e00b12b..1117363c133 100644 --- a/paddle/fluid/operators/adagrad_op.cu +++ b/paddle/fluid/operators/adagrad_op.cu @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/adagrad_op.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/selected_rows_functor.h" -#include "paddle/platform/cuda_helper.h" +#include "paddle/fluid/operators/adagrad_op.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/selected_rows_functor.h" +#include "paddle/fluid/platform/cuda_helper.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/adagrad_op.h b/paddle/fluid/operators/adagrad_op.h index 66f5b0f449a..ee503b2c362 100644 --- a/paddle/fluid/operators/adagrad_op.h +++ b/paddle/fluid/operators/adagrad_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/adam_op.cc b/paddle/fluid/operators/adam_op.cc index 03527de936b..25da9336b28 100644 --- a/paddle/fluid/operators/adam_op.cc +++ b/paddle/fluid/operators/adam_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/adam_op.h" +#include "paddle/fluid/operators/adam_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/adam_op.cu b/paddle/fluid/operators/adam_op.cu index 94f840c1889..85b806eb6a1 100644 --- a/paddle/fluid/operators/adam_op.cu +++ b/paddle/fluid/operators/adam_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/adam_op.h" +#include "paddle/fluid/operators/adam_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/adam_op.h b/paddle/fluid/operators/adam_op.h index af2c3ecd725..a51b46ef157 100644 --- a/paddle/fluid/operators/adam_op.h +++ b/paddle/fluid/operators/adam_op.h @@ -14,10 +14,10 @@ limitations under the License. */ #pragma once #include // for sqrt in CPU and CUDA -#include "paddle/framework/op_registry.h" -#include "paddle/operators/detail/safe_ref.h" -#include "paddle/operators/math/selected_rows_functor.h" -#include "paddle/platform/for_range.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/detail/safe_ref.h" +#include "paddle/fluid/operators/math/selected_rows_functor.h" +#include "paddle/fluid/platform/for_range.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/adamax_op.cc b/paddle/fluid/operators/adamax_op.cc index 3b0b7141847..b2249b8f96d 100644 --- a/paddle/fluid/operators/adamax_op.cc +++ b/paddle/fluid/operators/adamax_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/adamax_op.h" +#include "paddle/fluid/operators/adamax_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/adamax_op.cu b/paddle/fluid/operators/adamax_op.cu index 8f87bb28671..44a5d6c7bde 100644 --- a/paddle/fluid/operators/adamax_op.cu +++ b/paddle/fluid/operators/adamax_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/adamax_op.h" +#include "paddle/fluid/operators/adamax_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/adamax_op.h b/paddle/fluid/operators/adamax_op.h index 172c179c5fa..124453c0ece 100644 --- a/paddle/fluid/operators/adamax_op.h +++ b/paddle/fluid/operators/adamax_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/array_operator.h b/paddle/fluid/operators/array_operator.h index 3fdad5ad9b1..4ffb414ecea 100644 --- a/paddle/fluid/operators/array_operator.h +++ b/paddle/fluid/operators/array_operator.h @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/lod_tensor_array.h" -#include "paddle/framework/op_registry.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/lod_tensor_array.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/array_to_lod_tensor_op.cc b/paddle/fluid/operators/array_to_lod_tensor_op.cc index ba5c6bd3c68..bf8e11bd8c0 100644 --- a/paddle/fluid/operators/array_to_lod_tensor_op.cc +++ b/paddle/fluid/operators/array_to_lod_tensor_op.cc @@ -13,11 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/framework/lod_rank_table.h" -#include "paddle/framework/lod_tensor_array.h" -#include "paddle/framework/op_registry.h" -#include "paddle/memory/memcpy.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/lod_rank_table.h" +#include "paddle/fluid/framework/lod_tensor_array.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/memory/memcpy.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/assign_op.cc b/paddle/fluid/operators/assign_op.cc index e04aa2d28cf..f99f9af4276 100644 --- a/paddle/fluid/operators/assign_op.cc +++ b/paddle/fluid/operators/assign_op.cc @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/data_type.h" -#include "paddle/framework/op_registry.h" -#include "paddle/framework/var_type.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/var_type.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/assign_value_op.cc b/paddle/fluid/operators/assign_value_op.cc index 8e3a5304892..835043d9ab4 100644 --- a/paddle/fluid/operators/assign_value_op.cc +++ b/paddle/fluid/operators/assign_value_op.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/operators/assign_value_op.h" +#include "paddle/fluid/operators/assign_value_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/assign_value_op.cu.cc b/paddle/fluid/operators/assign_value_op.cu.cc index b17e2015005..616163f97b9 100644 --- a/paddle/fluid/operators/assign_value_op.cu.cc +++ b/paddle/fluid/operators/assign_value_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/assign_value_op.h" +#include "paddle/fluid/operators/assign_value_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(assign_value, ops::AssignValueKernel, diff --git a/paddle/fluid/operators/assign_value_op.h b/paddle/fluid/operators/assign_value_op.h index ec98c535132..33a344cad59 100644 --- a/paddle/fluid/operators/assign_value_op.h +++ b/paddle/fluid/operators/assign_value_op.h @@ -14,9 +14,9 @@ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/platform/enforce.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/auc_op.cc b/paddle/fluid/operators/auc_op.cc index b6494f95097..8ac08ea4a19 100644 --- a/paddle/fluid/operators/auc_op.cc +++ b/paddle/fluid/operators/auc_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/auc_op.h" +#include "paddle/fluid/operators/auc_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/auc_op.h b/paddle/fluid/operators/auc_op.h index b80509e2a99..e648db70974 100644 --- a/paddle/fluid/operators/auc_op.h +++ b/paddle/fluid/operators/auc_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/batch_norm_op.cc b/paddle/fluid/operators/batch_norm_op.cc index 0e984c38ba7..506c25d50d4 100644 --- a/paddle/fluid/operators/batch_norm_op.cc +++ b/paddle/fluid/operators/batch_norm_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/batch_norm_op.h" -#include "paddle/framework/data_layout.h" +#include "paddle/fluid/operators/batch_norm_op.h" +#include "paddle/fluid/framework/data_layout.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/batch_norm_op.cu.cc b/paddle/fluid/operators/batch_norm_op.cu.cc index 3d17725ab47..b9c97211e14 100644 --- a/paddle/fluid/operators/batch_norm_op.cu.cc +++ b/paddle/fluid/operators/batch_norm_op.cu.cc @@ -12,12 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/batch_norm_op.h" -#include "paddle/framework/data_layout.h" +#include "paddle/fluid/operators/batch_norm_op.h" +#include "paddle/fluid/framework/data_layout.h" #include -#include "paddle/operators/math/math_function.h" -#include "paddle/platform/cudnn_helper.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/platform/cudnn_helper.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/batch_norm_op.h b/paddle/fluid/operators/batch_norm_op.h index a817ef41fc8..fa9942ad099 100644 --- a/paddle/fluid/operators/batch_norm_op.h +++ b/paddle/fluid/operators/batch_norm_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/beam_search_decode_op.cc b/paddle/fluid/operators/beam_search_decode_op.cc index 72e05607b0b..7737d4e098a 100644 --- a/paddle/fluid/operators/beam_search_decode_op.cc +++ b/paddle/fluid/operators/beam_search_decode_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/beam_search_decode_op.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/operators/beam_search_decode_op.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/beam_search_decode_op.h b/paddle/fluid/operators/beam_search_decode_op.h index 3b1c6cd7a10..aeecb8d39ac 100644 --- a/paddle/fluid/operators/beam_search_decode_op.h +++ b/paddle/fluid/operators/beam_search_decode_op.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/framework/lod_tensor_array.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/lod_tensor_array.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/beam_search_decode_op_test.cc b/paddle/fluid/operators/beam_search_decode_op_test.cc index 5ac23991f3c..24f87279d5e 100644 --- a/paddle/fluid/operators/beam_search_decode_op_test.cc +++ b/paddle/fluid/operators/beam_search_decode_op_test.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/beam_search_decode_op.h" +#include "paddle/fluid/operators/beam_search_decode_op.h" #include "gtest/gtest.h" using CPUPlace = paddle::platform::CPUPlace; diff --git a/paddle/fluid/operators/beam_search_op.cc b/paddle/fluid/operators/beam_search_op.cc index 844ade40eb2..6f4c8c7e06e 100644 --- a/paddle/fluid/operators/beam_search_op.cc +++ b/paddle/fluid/operators/beam_search_op.cc @@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/beam_search_op.h" +#include "paddle/fluid/operators/beam_search_op.h" #include -#include "paddle/framework/lod_tensor.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/beam_search_op.h b/paddle/fluid/operators/beam_search_op.h index 7ad85874fcb..9e2a05a60c3 100644 --- a/paddle/fluid/operators/beam_search_op.h +++ b/paddle/fluid/operators/beam_search_op.h @@ -18,8 +18,8 @@ limitations under the License. */ #include "gtest/gtest.h" #endif -#include "paddle/framework/lod_tensor.h" -#include "paddle/framework/operator.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/operator.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/beam_search_op_test.cc b/paddle/fluid/operators/beam_search_op_test.cc index d4beb64a85a..ea2afda4d49 100644 --- a/paddle/fluid/operators/beam_search_op_test.cc +++ b/paddle/fluid/operators/beam_search_op_test.cc @@ -12,7 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/beam_search_op.h" +#include "paddle/fluid/operators/beam_search_op.h" #include #include diff --git a/paddle/fluid/operators/bilinear_tensor_product_op.cc b/paddle/fluid/operators/bilinear_tensor_product_op.cc index 7640147a12d..cc378b1b453 100644 --- a/paddle/fluid/operators/bilinear_tensor_product_op.cc +++ b/paddle/fluid/operators/bilinear_tensor_product_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/bilinear_tensor_product_op.h" +#include "paddle/fluid/operators/bilinear_tensor_product_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/bilinear_tensor_product_op.cu b/paddle/fluid/operators/bilinear_tensor_product_op.cu index 0f48010716f..2cec48ee69a 100644 --- a/paddle/fluid/operators/bilinear_tensor_product_op.cu +++ b/paddle/fluid/operators/bilinear_tensor_product_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/bilinear_tensor_product_op.h" +#include "paddle/fluid/operators/bilinear_tensor_product_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/bilinear_tensor_product_op.h b/paddle/fluid/operators/bilinear_tensor_product_op.h index ba9a2c5ce3c..626fa957c42 100644 --- a/paddle/fluid/operators/bilinear_tensor_product_op.h +++ b/paddle/fluid/operators/bilinear_tensor_product_op.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/bipartite_match_op.cc b/paddle/fluid/operators/bipartite_match_op.cc index 1e6fa2091de..d614bf70438 100644 --- a/paddle/fluid/operators/bipartite_match_op.cc +++ b/paddle/fluid/operators/bipartite_match_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/box_coder_op.cc b/paddle/fluid/operators/box_coder_op.cc index 539813d4858..8e0fee22d8d 100644 --- a/paddle/fluid/operators/box_coder_op.cc +++ b/paddle/fluid/operators/box_coder_op.cc @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/box_coder_op.h" +#include "paddle/fluid/operators/box_coder_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/box_coder_op.cu b/paddle/fluid/operators/box_coder_op.cu index 98bd93457fa..dd9299ceacd 100644 --- a/paddle/fluid/operators/box_coder_op.cu +++ b/paddle/fluid/operators/box_coder_op.cu @@ -9,8 +9,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/box_coder_op.h" -#include "paddle/platform/cuda_helper.h" +#include "paddle/fluid/operators/box_coder_op.h" +#include "paddle/fluid/platform/cuda_helper.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/box_coder_op.h b/paddle/fluid/operators/box_coder_op.h index 086251f6e06..c41bcc212b8 100644 --- a/paddle/fluid/operators/box_coder_op.h +++ b/paddle/fluid/operators/box_coder_op.h @@ -10,8 +10,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/cast_op.cc b/paddle/fluid/operators/cast_op.cc index 446976edafc..364c21f7619 100644 --- a/paddle/fluid/operators/cast_op.cc +++ b/paddle/fluid/operators/cast_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/cast_op.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/operators/cast_op.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/cast_op.cu b/paddle/fluid/operators/cast_op.cu index d68bbe6e39a..fb597be9d93 100644 --- a/paddle/fluid/operators/cast_op.cu +++ b/paddle/fluid/operators/cast_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/cast_op.h" +#include "paddle/fluid/operators/cast_op.h" template using CastOpKernel = diff --git a/paddle/fluid/operators/cast_op.h b/paddle/fluid/operators/cast_op.h index 9f39d91edd4..9ab4961cef4 100644 --- a/paddle/fluid/operators/cast_op.h +++ b/paddle/fluid/operators/cast_op.h @@ -14,10 +14,10 @@ limitations under the License. */ #pragma once -#include "paddle/framework/data_type.h" -#include "paddle/framework/framework.pb.h" -#include "paddle/framework/op_registry.h" -#include "paddle/platform/transform.h" +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/framework.pb.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/transform.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/chunk_eval_op.cc b/paddle/fluid/operators/chunk_eval_op.cc index 44f667aead9..080e4d80da4 100644 --- a/paddle/fluid/operators/chunk_eval_op.cc +++ b/paddle/fluid/operators/chunk_eval_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/chunk_eval_op.h" +#include "paddle/fluid/operators/chunk_eval_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/chunk_eval_op.h b/paddle/fluid/operators/chunk_eval_op.h index 300aff90c0a..3dca3d2c0f9 100644 --- a/paddle/fluid/operators/chunk_eval_op.h +++ b/paddle/fluid/operators/chunk_eval_op.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once #include -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/clip_by_norm_op.cc b/paddle/fluid/operators/clip_by_norm_op.cc index b90921d79ba..89df118c06f 100644 --- a/paddle/fluid/operators/clip_by_norm_op.cc +++ b/paddle/fluid/operators/clip_by_norm_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/clip_by_norm_op.h" +#include "paddle/fluid/operators/clip_by_norm_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/clip_by_norm_op.cu b/paddle/fluid/operators/clip_by_norm_op.cu index cbf8fa44133..a466b335914 100644 --- a/paddle/fluid/operators/clip_by_norm_op.cu +++ b/paddle/fluid/operators/clip_by_norm_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/clip_by_norm_op.h" +#include "paddle/fluid/operators/clip_by_norm_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/clip_by_norm_op.h b/paddle/fluid/operators/clip_by_norm_op.h index 87956a707cf..82bcf07657b 100644 --- a/paddle/fluid/operators/clip_by_norm_op.h +++ b/paddle/fluid/operators/clip_by_norm_op.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/platform/transform.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/transform.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/clip_op.cc b/paddle/fluid/operators/clip_op.cc index 7adb74eab78..76b2cefbf9d 100644 --- a/paddle/fluid/operators/clip_op.cc +++ b/paddle/fluid/operators/clip_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/clip_op.h" +#include "paddle/fluid/operators/clip_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/clip_op.cu b/paddle/fluid/operators/clip_op.cu index 5ccbc964340..7b044d6e699 100644 --- a/paddle/fluid/operators/clip_op.cu +++ b/paddle/fluid/operators/clip_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/clip_op.h" +#include "paddle/fluid/operators/clip_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/clip_op.h b/paddle/fluid/operators/clip_op.h index 51db185dffd..aecd6f83bfa 100644 --- a/paddle/fluid/operators/clip_op.h +++ b/paddle/fluid/operators/clip_op.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/platform/transform.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/transform.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/compare_op.cc b/paddle/fluid/operators/compare_op.cc index 51b5bcb38f9..f3414c33b5a 100644 --- a/paddle/fluid/operators/compare_op.cc +++ b/paddle/fluid/operators/compare_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/compare_op.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/operators/compare_op.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/compare_op.cu b/paddle/fluid/operators/compare_op.cu index f625824dbc9..3507af2ae3a 100644 --- a/paddle/fluid/operators/compare_op.cu +++ b/paddle/fluid/operators/compare_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/compare_op.h" +#include "paddle/fluid/operators/compare_op.h" REGISTER_LOGICAL_KERNEL(less_than, CUDA, paddle::operators::LessThanFunctor); REGISTER_LOGICAL_KERNEL(less_equal, CUDA, paddle::operators::LessEqualFunctor); diff --git a/paddle/fluid/operators/compare_op.h b/paddle/fluid/operators/compare_op.h index 79b8c6f59c7..4b2ee5a9d68 100644 --- a/paddle/fluid/operators/compare_op.h +++ b/paddle/fluid/operators/compare_op.h @@ -15,9 +15,9 @@ limitations under the License. */ #pragma once #include #include -#include "paddle/framework/op_registry.h" -#include "paddle/operators/elementwise_op_function.h" -#include "paddle/platform/transform.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/elementwise_op_function.h" +#include "paddle/fluid/platform/transform.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/concat_op.cc b/paddle/fluid/operators/concat_op.cc index 32b61edfd0d..68eb5412beb 100644 --- a/paddle/fluid/operators/concat_op.cc +++ b/paddle/fluid/operators/concat_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/concat_op.h" +#include "paddle/fluid/operators/concat_op.h" #include namespace paddle { diff --git a/paddle/fluid/operators/concat_op.cu.cc b/paddle/fluid/operators/concat_op.cu.cc index 7b46452d3d5..143bda61167 100644 --- a/paddle/fluid/operators/concat_op.cu.cc +++ b/paddle/fluid/operators/concat_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/concat_op.h" +#include "paddle/fluid/operators/concat_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( concat, ops::ConcatKernel); diff --git a/paddle/fluid/operators/concat_op.h b/paddle/fluid/operators/concat_op.h index de4011585af..72b3e225bf6 100644 --- a/paddle/fluid/operators/concat_op.h +++ b/paddle/fluid/operators/concat_op.h @@ -15,8 +15,8 @@ limitations under the License. */ #pragma once #include -#include "paddle/framework/op_registry.h" -#include "paddle/operators/strided_memcpy.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/strided_memcpy.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/cond_op.cc b/paddle/fluid/operators/cond_op.cc index e333002bfd1..dd93790d5b5 100644 --- a/paddle/fluid/operators/cond_op.cc +++ b/paddle/fluid/operators/cond_op.cc @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/cond_op.h" -#include "paddle/operators/gather.h" -#include "paddle/operators/scatter.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/operators/cond_op.h" +#include "paddle/fluid/operators/gather.h" +#include "paddle/fluid/operators/scatter.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/cond_op.h b/paddle/fluid/operators/cond_op.h index 7dcdc47e0b2..695af449069 100644 --- a/paddle/fluid/operators/cond_op.h +++ b/paddle/fluid/operators/cond_op.h @@ -15,11 +15,11 @@ limitations under the License. */ #pragma once #include #include "glog/logging.h" -#include "paddle/framework/ddim.h" -#include "paddle/framework/eigen.h" -#include "paddle/framework/operator.h" -#include "paddle/framework/tensor.h" -#include "paddle/operators/net_op.h" +#include "paddle/fluid/framework/ddim.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/operators/net_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/conditional_block_op.cc b/paddle/fluid/operators/conditional_block_op.cc index bdcdb85be7a..30435c6cca0 100644 --- a/paddle/fluid/operators/conditional_block_op.cc +++ b/paddle/fluid/operators/conditional_block_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/framework/executor.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/conv_cudnn_op.cu.cc b/paddle/fluid/operators/conv_cudnn_op.cu.cc index 3a5409a7e3f..a729d376ac8 100644 --- a/paddle/fluid/operators/conv_cudnn_op.cu.cc +++ b/paddle/fluid/operators/conv_cudnn_op.cu.cc @@ -12,12 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/memory/memory.h" -#include "paddle/operators/conv_op.h" -#include "paddle/platform/assert.h" -#include "paddle/platform/cudnn_helper.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/memory/memory.h" +#include "paddle/fluid/operators/conv_op.h" +#include "paddle/fluid/platform/assert.h" +#include "paddle/fluid/platform/cudnn_helper.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc index cef7ddd5fe7..a047e579163 100644 --- a/paddle/fluid/operators/conv_op.cc +++ b/paddle/fluid/operators/conv_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/conv_op.h" +#include "paddle/fluid/operators/conv_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/conv_op.cu.cc b/paddle/fluid/operators/conv_op.cu.cc index d0bd40ee95d..b2129d3b461 100644 --- a/paddle/fluid/operators/conv_op.cu.cc +++ b/paddle/fluid/operators/conv_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/conv_op.h" +#include "paddle/fluid/operators/conv_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/conv_op.h b/paddle/fluid/operators/conv_op.h index 3c1d0e9c1c4..1156e6c8fe3 100644 --- a/paddle/fluid/operators/conv_op.h +++ b/paddle/fluid/operators/conv_op.h @@ -14,12 +14,12 @@ limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/depthwise_conv.h" -#include "paddle/operators/math/im2col.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/vol2col.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/depthwise_conv.h" +#include "paddle/fluid/operators/math/im2col.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/vol2col.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/conv_shift_op.cc b/paddle/fluid/operators/conv_shift_op.cc index 106b68a0a0e..a96aac63e09 100644 --- a/paddle/fluid/operators/conv_shift_op.cc +++ b/paddle/fluid/operators/conv_shift_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/conv_shift_op.h" -#include "paddle/framework/eigen.h" +#include "paddle/fluid/operators/conv_shift_op.h" +#include "paddle/fluid/framework/eigen.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/conv_shift_op.cu b/paddle/fluid/operators/conv_shift_op.cu index cf7abc196e1..9818707ce3b 100644 --- a/paddle/fluid/operators/conv_shift_op.cu +++ b/paddle/fluid/operators/conv_shift_op.cu @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/conv_shift_op.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/platform/cuda_helper.h" +#include "paddle/fluid/operators/conv_shift_op.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/platform/cuda_helper.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/conv_shift_op.h b/paddle/fluid/operators/conv_shift_op.h index 6781d87ef0d..987a690895e 100644 --- a/paddle/fluid/operators/conv_shift_op.h +++ b/paddle/fluid/operators/conv_shift_op.h @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc b/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc index 23bc97e13c1..0aed4ebeffa 100644 --- a/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc +++ b/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc @@ -12,12 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/memory/memory.h" -#include "paddle/operators/conv_transpose_op.h" -#include "paddle/platform/assert.h" -#include "paddle/platform/cudnn_helper.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/memory/memory.h" +#include "paddle/fluid/operators/conv_transpose_op.h" +#include "paddle/fluid/platform/assert.h" +#include "paddle/fluid/platform/cudnn_helper.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/conv_transpose_op.cc b/paddle/fluid/operators/conv_transpose_op.cc index 089290a506d..974cffad928 100644 --- a/paddle/fluid/operators/conv_transpose_op.cc +++ b/paddle/fluid/operators/conv_transpose_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/conv_transpose_op.h" +#include "paddle/fluid/operators/conv_transpose_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/conv_transpose_op.cu.cc b/paddle/fluid/operators/conv_transpose_op.cu.cc index f1d827c6062..ed90c6ec626 100644 --- a/paddle/fluid/operators/conv_transpose_op.cu.cc +++ b/paddle/fluid/operators/conv_transpose_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/conv_transpose_op.h" +#include "paddle/fluid/operators/conv_transpose_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/conv_transpose_op.h b/paddle/fluid/operators/conv_transpose_op.h index 8c0d57afcd2..f5125754686 100644 --- a/paddle/fluid/operators/conv_transpose_op.h +++ b/paddle/fluid/operators/conv_transpose_op.h @@ -14,11 +14,11 @@ limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/im2col.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/vol2col.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/im2col.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/vol2col.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/cos_sim_op.cc b/paddle/fluid/operators/cos_sim_op.cc index 9019a1edb37..57c5a6025a0 100644 --- a/paddle/fluid/operators/cos_sim_op.cc +++ b/paddle/fluid/operators/cos_sim_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/cos_sim_op.h" +#include "paddle/fluid/operators/cos_sim_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/cos_sim_op.cu b/paddle/fluid/operators/cos_sim_op.cu index 9e5d1b6e4f0..c8cf363cdc4 100644 --- a/paddle/fluid/operators/cos_sim_op.cu +++ b/paddle/fluid/operators/cos_sim_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/cos_sim_op.h" +#include "paddle/fluid/operators/cos_sim_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/cos_sim_op.h b/paddle/fluid/operators/cos_sim_op.h index eadcca55f9b..9cd8b196daf 100644 --- a/paddle/fluid/operators/cos_sim_op.h +++ b/paddle/fluid/operators/cos_sim_op.h @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/cos_sim_functor.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/platform/for_range.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/cos_sim_functor.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/platform/for_range.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/create_reader_op.cc b/paddle/fluid/operators/create_reader_op.cc index 5ba2a25ab4c..2927ea2dafa 100644 --- a/paddle/fluid/operators/create_reader_op.cc +++ b/paddle/fluid/operators/create_reader_op.cc @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/framework/op_registry.h" -#include "paddle/framework/reader.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/reader.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/crf_decoding_op.cc b/paddle/fluid/operators/crf_decoding_op.cc index 30626028c13..e3c1fc95a3b 100644 --- a/paddle/fluid/operators/crf_decoding_op.cc +++ b/paddle/fluid/operators/crf_decoding_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/crf_decoding_op.h" +#include "paddle/fluid/operators/crf_decoding_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/crf_decoding_op.h b/paddle/fluid/operators/crf_decoding_op.h index ce2f4e6622c..c3c161eec5f 100644 --- a/paddle/fluid/operators/crf_decoding_op.h +++ b/paddle/fluid/operators/crf_decoding_op.h @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/crop_op.cc b/paddle/fluid/operators/crop_op.cc index 310e3514431..8e80f77e497 100644 --- a/paddle/fluid/operators/crop_op.cc +++ b/paddle/fluid/operators/crop_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/crop_op.h" +#include "paddle/fluid/operators/crop_op.h" #include namespace paddle { diff --git a/paddle/fluid/operators/crop_op.cu b/paddle/fluid/operators/crop_op.cu index bba5db4c6ce..f3610675aae 100644 --- a/paddle/fluid/operators/crop_op.cu +++ b/paddle/fluid/operators/crop_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/crop_op.h" +#include "paddle/fluid/operators/crop_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(crop, ops::CropKernel); diff --git a/paddle/fluid/operators/crop_op.h b/paddle/fluid/operators/crop_op.h index 69d1a929772..9c7c0446d4c 100644 --- a/paddle/fluid/operators/crop_op.h +++ b/paddle/fluid/operators/crop_op.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/strided_memcpy.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/strided_memcpy.h" namespace paddle { namespace operators { // Internal diff --git a/paddle/fluid/operators/cross_entropy_op.cc b/paddle/fluid/operators/cross_entropy_op.cc index 7abd5b1c61d..5e34b248b6a 100644 --- a/paddle/fluid/operators/cross_entropy_op.cc +++ b/paddle/fluid/operators/cross_entropy_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/cross_entropy_op.h" +#include "paddle/fluid/operators/cross_entropy_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/cross_entropy_op.cu b/paddle/fluid/operators/cross_entropy_op.cu index 3b04894e6cc..de0976c69fc 100644 --- a/paddle/fluid/operators/cross_entropy_op.cu +++ b/paddle/fluid/operators/cross_entropy_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/cross_entropy_op.h" +#include "paddle/fluid/operators/cross_entropy_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/cross_entropy_op.h b/paddle/fluid/operators/cross_entropy_op.h index 5623d2ded16..4a5b20ecb70 100644 --- a/paddle/fluid/operators/cross_entropy_op.h +++ b/paddle/fluid/operators/cross_entropy_op.h @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/cross_entropy.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/cross_entropy.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/ctc_align_op.cc b/paddle/fluid/operators/ctc_align_op.cc index eeecbd32127..3c7db78813e 100644 --- a/paddle/fluid/operators/ctc_align_op.cc +++ b/paddle/fluid/operators/ctc_align_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/ctc_align_op.h" +#include "paddle/fluid/operators/ctc_align_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/ctc_align_op.cu b/paddle/fluid/operators/ctc_align_op.cu index 6406825d4a5..f629e0a9f15 100644 --- a/paddle/fluid/operators/ctc_align_op.cu +++ b/paddle/fluid/operators/ctc_align_op.cu @@ -15,7 +15,7 @@ limitations under the License. */ #include #include #include -#include "paddle/operators/ctc_align_op.h" +#include "paddle/fluid/operators/ctc_align_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/ctc_align_op.h b/paddle/fluid/operators/ctc_align_op.h index 54ad1d6f5cc..1ef034c2f5b 100644 --- a/paddle/fluid/operators/ctc_align_op.h +++ b/paddle/fluid/operators/ctc_align_op.h @@ -15,8 +15,8 @@ limitations under the License. */ #pragma once #include -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/cum_op.h b/paddle/fluid/operators/cum_op.h index e3813ac9036..3b224914784 100644 --- a/paddle/fluid/operators/cum_op.h +++ b/paddle/fluid/operators/cum_op.h @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" -#include "paddle/operators/detail/safe_ref.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/operators/detail/safe_ref.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/cumsum_op.cc b/paddle/fluid/operators/cumsum_op.cc index 4933cc923d4..d15d4e3db35 100644 --- a/paddle/fluid/operators/cumsum_op.cc +++ b/paddle/fluid/operators/cumsum_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/cum_op.h" +#include "paddle/fluid/operators/cum_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/cumsum_op.cu b/paddle/fluid/operators/cumsum_op.cu index 90661c4269a..e063cc0f65a 100644 --- a/paddle/fluid/operators/cumsum_op.cu +++ b/paddle/fluid/operators/cumsum_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/cum_op.h" +#include "paddle/fluid/operators/cum_op.h" namespace ops = paddle::operators; using CUDA = paddle::platform::CUDADeviceContext; diff --git a/paddle/fluid/operators/decayed_adagrad_op.cc b/paddle/fluid/operators/decayed_adagrad_op.cc index 739a8d881c3..d827155919e 100644 --- a/paddle/fluid/operators/decayed_adagrad_op.cc +++ b/paddle/fluid/operators/decayed_adagrad_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/decayed_adagrad_op.h" +#include "paddle/fluid/operators/decayed_adagrad_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/decayed_adagrad_op.cu b/paddle/fluid/operators/decayed_adagrad_op.cu index 7bc8161f233..215d6dbc7d8 100644 --- a/paddle/fluid/operators/decayed_adagrad_op.cu +++ b/paddle/fluid/operators/decayed_adagrad_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/decayed_adagrad_op.h" +#include "paddle/fluid/operators/decayed_adagrad_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/decayed_adagrad_op.h b/paddle/fluid/operators/decayed_adagrad_op.h index fec9705cfc1..52b67586ea3 100644 --- a/paddle/fluid/operators/decayed_adagrad_op.h +++ b/paddle/fluid/operators/decayed_adagrad_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/detail/grpc_client.cc b/paddle/fluid/operators/detail/grpc_client.cc index 9b5f7afc6a4..0d395d347ba 100644 --- a/paddle/fluid/operators/detail/grpc_client.cc +++ b/paddle/fluid/operators/detail/grpc_client.cc @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "grpc_client.h" -#include "paddle/framework/threadpool.h" +#include "paddle/fluid/framework/threadpool.h" namespace paddle { namespace operators { namespace detail { diff --git a/paddle/fluid/operators/detail/grpc_client.h b/paddle/fluid/operators/detail/grpc_client.h index f9499f6dc70..314fe8168f0 100644 --- a/paddle/fluid/operators/detail/grpc_client.h +++ b/paddle/fluid/operators/detail/grpc_client.h @@ -25,12 +25,12 @@ limitations under the License. */ #include #include -#include "paddle/framework/data_type.h" -#include "paddle/framework/lod_tensor.h" -#include "paddle/framework/scope.h" -#include "paddle/framework/selected_rows.h" -#include "paddle/operators/detail/sendrecvop_utils.h" -#include "paddle/operators/detail/simple_block_queue.h" +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/operators/detail/sendrecvop_utils.h" +#include "paddle/fluid/operators/detail/simple_block_queue.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc index 4f94e1315fb..96f4ea797b1 100644 --- a/paddle/fluid/operators/detail/grpc_server.cc +++ b/paddle/fluid/operators/detail/grpc_server.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/detail/grpc_server.h" +#include "paddle/fluid/operators/detail/grpc_server.h" using grpc::ServerAsyncResponseWriter; diff --git a/paddle/fluid/operators/detail/grpc_server.h b/paddle/fluid/operators/detail/grpc_server.h index 3f8b9d93176..1382d173183 100644 --- a/paddle/fluid/operators/detail/grpc_server.h +++ b/paddle/fluid/operators/detail/grpc_server.h @@ -14,19 +14,19 @@ limitations under the License. */ #pragma once -#include "paddle/framework/lod_tensor.h" -#include "paddle/framework/scope.h" -#include "paddle/framework/selected_rows.h" -#include "paddle/framework/var_type.h" -#include "paddle/operators/detail/simple_block_queue.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/framework/var_type.h" +#include "paddle/fluid/operators/detail/simple_block_queue.h" -#include "paddle/operators/detail/send_recv.grpc.pb.h" -#include "paddle/operators/detail/send_recv.pb.h" +#include "paddle/fluid/operators/detail/send_recv.grpc.pb.h" +#include "paddle/fluid/operators/detail/send_recv.pb.h" #include #include #include -#include "paddle/operators/detail/sendrecvop_utils.h" +#include "paddle/fluid/operators/detail/sendrecvop_utils.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/detail/sendrecvop_utils.cc b/paddle/fluid/operators/detail/sendrecvop_utils.cc index 7635b9e8dbd..ba3ae6add60 100644 --- a/paddle/fluid/operators/detail/sendrecvop_utils.cc +++ b/paddle/fluid/operators/detail/sendrecvop_utils.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/detail/sendrecvop_utils.h" +#include "paddle/fluid/operators/detail/sendrecvop_utils.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/detail/sendrecvop_utils.h b/paddle/fluid/operators/detail/sendrecvop_utils.h index 8e66f7299c7..fed887c0279 100644 --- a/paddle/fluid/operators/detail/sendrecvop_utils.h +++ b/paddle/fluid/operators/detail/sendrecvop_utils.h @@ -17,14 +17,14 @@ limitations under the License. */ #include #include -#include "paddle/framework/data_type.h" -#include "paddle/framework/lod_tensor.h" -#include "paddle/framework/scope.h" -#include "paddle/framework/selected_rows.h" -#include "paddle/framework/var_type.h" - -#include "paddle/operators/detail/send_recv.grpc.pb.h" -#include "paddle/operators/detail/send_recv.pb.h" +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/framework/var_type.h" + +#include "paddle/fluid/operators/detail/send_recv.grpc.pb.h" +#include "paddle/fluid/operators/detail/send_recv.pb.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/detail/strided_memcpy.h b/paddle/fluid/operators/detail/strided_memcpy.h index 9ed524d4dcf..d7a7eed50b9 100644 --- a/paddle/fluid/operators/detail/strided_memcpy.h +++ b/paddle/fluid/operators/detail/strided_memcpy.h @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/ddim.h" -#include "paddle/memory/memcpy.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/ddim.h" +#include "paddle/fluid/memory/memcpy.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/detection_output_op.cc b/paddle/fluid/operators/detection_output_op.cc index ea44cd32678..6dee5222959 100644 --- a/paddle/fluid/operators/detection_output_op.cc +++ b/paddle/fluid/operators/detection_output_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/detection_output_op.h" +#include "paddle/fluid/operators/detection_output_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/detection_output_op.cu.cc b/paddle/fluid/operators/detection_output_op.cu.cc index 4a6560e0492..309e03a25be 100644 --- a/paddle/fluid/operators/detection_output_op.cu.cc +++ b/paddle/fluid/operators/detection_output_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/detection_output_op.h" +#include "paddle/fluid/operators/detection_output_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/detection_output_op.h b/paddle/fluid/operators/detection_output_op.h index 86285b748a7..05e5b72bd35 100644 --- a/paddle/fluid/operators/detection_output_op.h +++ b/paddle/fluid/operators/detection_output_op.h @@ -13,12 +13,12 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/framework/tensor.h" -#include "paddle/operators/math/detection_util.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/softmax.h" -#include "paddle/operators/strided_memcpy.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/operators/math/detection_util.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/softmax.h" +#include "paddle/fluid/operators/strided_memcpy.h" namespace paddle { namespace operators { template diff --git a/paddle/fluid/operators/dropout_op.cc b/paddle/fluid/operators/dropout_op.cc index 5274aa204e6..e1dc900512c 100644 --- a/paddle/fluid/operators/dropout_op.cc +++ b/paddle/fluid/operators/dropout_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/dropout_op.h" +#include "paddle/fluid/operators/dropout_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/dropout_op.cu b/paddle/fluid/operators/dropout_op.cu index 84d78445a4f..4ae9f4ce54d 100644 --- a/paddle/fluid/operators/dropout_op.cu +++ b/paddle/fluid/operators/dropout_op.cu @@ -17,7 +17,7 @@ limitations under the License. */ #include #include #include -#include "paddle/operators/dropout_op.h" +#include "paddle/fluid/operators/dropout_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/dropout_op.h b/paddle/fluid/operators/dropout_op.h index 46e5dbc64ff..9dd1f33669c 100644 --- a/paddle/fluid/operators/dropout_op.h +++ b/paddle/fluid/operators/dropout_op.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once #include -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/edit_distance_op.cc b/paddle/fluid/operators/edit_distance_op.cc index 7e7dfc79eba..ae82408da71 100644 --- a/paddle/fluid/operators/edit_distance_op.cc +++ b/paddle/fluid/operators/edit_distance_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/edit_distance_op.h" +#include "paddle/fluid/operators/edit_distance_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/edit_distance_op.cu b/paddle/fluid/operators/edit_distance_op.cu index c3e116af086..bdfead75e71 100644 --- a/paddle/fluid/operators/edit_distance_op.cu +++ b/paddle/fluid/operators/edit_distance_op.cu @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/platform/cuda_helper.h" -#include "paddle/platform/gpu_info.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/gpu_info.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/edit_distance_op.h b/paddle/fluid/operators/edit_distance_op.h index 974299e604d..205e16e6bfe 100644 --- a/paddle/fluid/operators/edit_distance_op.h +++ b/paddle/fluid/operators/edit_distance_op.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once #include -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise_add_op.cc b/paddle/fluid/operators/elementwise_add_op.cc index 37951fa7587..5b9947b8c93 100644 --- a/paddle/fluid/operators/elementwise_add_op.cc +++ b/paddle/fluid/operators/elementwise_add_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/elementwise_add_op.h" -#include "paddle/operators/elementwise_op.h" +#include "paddle/fluid/operators/elementwise_add_op.h" +#include "paddle/fluid/operators/elementwise_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise_add_op.cu b/paddle/fluid/operators/elementwise_add_op.cu index 641cea323ac..2ac3a998ec4 100644 --- a/paddle/fluid/operators/elementwise_add_op.cu +++ b/paddle/fluid/operators/elementwise_add_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/elementwise_add_op.h" +#include "paddle/fluid/operators/elementwise_add_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/elementwise_add_op.h b/paddle/fluid/operators/elementwise_add_op.h index c24f97a8509..248e3b9d617 100644 --- a/paddle/fluid/operators/elementwise_add_op.h +++ b/paddle/fluid/operators/elementwise_add_op.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#include "paddle/operators/elementwise_op_function.h" +#include "paddle/fluid/operators/elementwise_op_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise_div_op.cc b/paddle/fluid/operators/elementwise_div_op.cc index 6ebd58b1b3d..818ae82f44c 100644 --- a/paddle/fluid/operators/elementwise_div_op.cc +++ b/paddle/fluid/operators/elementwise_div_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/elementwise_div_op.h" -#include "paddle/operators/elementwise_op.h" +#include "paddle/fluid/operators/elementwise_div_op.h" +#include "paddle/fluid/operators/elementwise_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise_div_op.cu b/paddle/fluid/operators/elementwise_div_op.cu index a0372123d6f..d1bb7a474c0 100644 --- a/paddle/fluid/operators/elementwise_div_op.cu +++ b/paddle/fluid/operators/elementwise_div_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/elementwise_div_op.h" +#include "paddle/fluid/operators/elementwise_div_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/elementwise_div_op.h b/paddle/fluid/operators/elementwise_div_op.h index dc863cc598e..8e0726d9465 100644 --- a/paddle/fluid/operators/elementwise_div_op.h +++ b/paddle/fluid/operators/elementwise_div_op.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#include "paddle/operators/elementwise_op_function.h" +#include "paddle/fluid/operators/elementwise_op_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise_max_op.cc b/paddle/fluid/operators/elementwise_max_op.cc index 53c27ae5be4..1331bcadc8c 100644 --- a/paddle/fluid/operators/elementwise_max_op.cc +++ b/paddle/fluid/operators/elementwise_max_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/elementwise_max_op.h" -#include "paddle/operators/elementwise_op.h" +#include "paddle/fluid/operators/elementwise_max_op.h" +#include "paddle/fluid/operators/elementwise_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise_max_op.cu b/paddle/fluid/operators/elementwise_max_op.cu index 5ff4af17477..7f0259ad002 100644 --- a/paddle/fluid/operators/elementwise_max_op.cu +++ b/paddle/fluid/operators/elementwise_max_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/elementwise_max_op.h" +#include "paddle/fluid/operators/elementwise_max_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/elementwise_max_op.h b/paddle/fluid/operators/elementwise_max_op.h index 67efe4e1511..e1db9bcc011 100644 --- a/paddle/fluid/operators/elementwise_max_op.h +++ b/paddle/fluid/operators/elementwise_max_op.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#include "paddle/operators/elementwise_op_function.h" +#include "paddle/fluid/operators/elementwise_op_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise_min_op.cc b/paddle/fluid/operators/elementwise_min_op.cc index 99482e1bf60..1d69099c8e6 100644 --- a/paddle/fluid/operators/elementwise_min_op.cc +++ b/paddle/fluid/operators/elementwise_min_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/elementwise_min_op.h" -#include "paddle/operators/elementwise_op.h" +#include "paddle/fluid/operators/elementwise_min_op.h" +#include "paddle/fluid/operators/elementwise_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise_min_op.cu b/paddle/fluid/operators/elementwise_min_op.cu index 3547e6ccb77..ed532047350 100644 --- a/paddle/fluid/operators/elementwise_min_op.cu +++ b/paddle/fluid/operators/elementwise_min_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/elementwise_min_op.h" +#include "paddle/fluid/operators/elementwise_min_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/elementwise_min_op.h b/paddle/fluid/operators/elementwise_min_op.h index cf11759404d..bfe213dd431 100644 --- a/paddle/fluid/operators/elementwise_min_op.h +++ b/paddle/fluid/operators/elementwise_min_op.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#include "paddle/operators/elementwise_op_function.h" +#include "paddle/fluid/operators/elementwise_op_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise_mul_op.cc b/paddle/fluid/operators/elementwise_mul_op.cc index 450dd05c796..0cb96f21d1b 100644 --- a/paddle/fluid/operators/elementwise_mul_op.cc +++ b/paddle/fluid/operators/elementwise_mul_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/elementwise_mul_op.h" -#include "paddle/operators/elementwise_op.h" +#include "paddle/fluid/operators/elementwise_mul_op.h" +#include "paddle/fluid/operators/elementwise_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise_mul_op.cu b/paddle/fluid/operators/elementwise_mul_op.cu index f73e8afda96..d72b6250eed 100644 --- a/paddle/fluid/operators/elementwise_mul_op.cu +++ b/paddle/fluid/operators/elementwise_mul_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/elementwise_mul_op.h" +#include "paddle/fluid/operators/elementwise_mul_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/elementwise_mul_op.h b/paddle/fluid/operators/elementwise_mul_op.h index 773125f5ca5..dc292eb1e72 100644 --- a/paddle/fluid/operators/elementwise_mul_op.h +++ b/paddle/fluid/operators/elementwise_mul_op.h @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/operators/elementwise_op_function.h" +#include "paddle/fluid/operators/elementwise_op_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise_op.h b/paddle/fluid/operators/elementwise_op.h index 1a0131d8b94..38f83d7ad36 100644 --- a/paddle/fluid/operators/elementwise_op.h +++ b/paddle/fluid/operators/elementwise_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise_op_function.h b/paddle/fluid/operators/elementwise_op_function.h index 74abf7c4a58..c1269382a44 100644 --- a/paddle/fluid/operators/elementwise_op_function.h +++ b/paddle/fluid/operators/elementwise_op_function.h @@ -13,16 +13,16 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" -#include "paddle/platform/transform.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/platform/transform.h" #ifdef __NVCC__ #include #endif -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise_pow_op.cc b/paddle/fluid/operators/elementwise_pow_op.cc index 5293cc7dd34..911b5dbd250 100644 --- a/paddle/fluid/operators/elementwise_pow_op.cc +++ b/paddle/fluid/operators/elementwise_pow_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/elementwise_pow_op.h" -#include "paddle/operators/elementwise_op.h" +#include "paddle/fluid/operators/elementwise_pow_op.h" +#include "paddle/fluid/operators/elementwise_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise_pow_op.cu b/paddle/fluid/operators/elementwise_pow_op.cu index 643c978e635..2996600738f 100644 --- a/paddle/fluid/operators/elementwise_pow_op.cu +++ b/paddle/fluid/operators/elementwise_pow_op.cu @@ -10,7 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/elementwise_pow_op.h" +#include "paddle/fluid/operators/elementwise_pow_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/elementwise_pow_op.h b/paddle/fluid/operators/elementwise_pow_op.h index 0c5dd031ec4..b793c1eae0e 100644 --- a/paddle/fluid/operators/elementwise_pow_op.h +++ b/paddle/fluid/operators/elementwise_pow_op.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include -#include "paddle/operators/elementwise_op_function.h" +#include "paddle/fluid/operators/elementwise_op_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise_sub_op.cc b/paddle/fluid/operators/elementwise_sub_op.cc index d3c51f0a697..46ce01c7cf5 100644 --- a/paddle/fluid/operators/elementwise_sub_op.cc +++ b/paddle/fluid/operators/elementwise_sub_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/elementwise_sub_op.h" -#include "paddle/operators/elementwise_op.h" +#include "paddle/fluid/operators/elementwise_sub_op.h" +#include "paddle/fluid/operators/elementwise_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/elementwise_sub_op.cu b/paddle/fluid/operators/elementwise_sub_op.cu index 7a2516ef6a6..eb09d6c5edc 100644 --- a/paddle/fluid/operators/elementwise_sub_op.cu +++ b/paddle/fluid/operators/elementwise_sub_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/elementwise_sub_op.h" +#include "paddle/fluid/operators/elementwise_sub_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/elementwise_sub_op.h b/paddle/fluid/operators/elementwise_sub_op.h index 6a88c5f6b4c..af2d497b9ae 100644 --- a/paddle/fluid/operators/elementwise_sub_op.h +++ b/paddle/fluid/operators/elementwise_sub_op.h @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/operators/elementwise_op_function.h" +#include "paddle/fluid/operators/elementwise_op_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/expand_op.cc b/paddle/fluid/operators/expand_op.cc index 043c93654d3..ccb9a94856f 100644 --- a/paddle/fluid/operators/expand_op.cc +++ b/paddle/fluid/operators/expand_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/expand_op.h" +#include "paddle/fluid/operators/expand_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/expand_op.cu b/paddle/fluid/operators/expand_op.cu index 84e8fa567b8..8a9f39708be 100644 --- a/paddle/fluid/operators/expand_op.cu +++ b/paddle/fluid/operators/expand_op.cu @@ -14,7 +14,7 @@ limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/expand_op.h" +#include "paddle/fluid/operators/expand_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/expand_op.h b/paddle/fluid/operators/expand_op.h index a4994cf3a5b..8df1cd34d7d 100644 --- a/paddle/fluid/operators/expand_op.h +++ b/paddle/fluid/operators/expand_op.h @@ -21,9 +21,9 @@ limitations under the License. */ #include #include #include -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" #define MAX_RANK_SUPPORTED 6 diff --git a/paddle/fluid/operators/feed_op.cc b/paddle/fluid/operators/feed_op.cc index 789d01e0022..0b3f5f0d1d0 100644 --- a/paddle/fluid/operators/feed_op.cc +++ b/paddle/fluid/operators/feed_op.cc @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/feed_fetch_type.h" -#include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" +#include "paddle/fluid/framework/feed_fetch_type.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/fetch_op.cc b/paddle/fluid/operators/fetch_op.cc index 7205ee2a879..54e5892016c 100644 --- a/paddle/fluid/operators/fetch_op.cc +++ b/paddle/fluid/operators/fetch_op.cc @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/feed_fetch_type.h" -#include "paddle/framework/op_registry.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/feed_fetch_type.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/fill_constant_batch_size_like_op.cc b/paddle/fluid/operators/fill_constant_batch_size_like_op.cc index c74a5b6ced3..e6992ba371c 100644 --- a/paddle/fluid/operators/fill_constant_batch_size_like_op.cc +++ b/paddle/fluid/operators/fill_constant_batch_size_like_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/fill_constant_batch_size_like_op.h" +#include "paddle/fluid/operators/fill_constant_batch_size_like_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/fill_constant_batch_size_like_op.cu.cc b/paddle/fluid/operators/fill_constant_batch_size_like_op.cu.cc index 608f4b91623..b4f4d2a5030 100644 --- a/paddle/fluid/operators/fill_constant_batch_size_like_op.cu.cc +++ b/paddle/fluid/operators/fill_constant_batch_size_like_op.cu.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/fill_constant_batch_size_like_op.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/operators/fill_constant_batch_size_like_op.h" +#include "paddle/fluid/framework/op_registry.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/fill_constant_batch_size_like_op.h b/paddle/fluid/operators/fill_constant_batch_size_like_op.h index 66da9d0307e..da4a20d99a1 100644 --- a/paddle/fluid/operators/fill_constant_batch_size_like_op.h +++ b/paddle/fluid/operators/fill_constant_batch_size_like_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/fill_constant_op.cc b/paddle/fluid/operators/fill_constant_op.cc index dcd43a30c86..d4bf6406e57 100644 --- a/paddle/fluid/operators/fill_constant_op.cc +++ b/paddle/fluid/operators/fill_constant_op.cc @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/data_type.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/fill_op.cc b/paddle/fluid/operators/fill_op.cc index 4f5a2ed1695..8e318f37cf0 100644 --- a/paddle/fluid/operators/fill_op.cc +++ b/paddle/fluid/operators/fill_op.cc @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/data_type.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/detail/safe_ref.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/detail/safe_ref.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/fill_zeros_like_op.cc b/paddle/fluid/operators/fill_zeros_like_op.cc index b4ae1de8760..958bfb1557d 100644 --- a/paddle/fluid/operators/fill_zeros_like_op.cc +++ b/paddle/fluid/operators/fill_zeros_like_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/fill_zeros_like_op.h" +#include "paddle/fluid/operators/fill_zeros_like_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/fill_zeros_like_op.cu.cc b/paddle/fluid/operators/fill_zeros_like_op.cu.cc index b7048e8f585..07078573d8a 100644 --- a/paddle/fluid/operators/fill_zeros_like_op.cu.cc +++ b/paddle/fluid/operators/fill_zeros_like_op.cu.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/fill_zeros_like_op.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/operators/fill_zeros_like_op.h" +#include "paddle/fluid/framework/op_registry.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/fill_zeros_like_op.h b/paddle/fluid/operators/fill_zeros_like_op.h index 351ecf8b2f1..141c3809e9a 100644 --- a/paddle/fluid/operators/fill_zeros_like_op.h +++ b/paddle/fluid/operators/fill_zeros_like_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/ftrl_op.cc b/paddle/fluid/operators/ftrl_op.cc index d00700823d4..e72a173751e 100644 --- a/paddle/fluid/operators/ftrl_op.cc +++ b/paddle/fluid/operators/ftrl_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/ftrl_op.h" +#include "paddle/fluid/operators/ftrl_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/ftrl_op.cu b/paddle/fluid/operators/ftrl_op.cu index abbbe7adbe6..dbdfcb927e0 100644 --- a/paddle/fluid/operators/ftrl_op.cu +++ b/paddle/fluid/operators/ftrl_op.cu @@ -12,7 +12,7 @@ CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/ftrl_op.h" +#include "paddle/fluid/operators/ftrl_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/ftrl_op.h b/paddle/fluid/operators/ftrl_op.h index 4eea04cd8d6..0a9405fcef1 100644 --- a/paddle/fluid/operators/ftrl_op.h +++ b/paddle/fluid/operators/ftrl_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/gather.cu.h b/paddle/fluid/operators/gather.cu.h index 9840c066f05..af5898e29ec 100644 --- a/paddle/fluid/operators/gather.cu.h +++ b/paddle/fluid/operators/gather.cu.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/tensor.h" -#include "paddle/platform/place.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/platform/place.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/gather.h b/paddle/fluid/operators/gather.h index 052db49cb3c..287732eeb6e 100644 --- a/paddle/fluid/operators/gather.h +++ b/paddle/fluid/operators/gather.h @@ -16,10 +16,10 @@ limitations under the License. */ #include #include -#include "paddle/framework/ddim.h" -#include "paddle/framework/eigen.h" -#include "paddle/framework/tensor.h" -#include "paddle/platform/place.h" +#include "paddle/fluid/framework/ddim.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/platform/place.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/gather_op.cc b/paddle/fluid/operators/gather_op.cc index 597fdad0794..dceeb71ee35 100644 --- a/paddle/fluid/operators/gather_op.cc +++ b/paddle/fluid/operators/gather_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/gather_op.h" -#include "paddle/framework/ddim.h" +#include "paddle/fluid/operators/gather_op.h" +#include "paddle/fluid/framework/ddim.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/gather_op.cu b/paddle/fluid/operators/gather_op.cu index eec2415e1de..484f4232624 100644 --- a/paddle/fluid/operators/gather_op.cu +++ b/paddle/fluid/operators/gather_op.cu @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "gather.cu.h" -#include "paddle/framework/eigen.h" -#include "paddle/operators/gather_op.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/operators/gather_op.h" #include "scatter.cu.h" namespace paddle { diff --git a/paddle/fluid/operators/gather_op.h b/paddle/fluid/operators/gather_op.h index 1a1ba0c41ae..7ba4a31c81b 100644 --- a/paddle/fluid/operators/gather_op.h +++ b/paddle/fluid/operators/gather_op.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once #include "gather.h" -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" #include "scatter.h" namespace paddle { diff --git a/paddle/fluid/operators/gather_test.cc b/paddle/fluid/operators/gather_test.cc index cbd86b87961..4d86cf5ce33 100644 --- a/paddle/fluid/operators/gather_test.cc +++ b/paddle/fluid/operators/gather_test.cc @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/gather.h" -#include "paddle/framework/ddim.h" -#include "paddle/framework/tensor.h" -#include "paddle/platform/place.h" +#include "paddle/fluid/operators/gather.h" +#include "paddle/fluid/framework/ddim.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/platform/place.h" #include #include diff --git a/paddle/fluid/operators/gaussian_random_op.cc b/paddle/fluid/operators/gaussian_random_op.cc index 2dca05760ec..b090f875976 100644 --- a/paddle/fluid/operators/gaussian_random_op.cc +++ b/paddle/fluid/operators/gaussian_random_op.cc @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/gaussian_random_op.cu b/paddle/fluid/operators/gaussian_random_op.cu index 8a70db17e17..70d655d4bb2 100644 --- a/paddle/fluid/operators/gaussian_random_op.cu +++ b/paddle/fluid/operators/gaussian_random_op.cu @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include #include -#include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/get_places_op.cc b/paddle/fluid/operators/get_places_op.cc index 24fafb23074..ba908e472bb 100644 --- a/paddle/fluid/operators/get_places_op.cc +++ b/paddle/fluid/operators/get_places_op.cc @@ -13,11 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/framework/op_registry.h" -#include "paddle/operators/detail/safe_ref.h" -#include "paddle/platform/place.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/detail/safe_ref.h" +#include "paddle/fluid/platform/place.h" #ifdef PADDLE_WITH_CUDA -#include "paddle/platform/gpu_info.h" +#include "paddle/fluid/platform/gpu_info.h" #endif namespace paddle { diff --git a/paddle/fluid/operators/gru_op.cc b/paddle/fluid/operators/gru_op.cc index fb901b63949..1436e55b0e1 100644 --- a/paddle/fluid/operators/gru_op.cc +++ b/paddle/fluid/operators/gru_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/gru_op.h" +#include "paddle/fluid/operators/gru_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/gru_op.cu.cc b/paddle/fluid/operators/gru_op.cu.cc index 9cb0cc42d55..e908d01d292 100644 --- a/paddle/fluid/operators/gru_op.cu.cc +++ b/paddle/fluid/operators/gru_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/gru_op.h" +#include "paddle/fluid/operators/gru_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/gru_op.h b/paddle/fluid/operators/gru_op.h index a08bd4233b0..37f3ae1a837 100644 --- a/paddle/fluid/operators/gru_op.h +++ b/paddle/fluid/operators/gru_op.h @@ -14,13 +14,13 @@ limitations under the License. */ #pragma once -#include "paddle/operators/math/detail/activation_functions.h" -#include "paddle/operators/math/gru_compute.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/sequence2batch.h" +#include "paddle/fluid/operators/math/detail/activation_functions.h" +#include "paddle/fluid/operators/math/gru_compute.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/sequence2batch.h" -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/gru_unit_op.cc b/paddle/fluid/operators/gru_unit_op.cc index c354293be77..21ad3aeb492 100644 --- a/paddle/fluid/operators/gru_unit_op.cc +++ b/paddle/fluid/operators/gru_unit_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/gru_unit_op.h" +#include "paddle/fluid/operators/gru_unit_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/gru_unit_op.cu b/paddle/fluid/operators/gru_unit_op.cu index 95c8c23dada..88b707fd131 100644 --- a/paddle/fluid/operators/gru_unit_op.cu +++ b/paddle/fluid/operators/gru_unit_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/gru_unit_op.h" +#include "paddle/fluid/operators/gru_unit_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/gru_unit_op.h b/paddle/fluid/operators/gru_unit_op.h index a77be46718b..c4031a5a575 100644 --- a/paddle/fluid/operators/gru_unit_op.h +++ b/paddle/fluid/operators/gru_unit_op.h @@ -14,11 +14,11 @@ limitations under the License. */ #pragma once -#include "paddle/operators/activation_op.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/operators/activation_op.h" +#include "paddle/fluid/operators/math/math_function.h" -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/hinge_loss_op.cc b/paddle/fluid/operators/hinge_loss_op.cc index 19d2e9dc56f..f644c22c9f1 100644 --- a/paddle/fluid/operators/hinge_loss_op.cc +++ b/paddle/fluid/operators/hinge_loss_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/hinge_loss_op.h" +#include "paddle/fluid/operators/hinge_loss_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/hinge_loss_op.cu b/paddle/fluid/operators/hinge_loss_op.cu index b9cfbc50c49..cb53a9b7f4a 100644 --- a/paddle/fluid/operators/hinge_loss_op.cu +++ b/paddle/fluid/operators/hinge_loss_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/hinge_loss_op.h" +#include "paddle/fluid/operators/hinge_loss_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/hinge_loss_op.h b/paddle/fluid/operators/hinge_loss_op.h index 91369cfb8a5..1e924d236ea 100644 --- a/paddle/fluid/operators/hinge_loss_op.h +++ b/paddle/fluid/operators/hinge_loss_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/huber_loss_op.cc b/paddle/fluid/operators/huber_loss_op.cc index 5c92f2c7b2d..dc1f609dcfa 100644 --- a/paddle/fluid/operators/huber_loss_op.cc +++ b/paddle/fluid/operators/huber_loss_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/huber_loss_op.h" +#include "paddle/fluid/operators/huber_loss_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/huber_loss_op.cu b/paddle/fluid/operators/huber_loss_op.cu index ccc83a16ba2..ef5120c69d4 100644 --- a/paddle/fluid/operators/huber_loss_op.cu +++ b/paddle/fluid/operators/huber_loss_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/huber_loss_op.h" +#include "paddle/fluid/operators/huber_loss_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/huber_loss_op.h b/paddle/fluid/operators/huber_loss_op.h index 4dd20e8b080..caca89fcf63 100644 --- a/paddle/fluid/operators/huber_loss_op.h +++ b/paddle/fluid/operators/huber_loss_op.h @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/platform/hostdevice.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/im2sequence_op.cc b/paddle/fluid/operators/im2sequence_op.cc index 31baaedf691..936e5fe49ed 100644 --- a/paddle/fluid/operators/im2sequence_op.cc +++ b/paddle/fluid/operators/im2sequence_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/im2sequence_op.h" +#include "paddle/fluid/operators/im2sequence_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/im2sequence_op.cu b/paddle/fluid/operators/im2sequence_op.cu index 9db7529112f..1e7bf463122 100644 --- a/paddle/fluid/operators/im2sequence_op.cu +++ b/paddle/fluid/operators/im2sequence_op.cu @@ -13,7 +13,7 @@ limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/im2sequence_op.h" +#include "paddle/fluid/operators/im2sequence_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/im2sequence_op.h b/paddle/fluid/operators/im2sequence_op.h index f33aec71a92..59456f0ea29 100644 --- a/paddle/fluid/operators/im2sequence_op.h +++ b/paddle/fluid/operators/im2sequence_op.h @@ -14,11 +14,11 @@ #pragma once -#include "paddle/framework/data_layout.h" -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/im2col.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/framework/data_layout.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/im2col.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/increment_op.cc b/paddle/fluid/operators/increment_op.cc index e0b80cc4e74..3d488067b25 100644 --- a/paddle/fluid/operators/increment_op.cc +++ b/paddle/fluid/operators/increment_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/iou_similarity_op.cc b/paddle/fluid/operators/iou_similarity_op.cc index c520b28b83e..c2e452cdfaa 100755 --- a/paddle/fluid/operators/iou_similarity_op.cc +++ b/paddle/fluid/operators/iou_similarity_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/iou_similarity_op.h" +#include "paddle/fluid/operators/iou_similarity_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/iou_similarity_op.cu b/paddle/fluid/operators/iou_similarity_op.cu index fa505262461..f8df1f4aa4c 100755 --- a/paddle/fluid/operators/iou_similarity_op.cu +++ b/paddle/fluid/operators/iou_similarity_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/iou_similarity_op.h" +#include "paddle/fluid/operators/iou_similarity_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/iou_similarity_op.h b/paddle/fluid/operators/iou_similarity_op.h index e36177069d7..2fb1b5f7070 100644 --- a/paddle/fluid/operators/iou_similarity_op.h +++ b/paddle/fluid/operators/iou_similarity_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/platform/for_range.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/for_range.h" template inline HOSTDEVICE T IOUSimilarity(T xmin1, T ymin1, T xmax1, T ymax1, T xmin2, diff --git a/paddle/fluid/operators/is_empty_op.cc b/paddle/fluid/operators/is_empty_op.cc index 492ae48845a..ea424018d66 100644 --- a/paddle/fluid/operators/is_empty_op.cc +++ b/paddle/fluid/operators/is_empty_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/l1_norm_op.cc b/paddle/fluid/operators/l1_norm_op.cc index 1a5d6e19263..974ee404f83 100644 --- a/paddle/fluid/operators/l1_norm_op.cc +++ b/paddle/fluid/operators/l1_norm_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/l1_norm_op.h" +#include "paddle/fluid/operators/l1_norm_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/l1_norm_op.cu b/paddle/fluid/operators/l1_norm_op.cu index 7ecc774670a..5e9e864a346 100644 --- a/paddle/fluid/operators/l1_norm_op.cu +++ b/paddle/fluid/operators/l1_norm_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/l1_norm_op.h" +#include "paddle/fluid/operators/l1_norm_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/l1_norm_op.h b/paddle/fluid/operators/l1_norm_op.h index 086d42705dc..7ddf2ac6a90 100644 --- a/paddle/fluid/operators/l1_norm_op.h +++ b/paddle/fluid/operators/l1_norm_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/label_smooth_op.cc b/paddle/fluid/operators/label_smooth_op.cc index c89082f44b3..c018965beef 100644 --- a/paddle/fluid/operators/label_smooth_op.cc +++ b/paddle/fluid/operators/label_smooth_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/label_smooth_op.h" +#include "paddle/fluid/operators/label_smooth_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/label_smooth_op.cu b/paddle/fluid/operators/label_smooth_op.cu index 5a0cec12bc5..4a40a4e9ec8 100644 --- a/paddle/fluid/operators/label_smooth_op.cu +++ b/paddle/fluid/operators/label_smooth_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/label_smooth_op.h" +#include "paddle/fluid/operators/label_smooth_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/label_smooth_op.h b/paddle/fluid/operators/label_smooth_op.h index 87bc9f793e3..15752377f66 100644 --- a/paddle/fluid/operators/label_smooth_op.h +++ b/paddle/fluid/operators/label_smooth_op.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/layer_norm_op.cc b/paddle/fluid/operators/layer_norm_op.cc index d9b774272cb..60e37ed01b3 100644 --- a/paddle/fluid/operators/layer_norm_op.cc +++ b/paddle/fluid/operators/layer_norm_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/layer_norm_op.h" +#include "paddle/fluid/operators/layer_norm_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/layer_norm_op.cu b/paddle/fluid/operators/layer_norm_op.cu index 77d13b216f0..aa54fd54155 100644 --- a/paddle/fluid/operators/layer_norm_op.cu +++ b/paddle/fluid/operators/layer_norm_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/layer_norm_op.h" +#include "paddle/fluid/operators/layer_norm_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/layer_norm_op.h b/paddle/fluid/operators/layer_norm_op.h index 3c436b89263..60c0b07add1 100644 --- a/paddle/fluid/operators/layer_norm_op.h +++ b/paddle/fluid/operators/layer_norm_op.h @@ -13,11 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" -#include "paddle/operators/elementwise_op_function.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/operators/elementwise_op_function.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/linear_chain_crf_op.cc b/paddle/fluid/operators/linear_chain_crf_op.cc index e24bf622b7f..3e1dfa49487 100644 --- a/paddle/fluid/operators/linear_chain_crf_op.cc +++ b/paddle/fluid/operators/linear_chain_crf_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/linear_chain_crf_op.h" +#include "paddle/fluid/operators/linear_chain_crf_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/linear_chain_crf_op.cu b/paddle/fluid/operators/linear_chain_crf_op.cu index da612510b4d..6e04e76eebc 100644 --- a/paddle/fluid/operators/linear_chain_crf_op.cu +++ b/paddle/fluid/operators/linear_chain_crf_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/linear_chain_crf_op.h" +#include "paddle/fluid/operators/linear_chain_crf_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/linear_chain_crf_op.h b/paddle/fluid/operators/linear_chain_crf_op.h index afc197a1c38..15b64c09bf3 100644 --- a/paddle/fluid/operators/linear_chain_crf_op.h +++ b/paddle/fluid/operators/linear_chain_crf_op.h @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index 099f6b23736..a72708d9baa 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -19,14 +19,14 @@ limitations under the License. */ #include -#include "paddle/framework/executor.h" -#include "paddle/framework/framework.pb.h" -#include "paddle/framework/lod_tensor.h" -#include "paddle/framework/op_registry.h" -#include "paddle/framework/proto_desc.h" -#include "paddle/operators/detail/grpc_server.h" -#include "paddle/operators/detail/sendrecvop_utils.h" -#include "paddle/operators/detail/simple_block_queue.h" +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/framework.pb.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/proto_desc.h" +#include "paddle/fluid/operators/detail/grpc_server.h" +#include "paddle/fluid/operators/detail/sendrecvop_utils.h" +#include "paddle/fluid/operators/detail/simple_block_queue.h" #include "paddle/string/printf.h" namespace paddle { diff --git a/paddle/fluid/operators/load_combine_op.cc b/paddle/fluid/operators/load_combine_op.cc index f4be793d7bf..1948063d886 100644 --- a/paddle/fluid/operators/load_combine_op.cc +++ b/paddle/fluid/operators/load_combine_op.cc @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/framework/op_registry.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/load_op.cc b/paddle/fluid/operators/load_op.cc index f886b423ac7..c9bf5d72b23 100644 --- a/paddle/fluid/operators/load_op.cc +++ b/paddle/fluid/operators/load_op.cc @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/framework/op_registry.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lod_array_length_op.cc b/paddle/fluid/operators/lod_array_length_op.cc index d2c52745cfd..f11f5a89f5a 100644 --- a/paddle/fluid/operators/lod_array_length_op.cc +++ b/paddle/fluid/operators/lod_array_length_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/lod_tensor_array.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/lod_tensor_array.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lod_rank_table_op.cc b/paddle/fluid/operators/lod_rank_table_op.cc index 692b9bf3710..0b9426a9f8f 100644 --- a/paddle/fluid/operators/lod_rank_table_op.cc +++ b/paddle/fluid/operators/lod_rank_table_op.cc @@ -11,8 +11,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/lod_rank_table.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/lod_rank_table.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lod_reset_op.cc b/paddle/fluid/operators/lod_reset_op.cc index 3d7b15edcfe..55ae71c1815 100644 --- a/paddle/fluid/operators/lod_reset_op.cc +++ b/paddle/fluid/operators/lod_reset_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/lod_reset_op.h" +#include "paddle/fluid/operators/lod_reset_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lod_reset_op.cu b/paddle/fluid/operators/lod_reset_op.cu index 910866ea633..8bfc8bd3bf0 100644 --- a/paddle/fluid/operators/lod_reset_op.cu +++ b/paddle/fluid/operators/lod_reset_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/lod_reset_op.h" +#include "paddle/fluid/operators/lod_reset_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/lod_reset_op.h b/paddle/fluid/operators/lod_reset_op.h index c1bbba7a83a..a10efee0bdd 100644 --- a/paddle/fluid/operators/lod_reset_op.h +++ b/paddle/fluid/operators/lod_reset_op.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lod_tensor_to_array_op.cc b/paddle/fluid/operators/lod_tensor_to_array_op.cc index 685a807a8ac..edc32bcec14 100644 --- a/paddle/fluid/operators/lod_tensor_to_array_op.cc +++ b/paddle/fluid/operators/lod_tensor_to_array_op.cc @@ -11,11 +11,11 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/lod_rank_table.h" -#include "paddle/framework/lod_tensor_array.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/detail/safe_ref.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/lod_rank_table.h" +#include "paddle/fluid/framework/lod_tensor_array.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/detail/safe_ref.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/log_loss_op.cc b/paddle/fluid/operators/log_loss_op.cc index f714945354c..6c5cd295681 100644 --- a/paddle/fluid/operators/log_loss_op.cc +++ b/paddle/fluid/operators/log_loss_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/log_loss_op.h" +#include "paddle/fluid/operators/log_loss_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/log_loss_op.cu b/paddle/fluid/operators/log_loss_op.cu index be283e47005..c164a6d0405 100644 --- a/paddle/fluid/operators/log_loss_op.cu +++ b/paddle/fluid/operators/log_loss_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/log_loss_op.h" +#include "paddle/fluid/operators/log_loss_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/log_loss_op.h b/paddle/fluid/operators/log_loss_op.h index 743eddb7400..67fac7cfe55 100644 --- a/paddle/fluid/operators/log_loss_op.h +++ b/paddle/fluid/operators/log_loss_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/logical_op.cc b/paddle/fluid/operators/logical_op.cc index fedd325cf4f..ff49895df19 100644 --- a/paddle/fluid/operators/logical_op.cc +++ b/paddle/fluid/operators/logical_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/logical_op.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/operators/logical_op.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/logical_op.cu b/paddle/fluid/operators/logical_op.cu index 87f2287b8f1..2b174440612 100644 --- a/paddle/fluid/operators/logical_op.cu +++ b/paddle/fluid/operators/logical_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/logical_op.h" +#include "paddle/fluid/operators/logical_op.h" REGISTER_BINARY_LOGICAL_KERNEL(logical_and, CUDA, paddle::operators::LogicalAndFunctor); diff --git a/paddle/fluid/operators/logical_op.h b/paddle/fluid/operators/logical_op.h index 41385768560..f6d5866c2c8 100644 --- a/paddle/fluid/operators/logical_op.h +++ b/paddle/fluid/operators/logical_op.h @@ -15,8 +15,8 @@ limitations under the License. */ #pragma once #include #include -#include "paddle/framework/op_registry.h" -#include "paddle/platform/transform.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/transform.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lookup_table_op.cc b/paddle/fluid/operators/lookup_table_op.cc index 2405852f53d..2c555f1a3fa 100644 --- a/paddle/fluid/operators/lookup_table_op.cc +++ b/paddle/fluid/operators/lookup_table_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/lookup_table_op.h" -#include "paddle/framework/var_type_inference.h" +#include "paddle/fluid/operators/lookup_table_op.h" +#include "paddle/fluid/framework/var_type_inference.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lookup_table_op.cu b/paddle/fluid/operators/lookup_table_op.cu index 9684b6d4612..801adba5a44 100644 --- a/paddle/fluid/operators/lookup_table_op.cu +++ b/paddle/fluid/operators/lookup_table_op.cu @@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/lookup_table_op.h" -#include "paddle/platform/assert.h" -#include "paddle/platform/cuda_helper.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/lookup_table_op.h" +#include "paddle/fluid/platform/assert.h" +#include "paddle/fluid/platform/cuda_helper.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lookup_table_op.h b/paddle/fluid/operators/lookup_table_op.h index 0842c422f7b..d264496882a 100644 --- a/paddle/fluid/operators/lookup_table_op.h +++ b/paddle/fluid/operators/lookup_table_op.h @@ -14,10 +14,10 @@ limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/lod_tensor.h" -#include "paddle/framework/op_registry.h" -#include "paddle/framework/selected_rows.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/selected_rows.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lrn_op.cc b/paddle/fluid/operators/lrn_op.cc index 95673ba19e7..c84507f231c 100644 --- a/paddle/fluid/operators/lrn_op.cc +++ b/paddle/fluid/operators/lrn_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/lrn_op.h" +#include "paddle/fluid/operators/lrn_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lrn_op.cu b/paddle/fluid/operators/lrn_op.cu index eb9d66a73df..03112bf3e03 100644 --- a/paddle/fluid/operators/lrn_op.cu +++ b/paddle/fluid/operators/lrn_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/lrn_op.h" +#include "paddle/fluid/operators/lrn_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lrn_op.h b/paddle/fluid/operators/lrn_op.h index ef3a2883a88..b7b78b45914 100644 --- a/paddle/fluid/operators/lrn_op.h +++ b/paddle/fluid/operators/lrn_op.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lstm_op.cc b/paddle/fluid/operators/lstm_op.cc index afb095a04e7..d1f1b5f235f 100644 --- a/paddle/fluid/operators/lstm_op.cc +++ b/paddle/fluid/operators/lstm_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/lstm_op.h" +#include "paddle/fluid/operators/lstm_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lstm_op.cu.cc b/paddle/fluid/operators/lstm_op.cu.cc index cfcc1fc92a0..679d02b1f9a 100644 --- a/paddle/fluid/operators/lstm_op.cu.cc +++ b/paddle/fluid/operators/lstm_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/lstm_op.h" +#include "paddle/fluid/operators/lstm_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/lstm_op.h b/paddle/fluid/operators/lstm_op.h index 72e95b75e29..1c48495533c 100644 --- a/paddle/fluid/operators/lstm_op.h +++ b/paddle/fluid/operators/lstm_op.h @@ -13,11 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/detail/activation_functions.h" -#include "paddle/operators/math/lstm_compute.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/sequence2batch.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/detail/activation_functions.h" +#include "paddle/fluid/operators/math/lstm_compute.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/sequence2batch.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lstm_unit_op.cc b/paddle/fluid/operators/lstm_unit_op.cc index c2d2c439825..3d33d47e0c3 100644 --- a/paddle/fluid/operators/lstm_unit_op.cc +++ b/paddle/fluid/operators/lstm_unit_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/lstm_unit_op.h" +#include "paddle/fluid/operators/lstm_unit_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lstm_unit_op.cu b/paddle/fluid/operators/lstm_unit_op.cu index 5ee5ddd280f..12ebffca37f 100644 --- a/paddle/fluid/operators/lstm_unit_op.cu +++ b/paddle/fluid/operators/lstm_unit_op.cu @@ -16,10 +16,10 @@ limitations under the License. */ https://github.com/caffe2/caffe2/blob/master/caffe2/operators/lstm_unit_op_gpu.cu */ -#include "paddle/framework/op_registry.h" -#include "paddle/operators/cross_entropy_op.h" -#include "paddle/platform/assert.h" -#include "paddle/platform/hostdevice.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/cross_entropy_op.h" +#include "paddle/fluid/platform/assert.h" +#include "paddle/fluid/platform/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lstm_unit_op.h b/paddle/fluid/operators/lstm_unit_op.h index fa8d141bcb6..9f2370fe690 100644 --- a/paddle/fluid/operators/lstm_unit_op.h +++ b/paddle/fluid/operators/lstm_unit_op.h @@ -18,7 +18,7 @@ https://github.com/caffe2/caffe2/blob/master/caffe2/operators/lstm_unit_op.h #pragma once #include "glog/logging.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lstmp_op.cc b/paddle/fluid/operators/lstmp_op.cc index c96b30ba353..2d30edf5c3c 100644 --- a/paddle/fluid/operators/lstmp_op.cc +++ b/paddle/fluid/operators/lstmp_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/lstmp_op.h" +#include "paddle/fluid/operators/lstmp_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/lstmp_op.cu b/paddle/fluid/operators/lstmp_op.cu index 7fcbcfecc87..bcefb94c75b 100644 --- a/paddle/fluid/operators/lstmp_op.cu +++ b/paddle/fluid/operators/lstmp_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/lstmp_op.h" +#include "paddle/fluid/operators/lstmp_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/lstmp_op.h b/paddle/fluid/operators/lstmp_op.h index e064a155dfa..22ef4721860 100644 --- a/paddle/fluid/operators/lstmp_op.h +++ b/paddle/fluid/operators/lstmp_op.h @@ -13,14 +13,14 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/operators/activation_op.h" -#include "paddle/operators/math/detail/activation_functions.h" -#include "paddle/operators/math/lstm_compute.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/sequence2batch.h" - -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/operators/activation_op.h" +#include "paddle/fluid/operators/math/detail/activation_functions.h" +#include "paddle/fluid/operators/math/lstm_compute.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/sequence2batch.h" + +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/margin_rank_loss_op.cc b/paddle/fluid/operators/margin_rank_loss_op.cc index e0df3077742..fc31befb205 100644 --- a/paddle/fluid/operators/margin_rank_loss_op.cc +++ b/paddle/fluid/operators/margin_rank_loss_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/margin_rank_loss_op.h" +#include "paddle/fluid/operators/margin_rank_loss_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/margin_rank_loss_op.cu b/paddle/fluid/operators/margin_rank_loss_op.cu index 798c3ed182b..ca4593a48d6 100644 --- a/paddle/fluid/operators/margin_rank_loss_op.cu +++ b/paddle/fluid/operators/margin_rank_loss_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/margin_rank_loss_op.h" +#include "paddle/fluid/operators/margin_rank_loss_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/margin_rank_loss_op.h b/paddle/fluid/operators/margin_rank_loss_op.h index 7438e881e1c..934a5da0f80 100644 --- a/paddle/fluid/operators/margin_rank_loss_op.h +++ b/paddle/fluid/operators/margin_rank_loss_op.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/context_project.cc b/paddle/fluid/operators/math/context_project.cc index 980dd90df87..b73d976d1b3 100644 --- a/paddle/fluid/operators/math/context_project.cc +++ b/paddle/fluid/operators/math/context_project.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/context_project.h" +#include "paddle/fluid/operators/math/context_project.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/context_project.cu b/paddle/fluid/operators/math/context_project.cu index 934e3df6459..bbd36a6e8f5 100644 --- a/paddle/fluid/operators/math/context_project.cu +++ b/paddle/fluid/operators/math/context_project.cu @@ -14,7 +14,7 @@ limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/math/context_project.h" +#include "paddle/fluid/operators/math/context_project.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/context_project.h b/paddle/fluid/operators/math/context_project.h index 218de9fb956..2fe593ec3af 100644 --- a/paddle/fluid/operators/math/context_project.h +++ b/paddle/fluid/operators/math/context_project.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once -#include "paddle/framework/lod_tensor.h" -#include "paddle/operators/math/im2col.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/operators/math/im2col.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/cos_sim_functor.cc b/paddle/fluid/operators/math/cos_sim_functor.cc index 6af9f0fcd96..701a9c23c0d 100644 --- a/paddle/fluid/operators/math/cos_sim_functor.cc +++ b/paddle/fluid/operators/math/cos_sim_functor.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/cos_sim_functor.h" +#include "paddle/fluid/operators/math/cos_sim_functor.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/cos_sim_functor.cu b/paddle/fluid/operators/math/cos_sim_functor.cu index 6eb0a4ea4c5..0323680870a 100644 --- a/paddle/fluid/operators/math/cos_sim_functor.cu +++ b/paddle/fluid/operators/math/cos_sim_functor.cu @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/cos_sim_functor.h" -#include "paddle/platform/cuda_helper.h" +#include "paddle/fluid/operators/math/cos_sim_functor.h" +#include "paddle/fluid/platform/cuda_helper.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/cos_sim_functor.h b/paddle/fluid/operators/math/cos_sim_functor.h index aae8ab5b7a9..445d94f975f 100644 --- a/paddle/fluid/operators/math/cos_sim_functor.h +++ b/paddle/fluid/operators/math/cos_sim_functor.h @@ -15,8 +15,8 @@ limitations under the License. */ #pragma once #include #include -#include "paddle/platform/device_context.h" -#include "paddle/platform/hostdevice.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/cross_entropy.cc b/paddle/fluid/operators/math/cross_entropy.cc index d9cb016fb44..76abd03ff8b 100644 --- a/paddle/fluid/operators/math/cross_entropy.cc +++ b/paddle/fluid/operators/math/cross_entropy.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/cross_entropy.h" +#include "paddle/fluid/operators/math/cross_entropy.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/cross_entropy.cu b/paddle/fluid/operators/math/cross_entropy.cu index 16c9e7b28ec..39222c484c2 100644 --- a/paddle/fluid/operators/math/cross_entropy.cu +++ b/paddle/fluid/operators/math/cross_entropy.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/cross_entropy.h" +#include "paddle/fluid/operators/math/cross_entropy.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/cross_entropy.h b/paddle/fluid/operators/math/cross_entropy.h index b3b6d767a8b..2fe216a8053 100644 --- a/paddle/fluid/operators/math/cross_entropy.h +++ b/paddle/fluid/operators/math/cross_entropy.h @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/tensor.h" -#include "paddle/platform/hostdevice.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/platform/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/depthwise_conv.cu b/paddle/fluid/operators/math/depthwise_conv.cu index b212e782083..7b75e593071 100644 --- a/paddle/fluid/operators/math/depthwise_conv.cu +++ b/paddle/fluid/operators/math/depthwise_conv.cu @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/depthwise_conv.h" -#include "paddle/platform/cuda_helper.h" +#include "paddle/fluid/operators/math/depthwise_conv.h" +#include "paddle/fluid/platform/cuda_helper.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/depthwise_conv.h b/paddle/fluid/operators/math/depthwise_conv.h index 4708920bb42..c3081e7a0de 100644 --- a/paddle/fluid/operators/math/depthwise_conv.h +++ b/paddle/fluid/operators/math/depthwise_conv.h @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/tensor.h" -#include "paddle/platform/device_context.h" -#include "paddle/platform/hostdevice.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/detail/activation_functions.h b/paddle/fluid/operators/math/detail/activation_functions.h index 585a0123437..3af7ba790c4 100644 --- a/paddle/fluid/operators/math/detail/activation_functions.h +++ b/paddle/fluid/operators/math/detail/activation_functions.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once #include -#include "paddle/platform/enforce.h" -#include "paddle/platform/hostdevice.h" +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/hostdevice.h" #ifdef __AVX__ #include diff --git a/paddle/fluid/operators/math/detail/avx_functions.cc b/paddle/fluid/operators/math/detail/avx_functions.cc index 921364788cd..838cd30e3d5 100644 --- a/paddle/fluid/operators/math/detail/avx_functions.cc +++ b/paddle/fluid/operators/math/detail/avx_functions.cc @@ -15,7 +15,7 @@ limitations under the License. */ #ifdef __AVX__ #include -#include "paddle/operators/math/detail/activation_functions.h" +#include "paddle/fluid/operators/math/detail/activation_functions.h" // TODO(qingqing) refine this dependence #include "paddle/cuda/src/avx_mathfun.h" diff --git a/paddle/fluid/operators/math/detail/gru_cpu_kernel.h b/paddle/fluid/operators/math/detail/gru_cpu_kernel.h index a61b232f427..75c5c8eb29a 100644 --- a/paddle/fluid/operators/math/detail/gru_cpu_kernel.h +++ b/paddle/fluid/operators/math/detail/gru_cpu_kernel.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once #include -#include "paddle/operators/math/detail/activation_functions.h" -#include "paddle/operators/math/gru_compute.h" +#include "paddle/fluid/operators/math/detail/activation_functions.h" +#include "paddle/fluid/operators/math/gru_compute.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/detail/gru_gpu_kernel.h b/paddle/fluid/operators/math/detail/gru_gpu_kernel.h index 1783d460968..fbf69d4a858 100644 --- a/paddle/fluid/operators/math/detail/gru_gpu_kernel.h +++ b/paddle/fluid/operators/math/detail/gru_gpu_kernel.h @@ -14,10 +14,10 @@ limitations under the License. */ #pragma once #include -#include "paddle/operators/math/detail/activation_functions.h" -#include "paddle/operators/math/gru_compute.h" -#include "paddle/platform/cuda_helper.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/operators/math/detail/activation_functions.h" +#include "paddle/fluid/operators/math/gru_compute.h" +#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/detail/gru_kernel.h b/paddle/fluid/operators/math/detail/gru_kernel.h index 4d8245cb5d0..705787e2ff7 100644 --- a/paddle/fluid/operators/math/detail/gru_kernel.h +++ b/paddle/fluid/operators/math/detail/gru_kernel.h @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/detail/activation_functions.h" -#include "paddle/platform/hostdevice.h" +#include "paddle/fluid/operators/math/detail/activation_functions.h" +#include "paddle/fluid/platform/hostdevice.h" #include diff --git a/paddle/fluid/operators/math/detail/lstm_cpu_kernel.h b/paddle/fluid/operators/math/detail/lstm_cpu_kernel.h index 42888fcdb0a..bf26509ba17 100644 --- a/paddle/fluid/operators/math/detail/lstm_cpu_kernel.h +++ b/paddle/fluid/operators/math/detail/lstm_cpu_kernel.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once #include -#include "paddle/operators/math/detail/activation_functions.h" -#include "paddle/operators/math/lstm_compute.h" +#include "paddle/fluid/operators/math/detail/activation_functions.h" +#include "paddle/fluid/operators/math/lstm_compute.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h b/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h index e31e657e8b6..7865d0c0ba1 100644 --- a/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h +++ b/paddle/fluid/operators/math/detail/lstm_gpu_kernel.h @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/operators/math/detail/activation_functions.h" -#include "paddle/operators/math/lstm_compute.h" -#include "paddle/platform/cuda_helper.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/operators/math/detail/activation_functions.h" +#include "paddle/fluid/operators/math/lstm_compute.h" +#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/device_context.h" #include diff --git a/paddle/fluid/operators/math/detail/lstm_kernel.h b/paddle/fluid/operators/math/detail/lstm_kernel.h index fed8f9c4ca4..0679cc62ba9 100644 --- a/paddle/fluid/operators/math/detail/lstm_kernel.h +++ b/paddle/fluid/operators/math/detail/lstm_kernel.h @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/detail/activation_functions.h" -#include "paddle/platform/hostdevice.h" +#include "paddle/fluid/operators/math/detail/activation_functions.h" +#include "paddle/fluid/platform/hostdevice.h" #include diff --git a/paddle/fluid/operators/math/detection_util.h b/paddle/fluid/operators/math/detection_util.h index e3a3ef2badc..13e5d406c11 100644 --- a/paddle/fluid/operators/math/detection_util.h +++ b/paddle/fluid/operators/math/detection_util.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include -#include "paddle/framework/selected_rows.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/gru_compute.cc b/paddle/fluid/operators/math/gru_compute.cc index 101ab859624..10031804167 100644 --- a/paddle/fluid/operators/math/gru_compute.cc +++ b/paddle/fluid/operators/math/gru_compute.cc @@ -9,10 +9,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/gru_compute.h" -#include "paddle/operators/math/detail/gru_cpu_kernel.h" -#include "paddle/operators/math/detail/gru_kernel.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/operators/math/gru_compute.h" +#include "paddle/fluid/operators/math/detail/gru_cpu_kernel.h" +#include "paddle/fluid/operators/math/detail/gru_kernel.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/gru_compute.cu b/paddle/fluid/operators/math/gru_compute.cu index d5a0e630ea0..0d5d5d7a743 100644 --- a/paddle/fluid/operators/math/gru_compute.cu +++ b/paddle/fluid/operators/math/gru_compute.cu @@ -9,10 +9,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/detail/gru_gpu_kernel.h" -#include "paddle/operators/math/detail/gru_kernel.h" -#include "paddle/operators/math/gru_compute.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/operators/math/detail/gru_gpu_kernel.h" +#include "paddle/fluid/operators/math/detail/gru_kernel.h" +#include "paddle/fluid/operators/math/gru_compute.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/gru_compute.h b/paddle/fluid/operators/math/gru_compute.h index bf69147b506..93e19cf5578 100644 --- a/paddle/fluid/operators/math/gru_compute.h +++ b/paddle/fluid/operators/math/gru_compute.h @@ -11,9 +11,9 @@ limitations under the License. */ #pragma once -#include "paddle/operators/math/detail/activation_functions.h" -#include "paddle/platform/device_context.h" -#include "paddle/platform/enforce.h" +#include "paddle/fluid/operators/math/detail/activation_functions.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/im2col.cc b/paddle/fluid/operators/math/im2col.cc index c2633b2e164..c298b00bb4c 100644 --- a/paddle/fluid/operators/math/im2col.cc +++ b/paddle/fluid/operators/math/im2col.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/im2col.h" +#include "paddle/fluid/operators/math/im2col.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/im2col.cu b/paddle/fluid/operators/math/im2col.cu index a88e837b030..c26343aacf5 100644 --- a/paddle/fluid/operators/math/im2col.cu +++ b/paddle/fluid/operators/math/im2col.cu @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/im2col.h" -#include "paddle/platform/cuda_helper.h" +#include "paddle/fluid/operators/math/im2col.h" +#include "paddle/fluid/platform/cuda_helper.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/im2col.h b/paddle/fluid/operators/math/im2col.h index 38f2c9fe0ad..525c0f5dda1 100644 --- a/paddle/fluid/operators/math/im2col.h +++ b/paddle/fluid/operators/math/im2col.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once -#include "paddle/framework/tensor.h" -#include "paddle/framework/tensor_util.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/framework/tensor_util.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/im2col_test.cc b/paddle/fluid/operators/math/im2col_test.cc index 1ba24325ffe..59d6a84b892 100644 --- a/paddle/fluid/operators/math/im2col_test.cc +++ b/paddle/fluid/operators/math/im2col_test.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/im2col.h" +#include "paddle/fluid/operators/math/im2col.h" #include template diff --git a/paddle/fluid/operators/math/lstm_compute.cc b/paddle/fluid/operators/math/lstm_compute.cc index d453102ecef..09eb89ec58d 100644 --- a/paddle/fluid/operators/math/lstm_compute.cc +++ b/paddle/fluid/operators/math/lstm_compute.cc @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/lstm_compute.h" -#include "paddle/operators/math/detail/lstm_cpu_kernel.h" -#include "paddle/operators/math/detail/lstm_kernel.h" +#include "paddle/fluid/operators/math/lstm_compute.h" +#include "paddle/fluid/operators/math/detail/lstm_cpu_kernel.h" +#include "paddle/fluid/operators/math/detail/lstm_kernel.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/lstm_compute.cu b/paddle/fluid/operators/math/lstm_compute.cu index 82065d699f7..adedee28bd0 100644 --- a/paddle/fluid/operators/math/lstm_compute.cu +++ b/paddle/fluid/operators/math/lstm_compute.cu @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/detail/lstm_gpu_kernel.h" -#include "paddle/operators/math/detail/lstm_kernel.h" -#include "paddle/operators/math/lstm_compute.h" +#include "paddle/fluid/operators/math/detail/lstm_gpu_kernel.h" +#include "paddle/fluid/operators/math/detail/lstm_kernel.h" +#include "paddle/fluid/operators/math/lstm_compute.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/lstm_compute.h b/paddle/fluid/operators/math/lstm_compute.h index e1ad6b64d20..8610e96cf1a 100644 --- a/paddle/fluid/operators/math/lstm_compute.h +++ b/paddle/fluid/operators/math/lstm_compute.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once -#include "paddle/operators/math/detail/activation_functions.h" -#include "paddle/platform/device_context.h" -#include "paddle/platform/enforce.h" +#include "paddle/fluid/operators/math/detail/activation_functions.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/math_function.cc b/paddle/fluid/operators/math/math_function.cc index ce0a5f6cff8..2636dbddde6 100644 --- a/paddle/fluid/operators/math/math_function.cc +++ b/paddle/fluid/operators/math/math_function.cc @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/math_function.h" -#include "paddle/framework/data_type.h" -#include "paddle/operators/math/math_function_impl.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/operators/math/math_function_impl.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/math_function.cu b/paddle/fluid/operators/math/math_function.cu index c0a107470a4..5764da71c84 100644 --- a/paddle/fluid/operators/math/math_function.cu +++ b/paddle/fluid/operators/math/math_function.cu @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/framework/data_type.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/math_function_impl.h" +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/math_function_impl.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/math_function.h b/paddle/fluid/operators/math/math_function.h index cb14d1e5746..84916af1f8e 100644 --- a/paddle/fluid/operators/math/math_function.h +++ b/paddle/fluid/operators/math/math_function.h @@ -47,11 +47,11 @@ int LAPACKE_dgetri(int matrix_layout, int n, double* a, int lda, #include -#include "paddle/framework/eigen.h" -#include "paddle/framework/tensor.h" -#include "paddle/framework/tensor_util.h" -#include "paddle/platform/device_context.h" -#include "paddle/platform/enforce.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/framework/tensor_util.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/math_function_impl.h b/paddle/fluid/operators/math/math_function_impl.h index af4127788af..a55ed6c58ba 100644 --- a/paddle/fluid/operators/math/math_function_impl.h +++ b/paddle/fluid/operators/math/math_function_impl.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/data_type.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/math_function_test.cc b/paddle/fluid/operators/math/math_function_test.cc index c9f322b92e5..6cd8e8b35ab 100644 --- a/paddle/fluid/operators/math/math_function_test.cc +++ b/paddle/fluid/operators/math/math_function_test.cc @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/operators/math/math_function.h" #include "gtest/gtest.h" TEST(math_function, gemm_notrans_cblas) { diff --git a/paddle/fluid/operators/math/math_function_test.cu b/paddle/fluid/operators/math/math_function_test.cu index 6f16d667924..2ef53a82099 100644 --- a/paddle/fluid/operators/math/math_function_test.cu +++ b/paddle/fluid/operators/math/math_function_test.cu @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. #include "gtest/gtest.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/operators/math/math_function.h" TEST(math_function, notrans_mul_trans) { paddle::framework::Tensor input1; diff --git a/paddle/fluid/operators/math/matmul.h b/paddle/fluid/operators/math/matmul.h index ae7f1fe9be5..50f79979d99 100644 --- a/paddle/fluid/operators/math/matmul.h +++ b/paddle/fluid/operators/math/matmul.h @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/maxouting.cc b/paddle/fluid/operators/math/maxouting.cc index fea86675f75..746328cd45a 100644 --- a/paddle/fluid/operators/math/maxouting.cc +++ b/paddle/fluid/operators/math/maxouting.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/maxouting.h" +#include "paddle/fluid/operators/math/maxouting.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/maxouting.cu b/paddle/fluid/operators/math/maxouting.cu index 6056ad251c1..68e5dfc3c55 100644 --- a/paddle/fluid/operators/math/maxouting.cu +++ b/paddle/fluid/operators/math/maxouting.cu @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/maxouting.h" -#include "paddle/platform/cuda_helper.h" +#include "paddle/fluid/operators/math/maxouting.h" +#include "paddle/fluid/platform/cuda_helper.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/maxouting.h b/paddle/fluid/operators/math/maxouting.h index 68f4743db07..0e81790f0ab 100644 --- a/paddle/fluid/operators/math/maxouting.h +++ b/paddle/fluid/operators/math/maxouting.h @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/tensor.h" -#include "paddle/platform/device_context.h" -#include "paddle/platform/hostdevice.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/pooling.cc b/paddle/fluid/operators/math/pooling.cc index 150de6fd59e..9adb142f14e 100644 --- a/paddle/fluid/operators/math/pooling.cc +++ b/paddle/fluid/operators/math/pooling.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/pooling.h" +#include "paddle/fluid/operators/math/pooling.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/pooling.cu b/paddle/fluid/operators/math/pooling.cu index 0243cf8316a..c65632de906 100644 --- a/paddle/fluid/operators/math/pooling.cu +++ b/paddle/fluid/operators/math/pooling.cu @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/pooling.h" -#include "paddle/platform/cuda_helper.h" +#include "paddle/fluid/operators/math/pooling.h" +#include "paddle/fluid/platform/cuda_helper.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/pooling.h b/paddle/fluid/operators/math/pooling.h index 2759f06cb6a..1195038f6a0 100644 --- a/paddle/fluid/operators/math/pooling.h +++ b/paddle/fluid/operators/math/pooling.h @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/tensor.h" -#include "paddle/platform/device_context.h" -#include "paddle/platform/hostdevice.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/selected_rows_functor.cc b/paddle/fluid/operators/math/selected_rows_functor.cc index 4e15d01a307..01aa37ab35c 100644 --- a/paddle/fluid/operators/math/selected_rows_functor.cc +++ b/paddle/fluid/operators/math/selected_rows_functor.cc @@ -14,8 +14,8 @@ limitations under the License. */ #include -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/selected_rows_functor.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/selected_rows_functor.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/selected_rows_functor.cu b/paddle/fluid/operators/math/selected_rows_functor.cu index 54a41a67d06..ee3b5d52058 100644 --- a/paddle/fluid/operators/math/selected_rows_functor.cu +++ b/paddle/fluid/operators/math/selected_rows_functor.cu @@ -14,9 +14,9 @@ limitations under the License. */ #include -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/selected_rows_functor.h" -#include "paddle/platform/cuda_helper.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/selected_rows_functor.h" +#include "paddle/fluid/platform/cuda_helper.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/selected_rows_functor.h b/paddle/fluid/operators/math/selected_rows_functor.h index 09d4631905f..510a9ed8be6 100644 --- a/paddle/fluid/operators/math/selected_rows_functor.h +++ b/paddle/fluid/operators/math/selected_rows_functor.h @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/selected_rows.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/platform/device_context.h" #define INLINE_FOR2(sizei, sizej) \ for (int64_t i = 0; i < sizei; i++) \ diff --git a/paddle/fluid/operators/math/selected_rows_functor_test.cc b/paddle/fluid/operators/math/selected_rows_functor_test.cc index 8c74cab0a1e..db6b41cd520 100644 --- a/paddle/fluid/operators/math/selected_rows_functor_test.cc +++ b/paddle/fluid/operators/math/selected_rows_functor_test.cc @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/selected_rows_functor.h" +#include "paddle/fluid/operators/math/selected_rows_functor.h" #include "gtest/gtest.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/operators/math/math_function.h" TEST(selected_rows_functor, cpu_add) { using namespace paddle::framework; diff --git a/paddle/fluid/operators/math/selected_rows_functor_test.cu b/paddle/fluid/operators/math/selected_rows_functor_test.cu index 38808e13014..b3c4bc9244f 100644 --- a/paddle/fluid/operators/math/selected_rows_functor_test.cu +++ b/paddle/fluid/operators/math/selected_rows_functor_test.cu @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "gtest/gtest.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/selected_rows_functor.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/selected_rows_functor.h" TEST(selected_rows_functor, gpu_add) { using namespace paddle::framework; diff --git a/paddle/fluid/operators/math/sequence2batch.cc b/paddle/fluid/operators/math/sequence2batch.cc index 17abce1c2f8..0485070fd9b 100644 --- a/paddle/fluid/operators/math/sequence2batch.cc +++ b/paddle/fluid/operators/math/sequence2batch.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/sequence2batch.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/operators/math/sequence2batch.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/sequence2batch.cu b/paddle/fluid/operators/math/sequence2batch.cu index eaed2c30a80..450be80ea2f 100644 --- a/paddle/fluid/operators/math/sequence2batch.cu +++ b/paddle/fluid/operators/math/sequence2batch.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/math/sequence2batch.h" +#include "paddle/fluid/operators/math/sequence2batch.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/sequence2batch.h b/paddle/fluid/operators/math/sequence2batch.h index 6db0427b417..00bd25ab613 100644 --- a/paddle/fluid/operators/math/sequence2batch.h +++ b/paddle/fluid/operators/math/sequence2batch.h @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/lod_tensor.h" -#include "paddle/framework/tensor.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/sequence_padding.cc b/paddle/fluid/operators/math/sequence_padding.cc index 2e69aa47eb8..ad8cd825676 100644 --- a/paddle/fluid/operators/math/sequence_padding.cc +++ b/paddle/fluid/operators/math/sequence_padding.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/sequence_padding.h" +#include "paddle/fluid/operators/math/sequence_padding.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/sequence_padding.cu b/paddle/fluid/operators/math/sequence_padding.cu index c2bd56448aa..c1a39057784 100644 --- a/paddle/fluid/operators/math/sequence_padding.cu +++ b/paddle/fluid/operators/math/sequence_padding.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/sequence_padding.h" +#include "paddle/fluid/operators/math/sequence_padding.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/sequence_padding.h b/paddle/fluid/operators/math/sequence_padding.h index 8f586c5eb46..0d84f9dcb38 100644 --- a/paddle/fluid/operators/math/sequence_padding.h +++ b/paddle/fluid/operators/math/sequence_padding.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/framework/lod_tensor.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/sequence_padding_test.cc b/paddle/fluid/operators/math/sequence_padding_test.cc index 3e504f4a15c..147cb37da2b 100644 --- a/paddle/fluid/operators/math/sequence_padding_test.cc +++ b/paddle/fluid/operators/math/sequence_padding_test.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/sequence_padding.h" +#include "paddle/fluid/operators/math/sequence_padding.h" #include template diff --git a/paddle/fluid/operators/math/sequence_pooling.cc b/paddle/fluid/operators/math/sequence_pooling.cc index 8fb92b1a130..b3b87ec93e1 100644 --- a/paddle/fluid/operators/math/sequence_pooling.cc +++ b/paddle/fluid/operators/math/sequence_pooling.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/sequence_pooling.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/operators/math/sequence_pooling.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/sequence_pooling.cu b/paddle/fluid/operators/math/sequence_pooling.cu index c69bd3da7e7..c4267e992a7 100644 --- a/paddle/fluid/operators/math/sequence_pooling.cu +++ b/paddle/fluid/operators/math/sequence_pooling.cu @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/sequence_pooling.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/sequence_pooling.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/sequence_pooling.h b/paddle/fluid/operators/math/sequence_pooling.h index 13ffb2ebef3..9ba9cad74b5 100644 --- a/paddle/fluid/operators/math/sequence_pooling.h +++ b/paddle/fluid/operators/math/sequence_pooling.h @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/lod_tensor.h" -#include "paddle/framework/tensor.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/sequence_scale.cc b/paddle/fluid/operators/math/sequence_scale.cc index 7e439e9a2ce..427689b9718 100644 --- a/paddle/fluid/operators/math/sequence_scale.cc +++ b/paddle/fluid/operators/math/sequence_scale.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/sequence_scale.h" +#include "paddle/fluid/operators/math/sequence_scale.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/sequence_scale.cu b/paddle/fluid/operators/math/sequence_scale.cu index 7cb9242db93..7c081ed7f45 100644 --- a/paddle/fluid/operators/math/sequence_scale.cu +++ b/paddle/fluid/operators/math/sequence_scale.cu @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/sequence_scale.h" -#include "paddle/platform/cuda_helper.h" +#include "paddle/fluid/operators/math/sequence_scale.h" +#include "paddle/fluid/platform/cuda_helper.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/sequence_scale.h b/paddle/fluid/operators/math/sequence_scale.h index ecd9a57c3f4..e8e07fd3156 100644 --- a/paddle/fluid/operators/math/sequence_scale.h +++ b/paddle/fluid/operators/math/sequence_scale.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/framework/lod_tensor.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/softmax.cc b/paddle/fluid/operators/math/softmax.cc index 72f10f35f4e..eab31ec567d 100644 --- a/paddle/fluid/operators/math/softmax.cc +++ b/paddle/fluid/operators/math/softmax.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/softmax.h" -#include "paddle/operators/math/softmax_impl.h" +#include "paddle/fluid/operators/math/softmax.h" +#include "paddle/fluid/operators/math/softmax_impl.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/softmax.cu b/paddle/fluid/operators/math/softmax.cu index 9e73f6a371c..733d7eeee6d 100644 --- a/paddle/fluid/operators/math/softmax.cu +++ b/paddle/fluid/operators/math/softmax.cu @@ -14,8 +14,8 @@ limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/math/softmax.h" -#include "paddle/operators/math/softmax_impl.h" +#include "paddle/fluid/operators/math/softmax.h" +#include "paddle/fluid/operators/math/softmax_impl.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/softmax.h b/paddle/fluid/operators/math/softmax.h index 471f44d340c..b7d67d5f12d 100644 --- a/paddle/fluid/operators/math/softmax.h +++ b/paddle/fluid/operators/math/softmax.h @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/tensor.h" +#include "paddle/fluid/framework/tensor.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/softmax_impl.h b/paddle/fluid/operators/math/softmax_impl.h index 82f597ff792..f7c61cb647e 100644 --- a/paddle/fluid/operators/math/softmax_impl.h +++ b/paddle/fluid/operators/math/softmax_impl.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/tensor.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/tensor.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/unpooling.cc b/paddle/fluid/operators/math/unpooling.cc index ecd3a647e00..e02bc02e002 100644 --- a/paddle/fluid/operators/math/unpooling.cc +++ b/paddle/fluid/operators/math/unpooling.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/unpooling.h" +#include "paddle/fluid/operators/math/unpooling.h" namespace paddle { namespace operators { namespace math { diff --git a/paddle/fluid/operators/math/unpooling.cu b/paddle/fluid/operators/math/unpooling.cu index ecbde0f6a79..2e74270fdf1 100644 --- a/paddle/fluid/operators/math/unpooling.cu +++ b/paddle/fluid/operators/math/unpooling.cu @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/unpooling.h" -#include "paddle/platform/cuda_helper.h" +#include "paddle/fluid/operators/math/unpooling.h" +#include "paddle/fluid/platform/cuda_helper.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/unpooling.h b/paddle/fluid/operators/math/unpooling.h index 0f0ff1371eb..f245ba7ba87 100644 --- a/paddle/fluid/operators/math/unpooling.h +++ b/paddle/fluid/operators/math/unpooling.h @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/tensor.h" +#include "paddle/fluid/framework/tensor.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/vol2col.cc b/paddle/fluid/operators/math/vol2col.cc index d574ed92343..ded0bbc7447 100644 --- a/paddle/fluid/operators/math/vol2col.cc +++ b/paddle/fluid/operators/math/vol2col.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/vol2col.h" +#include "paddle/fluid/operators/math/vol2col.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/vol2col.cu b/paddle/fluid/operators/math/vol2col.cu index b029442fe48..35ef24c7f5f 100644 --- a/paddle/fluid/operators/math/vol2col.cu +++ b/paddle/fluid/operators/math/vol2col.cu @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/vol2col.h" -#include "paddle/platform/cuda_helper.h" +#include "paddle/fluid/operators/math/vol2col.h" +#include "paddle/fluid/platform/cuda_helper.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/vol2col.h b/paddle/fluid/operators/math/vol2col.h index dcd80370e85..3ce38b2d11f 100644 --- a/paddle/fluid/operators/math/vol2col.h +++ b/paddle/fluid/operators/math/vol2col.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once -#include "paddle/framework/tensor.h" -#include "paddle/framework/tensor_util.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/framework/tensor_util.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/vol2col_test.cc b/paddle/fluid/operators/math/vol2col_test.cc index 7a308ca8140..af0a900f80e 100644 --- a/paddle/fluid/operators/math/vol2col_test.cc +++ b/paddle/fluid/operators/math/vol2col_test.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/vol2col.h" +#include "paddle/fluid/operators/math/vol2col.h" #include #include diff --git a/paddle/fluid/operators/matmul_op.cc b/paddle/fluid/operators/matmul_op.cc index 3336978c8d8..267b0057bf4 100644 --- a/paddle/fluid/operators/matmul_op.cc +++ b/paddle/fluid/operators/matmul_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/matmul_op.h" +#include "paddle/fluid/operators/matmul_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/matmul_op.cu.cc b/paddle/fluid/operators/matmul_op.cu.cc index d28d12164e4..988787f0fe4 100644 --- a/paddle/fluid/operators/matmul_op.cu.cc +++ b/paddle/fluid/operators/matmul_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/matmul_op.h" +#include "paddle/fluid/operators/matmul_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/matmul_op.h b/paddle/fluid/operators/matmul_op.h index fe6a97465f8..f4cae3c91cb 100644 --- a/paddle/fluid/operators/matmul_op.h +++ b/paddle/fluid/operators/matmul_op.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/matmul.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/matmul.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/max_sequence_len_op.cc b/paddle/fluid/operators/max_sequence_len_op.cc index 019150e4914..eff8b927e52 100644 --- a/paddle/fluid/operators/max_sequence_len_op.cc +++ b/paddle/fluid/operators/max_sequence_len_op.cc @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/lod_rank_table.h" -#include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" +#include "paddle/fluid/framework/lod_rank_table.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/maxout_op.cc b/paddle/fluid/operators/maxout_op.cc index 3ee32269417..8ce12cd4c4d 100644 --- a/paddle/fluid/operators/maxout_op.cc +++ b/paddle/fluid/operators/maxout_op.cc @@ -12,7 +12,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "paddle/operators/maxout_op.h" +#include "paddle/fluid/operators/maxout_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/maxout_op.cu.cc b/paddle/fluid/operators/maxout_op.cu.cc index c4a2d676d3a..f3f45c90cde 100644 --- a/paddle/fluid/operators/maxout_op.cu.cc +++ b/paddle/fluid/operators/maxout_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/maxout_op.h" +#include "paddle/fluid/operators/maxout_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/maxout_op.h b/paddle/fluid/operators/maxout_op.h index e8b12552b9f..e5de3e3760b 100644 --- a/paddle/fluid/operators/maxout_op.h +++ b/paddle/fluid/operators/maxout_op.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/maxouting.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/maxouting.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/mean_op.cc b/paddle/fluid/operators/mean_op.cc index 411f4d14efb..1043820345a 100644 --- a/paddle/fluid/operators/mean_op.cc +++ b/paddle/fluid/operators/mean_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/mean_op.h" +#include "paddle/fluid/operators/mean_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/mean_op.cu b/paddle/fluid/operators/mean_op.cu index 212d4481138..ccf2248760a 100644 --- a/paddle/fluid/operators/mean_op.cu +++ b/paddle/fluid/operators/mean_op.cu @@ -14,7 +14,7 @@ limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/mean_op.h" +#include "paddle/fluid/operators/mean_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/mean_op.h b/paddle/fluid/operators/mean_op.h index 351b3459597..ae162287da6 100644 --- a/paddle/fluid/operators/mean_op.h +++ b/paddle/fluid/operators/mean_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/merge_lod_tensor_op.cc b/paddle/fluid/operators/merge_lod_tensor_op.cc index 87644d316d4..255f5533409 100644 --- a/paddle/fluid/operators/merge_lod_tensor_op.cc +++ b/paddle/fluid/operators/merge_lod_tensor_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/op_registry.h" -#include "paddle/memory/memcpy.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/memory/memcpy.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/mine_hard_examples_op.cc b/paddle/fluid/operators/mine_hard_examples_op.cc index 051cc24706d..73a6c0b6793 100644 --- a/paddle/fluid/operators/mine_hard_examples_op.cc +++ b/paddle/fluid/operators/mine_hard_examples_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/minus_op.cc b/paddle/fluid/operators/minus_op.cc index 3d7742dd4bc..8a35d668ccf 100644 --- a/paddle/fluid/operators/minus_op.cc +++ b/paddle/fluid/operators/minus_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/minus_op.h" -#include "paddle/operators/net_op.h" +#include "paddle/fluid/operators/minus_op.h" +#include "paddle/fluid/operators/net_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/minus_op.cu b/paddle/fluid/operators/minus_op.cu index 80cd9f7c168..ce0b1fdc041 100644 --- a/paddle/fluid/operators/minus_op.cu +++ b/paddle/fluid/operators/minus_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/minus_op.h" +#include "paddle/fluid/operators/minus_op.h" REGISTER_OP_CUDA_KERNEL( minus, diff --git a/paddle/fluid/operators/minus_op.h b/paddle/fluid/operators/minus_op.h index 20760b8cd5b..dc94cbbeca2 100644 --- a/paddle/fluid/operators/minus_op.h +++ b/paddle/fluid/operators/minus_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/modified_huber_loss_op.cc b/paddle/fluid/operators/modified_huber_loss_op.cc index f5d69071a86..f2d16531658 100644 --- a/paddle/fluid/operators/modified_huber_loss_op.cc +++ b/paddle/fluid/operators/modified_huber_loss_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/modified_huber_loss_op.h" +#include "paddle/fluid/operators/modified_huber_loss_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/modified_huber_loss_op.cu b/paddle/fluid/operators/modified_huber_loss_op.cu index 3d2a5562e8c..69ac2b1ed54 100644 --- a/paddle/fluid/operators/modified_huber_loss_op.cu +++ b/paddle/fluid/operators/modified_huber_loss_op.cu @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #include #include -#include "paddle/framework/op_registry.h" -#include "paddle/operators/modified_huber_loss_op.h" -#include "paddle/platform/hostdevice.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/modified_huber_loss_op.h" +#include "paddle/fluid/platform/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/modified_huber_loss_op.h b/paddle/fluid/operators/modified_huber_loss_op.h index 6ce86feee57..a470a45e13b 100644 --- a/paddle/fluid/operators/modified_huber_loss_op.h +++ b/paddle/fluid/operators/modified_huber_loss_op.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/platform/hostdevice.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/momentum_op.cc b/paddle/fluid/operators/momentum_op.cc index 15b8b807767..a3950ac99da 100644 --- a/paddle/fluid/operators/momentum_op.cc +++ b/paddle/fluid/operators/momentum_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/momentum_op.h" +#include "paddle/fluid/operators/momentum_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/momentum_op.cu b/paddle/fluid/operators/momentum_op.cu index 2b9314162e6..28a14cd4b21 100644 --- a/paddle/fluid/operators/momentum_op.cu +++ b/paddle/fluid/operators/momentum_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/momentum_op.h b/paddle/fluid/operators/momentum_op.h index da69532ea58..fdab86b24ee 100644 --- a/paddle/fluid/operators/momentum_op.h +++ b/paddle/fluid/operators/momentum_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/mul_op.cc b/paddle/fluid/operators/mul_op.cc index c923e988a55..c9375d8ea12 100644 --- a/paddle/fluid/operators/mul_op.cc +++ b/paddle/fluid/operators/mul_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/mul_op.h" +#include "paddle/fluid/operators/mul_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/mul_op.cu.cc b/paddle/fluid/operators/mul_op.cu.cc index 43de9a71949..6f605fd84fb 100644 --- a/paddle/fluid/operators/mul_op.cu.cc +++ b/paddle/fluid/operators/mul_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/mul_op.h" +#include "paddle/fluid/operators/mul_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/mul_op.h b/paddle/fluid/operators/mul_op.h index 1fb0569b49c..745989f07f3 100644 --- a/paddle/fluid/operators/mul_op.h +++ b/paddle/fluid/operators/mul_op.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/operators/math/math_function.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/multiclass_nms_op.cc b/paddle/fluid/operators/multiclass_nms_op.cc index 41b9335fb8f..b2934f69cc9 100644 --- a/paddle/fluid/operators/multiclass_nms_op.cc +++ b/paddle/fluid/operators/multiclass_nms_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/multiplex_op.cc b/paddle/fluid/operators/multiplex_op.cc index d275fa5cbbf..f89b00376ba 100644 --- a/paddle/fluid/operators/multiplex_op.cc +++ b/paddle/fluid/operators/multiplex_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/multiplex_op.h" +#include "paddle/fluid/operators/multiplex_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/multiplex_op.cu b/paddle/fluid/operators/multiplex_op.cu index 546e6e7a24d..3ef7ef1dfcd 100644 --- a/paddle/fluid/operators/multiplex_op.cu +++ b/paddle/fluid/operators/multiplex_op.cu @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/op_registry.h" -#include "paddle/operators/multiplex_op.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/multiplex_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/multiplex_op.h b/paddle/fluid/operators/multiplex_op.h index ef66be5556e..682117cb1b4 100644 --- a/paddle/fluid/operators/multiplex_op.h +++ b/paddle/fluid/operators/multiplex_op.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/memory/memcpy.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/memory/memcpy.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/nccl/nccl_gpu_common.cc b/paddle/fluid/operators/nccl/nccl_gpu_common.cc index 1602a3d9b54..2a8ce932ec5 100644 --- a/paddle/fluid/operators/nccl/nccl_gpu_common.cc +++ b/paddle/fluid/operators/nccl/nccl_gpu_common.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/nccl/nccl_gpu_common.h" -#include "paddle/platform/gpu_info.h" +#include "paddle/fluid/operators/nccl/nccl_gpu_common.h" +#include "paddle/fluid/platform/gpu_info.h" namespace paddle { namespace platform {} // namespace platform diff --git a/paddle/fluid/operators/nccl/nccl_gpu_common.h b/paddle/fluid/operators/nccl/nccl_gpu_common.h index 5173996f202..6e78613239e 100644 --- a/paddle/fluid/operators/nccl/nccl_gpu_common.h +++ b/paddle/fluid/operators/nccl/nccl_gpu_common.h @@ -22,10 +22,10 @@ limitations under the License. */ #include #include -#include "paddle/platform/device_context.h" -#include "paddle/platform/dynload/nccl.h" -#include "paddle/platform/enforce.h" -#include "paddle/platform/macros.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/dynload/nccl.h" +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/macros.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/operators/nccl_op.cc b/paddle/fluid/operators/nccl_op.cc index 9d51153b063..52420ceba0d 100644 --- a/paddle/fluid/operators/nccl_op.cc +++ b/paddle/fluid/operators/nccl_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/op_registry.h" -#include "paddle/operators/nccl/nccl_gpu_common.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/nccl/nccl_gpu_common.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/nccl_op.cu.cc b/paddle/fluid/operators/nccl_op.cu.cc index 1b986a13650..333aed2903e 100644 --- a/paddle/fluid/operators/nccl_op.cu.cc +++ b/paddle/fluid/operators/nccl_op.cu.cc @@ -11,9 +11,9 @@ limitations under the License. */ #include -#include "paddle/framework/lod_tensor.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/nccl/nccl_gpu_common.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/nccl/nccl_gpu_common.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/nccl_op_test.cu.cc b/paddle/fluid/operators/nccl_op_test.cu.cc index 827a6253477..212ed2f9b63 100644 --- a/paddle/fluid/operators/nccl_op_test.cu.cc +++ b/paddle/fluid/operators/nccl_op_test.cu.cc @@ -21,17 +21,17 @@ limitations under the License. */ #include #include -#include "paddle/framework/block_desc.h" -#include "paddle/framework/init.h" -#include "paddle/framework/op_desc.h" -#include "paddle/framework/op_registry.h" -#include "paddle/framework/program_desc.h" -#include "paddle/framework/var_desc.h" -#include "paddle/operators/nccl/nccl_gpu_common.h" -#include "paddle/platform/device_context.h" -#include "paddle/platform/enforce.h" -#include "paddle/platform/gpu_info.h" -#include "paddle/platform/place.h" +#include "paddle/fluid/framework/block_desc.h" +#include "paddle/fluid/framework/init.h" +#include "paddle/fluid/framework/op_desc.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/framework/var_desc.h" +#include "paddle/fluid/operators/nccl/nccl_gpu_common.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/place.h" USE_NO_KERNEL_OP(ncclInit); USE_CUDA_ONLY_OP(ncclAllReduce); diff --git a/paddle/fluid/operators/nce_op.cc b/paddle/fluid/operators/nce_op.cc index 994ddf717e7..0841313a104 100644 --- a/paddle/fluid/operators/nce_op.cc +++ b/paddle/fluid/operators/nce_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/nce_op.h" +#include "paddle/fluid/operators/nce_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/nce_op.h b/paddle/fluid/operators/nce_op.h index 86fa13a649c..624c2d9bbd3 100644 --- a/paddle/fluid/operators/nce_op.h +++ b/paddle/fluid/operators/nce_op.h @@ -16,8 +16,8 @@ limitations under the License. */ #include #include -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" #include "unsupported/Eigen/CXX11/Tensor" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/net_op.cc b/paddle/fluid/operators/net_op.cc index 000e029840c..c0ca5873adc 100644 --- a/paddle/fluid/operators/net_op.cc +++ b/paddle/fluid/operators/net_op.cc @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/operators/net_op.h" +#include "paddle/fluid/operators/net_op.h" #include -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/net_op.h b/paddle/fluid/operators/net_op.h index b24042f5ef5..14e5909851c 100644 --- a/paddle/fluid/operators/net_op.h +++ b/paddle/fluid/operators/net_op.h @@ -15,8 +15,8 @@ limitations under the License. */ #pragma once #include -#include "paddle/framework/framework.pb.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/framework.pb.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/net_op_test.cc b/paddle/fluid/operators/net_op_test.cc index 9358f29f62f..cc20be0c817 100644 --- a/paddle/fluid/operators/net_op_test.cc +++ b/paddle/fluid/operators/net_op_test.cc @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/operators/net_op.h" +#include "paddle/fluid/operators/net_op.h" #include diff --git a/paddle/fluid/operators/norm_op.cc b/paddle/fluid/operators/norm_op.cc index 0eeafcaae0a..ee85b1a90a8 100644 --- a/paddle/fluid/operators/norm_op.cc +++ b/paddle/fluid/operators/norm_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/norm_op.h" +#include "paddle/fluid/operators/norm_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/norm_op.cu b/paddle/fluid/operators/norm_op.cu index 2941c89b931..438bb3b86e7 100644 --- a/paddle/fluid/operators/norm_op.cu +++ b/paddle/fluid/operators/norm_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/norm_op.h" +#include "paddle/fluid/operators/norm_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/norm_op.h b/paddle/fluid/operators/norm_op.h index 5759d6f1f07..db74c9b02a7 100644 --- a/paddle/fluid/operators/norm_op.h +++ b/paddle/fluid/operators/norm_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/one_hot_op.cc b/paddle/fluid/operators/one_hot_op.cc index e78b7468de4..2c3a60da729 100644 --- a/paddle/fluid/operators/one_hot_op.cc +++ b/paddle/fluid/operators/one_hot_op.cc @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/operators/one_hot_op.h" -#include "paddle/framework/framework.pb.h" +#include "paddle/fluid/operators/one_hot_op.h" +#include "paddle/fluid/framework/framework.pb.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/one_hot_op.cu b/paddle/fluid/operators/one_hot_op.cu index 16f6d9433ea..6a8061edaab 100644 --- a/paddle/fluid/operators/one_hot_op.cu +++ b/paddle/fluid/operators/one_hot_op.cu @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/operators/one_hot_op.h" -#include "paddle/platform/cuda_helper.h" -#include "paddle/platform/gpu_info.h" +#include "paddle/fluid/operators/one_hot_op.h" +#include "paddle/fluid/platform/cuda_helper.h" +#include "paddle/fluid/platform/gpu_info.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/one_hot_op.h b/paddle/fluid/operators/one_hot_op.h index 12031ede2c3..ddac6edd0ec 100644 --- a/paddle/fluid/operators/one_hot_op.h +++ b/paddle/fluid/operators/one_hot_op.h @@ -13,8 +13,8 @@ // limitations under the License. #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/pad_op.cc b/paddle/fluid/operators/pad_op.cc index 90c53bd1773..4b021fde7cb 100644 --- a/paddle/fluid/operators/pad_op.cc +++ b/paddle/fluid/operators/pad_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/pad_op.h" +#include "paddle/fluid/operators/pad_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/pad_op.cu b/paddle/fluid/operators/pad_op.cu index 433b5f1112a..203c3144037 100644 --- a/paddle/fluid/operators/pad_op.cu +++ b/paddle/fluid/operators/pad_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/pad_op.h" +#include "paddle/fluid/operators/pad_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/pad_op.h b/paddle/fluid/operators/pad_op.h index fdf91a57766..244d8f9b6cf 100644 --- a/paddle/fluid/operators/pad_op.h +++ b/paddle/fluid/operators/pad_op.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/parallel_do_op.cc b/paddle/fluid/operators/parallel_do_op.cc index edb9de82509..e25df924799 100644 --- a/paddle/fluid/operators/parallel_do_op.cc +++ b/paddle/fluid/operators/parallel_do_op.cc @@ -14,10 +14,10 @@ limitations under the License. */ #include -#include "paddle/framework/executor.h" -#include "paddle/framework/op_registry.h" -#include "paddle/framework/threadpool.h" -#include "paddle/operators/detail/safe_ref.h" +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/threadpool.h" +#include "paddle/fluid/operators/detail/safe_ref.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/pool_cudnn_op.cu.cc b/paddle/fluid/operators/pool_cudnn_op.cu.cc index 446fb0819d9..75984b7721c 100644 --- a/paddle/fluid/operators/pool_cudnn_op.cu.cc +++ b/paddle/fluid/operators/pool_cudnn_op.cu.cc @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/op_registry.h" -#include "paddle/operators/pool_op.h" -#include "paddle/platform/cudnn_helper.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/pool_op.h" +#include "paddle/fluid/platform/cudnn_helper.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/pool_op.cc b/paddle/fluid/operators/pool_op.cc index b97333bb1a1..9dd33eefc5f 100644 --- a/paddle/fluid/operators/pool_op.cc +++ b/paddle/fluid/operators/pool_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/pool_op.h" +#include "paddle/fluid/operators/pool_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/pool_op.cu.cc b/paddle/fluid/operators/pool_op.cu.cc index 39a9dfbf794..14486c07402 100644 --- a/paddle/fluid/operators/pool_op.cu.cc +++ b/paddle/fluid/operators/pool_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/pool_op.h" +#include "paddle/fluid/operators/pool_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/pool_op.h b/paddle/fluid/operators/pool_op.h index d6ba5e298a4..4cabd634d66 100644 --- a/paddle/fluid/operators/pool_op.h +++ b/paddle/fluid/operators/pool_op.h @@ -14,10 +14,10 @@ limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/pooling.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/pooling.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/pool_with_index_op.cc b/paddle/fluid/operators/pool_with_index_op.cc index 1d31d813af4..ef6d5d867b2 100644 --- a/paddle/fluid/operators/pool_with_index_op.cc +++ b/paddle/fluid/operators/pool_with_index_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/pool_with_index_op.h" +#include "paddle/fluid/operators/pool_with_index_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/pool_with_index_op.cu.cc b/paddle/fluid/operators/pool_with_index_op.cu.cc index 4c9804da639..722a4d1e2a4 100644 --- a/paddle/fluid/operators/pool_with_index_op.cu.cc +++ b/paddle/fluid/operators/pool_with_index_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/pool_with_index_op.h" +#include "paddle/fluid/operators/pool_with_index_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/pool_with_index_op.h b/paddle/fluid/operators/pool_with_index_op.h index 4f4087d1dd3..da7ef9df73a 100644 --- a/paddle/fluid/operators/pool_with_index_op.h +++ b/paddle/fluid/operators/pool_with_index_op.h @@ -14,10 +14,10 @@ limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/pooling.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/pooling.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/positive_negative_pair_op.cc b/paddle/fluid/operators/positive_negative_pair_op.cc index 5aa5167dbb8..d237da25a00 100644 --- a/paddle/fluid/operators/positive_negative_pair_op.cc +++ b/paddle/fluid/operators/positive_negative_pair_op.cc @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/positive_negative_pair_op.h" +#include "paddle/fluid/operators/positive_negative_pair_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/positive_negative_pair_op.h b/paddle/fluid/operators/positive_negative_pair_op.h index 977e59b7d2f..f20f33bbeb1 100644 --- a/paddle/fluid/operators/positive_negative_pair_op.h +++ b/paddle/fluid/operators/positive_negative_pair_op.h @@ -12,8 +12,8 @@ limitations under the License. */ #pragma once #include #include -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" #include "paddle/utils/Logging.h" namespace paddle { diff --git a/paddle/fluid/operators/precision_recall_op.cc b/paddle/fluid/operators/precision_recall_op.cc index f1598d53cae..30d594719c7 100644 --- a/paddle/fluid/operators/precision_recall_op.cc +++ b/paddle/fluid/operators/precision_recall_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/precision_recall_op.h" +#include "paddle/fluid/operators/precision_recall_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/precision_recall_op.h b/paddle/fluid/operators/precision_recall_op.h index c0d55405a36..7dae86b76fc 100644 --- a/paddle/fluid/operators/precision_recall_op.h +++ b/paddle/fluid/operators/precision_recall_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/prelu_op.cc b/paddle/fluid/operators/prelu_op.cc index ddc21a65702..22b970d9712 100644 --- a/paddle/fluid/operators/prelu_op.cc +++ b/paddle/fluid/operators/prelu_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/prelu_op.h" -#include "paddle/operators/net_op.h" +#include "paddle/fluid/operators/prelu_op.h" +#include "paddle/fluid/operators/net_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/prelu_op.cu b/paddle/fluid/operators/prelu_op.cu index 1718bb5cd65..038b09a493c 100644 --- a/paddle/fluid/operators/prelu_op.cu +++ b/paddle/fluid/operators/prelu_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/prelu_op.h" +#include "paddle/fluid/operators/prelu_op.h" REGISTER_OP_CUDA_KERNEL( prelu, diff --git a/paddle/fluid/operators/prelu_op.h b/paddle/fluid/operators/prelu_op.h index 56f9a553ec1..85ad75d4790 100644 --- a/paddle/fluid/operators/prelu_op.h +++ b/paddle/fluid/operators/prelu_op.h @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/platform/transform.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/transform.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/print_op.cc b/paddle/fluid/operators/print_op.cc index 8b233d64c90..3616545309e 100644 --- a/paddle/fluid/operators/print_op.cc +++ b/paddle/fluid/operators/print_op.cc @@ -15,8 +15,8 @@ #include #include -#include "paddle/framework/op_registry.h" -#include "paddle/framework/variable.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/variable.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/prior_box_op.cc b/paddle/fluid/operators/prior_box_op.cc index 1dc4b288559..ed48603e17f 100644 --- a/paddle/fluid/operators/prior_box_op.cc +++ b/paddle/fluid/operators/prior_box_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/prior_box_op.h" +#include "paddle/fluid/operators/prior_box_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/prior_box_op.h b/paddle/fluid/operators/prior_box_op.h index 6b221cb74eb..fd070412334 100644 --- a/paddle/fluid/operators/prior_box_op.h +++ b/paddle/fluid/operators/prior_box_op.h @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/platform/transform.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/platform/transform.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/proximal_adagrad_op.cc b/paddle/fluid/operators/proximal_adagrad_op.cc index b92f46b5bd4..d9e3894c576 100644 --- a/paddle/fluid/operators/proximal_adagrad_op.cc +++ b/paddle/fluid/operators/proximal_adagrad_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/proximal_adagrad_op.h" +#include "paddle/fluid/operators/proximal_adagrad_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/proximal_adagrad_op.cu b/paddle/fluid/operators/proximal_adagrad_op.cu index 42a178f94b9..54c75b3abb8 100644 --- a/paddle/fluid/operators/proximal_adagrad_op.cu +++ b/paddle/fluid/operators/proximal_adagrad_op.cu @@ -12,7 +12,7 @@ CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/proximal_adagrad_op.h" +#include "paddle/fluid/operators/proximal_adagrad_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/proximal_adagrad_op.h b/paddle/fluid/operators/proximal_adagrad_op.h index 523924d80e1..70205a8d11f 100644 --- a/paddle/fluid/operators/proximal_adagrad_op.h +++ b/paddle/fluid/operators/proximal_adagrad_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/proximal_gd_op.cc b/paddle/fluid/operators/proximal_gd_op.cc index 2d3bbdaf320..de7c6843c8b 100644 --- a/paddle/fluid/operators/proximal_gd_op.cc +++ b/paddle/fluid/operators/proximal_gd_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/proximal_gd_op.h" +#include "paddle/fluid/operators/proximal_gd_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/proximal_gd_op.cu b/paddle/fluid/operators/proximal_gd_op.cu index b7dd840d19a..97b672e872c 100644 --- a/paddle/fluid/operators/proximal_gd_op.cu +++ b/paddle/fluid/operators/proximal_gd_op.cu @@ -12,7 +12,7 @@ CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/proximal_gd_op.h" +#include "paddle/fluid/operators/proximal_gd_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/proximal_gd_op.h b/paddle/fluid/operators/proximal_gd_op.h index 64648b3ccaf..8372380f252 100644 --- a/paddle/fluid/operators/proximal_gd_op.h +++ b/paddle/fluid/operators/proximal_gd_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/rank_loss_op.cc b/paddle/fluid/operators/rank_loss_op.cc index f2164a0f805..222ca73d2ac 100644 --- a/paddle/fluid/operators/rank_loss_op.cc +++ b/paddle/fluid/operators/rank_loss_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/rank_loss_op.h" +#include "paddle/fluid/operators/rank_loss_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/rank_loss_op.cu b/paddle/fluid/operators/rank_loss_op.cu index 294b2273834..1b182ced70d 100644 --- a/paddle/fluid/operators/rank_loss_op.cu +++ b/paddle/fluid/operators/rank_loss_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/rank_loss_op.h" +#include "paddle/fluid/operators/rank_loss_op.h" REGISTER_OP_CUDA_KERNEL(rank_loss, paddle::operators::RankLossKernel< diff --git a/paddle/fluid/operators/rank_loss_op.h b/paddle/fluid/operators/rank_loss_op.h index bd0c49ca6e4..08bb2c28218 100644 --- a/paddle/fluid/operators/rank_loss_op.h +++ b/paddle/fluid/operators/rank_loss_op.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/read_op.cc b/paddle/fluid/operators/read_op.cc index 3ae454101f5..4d562c29191 100644 --- a/paddle/fluid/operators/read_op.cc +++ b/paddle/fluid/operators/read_op.cc @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/framework/op_registry.h" -#include "paddle/framework/reader.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/reader.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/recurrent_op.cc b/paddle/fluid/operators/recurrent_op.cc index a136c5b447d..e4b9b8dab9b 100644 --- a/paddle/fluid/operators/recurrent_op.cc +++ b/paddle/fluid/operators/recurrent_op.cc @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/framework/executor.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/recv_op.cc b/paddle/fluid/operators/recv_op.cc index ba71094219f..c093f60ceed 100644 --- a/paddle/fluid/operators/recv_op.cc +++ b/paddle/fluid/operators/recv_op.cc @@ -14,13 +14,13 @@ limitations under the License. */ #include -#include "paddle/framework/data_type.h" -#include "paddle/framework/framework.pb.h" -#include "paddle/framework/lod_tensor.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/framework.pb.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_registry.h" #include -#include "paddle/operators/detail/grpc_client.h" +#include "paddle/fluid/operators/detail/grpc_client.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/reduce_op.cc b/paddle/fluid/operators/reduce_op.cc index 84f24a90959..f4d9d4cc07b 100644 --- a/paddle/fluid/operators/reduce_op.cc +++ b/paddle/fluid/operators/reduce_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/reduce_op.h" +#include "paddle/fluid/operators/reduce_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/reduce_op.cu b/paddle/fluid/operators/reduce_op.cu index 4ed1e051db4..1ca107ebfe9 100644 --- a/paddle/fluid/operators/reduce_op.cu +++ b/paddle/fluid/operators/reduce_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/reduce_op.h" +#include "paddle/fluid/operators/reduce_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/reduce_op.h b/paddle/fluid/operators/reduce_op.h index da5f3977769..a153cf272b5 100644 --- a/paddle/fluid/operators/reduce_op.h +++ b/paddle/fluid/operators/reduce_op.h @@ -15,8 +15,8 @@ limitations under the License. */ #pragma once #include "glog/logging.h" -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc b/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc index 3c304479494..148a65bb4b7 100644 --- a/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc +++ b/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/lod_rank_table.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/detail/safe_ref.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/lod_rank_table.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/detail/safe_ref.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc index b9743a5df10..b4f80cc06ab 100644 --- a/paddle/fluid/operators/reshape_op.cc +++ b/paddle/fluid/operators/reshape_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/reshape_op.h" +#include "paddle/fluid/operators/reshape_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/reshape_op.cu b/paddle/fluid/operators/reshape_op.cu index f487e43b99d..f9ae6da29e5 100644 --- a/paddle/fluid/operators/reshape_op.cu +++ b/paddle/fluid/operators/reshape_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/reshape_op.h" +#include "paddle/fluid/operators/reshape_op.h" REGISTER_OP_CUDA_KERNEL( reshape, diff --git a/paddle/fluid/operators/reshape_op.h b/paddle/fluid/operators/reshape_op.h index d884b03cadb..a17ba7c6194 100644 --- a/paddle/fluid/operators/reshape_op.h +++ b/paddle/fluid/operators/reshape_op.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/rmsprop_op.cc b/paddle/fluid/operators/rmsprop_op.cc index f7c250bf913..06d3ccafefd 100644 --- a/paddle/fluid/operators/rmsprop_op.cc +++ b/paddle/fluid/operators/rmsprop_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/rmsprop_op.h" +#include "paddle/fluid/operators/rmsprop_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/rmsprop_op.cu b/paddle/fluid/operators/rmsprop_op.cu index 0295dc262f0..a909c942791 100644 --- a/paddle/fluid/operators/rmsprop_op.cu +++ b/paddle/fluid/operators/rmsprop_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/rmsprop_op.h" +#include "paddle/fluid/operators/rmsprop_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/rmsprop_op.h b/paddle/fluid/operators/rmsprop_op.h index 16a561835d0..469c102a472 100644 --- a/paddle/fluid/operators/rmsprop_op.h +++ b/paddle/fluid/operators/rmsprop_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/rnn_memory_helper_op.cc b/paddle/fluid/operators/rnn_memory_helper_op.cc index eb55ed6a05b..504456c4b06 100644 --- a/paddle/fluid/operators/rnn_memory_helper_op.cc +++ b/paddle/fluid/operators/rnn_memory_helper_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/roi_pool_op.cc b/paddle/fluid/operators/roi_pool_op.cc index a7351f11c5d..09238f89a77 100644 --- a/paddle/fluid/operators/roi_pool_op.cc +++ b/paddle/fluid/operators/roi_pool_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/roi_pool_op.h" +#include "paddle/fluid/operators/roi_pool_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/roi_pool_op.cu b/paddle/fluid/operators/roi_pool_op.cu index a874befe4d1..0e8fc9ec7a6 100644 --- a/paddle/fluid/operators/roi_pool_op.cu +++ b/paddle/fluid/operators/roi_pool_op.cu @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/roi_pool_op.h" -#include "paddle/platform/cuda_helper.h" +#include "paddle/fluid/operators/roi_pool_op.h" +#include "paddle/fluid/platform/cuda_helper.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/roi_pool_op.h b/paddle/fluid/operators/roi_pool_op.h index 09a9d3d870c..15f3b36fcd1 100644 --- a/paddle/fluid/operators/roi_pool_op.h +++ b/paddle/fluid/operators/roi_pool_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/row_conv_op.cc b/paddle/fluid/operators/row_conv_op.cc index 68f4e353156..92661ea9716 100644 --- a/paddle/fluid/operators/row_conv_op.cc +++ b/paddle/fluid/operators/row_conv_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/row_conv_op.h" -#include "paddle/framework/eigen.h" +#include "paddle/fluid/operators/row_conv_op.h" +#include "paddle/fluid/framework/eigen.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/row_conv_op.cu b/paddle/fluid/operators/row_conv_op.cu index d1a6d119d3d..832072edf81 100644 --- a/paddle/fluid/operators/row_conv_op.cu +++ b/paddle/fluid/operators/row_conv_op.cu @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/row_conv_op.h" -#include "paddle/platform/cuda_helper.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/row_conv_op.h" +#include "paddle/fluid/platform/cuda_helper.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/row_conv_op.h b/paddle/fluid/operators/row_conv_op.h index 10d435ab080..59164b52159 100644 --- a/paddle/fluid/operators/row_conv_op.h +++ b/paddle/fluid/operators/row_conv_op.h @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/save_combine_op.cc b/paddle/fluid/operators/save_combine_op.cc index bffa2908bc4..c23de9073ef 100644 --- a/paddle/fluid/operators/save_combine_op.cc +++ b/paddle/fluid/operators/save_combine_op.cc @@ -17,11 +17,11 @@ limitations under the License. */ #include #include #include -#include "paddle/framework/data_type.h" -#include "paddle/framework/framework.pb.h" -#include "paddle/framework/lod_tensor.h" -#include "paddle/framework/op_registry.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/framework.pb.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/save_load_combine_op_test.cc b/paddle/fluid/operators/save_load_combine_op_test.cc index f3ddc4a6c55..f8325bac6bc 100644 --- a/paddle/fluid/operators/save_load_combine_op_test.cc +++ b/paddle/fluid/operators/save_load_combine_op_test.cc @@ -16,7 +16,7 @@ limitations under the License. */ #include #include #include "gtest/gtest.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/op_registry.h" USE_NO_KERNEL_OP(save_combine); USE_NO_KERNEL_OP(load_combine); diff --git a/paddle/fluid/operators/save_load_op_test.cc b/paddle/fluid/operators/save_load_op_test.cc index d829d5da174..da4573a8ed9 100644 --- a/paddle/fluid/operators/save_load_op_test.cc +++ b/paddle/fluid/operators/save_load_op_test.cc @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "gtest/gtest.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/op_registry.h" USE_NO_KERNEL_OP(save); USE_NO_KERNEL_OP(load); diff --git a/paddle/fluid/operators/save_op.cc b/paddle/fluid/operators/save_op.cc index 4b1cbe88836..483cdfa4c3b 100644 --- a/paddle/fluid/operators/save_op.cc +++ b/paddle/fluid/operators/save_op.cc @@ -17,11 +17,11 @@ limitations under the License. */ #include #include -#include "paddle/framework/data_type.h" -#include "paddle/framework/framework.pb.h" -#include "paddle/framework/lod_tensor.h" -#include "paddle/framework/op_registry.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/framework.pb.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/scale_op.cc b/paddle/fluid/operators/scale_op.cc index c0e614743a8..017fc2c00e4 100644 --- a/paddle/fluid/operators/scale_op.cc +++ b/paddle/fluid/operators/scale_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/scale_op.h" -#include "paddle/operators/net_op.h" +#include "paddle/fluid/operators/scale_op.h" +#include "paddle/fluid/operators/net_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/scale_op.cu b/paddle/fluid/operators/scale_op.cu index 7202c0de707..a9b46077aa0 100644 --- a/paddle/fluid/operators/scale_op.cu +++ b/paddle/fluid/operators/scale_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/scale_op.h" +#include "paddle/fluid/operators/scale_op.h" REGISTER_OP_CUDA_KERNEL( scale, diff --git a/paddle/fluid/operators/scale_op.h b/paddle/fluid/operators/scale_op.h index 395268c2eee..b1c2964ca63 100644 --- a/paddle/fluid/operators/scale_op.h +++ b/paddle/fluid/operators/scale_op.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/scatter.cu.h b/paddle/fluid/operators/scatter.cu.h index 55555300fc3..0f1b9426a74 100644 --- a/paddle/fluid/operators/scatter.cu.h +++ b/paddle/fluid/operators/scatter.cu.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/tensor.h" -#include "paddle/platform/place.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/platform/place.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/scatter.h b/paddle/fluid/operators/scatter.h index c1fb844ebd2..70cae1286ca 100644 --- a/paddle/fluid/operators/scatter.h +++ b/paddle/fluid/operators/scatter.h @@ -15,10 +15,10 @@ limitations under the License. */ #pragma once #include -#include "paddle/framework/ddim.h" -#include "paddle/framework/eigen.h" -#include "paddle/framework/tensor.h" -#include "paddle/platform/place.h" +#include "paddle/fluid/framework/ddim.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/platform/place.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/scatter_op.cc b/paddle/fluid/operators/scatter_op.cc index b6533489063..e35930af534 100644 --- a/paddle/fluid/operators/scatter_op.cc +++ b/paddle/fluid/operators/scatter_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/scatter_op.h" -#include "paddle/framework/ddim.h" +#include "paddle/fluid/operators/scatter_op.h" +#include "paddle/fluid/framework/ddim.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/scatter_op.cu b/paddle/fluid/operators/scatter_op.cu index 0c198d22589..f9eaae33a80 100644 --- a/paddle/fluid/operators/scatter_op.cu +++ b/paddle/fluid/operators/scatter_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "gather.cu.h" -#include "paddle/operators/gather_op.h" +#include "paddle/fluid/operators/gather_op.h" #include "scatter.cu.h" namespace paddle { diff --git a/paddle/fluid/operators/scatter_op.h b/paddle/fluid/operators/scatter_op.h index 1a4f6f99bfe..65d10546328 100644 --- a/paddle/fluid/operators/scatter_op.h +++ b/paddle/fluid/operators/scatter_op.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once #include "gather.h" -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" #include "scatter.h" namespace paddle { diff --git a/paddle/fluid/operators/scatter_test.cc b/paddle/fluid/operators/scatter_test.cc index 00dbdacbfef..8fb5ef96af3 100644 --- a/paddle/fluid/operators/scatter_test.cc +++ b/paddle/fluid/operators/scatter_test.cc @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/scatter.h" -#include "paddle/framework/ddim.h" -#include "paddle/framework/tensor.h" -#include "paddle/platform/place.h" +#include "paddle/fluid/operators/scatter.h" +#include "paddle/fluid/framework/ddim.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/platform/place.h" #include #include diff --git a/paddle/fluid/operators/send_op.cc b/paddle/fluid/operators/send_op.cc index ee0f268b0e4..a8390aa6596 100644 --- a/paddle/fluid/operators/send_op.cc +++ b/paddle/fluid/operators/send_op.cc @@ -14,13 +14,13 @@ limitations under the License. */ #include -#include "paddle/framework/data_type.h" -#include "paddle/framework/framework.pb.h" -#include "paddle/framework/lod_tensor.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/framework.pb.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_registry.h" #include -#include "paddle/operators/detail/grpc_client.h" +#include "paddle/fluid/operators/detail/grpc_client.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/send_recv_op_test.cc b/paddle/fluid/operators/send_recv_op_test.cc index 31527a906d5..716f687044a 100644 --- a/paddle/fluid/operators/send_recv_op_test.cc +++ b/paddle/fluid/operators/send_recv_op_test.cc @@ -17,11 +17,11 @@ limitations under the License. */ #include #include "gtest/gtest.h" -#include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" -#include "paddle/framework/program_desc.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/selected_rows_functor.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/string/printf.h" USE_NO_KERNEL_OP(send); diff --git a/paddle/fluid/operators/sequence_concat_op.cc b/paddle/fluid/operators/sequence_concat_op.cc index 2f0aad2003e..4ddf800d85e 100644 --- a/paddle/fluid/operators/sequence_concat_op.cc +++ b/paddle/fluid/operators/sequence_concat_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/sequence_concat_op.h" +#include "paddle/fluid/operators/sequence_concat_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_concat_op.cu.cc b/paddle/fluid/operators/sequence_concat_op.cu.cc index 144bdb5af63..c5a280ef9e2 100644 --- a/paddle/fluid/operators/sequence_concat_op.cu.cc +++ b/paddle/fluid/operators/sequence_concat_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/sequence_concat_op.h" +#include "paddle/fluid/operators/sequence_concat_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/sequence_concat_op.h b/paddle/fluid/operators/sequence_concat_op.h index 8445224f46a..9121196369f 100644 --- a/paddle/fluid/operators/sequence_concat_op.h +++ b/paddle/fluid/operators/sequence_concat_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/operators/strided_memcpy.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/strided_memcpy.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_conv_op.cc b/paddle/fluid/operators/sequence_conv_op.cc index c5b7c81bd7c..af9938b1806 100644 --- a/paddle/fluid/operators/sequence_conv_op.cc +++ b/paddle/fluid/operators/sequence_conv_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/sequence_conv_op.h" +#include "paddle/fluid/operators/sequence_conv_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_conv_op.cu.cc b/paddle/fluid/operators/sequence_conv_op.cu.cc index 0b8f2c69556..36f9e8da95d 100644 --- a/paddle/fluid/operators/sequence_conv_op.cu.cc +++ b/paddle/fluid/operators/sequence_conv_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/sequence_conv_op.h" +#include "paddle/fluid/operators/sequence_conv_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/sequence_conv_op.h b/paddle/fluid/operators/sequence_conv_op.h index bb584b7bfa5..1c81067fea2 100644 --- a/paddle/fluid/operators/sequence_conv_op.h +++ b/paddle/fluid/operators/sequence_conv_op.h @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/context_project.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/context_project.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_erase_op.cc b/paddle/fluid/operators/sequence_erase_op.cc index aa0c00aa6f7..2e0adf8b190 100644 --- a/paddle/fluid/operators/sequence_erase_op.cc +++ b/paddle/fluid/operators/sequence_erase_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/sequence_erase_op.h" +#include "paddle/fluid/operators/sequence_erase_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_erase_op.cu b/paddle/fluid/operators/sequence_erase_op.cu index 4a7217cfd65..43fc352fe78 100644 --- a/paddle/fluid/operators/sequence_erase_op.cu +++ b/paddle/fluid/operators/sequence_erase_op.cu @@ -14,8 +14,8 @@ limitations under the License. */ #include #include -#include "paddle/operators/sequence_erase_op.h" -#include "paddle/platform/cuda_helper.h" +#include "paddle/fluid/operators/sequence_erase_op.h" +#include "paddle/fluid/platform/cuda_helper.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_erase_op.h b/paddle/fluid/operators/sequence_erase_op.h index cb2d7be009d..e151279c7fc 100644 --- a/paddle/fluid/operators/sequence_erase_op.h +++ b/paddle/fluid/operators/sequence_erase_op.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_expand_op.cc b/paddle/fluid/operators/sequence_expand_op.cc index d34dbd35b6d..4ebce641d28 100644 --- a/paddle/fluid/operators/sequence_expand_op.cc +++ b/paddle/fluid/operators/sequence_expand_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/sequence_expand_op.h" +#include "paddle/fluid/operators/sequence_expand_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_expand_op.cu b/paddle/fluid/operators/sequence_expand_op.cu index 0b9638b2ce6..5ac76d83da6 100644 --- a/paddle/fluid/operators/sequence_expand_op.cu +++ b/paddle/fluid/operators/sequence_expand_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/sequence_expand_op.h" +#include "paddle/fluid/operators/sequence_expand_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/sequence_expand_op.h b/paddle/fluid/operators/sequence_expand_op.h index 6021526eee8..8010627ff6f 100644 --- a/paddle/fluid/operators/sequence_expand_op.h +++ b/paddle/fluid/operators/sequence_expand_op.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/memory/memcpy.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/memory/memcpy.h" #include "unsupported/Eigen/CXX11/Tensor" namespace paddle { diff --git a/paddle/fluid/operators/sequence_pool_op.cc b/paddle/fluid/operators/sequence_pool_op.cc index 549d9620ef2..2cfb336b2e0 100644 --- a/paddle/fluid/operators/sequence_pool_op.cc +++ b/paddle/fluid/operators/sequence_pool_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/sequence_pool_op.h" +#include "paddle/fluid/operators/sequence_pool_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_pool_op.cu b/paddle/fluid/operators/sequence_pool_op.cu index 265f6959352..364769c39bd 100644 --- a/paddle/fluid/operators/sequence_pool_op.cu +++ b/paddle/fluid/operators/sequence_pool_op.cu @@ -14,7 +14,7 @@ limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/sequence_pool_op.h" +#include "paddle/fluid/operators/sequence_pool_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/sequence_pool_op.h b/paddle/fluid/operators/sequence_pool_op.h index 7519aa1d720..7b67e6201eb 100644 --- a/paddle/fluid/operators/sequence_pool_op.h +++ b/paddle/fluid/operators/sequence_pool_op.h @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/sequence_pooling.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/sequence_pooling.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_reshape_op.cc b/paddle/fluid/operators/sequence_reshape_op.cc index d89a46a712c..c4e42d3eeb5 100644 --- a/paddle/fluid/operators/sequence_reshape_op.cc +++ b/paddle/fluid/operators/sequence_reshape_op.cc @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/operators/sequence_reshape_op.h" -#include "paddle/framework/ddim.h" +#include "paddle/fluid/operators/sequence_reshape_op.h" +#include "paddle/fluid/framework/ddim.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_reshape_op.cu b/paddle/fluid/operators/sequence_reshape_op.cu index d9c2f7e9a41..5ca3497396e 100644 --- a/paddle/fluid/operators/sequence_reshape_op.cu +++ b/paddle/fluid/operators/sequence_reshape_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/sequence_reshape_op.h" +#include "paddle/fluid/operators/sequence_reshape_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/sequence_reshape_op.h b/paddle/fluid/operators/sequence_reshape_op.h index aaae7ab2928..7a5d1261da9 100644 --- a/paddle/fluid/operators/sequence_reshape_op.h +++ b/paddle/fluid/operators/sequence_reshape_op.h @@ -13,8 +13,8 @@ // limitations under the License. #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_slice_op.cc b/paddle/fluid/operators/sequence_slice_op.cc index f79106ff0f7..87b8eff6462 100644 --- a/paddle/fluid/operators/sequence_slice_op.cc +++ b/paddle/fluid/operators/sequence_slice_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/sequence_slice_op.h" +#include "paddle/fluid/operators/sequence_slice_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_slice_op.cu b/paddle/fluid/operators/sequence_slice_op.cu index 43a21d619f4..041fabdf9a2 100755 --- a/paddle/fluid/operators/sequence_slice_op.cu +++ b/paddle/fluid/operators/sequence_slice_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/sequence_slice_op.h" +#include "paddle/fluid/operators/sequence_slice_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/sequence_slice_op.h b/paddle/fluid/operators/sequence_slice_op.h index 0e4e4cf65fc..65c36a32aa1 100644 --- a/paddle/fluid/operators/sequence_slice_op.h +++ b/paddle/fluid/operators/sequence_slice_op.h @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/strided_memcpy.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/strided_memcpy.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_softmax_op.cc b/paddle/fluid/operators/sequence_softmax_op.cc index b74766f012e..f966b716207 100644 --- a/paddle/fluid/operators/sequence_softmax_op.cc +++ b/paddle/fluid/operators/sequence_softmax_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/sequence_softmax_op.h" +#include "paddle/fluid/operators/sequence_softmax_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sequence_softmax_op.cu.cc b/paddle/fluid/operators/sequence_softmax_op.cu.cc index 5f65b4daf97..c42dfd75409 100644 --- a/paddle/fluid/operators/sequence_softmax_op.cu.cc +++ b/paddle/fluid/operators/sequence_softmax_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/sequence_softmax_op.h" +#include "paddle/fluid/operators/sequence_softmax_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/sequence_softmax_op.h b/paddle/fluid/operators/sequence_softmax_op.h index e889e88cb34..e6c21c67b33 100644 --- a/paddle/fluid/operators/sequence_softmax_op.h +++ b/paddle/fluid/operators/sequence_softmax_op.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/softmax.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/softmax.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sgd_op.cc b/paddle/fluid/operators/sgd_op.cc index a11c9624ce5..f1e23a62f4e 100644 --- a/paddle/fluid/operators/sgd_op.cc +++ b/paddle/fluid/operators/sgd_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/sgd_op.h" +#include "paddle/fluid/operators/sgd_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sgd_op.cu b/paddle/fluid/operators/sgd_op.cu index d27befe4460..09374e20494 100644 --- a/paddle/fluid/operators/sgd_op.cu +++ b/paddle/fluid/operators/sgd_op.cu @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/sgd_op.h" -#include "paddle/platform/cuda_helper.h" +#include "paddle/fluid/operators/sgd_op.h" +#include "paddle/fluid/platform/cuda_helper.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sgd_op.h b/paddle/fluid/operators/sgd_op.h index a6c544591e1..f1eaaecdb1e 100644 --- a/paddle/fluid/operators/sgd_op.h +++ b/paddle/fluid/operators/sgd_op.h @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/framework/selected_rows.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/selected_rows.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/shrink_rnn_memory_op.cc b/paddle/fluid/operators/shrink_rnn_memory_op.cc index bf870115a4d..df50a324fde 100644 --- a/paddle/fluid/operators/shrink_rnn_memory_op.cc +++ b/paddle/fluid/operators/shrink_rnn_memory_op.cc @@ -11,10 +11,10 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/lod_rank_table.h" -#include "paddle/framework/lod_tensor.h" -#include "paddle/operators/array_operator.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/framework/lod_rank_table.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/operators/array_operator.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc index c526a88a127..3188415a2bd 100644 --- a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc +++ b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/sigmoid_cross_entropy_with_logits_op.h" +#include "paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cu b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cu index 3f393265f48..daa9d3e4fa5 100644 --- a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cu +++ b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/sigmoid_cross_entropy_with_logits_op.h" +#include "paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(sigmoid_cross_entropy_with_logits, diff --git a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.h b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.h index b78bcc436e9..977849f7627 100644 --- a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.h +++ b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sign_op.cc b/paddle/fluid/operators/sign_op.cc index f63eaa4464c..54b962538b8 100644 --- a/paddle/fluid/operators/sign_op.cc +++ b/paddle/fluid/operators/sign_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/sign_op.h" +#include "paddle/fluid/operators/sign_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sign_op.cu b/paddle/fluid/operators/sign_op.cu index f224880cffb..93cdb311eb4 100644 --- a/paddle/fluid/operators/sign_op.cu +++ b/paddle/fluid/operators/sign_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/sign_op.h" +#include "paddle/fluid/operators/sign_op.h" REGISTER_OP_CUDA_KERNEL( sign, diff --git a/paddle/fluid/operators/sign_op.h b/paddle/fluid/operators/sign_op.h index 9fe49ae1a21..1c2ebebee40 100644 --- a/paddle/fluid/operators/sign_op.h +++ b/paddle/fluid/operators/sign_op.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/smooth_l1_loss_op.cc b/paddle/fluid/operators/smooth_l1_loss_op.cc index dcb18d729da..be4c7a56a84 100644 --- a/paddle/fluid/operators/smooth_l1_loss_op.cc +++ b/paddle/fluid/operators/smooth_l1_loss_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/smooth_l1_loss_op.h" +#include "paddle/fluid/operators/smooth_l1_loss_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/smooth_l1_loss_op.cu b/paddle/fluid/operators/smooth_l1_loss_op.cu index 213429bc370..94c0d6cd299 100644 --- a/paddle/fluid/operators/smooth_l1_loss_op.cu +++ b/paddle/fluid/operators/smooth_l1_loss_op.cu @@ -14,7 +14,7 @@ limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/smooth_l1_loss_op.h" +#include "paddle/fluid/operators/smooth_l1_loss_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/smooth_l1_loss_op.h b/paddle/fluid/operators/smooth_l1_loss_op.h index 3facfae116d..325ad824e18 100644 --- a/paddle/fluid/operators/smooth_l1_loss_op.h +++ b/paddle/fluid/operators/smooth_l1_loss_op.h @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/platform/hostdevice.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/hostdevice.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/softmax_op.cc b/paddle/fluid/operators/softmax_op.cc index cef1f1fc99d..1d9462d08b9 100644 --- a/paddle/fluid/operators/softmax_op.cc +++ b/paddle/fluid/operators/softmax_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/softmax_op.h" +#include "paddle/fluid/operators/softmax_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/softmax_op.cu.cc b/paddle/fluid/operators/softmax_op.cu.cc index e7da40f3e82..c53d8a2bc82 100644 --- a/paddle/fluid/operators/softmax_op.cu.cc +++ b/paddle/fluid/operators/softmax_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/softmax_op.h" +#include "paddle/fluid/operators/softmax_op.h" namespace ops = paddle::operators; diff --git a/paddle/fluid/operators/softmax_op.h b/paddle/fluid/operators/softmax_op.h index 63e379a3b31..9287f023103 100644 --- a/paddle/fluid/operators/softmax_op.h +++ b/paddle/fluid/operators/softmax_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/softmax.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/softmax.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.cc b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc index 7135780c92d..79d56cb97d3 100644 --- a/paddle/fluid/operators/softmax_with_cross_entropy_op.cc +++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/softmax_with_cross_entropy_op.h" +#include "paddle/fluid/operators/softmax_with_cross_entropy_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.cu b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu index 61583c6161c..410d9e8887c 100644 --- a/paddle/fluid/operators/softmax_with_cross_entropy_op.cu +++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu @@ -14,7 +14,7 @@ limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/softmax_with_cross_entropy_op.h" +#include "paddle/fluid/operators/softmax_with_cross_entropy_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.h b/paddle/fluid/operators/softmax_with_cross_entropy_op.h index 6bde0f37e06..0927efd42ce 100644 --- a/paddle/fluid/operators/softmax_with_cross_entropy_op.h +++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.h @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/cross_entropy.h" -#include "paddle/operators/math/softmax.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/cross_entropy.h" +#include "paddle/fluid/operators/math/softmax.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/split_lod_tensor_op.cc b/paddle/fluid/operators/split_lod_tensor_op.cc index bd93c492015..f821dc54d7b 100644 --- a/paddle/fluid/operators/split_lod_tensor_op.cc +++ b/paddle/fluid/operators/split_lod_tensor_op.cc @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/op_registry.h" -#include "paddle/memory/memcpy.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/memory/memcpy.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/split_op.cc b/paddle/fluid/operators/split_op.cc index 8d55ae5dd7b..f8bc22fe1d3 100644 --- a/paddle/fluid/operators/split_op.cc +++ b/paddle/fluid/operators/split_op.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/split_op.h" -#include "paddle/operators/net_op.h" +#include "paddle/fluid/operators/split_op.h" +#include "paddle/fluid/operators/net_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/split_op.cu.cc b/paddle/fluid/operators/split_op.cu.cc index dbad0bbf68d..279691c759e 100644 --- a/paddle/fluid/operators/split_op.cu.cc +++ b/paddle/fluid/operators/split_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/split_op.h" +#include "paddle/fluid/operators/split_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( split, ops::SplitOpKernel); diff --git a/paddle/fluid/operators/split_op.h b/paddle/fluid/operators/split_op.h index a38c435d531..e78218f2fb1 100644 --- a/paddle/fluid/operators/split_op.h +++ b/paddle/fluid/operators/split_op.h @@ -15,8 +15,8 @@ limitations under the License. */ #pragma once #include -#include "paddle/framework/op_registry.h" -#include "paddle/operators/strided_memcpy.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/strided_memcpy.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/split_selected_rows_op.cc b/paddle/fluid/operators/split_selected_rows_op.cc index 0515ea13aad..113ce2ce109 100644 --- a/paddle/fluid/operators/split_selected_rows_op.cc +++ b/paddle/fluid/operators/split_selected_rows_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/split_selected_rows_op.h" +#include "paddle/fluid/operators/split_selected_rows_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/split_selected_rows_op.cu b/paddle/fluid/operators/split_selected_rows_op.cu index 983285480fd..0bbf1ecfaef 100644 --- a/paddle/fluid/operators/split_selected_rows_op.cu +++ b/paddle/fluid/operators/split_selected_rows_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/split_selected_rows_op.h" +#include "paddle/fluid/operators/split_selected_rows_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( split_selected_rows, diff --git a/paddle/fluid/operators/split_selected_rows_op.h b/paddle/fluid/operators/split_selected_rows_op.h index 12e64e2901e..527264bd675 100644 --- a/paddle/fluid/operators/split_selected_rows_op.h +++ b/paddle/fluid/operators/split_selected_rows_op.h @@ -15,8 +15,8 @@ limitations under the License. */ #pragma once #include -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/selected_rows_functor.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/selected_rows_functor.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/spp_op.cc b/paddle/fluid/operators/spp_op.cc index c0aa87b0f06..e6755b12000 100644 --- a/paddle/fluid/operators/spp_op.cc +++ b/paddle/fluid/operators/spp_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/spp_op.h" +#include "paddle/fluid/operators/spp_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/spp_op.cu.cc b/paddle/fluid/operators/spp_op.cu.cc index 761e4d6c4a9..cad2ca5ef8e 100644 --- a/paddle/fluid/operators/spp_op.cu.cc +++ b/paddle/fluid/operators/spp_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/spp_op.h" +#include "paddle/fluid/operators/spp_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/spp_op.h b/paddle/fluid/operators/spp_op.h index f35b305d02c..1da1f805807 100644 --- a/paddle/fluid/operators/spp_op.h +++ b/paddle/fluid/operators/spp_op.h @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/pooling.h" -#include "paddle/operators/strided_memcpy.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/pooling.h" +#include "paddle/fluid/operators/strided_memcpy.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/squared_l2_distance_op.cc b/paddle/fluid/operators/squared_l2_distance_op.cc index 9e097176f34..c1d0c2c7f39 100644 --- a/paddle/fluid/operators/squared_l2_distance_op.cc +++ b/paddle/fluid/operators/squared_l2_distance_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/squared_l2_distance_op.h" +#include "paddle/fluid/operators/squared_l2_distance_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/squared_l2_distance_op.cu b/paddle/fluid/operators/squared_l2_distance_op.cu index f2648dde5eb..959e7afac99 100644 --- a/paddle/fluid/operators/squared_l2_distance_op.cu +++ b/paddle/fluid/operators/squared_l2_distance_op.cu @@ -14,7 +14,7 @@ limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/squared_l2_distance_op.h" +#include "paddle/fluid/operators/squared_l2_distance_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/squared_l2_distance_op.h b/paddle/fluid/operators/squared_l2_distance_op.h index 5bd5f4819a3..aab241247e5 100644 --- a/paddle/fluid/operators/squared_l2_distance_op.h +++ b/paddle/fluid/operators/squared_l2_distance_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/squared_l2_norm_op.cc b/paddle/fluid/operators/squared_l2_norm_op.cc index 6626bf03755..a43cc22994b 100644 --- a/paddle/fluid/operators/squared_l2_norm_op.cc +++ b/paddle/fluid/operators/squared_l2_norm_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/squared_l2_norm_op.h" +#include "paddle/fluid/operators/squared_l2_norm_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/squared_l2_norm_op.cu b/paddle/fluid/operators/squared_l2_norm_op.cu index b222113a8c8..52f4ab79b21 100644 --- a/paddle/fluid/operators/squared_l2_norm_op.cu +++ b/paddle/fluid/operators/squared_l2_norm_op.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/squared_l2_norm_op.h" +#include "paddle/fluid/operators/squared_l2_norm_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/squared_l2_norm_op.h b/paddle/fluid/operators/squared_l2_norm_op.h index 1ce26c775ed..56524636b8f 100644 --- a/paddle/fluid/operators/squared_l2_norm_op.h +++ b/paddle/fluid/operators/squared_l2_norm_op.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/strided_memcpy.h b/paddle/fluid/operators/strided_memcpy.h index 735cabcd973..8a99b405e26 100644 --- a/paddle/fluid/operators/strided_memcpy.h +++ b/paddle/fluid/operators/strided_memcpy.h @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/operators/detail/strided_memcpy.h" +#include "paddle/fluid/operators/detail/strided_memcpy.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/strided_memcpy_test.cc b/paddle/fluid/operators/strided_memcpy_test.cc index 06d81188558..a369941a993 100644 --- a/paddle/fluid/operators/strided_memcpy_test.cc +++ b/paddle/fluid/operators/strided_memcpy_test.cc @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/strided_memcpy.h" +#include "paddle/fluid/operators/strided_memcpy.h" #include "gtest/gtest.h" -#include "paddle/memory/memory.h" +#include "paddle/fluid/memory/memory.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sum_op.cc b/paddle/fluid/operators/sum_op.cc index 88ed67f7ba2..96f851720ae 100644 --- a/paddle/fluid/operators/sum_op.cc +++ b/paddle/fluid/operators/sum_op.cc @@ -9,10 +9,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/sum_op.h" +#include "paddle/fluid/operators/sum_op.h" #include -#include "paddle/framework/var_type_inference.h" -#include "paddle/operators/detail/safe_ref.h" +#include "paddle/fluid/framework/var_type_inference.h" +#include "paddle/fluid/operators/detail/safe_ref.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/sum_op.cu b/paddle/fluid/operators/sum_op.cu index 873155076c1..8d8f90d7510 100644 --- a/paddle/fluid/operators/sum_op.cu +++ b/paddle/fluid/operators/sum_op.cu @@ -10,7 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #define EIGEN_USE_GPU -#include "paddle/operators/sum_op.h" +#include "paddle/fluid/operators/sum_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/sum_op.h b/paddle/fluid/operators/sum_op.h index 3d8102c3ae2..5e1222c6ef7 100644 --- a/paddle/fluid/operators/sum_op.h +++ b/paddle/fluid/operators/sum_op.h @@ -10,11 +10,11 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/eigen.h" -#include "paddle/framework/lod_tensor_array.h" -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/selected_rows_functor.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/lod_tensor_array.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/selected_rows_functor.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/target_assign_op.cc b/paddle/fluid/operators/target_assign_op.cc index 615ca857ceb..24f1b725231 100644 --- a/paddle/fluid/operators/target_assign_op.cc +++ b/paddle/fluid/operators/target_assign_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/target_assign_op.h" +#include "paddle/fluid/operators/target_assign_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/target_assign_op.cu b/paddle/fluid/operators/target_assign_op.cu index fc0a1000a42..5c012d27ad8 100644 --- a/paddle/fluid/operators/target_assign_op.cu +++ b/paddle/fluid/operators/target_assign_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/target_assign_op.h" +#include "paddle/fluid/operators/target_assign_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/target_assign_op.h b/paddle/fluid/operators/target_assign_op.h index 574919e1ef8..876111523af 100644 --- a/paddle/fluid/operators/target_assign_op.h +++ b/paddle/fluid/operators/target_assign_op.h @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/platform/assert.h" -#include "paddle/platform/for_range.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/assert.h" +#include "paddle/fluid/platform/for_range.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/tensor_array_read_write_op.cc b/paddle/fluid/operators/tensor_array_read_write_op.cc index a70be8b8752..50811fb2249 100644 --- a/paddle/fluid/operators/tensor_array_read_write_op.cc +++ b/paddle/fluid/operators/tensor_array_read_write_op.cc @@ -11,8 +11,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/array_operator.h" -#include "paddle/operators/detail/safe_ref.h" +#include "paddle/fluid/operators/array_operator.h" +#include "paddle/fluid/operators/detail/safe_ref.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/top_k_op.cc b/paddle/fluid/operators/top_k_op.cc index a8ddd729732..c81ea860d0c 100644 --- a/paddle/fluid/operators/top_k_op.cc +++ b/paddle/fluid/operators/top_k_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/top_k_op.h" +#include "paddle/fluid/operators/top_k_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/top_k_op.cu b/paddle/fluid/operators/top_k_op.cu index f7bf58e7218..5390cb5063b 100644 --- a/paddle/fluid/operators/top_k_op.cu +++ b/paddle/fluid/operators/top_k_op.cu @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/op_registry.h" -#include "paddle/platform/assert.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/assert.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/top_k_op.h b/paddle/fluid/operators/top_k_op.h index bf42e15e6b2..e32b3515007 100644 --- a/paddle/fluid/operators/top_k_op.h +++ b/paddle/fluid/operators/top_k_op.h @@ -15,8 +15,8 @@ limitations under the License. */ #pragma once #include #include -#include "paddle/framework/eigen.h" -#include "paddle/framework/op_registry.h" +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/transpose_op.cc b/paddle/fluid/operators/transpose_op.cc index c7ae162638c..a3d8acffc26 100644 --- a/paddle/fluid/operators/transpose_op.cc +++ b/paddle/fluid/operators/transpose_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/transpose_op.h" +#include "paddle/fluid/operators/transpose_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/transpose_op.cu.cc b/paddle/fluid/operators/transpose_op.cu.cc index 281c4468cc2..f8667ab369e 100644 --- a/paddle/fluid/operators/transpose_op.cu.cc +++ b/paddle/fluid/operators/transpose_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/transpose_op.h" +#include "paddle/fluid/operators/transpose_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/transpose_op.h b/paddle/fluid/operators/transpose_op.h index b9686a2db3f..1fb419474ab 100644 --- a/paddle/fluid/operators/transpose_op.h +++ b/paddle/fluid/operators/transpose_op.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/uniform_random_op.cc b/paddle/fluid/operators/uniform_random_op.cc index 3a314bdb9b0..b6fea1d4485 100644 --- a/paddle/fluid/operators/uniform_random_op.cc +++ b/paddle/fluid/operators/uniform_random_op.cc @@ -11,8 +11,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/uniform_random_op.cu b/paddle/fluid/operators/uniform_random_op.cu index 719d0872a7c..9afca68e59f 100644 --- a/paddle/fluid/operators/uniform_random_op.cu +++ b/paddle/fluid/operators/uniform_random_op.cu @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include #include -#include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/unpool_op.cc b/paddle/fluid/operators/unpool_op.cc index 50cee11a7a2..2e0b271fed6 100644 --- a/paddle/fluid/operators/unpool_op.cc +++ b/paddle/fluid/operators/unpool_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/unpool_op.h" +#include "paddle/fluid/operators/unpool_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/unpool_op.cu.cc b/paddle/fluid/operators/unpool_op.cu.cc index 9b002e35c43..15d81eb296b 100644 --- a/paddle/fluid/operators/unpool_op.cu.cc +++ b/paddle/fluid/operators/unpool_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/unpool_op.h" +#include "paddle/fluid/operators/unpool_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/unpool_op.h b/paddle/fluid/operators/unpool_op.h index ee18b118c95..ceed5507391 100644 --- a/paddle/fluid/operators/unpool_op.h +++ b/paddle/fluid/operators/unpool_op.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/unpooling.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/unpooling.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/warpctc_op.cc b/paddle/fluid/operators/warpctc_op.cc index bd0c5f99576..1c05fed0b47 100644 --- a/paddle/fluid/operators/warpctc_op.cc +++ b/paddle/fluid/operators/warpctc_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/warpctc_op.h" +#include "paddle/fluid/operators/warpctc_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/warpctc_op.cu.cc b/paddle/fluid/operators/warpctc_op.cu.cc index 7d8527ac75f..9ee7f970a9a 100644 --- a/paddle/fluid/operators/warpctc_op.cu.cc +++ b/paddle/fluid/operators/warpctc_op.cu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/operators/warpctc_op.h" +#include "paddle/fluid/operators/warpctc_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/warpctc_op.h b/paddle/fluid/operators/warpctc_op.h index 8aea061c00c..a1de71627ee 100644 --- a/paddle/fluid/operators/warpctc_op.h +++ b/paddle/fluid/operators/warpctc_op.h @@ -14,11 +14,11 @@ limitations under the License. */ #pragma once -#include "paddle/framework/op_registry.h" -#include "paddle/operators/math/math_function.h" -#include "paddle/operators/math/sequence_padding.h" -#include "paddle/operators/math/sequence_scale.h" -#include "paddle/platform/dynload/warpctc.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/sequence_padding.h" +#include "paddle/fluid/operators/math/sequence_scale.h" +#include "paddle/fluid/platform/dynload/warpctc.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/while_op.cc b/paddle/fluid/operators/while_op.cc index a744ebd6159..d254c572acf 100644 --- a/paddle/fluid/operators/while_op.cc +++ b/paddle/fluid/operators/while_op.cc @@ -13,11 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/framework/executor.h" -#include "paddle/framework/lod_tensor_array.h" -#include "paddle/framework/op_registry.h" -#include "paddle/framework/operator.h" -#include "paddle/operators/detail/safe_ref.h" +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/lod_tensor_array.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/operators/detail/safe_ref.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/platform/cpu_info.cc b/paddle/fluid/platform/cpu_info.cc index 78e1fa9df56..47473aead0e 100644 --- a/paddle/fluid/platform/cpu_info.cc +++ b/paddle/fluid/platform/cpu_info.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/platform/cpu_info.h" +#include "paddle/fluid/platform/cpu_info.h" #ifdef __APPLE__ #include diff --git a/paddle/fluid/platform/cpu_info_test.cc b/paddle/fluid/platform/cpu_info_test.cc index 1bfe62c1fb6..d1fdba13b80 100644 --- a/paddle/fluid/platform/cpu_info_test.cc +++ b/paddle/fluid/platform/cpu_info_test.cc @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/platform/cpu_info.h" +#include "paddle/fluid/platform/cpu_info.h" #include "paddle/string/printf.h" #include diff --git a/paddle/fluid/platform/cudnn_helper.h b/paddle/fluid/platform/cudnn_helper.h index 80a4c9bb4bb..f2daa4f4fcc 100644 --- a/paddle/fluid/platform/cudnn_helper.h +++ b/paddle/fluid/platform/cudnn_helper.h @@ -15,9 +15,9 @@ limitations under the License. */ #pragma once #include -#include "paddle/platform/dynload/cudnn.h" -#include "paddle/platform/enforce.h" -#include "paddle/platform/macros.h" +#include "paddle/fluid/platform/dynload/cudnn.h" +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/macros.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/cudnn_helper_test.cc b/paddle/fluid/platform/cudnn_helper_test.cc index 427359f6971..cd0bd3fe3ed 100644 --- a/paddle/fluid/platform/cudnn_helper_test.cc +++ b/paddle/fluid/platform/cudnn_helper_test.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/platform/cudnn_helper.h" +#include "paddle/fluid/platform/cudnn_helper.h" #include TEST(CudnnHelper, ScopedTensorDescriptor) { diff --git a/paddle/fluid/platform/device_context.cc b/paddle/fluid/platform/device_context.cc index 9d9348079a0..c4da846bb1c 100644 --- a/paddle/fluid/platform/device_context.cc +++ b/paddle/fluid/platform/device_context.cc @@ -9,8 +9,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/platform/device_context.h" -#include "paddle/memory/memory.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/memory/memory.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/device_context.h b/paddle/fluid/platform/device_context.h index 9826a642768..10b581f41a1 100644 --- a/paddle/fluid/platform/device_context.h +++ b/paddle/fluid/platform/device_context.h @@ -15,18 +15,18 @@ limitations under the License. */ #include #ifdef PADDLE_WITH_CUDA -#include "paddle/platform/dynload/cublas.h" -#include "paddle/platform/dynload/cudnn.h" -#include "paddle/platform/gpu_info.h" +#include "paddle/fluid/platform/dynload/cublas.h" +#include "paddle/fluid/platform/dynload/cudnn.h" +#include "paddle/fluid/platform/gpu_info.h" #define EIGEN_USE_GPU #endif #ifdef PADDLE_WITH_MKLDNN -#include "paddle/platform/mkldnn_helper.h" +#include "paddle/fluid/platform/mkldnn_helper.h" #endif -#include "paddle/platform/enforce.h" -#include "paddle/platform/place.h" +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/place.h" #include "unsupported/Eigen/CXX11/Tensor" #include "glog/logging.h" diff --git a/paddle/fluid/platform/device_context_test.cu b/paddle/fluid/platform/device_context_test.cu index 767fe9b24a5..f4dae6e90a8 100644 --- a/paddle/fluid/platform/device_context_test.cu +++ b/paddle/fluid/platform/device_context_test.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "gtest/gtest.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/platform/device_context.h" #include "glog/logging.h" diff --git a/paddle/fluid/platform/dynload/cublas.cc b/paddle/fluid/platform/dynload/cublas.cc index 6aca716657c..c599712554b 100644 --- a/paddle/fluid/platform/dynload/cublas.cc +++ b/paddle/fluid/platform/dynload/cublas.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/platform/dynload/cublas.h" +#include "paddle/fluid/platform/dynload/cublas.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/dynload/cublas.h b/paddle/fluid/platform/dynload/cublas.h index 61a22d9db3e..05f69e50651 100644 --- a/paddle/fluid/platform/dynload/cublas.h +++ b/paddle/fluid/platform/dynload/cublas.h @@ -17,7 +17,7 @@ limitations under the License. */ #include #include #include -#include "paddle/platform/dynload/dynamic_loader.h" +#include "paddle/fluid/platform/dynload/dynamic_loader.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/dynload/cudnn.cc b/paddle/fluid/platform/dynload/cudnn.cc index 701f6240fef..0b1c4c4f960 100644 --- a/paddle/fluid/platform/dynload/cudnn.cc +++ b/paddle/fluid/platform/dynload/cudnn.cc @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/platform/dynload/cudnn.h" -#include "paddle/platform/enforce.h" +#include "paddle/fluid/platform/dynload/cudnn.h" +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/dynload/cudnn.h b/paddle/fluid/platform/dynload/cudnn.h index b9263479494..00dfbc83872 100644 --- a/paddle/fluid/platform/dynload/cudnn.h +++ b/paddle/fluid/platform/dynload/cudnn.h @@ -17,7 +17,7 @@ limitations under the License. */ #include #include #include -#include "paddle/platform/dynload/dynamic_loader.h" +#include "paddle/fluid/platform/dynload/dynamic_loader.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/dynload/curand.cc b/paddle/fluid/platform/dynload/curand.cc index d05dd88126b..eac690b1458 100644 --- a/paddle/fluid/platform/dynload/curand.cc +++ b/paddle/fluid/platform/dynload/curand.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include +#include "paddle/fluid/platform/dynload/curand.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/dynload/curand.h b/paddle/fluid/platform/dynload/curand.h index 7bfe0778c78..ce3115b3ce0 100644 --- a/paddle/fluid/platform/dynload/curand.h +++ b/paddle/fluid/platform/dynload/curand.h @@ -17,7 +17,7 @@ limitations under the License. */ #include #include #include -#include "paddle/platform/dynload/dynamic_loader.h" +#include "paddle/fluid/platform/dynload/dynamic_loader.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/dynload/dynamic_loader.cc b/paddle/fluid/platform/dynload/dynamic_loader.cc index c8c09ae608f..eb00f93b7cd 100644 --- a/paddle/fluid/platform/dynload/dynamic_loader.cc +++ b/paddle/fluid/platform/dynload/dynamic_loader.cc @@ -12,14 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/platform/dynload/dynamic_loader.h" +#include "paddle/fluid/platform/dynload/dynamic_loader.h" #include #include #include #include #include "gflags/gflags.h" #include "glog/logging.h" -#include "paddle/platform/enforce.h" +#include "paddle/fluid/platform/enforce.h" DEFINE_string(cudnn_dir, "", "Specify path for loading libcudnn.so. For instance, " diff --git a/paddle/fluid/platform/dynload/nccl.cc b/paddle/fluid/platform/dynload/nccl.cc index 4cec829a8ad..1dc3e96f04a 100644 --- a/paddle/fluid/platform/dynload/nccl.cc +++ b/paddle/fluid/platform/dynload/nccl.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/platform/dynload/nccl.h" +#include "paddle/fluid/platform/dynload/nccl.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/dynload/nccl.h b/paddle/fluid/platform/dynload/nccl.h index 6c776afc97a..349a4d0ba32 100644 --- a/paddle/fluid/platform/dynload/nccl.h +++ b/paddle/fluid/platform/dynload/nccl.h @@ -17,8 +17,8 @@ limitations under the License. */ #include #include #include -#include "paddle/platform/call_once.h" -#include "paddle/platform/dynload/dynamic_loader.h" +#include "paddle/fluid/platform/call_once.h" +#include "paddle/fluid/platform/dynload/dynamic_loader.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/dynload/warpctc.cc b/paddle/fluid/platform/dynload/warpctc.cc index 9b7d01a6e8f..84de2cae947 100644 --- a/paddle/fluid/platform/dynload/warpctc.cc +++ b/paddle/fluid/platform/dynload/warpctc.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/platform/dynload/warpctc.h" +#include "paddle/fluid/platform/dynload/warpctc.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/dynload/warpctc.h b/paddle/fluid/platform/dynload/warpctc.h index acafcaff2cc..f1955818ded 100644 --- a/paddle/fluid/platform/dynload/warpctc.h +++ b/paddle/fluid/platform/dynload/warpctc.h @@ -17,7 +17,7 @@ limitations under the License. */ #include #include #include "ctc.h" -#include "paddle/platform/dynload/dynamic_loader.h" +#include "paddle/fluid/platform/dynload/dynamic_loader.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/enforce.cc b/paddle/fluid/platform/enforce.cc index e8d31bc782e..55cd80943cf 100644 --- a/paddle/fluid/platform/enforce.cc +++ b/paddle/fluid/platform/enforce.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/platform/enforce.h" +#include "paddle/fluid/platform/enforce.h" namespace paddle { namespace platform {} // namespace platform diff --git a/paddle/fluid/platform/enforce.h b/paddle/fluid/platform/enforce.h index d1c7be0790b..b22893c0a56 100644 --- a/paddle/fluid/platform/enforce.h +++ b/paddle/fluid/platform/enforce.h @@ -22,7 +22,7 @@ limitations under the License. */ #include #include -#include "paddle/platform/macros.h" +#include "paddle/fluid/platform/macros.h" #include "paddle/string/printf.h" #include "paddle/string/to_string.h" @@ -34,10 +34,10 @@ limitations under the License. */ #ifdef PADDLE_WITH_CUDA -#include "paddle/platform/dynload/cublas.h" -#include "paddle/platform/dynload/cudnn.h" -#include "paddle/platform/dynload/curand.h" -#include "paddle/platform/dynload/nccl.h" +#include "paddle/fluid/platform/dynload/cublas.h" +#include "paddle/fluid/platform/dynload/cudnn.h" +#include "paddle/fluid/platform/dynload/curand.h" +#include "paddle/fluid/platform/dynload/nccl.h" #include #include diff --git a/paddle/fluid/platform/enforce_test.cc b/paddle/fluid/platform/enforce_test.cc index 8206a055eab..896a9a04eca 100644 --- a/paddle/fluid/platform/enforce_test.cc +++ b/paddle/fluid/platform/enforce_test.cc @@ -14,7 +14,7 @@ limitations under the License. */ #include #include "gtest/gtest.h" -#include "paddle/platform/enforce.h" +#include "paddle/fluid/platform/enforce.h" #include "paddle/string/piece.h" using StringPiece = paddle::string::Piece; diff --git a/paddle/fluid/platform/for_range.h b/paddle/fluid/platform/for_range.h index 694a66d9ac4..0e695328c39 100644 --- a/paddle/fluid/platform/for_range.h +++ b/paddle/fluid/platform/for_range.h @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/platform/device_context.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/gpu_info.cc b/paddle/fluid/platform/gpu_info.cc index 7037551d754..1797f59a9c9 100644 --- a/paddle/fluid/platform/gpu_info.cc +++ b/paddle/fluid/platform/gpu_info.cc @@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/platform/gpu_info.h" +#include "paddle/fluid/platform/gpu_info.h" #include "gflags/gflags.h" -#include "paddle/platform/enforce.h" +#include "paddle/fluid/platform/enforce.h" DEFINE_double(fraction_of_gpu_memory_to_use, 0.92, "Default use 92% of GPU memory for PaddlePaddle," diff --git a/paddle/fluid/platform/nccl_test.cu b/paddle/fluid/platform/nccl_test.cu index 84f5ac28be3..75b95aff1a4 100644 --- a/paddle/fluid/platform/nccl_test.cu +++ b/paddle/fluid/platform/nccl_test.cu @@ -19,11 +19,11 @@ limitations under the License. */ #include "glog/logging.h" #include "gtest/gtest.h" -#include "paddle/framework/init.h" -#include "paddle/platform/device_context.h" -#include "paddle/platform/dynload/nccl.h" -#include "paddle/platform/enforce.h" -#include "paddle/platform/gpu_info.h" +#include "paddle/fluid/framework/init.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/dynload/nccl.h" +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/gpu_info.h" static int dev_count = 0; diff --git a/paddle/fluid/platform/place.cc b/paddle/fluid/platform/place.cc index f05260ccac4..e99b75d761a 100644 --- a/paddle/fluid/platform/place.cc +++ b/paddle/fluid/platform/place.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/platform/place.h" +#include "paddle/fluid/platform/place.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/place.h b/paddle/fluid/platform/place.h index fbb43fa043a..2977a41036e 100644 --- a/paddle/fluid/platform/place.h +++ b/paddle/fluid/platform/place.h @@ -15,8 +15,8 @@ limitations under the License. */ #pragma once #include -#include "paddle/platform/enforce.h" -#include "paddle/platform/variant.h" +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/variant.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/place_test.cc b/paddle/fluid/platform/place_test.cc index 150b2d3b1fb..f248902d91c 100644 --- a/paddle/fluid/platform/place_test.cc +++ b/paddle/fluid/platform/place_test.cc @@ -11,7 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/platform/place.h" +#include "paddle/fluid/platform/place.h" #include #include "gtest/gtest.h" diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc index 6df087d154c..28d2675f799 100644 --- a/paddle/fluid/platform/profiler.cc +++ b/paddle/fluid/platform/profiler.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/platform/profiler.h" +#include "paddle/fluid/platform/profiler.h" #include #include #include "glog/logging.h" diff --git a/paddle/fluid/platform/profiler.h b/paddle/fluid/platform/profiler.h index 8de1e6ad296..0bc5e666cb4 100644 --- a/paddle/fluid/platform/profiler.h +++ b/paddle/fluid/platform/profiler.h @@ -17,7 +17,7 @@ limitations under the License. */ #include #include #include -#include "paddle/platform/device_context.h" +#include "paddle/fluid/platform/device_context.h" namespace paddle { namespace platform { diff --git a/paddle/fluid/platform/profiler_test.cc b/paddle/fluid/platform/profiler_test.cc index 81f10c91342..d2525c38b6f 100644 --- a/paddle/fluid/platform/profiler_test.cc +++ b/paddle/fluid/platform/profiler_test.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/platform/profiler.h" +#include "paddle/fluid/platform/profiler.h" #include "gtest/gtest.h" TEST(Event, CpuElapsedTime) { diff --git a/paddle/fluid/platform/transform.h b/paddle/fluid/platform/transform.h index a88902b164c..879daed1910 100644 --- a/paddle/fluid/platform/transform.h +++ b/paddle/fluid/platform/transform.h @@ -14,17 +14,17 @@ limitations under the License. */ #pragma once -#include "paddle/platform/device_context.h" -#include "paddle/platform/enforce.h" -#include "paddle/platform/hostdevice.h" -#include "paddle/platform/place.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/fluid/platform/place.h" #include #include #ifdef __NVCC__ #include #include -#include "paddle/platform/details/device_ptr_cast.h" +#include "paddle/fluid/platform/details/device_ptr_cast.h" #endif namespace paddle { diff --git a/paddle/fluid/platform/transform_test.cu b/paddle/fluid/platform/transform_test.cu index af9204a0a7b..0e4b9edc2fd 100644 --- a/paddle/fluid/platform/transform_test.cu +++ b/paddle/fluid/platform/transform_test.cu @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/memory/memcpy.h" -#include "paddle/memory/memory.h" -#include "paddle/platform/hostdevice.h" -#include "paddle/platform/transform.h" +#include "paddle/fluid/memory/memcpy.h" +#include "paddle/fluid/memory/memory.h" +#include "paddle/fluid/platform/hostdevice.h" +#include "paddle/fluid/platform/transform.h" template class Scale { diff --git a/paddle/fluid/pybind/.clang-format b/paddle/fluid/pybind/.clang-format deleted file mode 120000 index 7d28cb39247..00000000000 --- a/paddle/fluid/pybind/.clang-format +++ /dev/null @@ -1 +0,0 @@ -../framework/.clang-format \ No newline at end of file diff --git a/paddle/fluid/pybind/.clang-format b/paddle/fluid/pybind/.clang-format new file mode 100644 index 00000000000..29282dc87e2 --- /dev/null +++ b/paddle/fluid/pybind/.clang-format @@ -0,0 +1,5 @@ +--- +Language: Cpp +BasedOnStyle: Google +Standard: Cpp11 +... diff --git a/paddle/fluid/pybind/const_value.cc b/paddle/fluid/pybind/const_value.cc index b13ad42ea29..098252a83d3 100644 --- a/paddle/fluid/pybind/const_value.cc +++ b/paddle/fluid/pybind/const_value.cc @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "const_value.h" -#include "paddle/framework/operator.h" +#include "paddle/fluid/framework/operator.h" namespace paddle { namespace pybind { diff --git a/paddle/fluid/pybind/const_value.h b/paddle/fluid/pybind/const_value.h index 3d57c972a9d..67d14ac9ff0 100644 --- a/paddle/fluid/pybind/const_value.h +++ b/paddle/fluid/pybind/const_value.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once #include -#include "paddle/platform/enforce.h" +#include "paddle/fluid/platform/enforce.h" #include "pybind11/pybind11.h" namespace py = pybind11; diff --git a/paddle/fluid/pybind/exception.cc b/paddle/fluid/pybind/exception.cc index e29ac3ebab7..7398a88541b 100644 --- a/paddle/fluid/pybind/exception.cc +++ b/paddle/fluid/pybind/exception.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/pybind/exception.h" +#include "paddle/fluid/pybind/exception.h" namespace paddle { namespace pybind { diff --git a/paddle/fluid/pybind/exception.h b/paddle/fluid/pybind/exception.h index 436ddd5707a..43e91a70630 100644 --- a/paddle/fluid/pybind/exception.h +++ b/paddle/fluid/pybind/exception.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once #include -#include "paddle/platform/enforce.h" +#include "paddle/fluid/platform/enforce.h" #include "pybind11/pybind11.h" namespace paddle { namespace pybind { diff --git a/paddle/fluid/pybind/protobuf.cc b/paddle/fluid/pybind/protobuf.cc index 0a92e10927c..4aefcf1a1cd 100644 --- a/paddle/fluid/pybind/protobuf.cc +++ b/paddle/fluid/pybind/protobuf.cc @@ -12,14 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/pybind/protobuf.h" +#include "paddle/fluid/pybind/protobuf.h" #include #include -#include "paddle/framework/backward.h" -#include "paddle/framework/block_desc.h" -#include "paddle/framework/op_desc.h" -#include "paddle/framework/program_desc.h" -#include "paddle/framework/var_desc.h" +#include "paddle/fluid/framework/backward.h" +#include "paddle/fluid/framework/block_desc.h" +#include "paddle/fluid/framework/op_desc.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/framework/var_desc.h" // Cast boost::variant for PyBind. // Copy from diff --git a/paddle/fluid/pybind/protobuf.h b/paddle/fluid/pybind/protobuf.h index 9e747e9ea60..c828e4583d2 100644 --- a/paddle/fluid/pybind/protobuf.h +++ b/paddle/fluid/pybind/protobuf.h @@ -17,7 +17,7 @@ limitations under the License. */ #include #include #include -#include "paddle/platform/variant.h" +#include "paddle/fluid/platform/variant.h" #include "pybind11/numpy.h" #include "pybind11/pybind11.h" #include "pybind11/stl.h" diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index a880d9bdbc6..8924aabd17b 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -12,35 +12,35 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/pybind/protobuf.h" +#include "paddle/fluid/pybind/protobuf.h" #include // for call_once #include -#include "paddle/framework/backward.h" -#include "paddle/framework/executor.h" -#include "paddle/framework/feed_fetch_method.h" -#include "paddle/framework/framework.pb.h" -#include "paddle/framework/init.h" -#include "paddle/framework/lod_rank_table.h" -#include "paddle/framework/lod_tensor.h" -#include "paddle/framework/lod_tensor_array.h" -#include "paddle/framework/prune.h" -#include "paddle/framework/selected_rows.h" -#include "paddle/operators/cond_op.h" -#include "paddle/operators/net_op.h" -#include "paddle/platform/enforce.h" -#include "paddle/platform/place.h" -#include "paddle/platform/profiler.h" -#include "paddle/pybind/const_value.h" -#include "paddle/pybind/exception.h" -#include "paddle/pybind/pybind.h" -#include "paddle/pybind/tensor_py.h" +#include "paddle/fluid/framework/backward.h" +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/feed_fetch_method.h" +#include "paddle/fluid/framework/framework.pb.h" +#include "paddle/fluid/framework/init.h" +#include "paddle/fluid/framework/lod_rank_table.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/lod_tensor_array.h" +#include "paddle/fluid/framework/prune.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/operators/cond_op.h" +#include "paddle/fluid/operators/net_op.h" +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/place.h" +#include "paddle/fluid/platform/profiler.h" +#include "paddle/fluid/pybind/const_value.h" +#include "paddle/fluid/pybind/exception.h" +#include "paddle/fluid/pybind/pybind.h" +#include "paddle/fluid/pybind/tensor_py.h" #include "paddle/string/to_string.h" #ifdef PADDLE_WITH_CUDA -#include "paddle/operators/nccl/nccl_gpu_common.h" -#include "paddle/platform/cuda_profiler.h" -#include "paddle/platform/gpu_info.h" +#include "paddle/fluid/operators/nccl/nccl_gpu_common.h" +#include "paddle/fluid/platform/cuda_profiler.h" +#include "paddle/fluid/platform/gpu_info.h" #endif // disable auto conversion to list in Python diff --git a/paddle/fluid/pybind/tensor_py.h b/paddle/fluid/pybind/tensor_py.h index 3b5210e2b91..0261709f1e4 100644 --- a/paddle/fluid/pybind/tensor_py.h +++ b/paddle/fluid/pybind/tensor_py.h @@ -14,9 +14,9 @@ limitations under the License. */ #pragma once #include -#include "paddle/framework/lod_tensor.h" -#include "paddle/memory/memcpy.h" -#include "paddle/platform/device_context.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/memory/memcpy.h" +#include "paddle/fluid/platform/device_context.h" #include "pybind11/numpy.h" #include "pybind11/pybind11.h" diff --git a/paddle/math/float16.h b/paddle/math/float16.h index efebbce5040..63248d36f9d 100644 --- a/paddle/math/float16.h +++ b/paddle/math/float16.h @@ -22,7 +22,7 @@ limitations under the License. */ #include "unsupported/Eigen/CXX11/Tensor" -#include "paddle/platform/hostdevice.h" +#include "paddle/fluid/platform/hostdevice.h" #ifdef __GNUC__ #define PADDLE_GNUC_VER (__GNUC__ * 10 + __GNUC_MINOR__) diff --git a/paddle/testing/paddle_gtest_main.cc b/paddle/testing/paddle_gtest_main.cc index ab84f1c292b..270f2f4c181 100644 --- a/paddle/testing/paddle_gtest_main.cc +++ b/paddle/testing/paddle_gtest_main.cc @@ -16,8 +16,8 @@ limitations under the License. */ #include "gflags/gflags.h" #include "gtest/gtest.h" -#include "paddle/framework/init.h" -#include "paddle/memory/memory.h" +#include "paddle/fluid/framework/init.h" +#include "paddle/fluid/memory/memory.h" int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); -- GitLab From e0fcaa518f4d15b895777e233c56acc4298a9c65 Mon Sep 17 00:00:00 2001 From: kavyasrinet Date: Fri, 9 Feb 2018 17:25:58 -0800 Subject: [PATCH 128/138] Added an elementary unit test for CSP (#8340) * Added an elementary test case for CSP * removed input * Rename test file to avoid running in CI * Fix YAPF error * Remove one line function handler --- python/paddle/v2/fluid/tests/notest_csp.py | 37 ++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 python/paddle/v2/fluid/tests/notest_csp.py diff --git a/python/paddle/v2/fluid/tests/notest_csp.py b/python/paddle/v2/fluid/tests/notest_csp.py new file mode 100644 index 00000000000..7fe234a20b5 --- /dev/null +++ b/python/paddle/v2/fluid/tests/notest_csp.py @@ -0,0 +1,37 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import paddle.v2.fluid as fluid + + +class TestCSPFramework(unittest.TestCase): + def daisy_chain(self): + n = 10000 + leftmost = fluid.make_channel(dtype=int) + right = leftmost + left = leftmost + with fluid.While(steps=n): + right = fluid.make_channel(dtype=int) + with fluid.go(): + fluid.send(left, 1 + fluid.recv(right)) + left = right + + with fluid.go(): + fluid.send(right, 1) + fluid.Print(fluid.recv(leftmost)) + + +if __name__ == '__main__': + unittest.main() -- GitLab From 154368b1a0f81dda8393a9aa8687d7da44b3bead Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Fri, 9 Feb 2018 17:34:15 -0800 Subject: [PATCH 129/138] Re-add the lack pybind.h --- paddle/fluid/pybind/pybind.h | 158 +++++++++++++++++++++++++++++++++++ 1 file changed, 158 insertions(+) create mode 100644 paddle/fluid/pybind/pybind.h diff --git a/paddle/fluid/pybind/pybind.h b/paddle/fluid/pybind/pybind.h new file mode 100644 index 00000000000..b3ea649a5b0 --- /dev/null +++ b/paddle/fluid/pybind/pybind.h @@ -0,0 +1,158 @@ +// Generated by the paddle/operator/CMakeLists.txt. DO NOT EDIT! + +USE_CUDA_ONLY_OP(ncclAllReduce); +USE_NO_KERNEL_OP(cond); +USE_OP(cross_entropy); +USE_OP(softmax_with_cross_entropy); +USE_OP(softmax); +USE_OP(detection_output); +USE_OP(sequence_softmax); +USE_OP(sum); +USE_OP(sgd); +USE_NO_KERNEL_OP(print); +USE_OP(adagrad); +USE_OP(maxout); +USE_OP(unpool); +USE_OP(max_pool2d_with_index); +USE_NO_KERNEL_OP(lod_rank_table); +USE_NO_KERNEL_OP(lod_tensor_to_array); +USE_NO_KERNEL_OP(array_to_lod_tensor); +USE_NO_KERNEL_OP(max_sequence_len); +USE_OP(sequence_conv); +USE_OP(sequence_pool); +USE_OP(lstm); +USE_OP(lstmp); +USE_OP(gru); +USE_NO_KERNEL_OP(recurrent); +USE_OP(warpctc); +USE_OP(cos_sim); +USE_NO_KERNEL_OP(parallel_do); +USE_OP(conv2d); +USE_OP(edit_distance); +USE_OP(pool2d); +USE_OP(conv2d_transpose); +USE_OP_DEVICE_KERNEL(conv2d, CUDNN); +USE_OP_DEVICE_KERNEL(pool2d, CUDNN); +USE_OP_DEVICE_KERNEL(conv2d_transpose, CUDNN); +USE_NO_KERNEL_OP(save); +USE_NO_KERNEL_OP(load); +USE_NO_KERNEL_OP(save_combine); +USE_NO_KERNEL_OP(load_combine); +USE_NO_KERNEL_OP(shrink_rnn_memory); +USE_OP(multiplex); +USE_OP(split); +USE_NO_KERNEL_OP(feed); +USE_OP(proximal_gd); +USE_OP(lstm_unit); +USE_NO_KERNEL_OP(merge_lod_tensor); +USE_OP(matmul); +USE_CPU_ONLY_OP(precision_recall); +USE_OP(ctc_align); +USE_OP(crop); +USE_OP(iou_similarity); +USE_OP(scatter); +USE_OP(clip_by_norm); +USE_OP(fill_constant_batch_size_like); +USE_OP(rmsprop); +USE_NO_KERNEL_OP(lod_array_length); +USE_NO_KERNEL_OP(increment); +USE_OP(squared_l2_distance); +USE_NO_KERNEL_OP(get_places); +USE_OP(smooth_l1_loss); +USE_CPU_ONLY_OP(crf_decoding); +USE_OP(bilinear_tensor_product); +USE_OP(scale); +USE_OP(assign_value); +USE_CPU_ONLY_OP(mine_hard_examples); +USE_OP(elementwise_div); +USE_OP(sigmoid_cross_entropy_with_logits); +USE_OP(log_loss); +USE_OP(momentum); +USE_OP(box_coder); +USE_OP(sequence_reshape); +USE_OP(reduce_sum); +USE_OP(split_selected_rows); +USE_OP(decayed_adagrad); +USE_OP(elementwise_sub); +USE_OP(layer_norm); +USE_OP(roi_pool); +USE_NO_KERNEL_OP(while); +USE_NO_KERNEL_OP(is_empty); +USE_CPU_ONLY_OP(nce); +USE_OP(expand); +USE_OP(linear_chain_crf); +USE_OP(sigmoid); +USE_NO_KERNEL_OP(read); +USE_OP(concat); +USE_OP(one_hot); +USE_OP(top_k); +USE_CPU_ONLY_OP(positive_negative_pair); +USE_OP(im2sequence); +USE_CPU_ONLY_OP(chunk_eval); +USE_OP(sequence_expand); +USE_OP(modified_huber_loss); +USE_OP(minus); +USE_OP(huber_loss); +USE_OP(gaussian_random); +USE_OP(elementwise_max); +USE_OP(adamax); +USE_OP(batch_norm); +USE_NO_KERNEL_OP(beam_search); +USE_OP(hinge_loss); +USE_OP(dropout); +USE_OP(row_conv); +USE_OP(conv_shift); +USE_NO_KERNEL_OP(fill); +USE_CPU_ONLY_OP(auc); +USE_OP(ftrl); +USE_NO_KERNEL_OP(fill_constant); +USE_CPU_ONLY_OP(bipartite_match); +USE_OP(spp); +USE_OP(sequence_slice); +USE_OP(sign); +USE_OP(prelu); +USE_OP(mul); +USE_OP(proximal_adagrad); +USE_OP(reshape); +USE_OP(cumsum); +USE_OP(cast); +USE_OP(elementwise_pow); +USE_OP(lookup_table); +USE_OP(label_smooth); +USE_OP(squared_l2_norm); +USE_CPU_ONLY_OP(multiclass_nms); +USE_NO_KERNEL_OP(conditional_block); +USE_OP(adadelta); +USE_OP(gather); +USE_OP(pad); +USE_NO_KERNEL_OP(fetch); +USE_OP(sequence_erase); +USE_OP(uniform_random); +USE_OP(gru_unit); +USE_OP(accuracy); +USE_OP(elementwise_min); +USE_OP(elementwise_add); +USE_OP(fill_zeros_like); +USE_OP(mean); +USE_OP(clip); +USE_OP(rank_loss); +USE_OP(sequence_concat); +USE_NO_KERNEL_OP(assign); +USE_OP(elementwise_mul); +USE_OP(target_assign); +USE_OP(lrn); +USE_OP(margin_rank_loss); +USE_NO_KERNEL_OP(reorder_lod_tensor_by_rank); +USE_NO_KERNEL_OP(beam_search_decode); +USE_NO_KERNEL_OP(rnn_memory_helper); +USE_OP(l1_norm); +USE_NO_KERNEL_OP(split_lod_tensor); +USE_OP(lod_reset); +USE_OP(norm); +USE_OP(adam); +USE_OP(transpose); +USE_CPU_ONLY_OP(prior_box); +USE_OP(less_than); +USE_OP(logical_and); +USE_NO_KERNEL_OP(read_from_array); +USE_NO_KERNEL_OP(create_random_data_generator); -- GitLab From 12266656e0030b4bb8a3560f91ba8dbfd54e2e2e Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Fri, 9 Feb 2018 18:00:11 -0800 Subject: [PATCH 130/138] Correct setup.in --- python/setup.py.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/setup.py.in b/python/setup.py.in index 65ec58ecf98..5a0d9999543 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -109,7 +109,7 @@ setup(name='${PACKAGE_NAME}', '': '${CMAKE_CURRENT_SOURCE_DIR}', # The paddle.v2.fluid.proto will be generated while compiling. # So that package points to other directory. - 'paddle.v2.fluid.proto': '${PADDLE_BINARY_DIR}/paddle/framework', + 'paddle.v2.fluid.proto': '${PADDLE_BINARY_DIR}/paddle/fluid/framework', 'py_paddle': '${PADDLE_SOURCE_DIR}/paddle/py_paddle' }, scripts=paddle_bins, -- GitLab From 74492d5d91bf12894d87d20a8c481b45e963364a Mon Sep 17 00:00:00 2001 From: Siddharth Goyal Date: Fri, 9 Feb 2018 18:16:45 -0800 Subject: [PATCH 131/138] Add proper casts for avoiding warnings (#8346) --- paddle/inference/tests/book/test_helper.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/paddle/inference/tests/book/test_helper.h b/paddle/inference/tests/book/test_helper.h index 02104306e71..9774f3fbcb4 100644 --- a/paddle/inference/tests/book/test_helper.h +++ b/paddle/inference/tests/book/test_helper.h @@ -34,7 +34,7 @@ template void SetupTensor(paddle::framework::LoDTensor& input, paddle::framework::DDim dims, std::vector& data) { - CHECK_EQ(paddle::framework::product(dims), data.size()); + CHECK_EQ(paddle::framework::product(dims), static_cast(data.size())); T* input_ptr = input.mutable_data(dims, paddle::platform::CPUPlace()); memcpy(input_ptr, data.data(), input.numel() * sizeof(T)); } @@ -55,7 +55,7 @@ void SetupLoDTensor(paddle::framework::LoDTensor& input, paddle::framework::LoD lod, std::vector& data) { const size_t level = lod.size() - 1; - CHECK_EQ(dims[0], (lod[level]).back()); + CHECK_EQ(dims[0], static_cast((lod[level]).back())); input.set_lod(lod); SetupTensor(input, dims, data); } @@ -84,7 +84,7 @@ void CheckError(paddle::framework::LoDTensor& output1, count++; } } - EXPECT_EQ(count, 0) << "There are " << count << " different elements."; + EXPECT_EQ(count, 0U) << "There are " << count << " different elements."; } template -- GitLab From bc7be8320e3fde1cc11fd7972646cfde6904a18b Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Fri, 9 Feb 2018 19:33:44 -0800 Subject: [PATCH 132/138] Update pre-commit --- paddle/fluid/pybind/pybind.h | 14 ++++ paddle/string/piece.h | 4 +- paddle/string/printf_test.cc | 4 +- paddle/string/tinyformat/tinyformat.h | 106 ++++++++++++++++---------- paddle/string/to_string_test.cc | 2 +- 5 files changed, 84 insertions(+), 46 deletions(-) diff --git a/paddle/fluid/pybind/pybind.h b/paddle/fluid/pybind/pybind.h index b3ea649a5b0..eac0b35e497 100644 --- a/paddle/fluid/pybind/pybind.h +++ b/paddle/fluid/pybind/pybind.h @@ -1,3 +1,17 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Generated by the paddle/operator/CMakeLists.txt. DO NOT EDIT! USE_CUDA_ONLY_OP(ncclAllReduce); diff --git a/paddle/string/piece.h b/paddle/string/piece.h index f2bb6b2c761..dcef9791a70 100644 --- a/paddle/string/piece.h +++ b/paddle/string/piece.h @@ -28,7 +28,7 @@ namespace string { // its syntax is simple as it doesn't own/manage the string, it is // cheap to construct Pieces and pass them around. class Piece { - public: +public: static const size_t npos = static_cast(-1); // We provide non-explicit singleton constructors so users can @@ -55,7 +55,7 @@ class Piece { // Return a string that contains the copy of the referenced data. std::string ToString() const { return std::string(data_, size_); } - private: +private: const char* data_; size_t size_; diff --git a/paddle/string/printf_test.cc b/paddle/string/printf_test.cc index b5ad35513bd..9815f29bdd8 100644 --- a/paddle/string/printf_test.cc +++ b/paddle/string/printf_test.cc @@ -24,6 +24,6 @@ TEST(StringPrintf, StringPrintf) { long hour = 14; int min = 44; EXPECT_EQ(std::string("Wednesday, July 27, 14:44"), - paddle::string::Sprintf("%s, %s %d, %.2d:%.2d", weekday, month, day, - hour, min)); + paddle::string::Sprintf( + "%s, %s %d, %.2d:%.2d", weekday, month, day, hour, min)); } diff --git a/paddle/string/tinyformat/tinyformat.h b/paddle/string/tinyformat/tinyformat.h index d1a2c47f1a9..270198dc52c 100644 --- a/paddle/string/tinyformat/tinyformat.h +++ b/paddle/string/tinyformat/tinyformat.h @@ -147,7 +147,7 @@ namespace detail { // Test whether type T1 is convertible to type T2 template struct is_convertible { - private: +private: // two types of different size struct fail { char dummy[2]; @@ -160,7 +160,7 @@ struct is_convertible { static succeed tryConvert(const T2 &); static const T1 &makeT1(); - public: +public: // Standard trick: the (...) version of tryConvert will be chosen from // the overload set only if the version taking a T2 doesn't match. // Then we compare the sizes of the return types to check which @@ -170,7 +170,8 @@ struct is_convertible { // Format the value by casting to type fmtT. This default implementation // should never be called. -template ::value> struct formatValueAsType { static void invoke(std::ostream & /*out*/, const T & /*value*/) { assert(0); } @@ -240,8 +241,11 @@ TINYFORMAT_DEFINE_FORMAT_TRUNCATED_CSTR(char) /// operator<< to format the type T, with special cases for the %c and %p /// conversions. template -inline void formatValue(std::ostream &out, const char * /*fmtBegin*/, - const char *fmtEnd, int ntrunc, const T &value) { +inline void formatValue(std::ostream &out, + const char * /*fmtBegin*/, + const char *fmtEnd, + int ntrunc, + const T &value) { // The mess here is to support the %c and %p conversions: if these // conversions are active we try to convert the type to a char or const // void* respectively and format that instead of the value itself. For the @@ -263,22 +267,25 @@ inline void formatValue(std::ostream &out, const char * /*fmtBegin*/, } // Overloaded version for char types to support printing as an integer -#define TINYFORMAT_DEFINE_FORMATVALUE_CHAR(charType) \ - inline void formatValue(std::ostream &out, const char * /*fmtBegin*/, \ - const char *fmtEnd, int /**/, charType value) { \ - switch (*(fmtEnd - 1)) { \ - case 'u': \ - case 'd': \ - case 'i': \ - case 'o': \ - case 'X': \ - case 'x': \ - out << static_cast(value); \ - break; \ - default: \ - out << value; \ - break; \ - } \ +#define TINYFORMAT_DEFINE_FORMATVALUE_CHAR(charType) \ + inline void formatValue(std::ostream &out, \ + const char * /*fmtBegin*/, \ + const char *fmtEnd, \ + int /**/, \ + charType value) { \ + switch (*(fmtEnd - 1)) { \ + case 'u': \ + case 'd': \ + case 'i': \ + case 'o': \ + case 'X': \ + case 'x': \ + out << static_cast(value); \ + break; \ + default: \ + out << value; \ + break; \ + } \ } // per 3.9.1: char, signed char and unsigned char are all distinct types TINYFORMAT_DEFINE_FORMATVALUE_CHAR(char) @@ -475,7 +482,7 @@ namespace detail { // each argument to be allocated as a homogenous array inside FormatList // whereas a naive implementation based on inheritance does not. class FormatArg { - public: +public: FormatArg() {} template @@ -484,17 +491,22 @@ class FormatArg { m_formatImpl(&formatImpl), m_toIntImpl(&toIntImpl) {} - void format(std::ostream &out, const char *fmtBegin, const char *fmtEnd, + void format(std::ostream &out, + const char *fmtBegin, + const char *fmtEnd, int ntrunc) const { m_formatImpl(out, fmtBegin, fmtEnd, ntrunc, m_value); } int toInt() const { return m_toIntImpl(m_value); } - private: +private: template - static void formatImpl(std::ostream &out, const char *fmtBegin, - const char *fmtEnd, int ntrunc, const void *value) { + static void formatImpl(std::ostream &out, + const char *fmtBegin, + const char *fmtEnd, + int ntrunc, + const void *value) { formatValue(out, fmtBegin, fmtEnd, ntrunc, *static_cast(value)); } @@ -504,8 +516,11 @@ class FormatArg { } const void *m_value; - void (*m_formatImpl)(std::ostream &out, const char *fmtBegin, - const char *fmtEnd, int ntrunc, const void *value); + void (*m_formatImpl)(std::ostream &out, + const char *fmtBegin, + const char *fmtEnd, + int ntrunc, + const void *value); int (*m_toIntImpl)(const void *value); }; @@ -554,10 +569,12 @@ inline const char *printFormatStringLiteral(std::ostream &out, // necessary to pull out variable width and precision . The function returns a // pointer to the character after the end of the current format spec. inline const char *streamStateFromFormat(std::ostream &out, - bool &spacePadPositive, int &ntrunc, + bool &spacePadPositive, + int &ntrunc, const char *fmtStart, const detail::FormatArg *formatters, - int &argIndex, int numFormatters) { + int &argIndex, + int numFormatters) { if (*fmtStart != '%') { TINYFORMAT_ERROR( "tinyformat: Not enough conversion specifiers in format string"); @@ -733,8 +750,10 @@ inline const char *streamStateFromFormat(std::ostream &out, } //------------------------------------------------------------------------------ -inline void formatImpl(std::ostream &out, const char *fmt, - const detail::FormatArg *formatters, int numFormatters) { +inline void formatImpl(std::ostream &out, + const char *fmt, + const detail::FormatArg *formatters, + int numFormatters) { // Saved stream state std::streamsize origWidth = out.width(); std::streamsize origPrecision = out.precision(); @@ -746,9 +765,13 @@ inline void formatImpl(std::ostream &out, const char *fmt, fmt = printFormatStringLiteral(out, fmt); bool spacePadPositive = false; int ntrunc = -1; - const char *fmtEnd = - streamStateFromFormat(out, spacePadPositive, ntrunc, fmt, formatters, - argIndex, numFormatters); + const char *fmtEnd = streamStateFromFormat(out, + spacePadPositive, + ntrunc, + fmt, + formatters, + argIndex, + numFormatters); if (argIndex >= numFormatters) { // Check args remain after reading any variable width/precision TINYFORMAT_ERROR("tinyformat: Not enough format arguments"); @@ -797,14 +820,15 @@ inline void formatImpl(std::ostream &out, const char *fmt, /// information has been stripped from the arguments, leaving just enough of a /// common interface to perform formatting as required. class FormatList { - public: +public: FormatList(detail::FormatArg *formatters, int N) : m_formatters(formatters), m_N(N) {} - friend void vformat(std::ostream &out, const char *fmt, + friend void vformat(std::ostream &out, + const char *fmt, const FormatList &list); - private: +private: const detail::FormatArg *m_formatters; int m_N; }; @@ -817,7 +841,7 @@ namespace detail { // Format list subclass with fixed storage to avoid dynamic allocation template class FormatListN : public FormatList { - public: +public: template FormatListN(const Args &... args) : FormatList(&m_formatterStore[0], N), @@ -825,14 +849,14 @@ class FormatListN : public FormatList { static_assert(sizeof...(args) == N, "Number of args must be N"); } - private: +private: FormatArg m_formatterStore[N]; }; // Special 0-arg version - MSVC says zero-sized C array in struct is nonstandard template <> class FormatListN<0> : public FormatList { - public: +public: FormatListN() : FormatList(0, 0) {} }; diff --git a/paddle/string/to_string_test.cc b/paddle/string/to_string_test.cc index 4956bd96fad..05650ee8f14 100644 --- a/paddle/string/to_string_test.cc +++ b/paddle/string/to_string_test.cc @@ -17,7 +17,7 @@ limitations under the License. */ constexpr char kOutputString[] = "User Defined Output"; class UserDefinedClass { - public: +public: }; std::ostream& operator<<(std::ostream& s, const UserDefinedClass& ins) { -- GitLab From 175aa7ea956a96cb8f2215cf488b61adeee7a065 Mon Sep 17 00:00:00 2001 From: fengjiayi Date: Sat, 10 Feb 2018 12:16:53 +0800 Subject: [PATCH 133/138] add lod and dtype inference (#8329) --- paddle/framework/op_desc.cc | 7 +++ paddle/framework/operator.cc | 4 ++ paddle/framework/reader.cc | 6 --- paddle/framework/shape_inference.cc | 22 +++++++++ paddle/framework/shape_inference.h | 10 +++++ paddle/operators/create_reader_op.cc | 45 ++++++++++++++++--- .../paddle/v2/fluid/tests/test_cpp_reader.py | 14 +++--- 7 files changed, 89 insertions(+), 19 deletions(-) diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc index b51afe499bb..90cc9b40236 100644 --- a/paddle/framework/op_desc.cc +++ b/paddle/framework/op_desc.cc @@ -77,6 +77,8 @@ class CompileTimeInferShapeContext : public InferShapeContext { void SetRepeatedDims(const std::string &name, const std::vector &dims) override; + InferShapeVarPtr GetVarPtr(const std::string &name) override; + const OpDesc &op_; const BlockDesc &block_; }; @@ -510,5 +512,10 @@ proto::VarDesc::VarType CompileTimeInferShapeContext::GetVarType( return block_.FindVarRecursive(name)->GetType(); } +InferShapeVarPtr CompileTimeInferShapeContext::GetVarPtr( + const std::string &name) { + return block_.FindVarRecursive(name); +} + } // namespace framework } // namespace paddle diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc index 52387aabd9d..072dce8929f 100644 --- a/paddle/framework/operator.cc +++ b/paddle/framework/operator.cc @@ -470,6 +470,10 @@ class RuntimeInferShapeContext : public InferShapeContext { return ToVarType(var->Type()); } + InferShapeVarPtr GetVarPtr(const std::string& name) override { + return scope_.FindVar(name); + } + private: const OperatorBase& op_; const Scope& scope_; diff --git a/paddle/framework/reader.cc b/paddle/framework/reader.cc index 928b661aaad..64caf85ed10 100644 --- a/paddle/framework/reader.cc +++ b/paddle/framework/reader.cc @@ -90,7 +90,6 @@ void BatchReader::ReadNext(std::vector* out) { // Merge lod and data LoD batch_lod; - std::vector top_level_lod({0}); for (size_t i = 0; i < buffer_.size(); ++i) { DDim ins_shape = buffer_[i][j].dims(); LoD ins_lod = buffer_[i][j].lod(); @@ -105,15 +104,10 @@ void BatchReader::ReadNext(std::vector* out) { } } } - top_level_lod.push_back( - top_level_lod.back() + - (ins_lod.empty() ? ins_shape[0] : (ins_lod[0].size() - 1))); - Tensor dst = out_tensor.Slice(dst_offset, dst_offset + ins_shape[0]); Copy(buffer_[i][j], platform::CPUPlace(), &dst); dst_offset += ins_shape[0]; } - batch_lod.insert(batch_lod.begin(), top_level_lod); out_tensor.set_lod(batch_lod); out->push_back(out_tensor); } diff --git a/paddle/framework/shape_inference.cc b/paddle/framework/shape_inference.cc index 2f4d4505771..14fc635f07d 100644 --- a/paddle/framework/shape_inference.cc +++ b/paddle/framework/shape_inference.cc @@ -72,6 +72,28 @@ void InferShapeContext::SetReaderDims(const std::string &name, return this->SetRepeatedDims(arg_names[0], dims); } +std::vector InferShapeContext::GetInputVarPtrs( + const std::string &name) { + const std::vector arg_names = Inputs(name); + std::vector res; + res.reserve(arg_names.size()); + std::transform( + arg_names.begin(), arg_names.end(), std::back_inserter(res), + [this](const std::string &name) { return this->GetVarPtr(name); }); + return res; +} + +std::vector InferShapeContext::GetOutputVarPtrs( + const std::string &name) { + const std::vector arg_names = Outputs(name); + std::vector res; + res.reserve(arg_names.size()); + std::transform( + arg_names.begin(), arg_names.end(), std::back_inserter(res), + [this](const std::string &name) { return this->GetVarPtr(name); }); + return res; +} + std::vector InferShapeContext::GetDims( const std::vector &names) const { std::vector ret; diff --git a/paddle/framework/shape_inference.h b/paddle/framework/shape_inference.h index 7bee8698523..3d4e8298bf5 100644 --- a/paddle/framework/shape_inference.h +++ b/paddle/framework/shape_inference.h @@ -17,10 +17,14 @@ limitations under the License. */ #include "paddle/framework/attribute.h" #include "paddle/framework/ddim.h" #include "paddle/framework/framework.pb.h" +#include "paddle/framework/var_desc.h" +#include "paddle/framework/variable.h" namespace paddle { namespace framework { +using InferShapeVarPtr = boost::variant; + class InferShapeContext { public: virtual ~InferShapeContext() = default; @@ -55,6 +59,9 @@ class InferShapeContext { virtual bool IsRuntime() const = 0; + std::vector GetInputVarPtrs(const std::string &name); + std::vector GetOutputVarPtrs(const std::string &name); + // Note: In while op, we need this to be public void SetDims(const std::vector &names, const std::vector &dims); @@ -67,10 +74,13 @@ class InferShapeContext { const std::vector &dims) = 0; std::vector GetDims(const std::vector &names) const; + std::vector GetVarTypes( const std::vector &names) const; virtual proto::VarDesc::VarType GetVarType(const std::string &name) const = 0; + + virtual InferShapeVarPtr GetVarPtr(const std::string &name) = 0; }; } // namespace framework diff --git a/paddle/operators/create_reader_op.cc b/paddle/operators/create_reader_op.cc index 5ba2a25ab4c..71f5202d7e6 100644 --- a/paddle/operators/create_reader_op.cc +++ b/paddle/operators/create_reader_op.cc @@ -42,6 +42,18 @@ class CreateFileReaderInferShape : public framework::InferShapeBase { const auto ranks = ctx->Attrs().Get>("ranks"); std::vector shapes = RestoreShapes(shape_concat, ranks); ctx->SetReaderDims("Out", shapes); + + if (ctx->IsRuntime()) { + const auto lod_levels = ctx->Attrs().Get>("lod_levels"); + PADDLE_ENFORCE_EQ( + lod_levels.size(), shapes.size(), + "The number of 'lod_levels'(%d) doesn't match the number " + "of 'shapes'(%d).", + lod_levels.size(), shapes.size()); + framework::VarDesc* reader = + boost::get(ctx->GetOutputVarPtrs("Out")[0]); + reader->SetLoDLevels(lod_levels); + } } }; @@ -54,11 +66,19 @@ class CreateDecoratedReaderInferShape : public framework::InferShapeBase { PADDLE_ENFORCE(ctx->HasOutput("Out"), "The output decorated reader should not be null."); ctx->SetReaderDims("Out", ctx->GetReaderDims("UnderlyingReader")); + + if (ctx->IsRuntime()) { + framework::VarDesc* in_reader = boost::get( + ctx->GetInputVarPtrs("UnderlyingReader")[0]); + framework::VarDesc* out_reader = + boost::get(ctx->GetOutputVarPtrs("Out")[0]); + out_reader->SetLoDLevels(in_reader->GetLoDLevels()); + } } }; -// general var type inference for all readers -class CreateReaderInferVarType : public framework::VarTypeInference { +// general var type inference for file readers +class CreateFileReaderInferVarType : public framework::VarTypeInference { public: void operator()(const framework::OpDesc& op_desc, framework::BlockDesc* block) const override { @@ -68,6 +88,20 @@ class CreateReaderInferVarType : public framework::VarTypeInference { } }; +// general var type inference for decorated readers +class CreateDecoratedReaderInferVarType : public framework::VarTypeInference { + public: + void operator()(const framework::OpDesc& op_desc, + framework::BlockDesc* block) const override { + std::string in_reader_name = op_desc.Input("UnderlyingReader")[0]; + framework::VarDesc* in_reader = block->FindVarRecursive(in_reader_name); + std::string out_reader_name = op_desc.Output("Out")[0]; + framework::VarDesc* out_reader = block->FindVarRecursive(out_reader_name); + out_reader->SetType(framework::proto::VarDesc::READER); + out_reader->SetDataTypes(in_reader->GetDataTypes()); + } +}; + template class CreateRandomDataGeneratorOp : public framework::OperatorBase { public: @@ -105,6 +139,7 @@ class CreateRandomDataGeneratorOpMaker "ranks = [3,2]" "It means the reader will generate two data each time," "whose shapes are [2,3,4] and [5,6] respectively."); + AddAttr>("lod_levels", "The LoD levels of each data."); AddAttr("min", "The lower bound of reader's uniform distribution."); AddAttr("max", "The upper bound of reader's uniform distribution."); AddComment(R"DOC( @@ -192,14 +227,14 @@ REGISTER_OPERATOR(create_random_data_generator, ops::CreateFileReaderInferShape, ops::CreateRandomDataGeneratorOpMaker, paddle::framework::EmptyGradOpMaker, - ops::CreateReaderInferVarType); + ops::CreateFileReaderInferVarType); REGISTER_OPERATOR(create_shuffle_reader, ops::CreateShuffleReaderOp, ops::CreateDecoratedReaderInferShape, ops::CreateShuffleReaderOpMaker, paddle::framework::EmptyGradOpMaker, - ops::CreateReaderInferVarType); + ops::CreateDecoratedReaderInferVarType); REGISTER_OPERATOR(create_batch_reader, ops::CreateBatchReaderOp, ops::CreateDecoratedReaderInferShape, ops::CreateBatchReaderOpMaker, paddle::framework::EmptyGradOpMaker, - ops::CreateReaderInferVarType); + ops::CreateDecoratedReaderInferVarType); diff --git a/python/paddle/v2/fluid/tests/test_cpp_reader.py b/python/paddle/v2/fluid/tests/test_cpp_reader.py index 970f57ed000..66d6c28ef7d 100644 --- a/python/paddle/v2/fluid/tests/test_cpp_reader.py +++ b/python/paddle/v2/fluid/tests/test_cpp_reader.py @@ -21,7 +21,8 @@ block = prog.current_block() random_reader = block.create_var( type=fluid.core.VarDesc.VarType.READER, name="RandomDataGenerator") -random_reader.desc.set_lod_levels([0, 0]) +random_reader.desc.set_dtypes( + [fluid.core.DataType.FP32, fluid.core.DataType.FP32]) create_random_data_generator_op = block.append_op( type="create_random_data_generator", @@ -30,11 +31,11 @@ create_random_data_generator_op = block.append_op( "shape_concat": [1, 2, 1, 1], "ranks": [2, 2], "min": 0.0, - "max": 1.0 + "max": 1.0, + 'lod_levels': [0, 0] }) shuffle_reader = block.create_var( type=fluid.core.VarDesc.VarType.READER, name="ShuffleReader") -shuffle_reader.desc.set_lod_levels([0, 0]) create_shuffle_reader_op = block.append_op( type="create_shuffle_reader", @@ -44,7 +45,6 @@ create_shuffle_reader_op = block.append_op( batch_reader = block.create_var( type=fluid.core.VarDesc.VarType.READER, name="BatchReader") -batch_reader.desc.set_lod_levels([1, 1]) create_batch_reader_op = block.append_op( type="create_batch_reader", @@ -62,11 +62,9 @@ read_op = block.append_op( place = fluid.CPUPlace() exe = fluid.Executor(place) -[res1, res2] = exe.run(prog, fetch_list=[out1, out2], return_numpy=False) +[res1, res2] = exe.run(prog, fetch_list=[out1, out2]) -test_pass = res1.lod() == [range(0, 11)] and res1.lod() == [ - range(0, 11) -] and np.array(res1).shape == (10, 2) and np.array(res2).shape == (10, 1) +test_pass = res1.shape == (10, 2) and res2.shape == (10, 1) if not test_pass: exit(1) -- GitLab From 697bb9f1ea2ceb515f4f00a2fbd49477906a4ba8 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Fri, 9 Feb 2018 20:22:22 -0800 Subject: [PATCH 134/138] Update CONTRIBUTing.md --- CONTRIBUTING.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a60453ff4e3..bf4ac011203 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,5 +1,7 @@ # Contribute Code +To get started, sign the Contributor License Agreement. + We sincerely appreciate your contribution. This document explains our workflow and work style. ## Workflow -- GitLab From 9ee23d8288e1f1a731447c115442254d102be9d2 Mon Sep 17 00:00:00 2001 From: Yi Wang Date: Fri, 9 Feb 2018 21:06:34 -0800 Subject: [PATCH 135/138] Update CONTRIBUTING.md --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index bf4ac011203..d5d9bd282d4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,6 @@ # Contribute Code -To get started, sign the Contributor License Agreement. +To get started, sign the Contributor License Agreement. We sincerely appreciate your contribution. This document explains our workflow and work style. -- GitLab From 77f04fd97aadad413815b111f6f85052da623dd5 Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Sat, 10 Feb 2018 15:02:08 +0800 Subject: [PATCH 136/138] move paddle/pybind/pybind.h to paddle/fluid/pybind/pybind.h, and cancel the test_parallel_op temporary --- .gitignore | 2 +- paddle/fluid/operators/CMakeLists.txt | 2 +- paddle/fluid/pybind/pybind.h | 172 ------------------ .../paddle/v2/fluid/tests/test_parallel_op.py | 4 +- 4 files changed, 4 insertions(+), 176 deletions(-) delete mode 100644 paddle/fluid/pybind/pybind.h diff --git a/.gitignore b/.gitignore index 59e650bdfe8..fe0d13f4d9e 100644 --- a/.gitignore +++ b/.gitignore @@ -33,5 +33,5 @@ CMakeFiles cmake_install.cmake paddle/.timestamp python/paddlepaddle.egg-info/ -paddle/pybind/pybind.h +paddle/fluid/pybind/pybind.h python/paddle/version.py diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index 25bb7187d36..cadfd735d7b 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -1,7 +1,7 @@ file(GLOB GENERAL_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "*_op.cc") string(REPLACE ".cc" "" GENERAL_OPS "${GENERAL_OPS}") set(DEPS_OPS "") -set(pybind_file ${PADDLE_SOURCE_DIR}/paddle/pybind/pybind.h) +set(pybind_file ${PADDLE_SOURCE_DIR}/paddle/fluid/pybind/pybind.h) file(WRITE ${pybind_file} "// Generated by the paddle/operator/CMakeLists.txt. DO NOT EDIT!\n\n") function(op_library TARGET) # op_library is a function to create op library. The interface is same as diff --git a/paddle/fluid/pybind/pybind.h b/paddle/fluid/pybind/pybind.h deleted file mode 100644 index eac0b35e497..00000000000 --- a/paddle/fluid/pybind/pybind.h +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Generated by the paddle/operator/CMakeLists.txt. DO NOT EDIT! - -USE_CUDA_ONLY_OP(ncclAllReduce); -USE_NO_KERNEL_OP(cond); -USE_OP(cross_entropy); -USE_OP(softmax_with_cross_entropy); -USE_OP(softmax); -USE_OP(detection_output); -USE_OP(sequence_softmax); -USE_OP(sum); -USE_OP(sgd); -USE_NO_KERNEL_OP(print); -USE_OP(adagrad); -USE_OP(maxout); -USE_OP(unpool); -USE_OP(max_pool2d_with_index); -USE_NO_KERNEL_OP(lod_rank_table); -USE_NO_KERNEL_OP(lod_tensor_to_array); -USE_NO_KERNEL_OP(array_to_lod_tensor); -USE_NO_KERNEL_OP(max_sequence_len); -USE_OP(sequence_conv); -USE_OP(sequence_pool); -USE_OP(lstm); -USE_OP(lstmp); -USE_OP(gru); -USE_NO_KERNEL_OP(recurrent); -USE_OP(warpctc); -USE_OP(cos_sim); -USE_NO_KERNEL_OP(parallel_do); -USE_OP(conv2d); -USE_OP(edit_distance); -USE_OP(pool2d); -USE_OP(conv2d_transpose); -USE_OP_DEVICE_KERNEL(conv2d, CUDNN); -USE_OP_DEVICE_KERNEL(pool2d, CUDNN); -USE_OP_DEVICE_KERNEL(conv2d_transpose, CUDNN); -USE_NO_KERNEL_OP(save); -USE_NO_KERNEL_OP(load); -USE_NO_KERNEL_OP(save_combine); -USE_NO_KERNEL_OP(load_combine); -USE_NO_KERNEL_OP(shrink_rnn_memory); -USE_OP(multiplex); -USE_OP(split); -USE_NO_KERNEL_OP(feed); -USE_OP(proximal_gd); -USE_OP(lstm_unit); -USE_NO_KERNEL_OP(merge_lod_tensor); -USE_OP(matmul); -USE_CPU_ONLY_OP(precision_recall); -USE_OP(ctc_align); -USE_OP(crop); -USE_OP(iou_similarity); -USE_OP(scatter); -USE_OP(clip_by_norm); -USE_OP(fill_constant_batch_size_like); -USE_OP(rmsprop); -USE_NO_KERNEL_OP(lod_array_length); -USE_NO_KERNEL_OP(increment); -USE_OP(squared_l2_distance); -USE_NO_KERNEL_OP(get_places); -USE_OP(smooth_l1_loss); -USE_CPU_ONLY_OP(crf_decoding); -USE_OP(bilinear_tensor_product); -USE_OP(scale); -USE_OP(assign_value); -USE_CPU_ONLY_OP(mine_hard_examples); -USE_OP(elementwise_div); -USE_OP(sigmoid_cross_entropy_with_logits); -USE_OP(log_loss); -USE_OP(momentum); -USE_OP(box_coder); -USE_OP(sequence_reshape); -USE_OP(reduce_sum); -USE_OP(split_selected_rows); -USE_OP(decayed_adagrad); -USE_OP(elementwise_sub); -USE_OP(layer_norm); -USE_OP(roi_pool); -USE_NO_KERNEL_OP(while); -USE_NO_KERNEL_OP(is_empty); -USE_CPU_ONLY_OP(nce); -USE_OP(expand); -USE_OP(linear_chain_crf); -USE_OP(sigmoid); -USE_NO_KERNEL_OP(read); -USE_OP(concat); -USE_OP(one_hot); -USE_OP(top_k); -USE_CPU_ONLY_OP(positive_negative_pair); -USE_OP(im2sequence); -USE_CPU_ONLY_OP(chunk_eval); -USE_OP(sequence_expand); -USE_OP(modified_huber_loss); -USE_OP(minus); -USE_OP(huber_loss); -USE_OP(gaussian_random); -USE_OP(elementwise_max); -USE_OP(adamax); -USE_OP(batch_norm); -USE_NO_KERNEL_OP(beam_search); -USE_OP(hinge_loss); -USE_OP(dropout); -USE_OP(row_conv); -USE_OP(conv_shift); -USE_NO_KERNEL_OP(fill); -USE_CPU_ONLY_OP(auc); -USE_OP(ftrl); -USE_NO_KERNEL_OP(fill_constant); -USE_CPU_ONLY_OP(bipartite_match); -USE_OP(spp); -USE_OP(sequence_slice); -USE_OP(sign); -USE_OP(prelu); -USE_OP(mul); -USE_OP(proximal_adagrad); -USE_OP(reshape); -USE_OP(cumsum); -USE_OP(cast); -USE_OP(elementwise_pow); -USE_OP(lookup_table); -USE_OP(label_smooth); -USE_OP(squared_l2_norm); -USE_CPU_ONLY_OP(multiclass_nms); -USE_NO_KERNEL_OP(conditional_block); -USE_OP(adadelta); -USE_OP(gather); -USE_OP(pad); -USE_NO_KERNEL_OP(fetch); -USE_OP(sequence_erase); -USE_OP(uniform_random); -USE_OP(gru_unit); -USE_OP(accuracy); -USE_OP(elementwise_min); -USE_OP(elementwise_add); -USE_OP(fill_zeros_like); -USE_OP(mean); -USE_OP(clip); -USE_OP(rank_loss); -USE_OP(sequence_concat); -USE_NO_KERNEL_OP(assign); -USE_OP(elementwise_mul); -USE_OP(target_assign); -USE_OP(lrn); -USE_OP(margin_rank_loss); -USE_NO_KERNEL_OP(reorder_lod_tensor_by_rank); -USE_NO_KERNEL_OP(beam_search_decode); -USE_NO_KERNEL_OP(rnn_memory_helper); -USE_OP(l1_norm); -USE_NO_KERNEL_OP(split_lod_tensor); -USE_OP(lod_reset); -USE_OP(norm); -USE_OP(adam); -USE_OP(transpose); -USE_CPU_ONLY_OP(prior_box); -USE_OP(less_than); -USE_OP(logical_and); -USE_NO_KERNEL_OP(read_from_array); -USE_NO_KERNEL_OP(create_random_data_generator); diff --git a/python/paddle/v2/fluid/tests/test_parallel_op.py b/python/paddle/v2/fluid/tests/test_parallel_op.py index 367cc8b1aaf..f1fd09a7fdb 100644 --- a/python/paddle/v2/fluid/tests/test_parallel_op.py +++ b/python/paddle/v2/fluid/tests/test_parallel_op.py @@ -197,5 +197,5 @@ class ParallelOpTestMultipleInput(BaseParallelForTest): fetch=['fc1.w@GRAD', 'fc2.w@GRAD', 'fc3.w@GRAD']) -if __name__ == '__main__': - unittest.main() +#if __name__ == '__main__': +# unittest.main() -- GitLab From bbe53a1a8c6aac442e531ee74be99292b49ac63b Mon Sep 17 00:00:00 2001 From: Luo Tao Date: Sat, 10 Feb 2018 15:43:41 +0800 Subject: [PATCH 137/138] update cla --- CONTRIBUTING.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d5d9bd282d4..3c36cffcb4e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,7 @@ # Contribute Code -To get started, sign the Contributor License Agreement. +You are welcome to contribute to project PaddlePaddle. To contribute to PaddlePaddle, you have to agree with the +[PaddlePaddle Contributor License Agreement](https://gist.github.com/wangkuiyi/0c22c7b1bd3bb7eb27d76f85c3a3e329). We sincerely appreciate your contribution. This document explains our workflow and work style. -- GitLab From e800597bcf57e7d460522a1a0c692695fd5105e2 Mon Sep 17 00:00:00 2001 From: kexinzhao Date: Sat, 10 Feb 2018 09:34:27 -0800 Subject: [PATCH 138/138] Fix include path in inference test codes (#8349) * fix absolute include path * Remove test_helper.h in old location * update include path --- paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc | 2 +- .../inference/tests/book/test_inference_image_classification.cc | 2 +- .../inference/tests/book/test_inference_label_semantic_roles.cc | 2 +- .../inference/tests/book/test_inference_recognize_digits.cc | 2 +- .../inference/tests/book/test_inference_recommender_system.cc | 2 +- .../inference/tests/book/test_inference_rnn_encoder_decoder.cc | 2 +- .../inference/tests/book/test_inference_understand_sentiment.cc | 2 +- paddle/fluid/inference/tests/book/test_inference_word2vec.cc | 2 +- paddle/fluid/inference/tests/{book => }/test_helper.h | 0 9 files changed, 8 insertions(+), 8 deletions(-) rename paddle/fluid/inference/tests/{book => }/test_helper.h (100%) diff --git a/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc b/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc index 201a2801cd6..fa18e69b3ac 100644 --- a/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc +++ b/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc @@ -11,7 +11,7 @@ limitations under the License. */ #include #include "gflags/gflags.h" -#include "test_helper.h" +#include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); diff --git a/paddle/fluid/inference/tests/book/test_inference_image_classification.cc b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc index 36ea7c77a75..27f17712bca 100644 --- a/paddle/fluid/inference/tests/book/test_inference_image_classification.cc +++ b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc @@ -14,7 +14,7 @@ limitations under the License. */ #include #include "gflags/gflags.h" -#include "test_helper.h" +#include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); diff --git a/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc b/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc index 922dbfd3338..55acd95f509 100644 --- a/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc +++ b/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc @@ -14,7 +14,7 @@ limitations under the License. */ #include #include "gflags/gflags.h" -#include "test_helper.h" +#include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); diff --git a/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc b/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc index af8c2b14c3b..99cf0f3095b 100644 --- a/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc +++ b/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc @@ -14,7 +14,7 @@ limitations under the License. */ #include #include "gflags/gflags.h" -#include "test_helper.h" +#include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); diff --git a/paddle/fluid/inference/tests/book/test_inference_recommender_system.cc b/paddle/fluid/inference/tests/book/test_inference_recommender_system.cc index ec24c7e6ab7..9208c2a5996 100644 --- a/paddle/fluid/inference/tests/book/test_inference_recommender_system.cc +++ b/paddle/fluid/inference/tests/book/test_inference_recommender_system.cc @@ -14,7 +14,7 @@ limitations under the License. */ #include #include "gflags/gflags.h" -#include "test_helper.h" +#include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); diff --git a/paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc b/paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc index 248b9dce217..c88ca30cb78 100644 --- a/paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc +++ b/paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc @@ -14,7 +14,7 @@ limitations under the License. */ #include #include "gflags/gflags.h" -#include "test_helper.h" +#include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); diff --git a/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc b/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc index 1afb6444465..3b29d52880c 100644 --- a/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc +++ b/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc @@ -14,7 +14,7 @@ limitations under the License. */ #include #include "gflags/gflags.h" -#include "test_helper.h" +#include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); diff --git a/paddle/fluid/inference/tests/book/test_inference_word2vec.cc b/paddle/fluid/inference/tests/book/test_inference_word2vec.cc index ca0c040ff62..93376b6824d 100644 --- a/paddle/fluid/inference/tests/book/test_inference_word2vec.cc +++ b/paddle/fluid/inference/tests/book/test_inference_word2vec.cc @@ -14,7 +14,7 @@ limitations under the License. */ #include #include "gflags/gflags.h" -#include "test_helper.h" +#include "paddle/fluid/inference/tests/test_helper.h" DEFINE_string(dirname, "", "Directory of the inference model."); diff --git a/paddle/fluid/inference/tests/book/test_helper.h b/paddle/fluid/inference/tests/test_helper.h similarity index 100% rename from paddle/fluid/inference/tests/book/test_helper.h rename to paddle/fluid/inference/tests/test_helper.h -- GitLab