diff --git a/.travis.yml b/.travis.yml
index 8c772030925dcad3909f142b08e4d8057a3f89b7..a406841f6abf01f15826f34fe4c63b4c24486ccd 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -31,7 +31,7 @@ script:
if [[ "$JOB" != "doc" ]]; then exit 0; fi;
# For document only
if [[ "$TRAVIS_PULL_REQUEST" != "false" ]]; then exit 0; fi;
- if [[ "$TRAVIS_BRANCH" != "develop" && ! "$TRAVIS_BRANCH" =~ ^v[[:digit:]]+\.[[:digit:]]+(\.[[:digit:]]+)?(-\S*)?$ ]]; then exit 0; fi;
+ if [[ "$TRAVIS_BRANCH" != "develop" && ! "$TRAVIS_BRANCH" =~ ^v|release/[[:digit:]]+\.[[:digit:]]+(\.[[:digit:]]+)?(-\S*)?$ ]]; then exit 0; fi;
export DEPLOY_DOCS_SH=https://raw.githubusercontent.com/PaddlePaddle/PaddlePaddle.org/master/scripts/deploy/deploy_docs.sh
export DOCS_DIR=`pwd`
cd ..
diff --git a/Dockerfile b/Dockerfile
index fc5069a6c080ed23317695e6822c4c46b5b5c7f9..48c750358cfcb227667c429f19befcaa2f51ebbd 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -23,7 +23,7 @@ ENV HOME /root
COPY ./paddle/scripts/docker/root/ /root/
RUN apt-get update && \
- apt-get install -y --allow-downgrades \
+ apt-get install -y --allow-downgrades patchelf \
git python-pip python-dev python-opencv openssh-server bison \
libnccl2=2.1.2-1+cuda8.0 libnccl-dev=2.1.2-1+cuda8.0 \
wget unzip unrar tar xz-utils bzip2 gzip coreutils ntp \
diff --git a/cmake/version.cmake b/cmake/version.cmake
index cde650128a068faf32f4abfff5cdfdeb656d8577..79b8e8ac496250d85427b77fbd6a9924a962a15b 100644
--- a/cmake/version.cmake
+++ b/cmake/version.cmake
@@ -1,16 +1,21 @@
# Get the latest git tag.
set(PADDLE_VERSION $ENV{PADDLE_VERSION})
set(tmp_version "HEAD")
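+# TAG_VERSION_REGEX matches release versions such as 0.14.0 or 0.14.0.rc.1.
+# COMMIT_VERSION_REGEX matches the bare commit hash that "git describe --always" prints when no tag is reachable.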
+set(TAG_VERSION_REGEX "[0-9]+\\.[0-9]+\\.[0-9]+(\\.(a|b|rc)\\.[0-9]+)?")
+set(COMMIT_VERSION_REGEX "[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]+")
while ("${PADDLE_VERSION}" STREQUAL "")
execute_process(
- COMMAND ${GIT_EXECUTABLE} describe --tags --abbrev=0 ${tmp_version}
+ COMMAND ${GIT_EXECUTABLE} describe --tags --abbrev=0 --always ${tmp_version}
WORKING_DIRECTORY ${PADDLE_SOURCE_DIR}
OUTPUT_VARIABLE GIT_TAG_NAME
RESULT_VARIABLE GIT_RESULT
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
if (NOT ${GIT_RESULT})
# Check the tag is a correct version
- if (${GIT_TAG_NAME} MATCHES "v[0-9]+\\.[0-9]+\\.[0-9]+(\\.(a|b|rc)\\.[0-9]+)?")
+ if (${GIT_TAG_NAME} MATCHES "${COMMIT_VERSION_REGEX}")
+ # If git describe returned a bare commit hash, no tag was found; fall back to "latest".
+ set(PADDLE_VERSION "latest")
+ elseif (${GIT_TAG_NAME} MATCHES "v${TAG_VERSION_REGEX}")
string(REPLACE "v" "" PADDLE_VERSION ${GIT_TAG_NAME})
else() # otherwise, get the previous git tag name.
set(tmp_version "${GIT_TAG_NAME}~1")
diff --git a/doc/fluid/api/transpiler.rst b/doc/fluid/api/transpiler.rst
index 943d39331d26c05764c90cb24f6774997c976bfe..d2ac04f1449c32cb414cea1b76d7469bbe9ccb85 100644
--- a/doc/fluid/api/transpiler.rst
+++ b/doc/fluid/api/transpiler.rst
@@ -14,6 +14,15 @@ DistributeTranspiler
:members:
:noindex:
+.. _api_fluid_transpiler_InferenceTranspiler:
+
+InferenceTranspiler
+-------------------
+
+.. autoclass:: paddle.fluid.transpiler.InferenceTranspiler
+ :members:
+ :noindex:
+
.. _api_fluid_transpiler_memory_optimize:
memory_optimize
diff --git a/doc/fluid/design/dist_train/distributed_lookup_table_design.md b/doc/fluid/design/dist_train/distributed_lookup_table_design.md
index 97f890c88e778a59ea475e984ccbc28cf026fc5b..e284e1ec5cdd18d0049ce3c1a8349bbe1248cb48 100644
--- a/doc/fluid/design/dist_train/distributed_lookup_table_design.md
+++ b/doc/fluid/design/dist_train/distributed_lookup_table_design.md
@@ -1,6 +1,6 @@
# Design Doc: Distributed Lookup Table Operator
-A lookup table operator in PaddlePaddle where the table could be out
+A distributed lookup table operator in PaddlePaddle where the table could be out
of the memory of a computer.
## Background
@@ -24,14 +24,14 @@ memory, so we'd need a distributed storage service, which supports the
lookup of rows.
The following figure illustrates the multiplication of x with two
-non-zero elements, or say, two symbols, and a lookup table W:
+non-zero elements, or say two symbols, and a lookup table W:
![lookup table](./src/lookup_table.png)
### The Backward Algorithm
The backward algorithm computes W'(x) using W(x). W'(x) has the same
-scale of size as W(x) and is much smaller than W.
+scale as W(x) and is much smaller than W.
To optimize W given W', we can do simple SGD update:
@@ -44,111 +44,46 @@ $$W = f(W, W')$$
The following figure illustrates the backward pass of the lookup
operator: ![lookup table training](./src/lookup_table_training.png)
-## Distributed Storage Service
-
-The forward algorithm requires a distributed storage service for W.
-The backward algorithm prefers that the storage system can apply the
-optimization algorithm on W. The following two sections describe two
-solutions -- the former doesn't require that the storage service can
-do optimization, the latter does.
-
-### Storage Service Doesn't Optimize
-
-In this design, we use highly-optimized distributed storage, e.g.,
-memcached, as the storage service, and we run the optimization
-algorithm on parameter servers of PaddlePaddle. The following figure
-illustrates the training process.
-
-
-
-
-
-Each trainer runs the forward and backward passes using their local
-data:
-
-1. In the forward pass, when a trainer runs the forward algorithm of a
- lookup operator, it retrieves W(x) from the storage service.
-1. The trainer computes W'(x) in the backward pass using W(x).
-
-During the global update process:
-
-1. Each trainer uploads its W'(x) to parameter servers.
-1. The parameter server runs the optimization algorithm, e.g., the
- Adam optimization algorithm, which requires that
- 1. The parameter server retrieves W(x) from memcached, and
- 1. The parameter server pushes $\Delta W(x)=f(W(x), lambda \sum_j
- W'(x))$ to memcached, where $f$ denotes the optimization
- algorithm.
-
-### Storage Service Does Optimize
-
-This design is very similar to the above one, except that the
-optimization algorithm $f$ runs on the storage service.
-
-- Pro: parameter servers do not retrieve W(x) from the storage
- service, thus saves half network communication.
-- Con: the storage service needs to be able to run the optimization
- algorithm.
-
-## Distributed Sparse Table in Fluid
-
-For another design, we can implement a distributed sparse table in Fluid,
-and don't need to maintain an external storage component while training.
-
-You may need to read Fluid [Distributed Training Architecture](./distributed_architecture.md)
-and [Parameter Server](./parameter_server.md) before going on.
-
-![fluid lookup remote table](./src/fluid_lookup_remote_table.png)
-
-Partition a large table into multiple pserver instances
-1. `DistributeTranspiler` would split the table partitioned into some small
-table blocks with some partitioned algorithms such as
-[RoundRobin](https://en.wikipedia.org/wiki/Round-robin_scheduling),
-[Hash](https://en.wikipedia.org/wiki/Hash) and etc...
-1. For some cases, the range of input `Ids` is very wide and unpredictable, so the sparse
-table would be able to fill a new value for the id that didn't appear before with
-zero, uniform random or Gaussian distribution.
-
-For each Trainer's training process:
-1. In the forward pass, we use `pre-fetch` op to pre-fetch parameter blocks according to the
-input `Ids` from PServers instead of the local `lookup_table` op, and then merge the blocks
-into a parameter `W`.
-1. Compute `GRAD@W'` in the backward pass using the pre-fetched `W` and send it to PServer to
-execute the optimize pass.
-
-## Conclusion
-
-Let us do the "storage service does not optimize" solution first, as a
-baseline at least, because it is easier to use a well-optimized
-distributed storage service like memcached. We can do the "storage
-service does optimize" solution later or at the same time, which, if
-implemented carefully, should have better performance than the former.
+## Distributed Lookup Table
+### Problem 1: The lookup table may be very large.
+
+In scenarios such as search engines and recommendation systems, the number of feature Ids may be very large, say 100,000,000,000. For a lookup table whose value is a vector of 8 floats, the total size of the table is:
+
+```
+100,000,000,000 * 8 * 4 Bytes = 3.2e12 Bytes ≈ 2980.23 GB
+```
+
+### Solution: Distributed storage
+
+1. Paddle uses [SelectedRows](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/selected_rows.md) as the storage format for the lookup table. The lookup table parameter is split across multiple machines according to the hash of the feature Id, and the input data is split and sent to the corresponding machines so they can prefetch the parameter.
+
+1. For common parameters, the trainer fetches the whole parameter for training, but it cannot store the whole of a big lookup table. Because the input features are very sparse, each training step needs only a few rows of the parameter, so we use `prefetch_op` to prefetch only the rows the trainer needs; a minimal sketch of this sharding idea follows this list.
+
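+The following plain-Python sketch illustrates the hash-based sharding idea; it is not the actual `split_ids_op`/`prefetch_op` implementation, and `num_pservers` is an illustrative name:
+
+```
+def split_ids(ids, num_pservers):
+    """Group sparse feature Ids by the pserver that owns them (hash modulo)."""
+    shards = [[] for _ in range(num_pservers)]
+    for feature_id in ids:
+        shards[hash(feature_id) % num_pservers].append(feature_id)
+    return shards
+
+# Each shard is sent to its pserver, which returns the corresponding rows.
+shards = split_ids([7, 42, 42, 1000000007], num_pservers=4)
+```
+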
+### Problem 2: The Ids in the lookup table are unknown before training.
+
+The feature Ids are calculated by a hash function. Because the feature data source is so large, we cannot enumerate all the Ids before training, so we cannot initialize the table in advance.
+
+### Solution: Id auto growth
+
+At the beginning of training, Paddle only allocates memory for the lookup table on the parameter server side; the Ids and their values are not initialized. During training, when a parameter server receives an Id that is already in the lookup table, it returns the existing parameter; if the Id does not exist, Paddle adds it to the lookup table and initializes its value. A minimal sketch of this behavior follows.
+
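+A minimal plain-Python sketch of the get-or-initialize behavior on the parameter server side (the uniform initialization is illustrative; zero or Gaussian initialization works the same way):
+
+```
+import numpy as np
+
+table = {}  # Id -> parameter row; grows as new Ids arrive
+
+def lookup(feature_id, width=8):
+    # Return the existing row, or lazily create and initialize one.
+    if feature_id not in table:
+        table[feature_id] = np.random.uniform(-0.1, 0.1, width)
+    return table[feature_id]
+```
+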
+### Problem 3: Parameter save and load
+
+For common parameters, Paddle uses the trainer to save and load them. But for the distributed lookup table, the trainer cannot do this because of the table's large size.
+
+### Solution: Parameter server side save and load
+
+Paddle supports parameter-server-side save and load for the distributed lookup table. Each parameter server saves and loads only its own part of the whole table, as sketched below.
+
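+For illustration only, each parameter server could persist its shard independently; the helper below and its file-naming scheme are hypothetical, not Paddle's actual save format:
+
+```
+import pickle
+
+def save_shard(table_shard, pserver_id, num_pservers, path="lookup_table"):
+    # Each parameter server persists only the rows it owns.
+    filename = "%s.shard-%d-of-%d.pkl" % (path, pserver_id, num_pservers)
+    with open(filename, "wb") as f:
+        pickle.dump(table_shard, f)
+```
+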
+## Architecture
+The overall architecture of the distributed lookup table is shown below:
+
+### Training steps:
+1. Read a batch of data; the data consists of feature Ids.
+1. The input Ids are split by `split_ids_op` using the same hash function as the lookup table.
+1. `prefetch_op` uses the split results to prefetch the corresponding parameter rows from the lookup table.
+1. Run the forward-backward pass to get the gradient of the lookup table.
+1. `split_ids_op` splits the gradient, which is then sent to the parameter servers with `send_op`.
+1. The parameter servers update the table with the received gradients (a pseudocode sketch of these steps follows the figure).
+
+![distribute lookup table](./src/distributed_lookup_table.jpeg)
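+
+A pseudocode sketch of one trainer-side iteration (plain Python; in Paddle these steps are performed by the `split_ids_op`, `prefetch_op`, and `send_op` operators, and the helper names here are illustrative):
+
+```
+for batch_ids in reader():                        # 1. read a batch of feature Ids
+    shards = split_ids(batch_ids, num_pservers)   # 2. shard Ids with the table's hash
+    rows = prefetch(shards)                       # 3. fetch the needed rows from pservers
+    id_grads = forward_backward(batch_ids, rows)  # 4. per-Id gradients of the table
+    for shard in split_ids(id_grads, num_pservers):  # 5. split gradients by owner
+        send(shard)                               # 6. pservers apply the updates
+```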
diff --git a/doc/fluid/design/dist_train/src/distributed_lookup_table.graffle b/doc/fluid/design/dist_train/src/distributed_lookup_table.graffle
new file mode 100644
index 0000000000000000000000000000000000000000..65dfdbbacd219739db6ddfdf243cc16c3c4e8d1e
Binary files /dev/null and b/doc/fluid/design/dist_train/src/distributed_lookup_table.graffle differ
diff --git a/doc/fluid/design/dist_train/src/distributed_lookup_table.jpeg b/doc/fluid/design/dist_train/src/distributed_lookup_table.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..5353a16fd329f62ff893d32706b9c3c0bcc46a07
Binary files /dev/null and b/doc/fluid/design/dist_train/src/distributed_lookup_table.jpeg differ
diff --git a/paddle/contrib/inference/CMakeLists.txt b/paddle/contrib/inference/CMakeLists.txt
index a8bbb4eb8081420ae0bbaf761bd27303c0d043cb..c30eff5010748685838feb984c9c817ffcf14c11 100644
--- a/paddle/contrib/inference/CMakeLists.txt
+++ b/paddle/contrib/inference/CMakeLists.txt
@@ -46,9 +46,14 @@ cc_library(paddle_inference_api
SRCS paddle_inference_api.cc paddle_inference_api_impl.cc
DEPS ${FLUID_CORE_MODULES} ${GLOB_OP_LIB})
+# Here the shared library must not depend on other fluid libraries; otherwise a double free will occur.
cc_library(paddle_inference_api_shared SHARED
- SRCS paddle_inference_api.cc paddle_inference_api_impl.cc
- DEPS ${FLUID_CORE_MODULES} ${GLOB_OP_LIB})
+ SRCS paddle_inference_api.cc paddle_inference_api_impl.cc)
+set_target_properties(paddle_inference_api_shared PROPERTIES OUTPUT_NAME paddle_inference_api)
+if(NOT APPLE)
+ set(LINK_FLAGS "-fPIC -fvisibility=hidden")
+ set_target_properties(paddle_inference_api_shared PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
+endif()
cc_test(test_paddle_inference_api
SRCS test_paddle_inference_api.cc
diff --git a/paddle/contrib/inference/paddle_inference_api.cc b/paddle/contrib/inference/paddle_inference_api.cc
index ea46b3006f8d0964cc8229d3683ee7b602d6ef0d..4fe198ad7d4a752882965e9e7fc460741de53d22 100644
--- a/paddle/contrib/inference/paddle_inference_api.cc
+++ b/paddle/contrib/inference/paddle_inference_api.cc
@@ -23,7 +23,6 @@ int PaddleDtypeSize(PaddleDType dtype) {
case PaddleDType::INT64:
return sizeof(int64_t);
default:
- //
assert(false);
return -1;
}
diff --git a/paddle/fluid/framework/details/build_strategy.h b/paddle/fluid/framework/details/build_strategy.h
index 9c2c845c6efb206fb1ad5150189430b9a6fe9ea3..b2e5399e2376a86c1cd310b29c768832665af87f 100644
--- a/paddle/fluid/framework/details/build_strategy.h
+++ b/paddle/fluid/framework/details/build_strategy.h
@@ -34,7 +34,7 @@ struct BuildStrategy {
std::string debug_graphviz_path_{""};
- bool enable_data_balance_{true};
+ bool enable_data_balance_{false};
};
} // namespace details
diff --git a/paddle/fluid/framework/details/data_balance_op_handle.cc b/paddle/fluid/framework/details/data_balance_op_handle.cc
index d07235df5856591f8ad707c86fa5b3b65868c3d1..68896c8ac1bae7d4bfcfa79cc8ec5c26bf2d93ee 100644
--- a/paddle/fluid/framework/details/data_balance_op_handle.cc
+++ b/paddle/fluid/framework/details/data_balance_op_handle.cc
@@ -86,9 +86,9 @@ std::vector<std::array<int, 3>> DataBalanceOpHandle::GetBalancePlan(
}
void DataBalanceOpHandle::RunImpl() {
- if (places_.size() == 1) {
- return;
- }
+ PADDLE_ENFORCE_GT(places_.size(), 1,
+ "Data balance can only be enabled when the number of "
+ "places to run larger than 1.");
auto in_var_handles = DynamicCast<VarHandle>(inputs_);
auto out_var_handles = DynamicCast<VarHandle>(outputs_);
PADDLE_ENFORCE(in_var_handles.size() % places_.size() == 0);
diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.cc b/paddle/fluid/framework/details/multi_devices_graph_builder.cc
index 46d0c2769cb334f5cb75ae0ef5e48da45448c48f..b82c2ef4082110f1621eb38d50361396511a4825 100644
--- a/paddle/fluid/framework/details/multi_devices_graph_builder.cc
+++ b/paddle/fluid/framework/details/multi_devices_graph_builder.cc
@@ -59,6 +59,11 @@ MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder(
grad_names_.insert(GradVarName(p));
}
balance_vars_.resize(places_.size(), 0);
+ if (strategy_.enable_data_balance_ && places_.size() == 1) {
+ LOG(WARNING) << "It is no need to enable data balance when there is only "
+ "one place. enable_data_balance is set to False.";
+ strategy_.enable_data_balance_ = false;
+ }
}
void MultiDevSSAGraphBuilder::CreateOpHandleIOs(SSAGraph *result,
diff --git a/paddle/fluid/framework/op_info.cc b/paddle/fluid/framework/op_info.cc
index f1261dee0319440995951d1bee145404186a8ad4..af75baa5c4b98f7d092834c05eb57e9c7e131b29 100644
--- a/paddle/fluid/framework/op_info.cc
+++ b/paddle/fluid/framework/op_info.cc
@@ -21,8 +21,8 @@ namespace framework {
// a static local variable is already being initialized.
// https://stackoverflow.com/questions/11711920/how-to-implement-multithread-safe-singleton-in-c11-without-using-mutex
OpInfoMap& OpInfoMap::Instance() {
- static OpInfoMap* g_op_info_map = new OpInfoMap();
- return *g_op_info_map;
+ static OpInfoMap g_op_info_map;
+ return g_op_info_map;
}
} // namespace framework
} // namespace paddle
diff --git a/paddle/fluid/framework/op_registry.h b/paddle/fluid/framework/op_registry.h
index 3314e41cc51d74f87be0e2cd5eba9bb260c16be7..e7dfa608b48f89a2155e43c7e63e31154675cd38 100644
--- a/paddle/fluid/framework/op_registry.h
+++ b/paddle/fluid/framework/op_registry.h
@@ -182,21 +182,15 @@ struct OpKernelRegistrarFunctorEx<PlaceType, false, I,

-#define REGISTER_OPERATOR(op_type, op_class, ...) \
- STATIC_ASSERT_GLOBAL_NAMESPACE( \
- __reg_op__##op_type, \
- "REGISTER_OPERATOR must be called in global namespace"); \
- class _OpClass_##op_type##_ : public op_class { \
- public: \
- DEFINE_OP_CLONE_METHOD(_OpClass_##op_type##_); \
- DEFINE_OP_CONSTRUCTOR(_OpClass_##op_type##_, op_class); \
- }; \
- static ::paddle::framework::OperatorRegistrar<_OpClass_##op_type##_, \
- ##__VA_ARGS__> \
- __op_registrar_##op_type##__(#op_type); \
- int TouchOpRegistrar_##op_type() { \
- __op_registrar_##op_type##__.Touch(); \
- return 0; \
+#define REGISTER_OPERATOR(op_type, op_class, ...) \
+ STATIC_ASSERT_GLOBAL_NAMESPACE( \
+ __reg_op__##op_type, \
+ "REGISTER_OPERATOR must be called in global namespace"); \
+ static ::paddle::framework::OperatorRegistrar<op_class, ##__VA_ARGS__> \
+ __op_registrar_##op_type##__(#op_type); \
+ int TouchOpRegistrar_##op_type() { \
+ __op_registrar_##op_type##__.Touch(); \
+ return 0; \
}
#define REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class) \
diff --git a/paddle/fluid/framework/op_registry_test.cc b/paddle/fluid/framework/op_registry_test.cc
index 18b1649cc71d5edd5b07740bbad1fe8f81128898..04996d7b09cecc3c330a47153c9b10310f1792f4 100644
--- a/paddle/fluid/framework/op_registry_test.cc
+++ b/paddle/fluid/framework/op_registry_test.cc
@@ -193,15 +193,10 @@ TEST(OpRegistry, CustomChecker) {
ASSERT_EQ(test_attr, 4);
}
-class CosineOpComplete : public paddle::framework::CosineOp {
- public:
- DEFINE_OP_CONSTRUCTOR(CosineOpComplete, paddle::framework::CosineOp);
- DEFINE_OP_CLONE_METHOD(CosineOpComplete);
-};
-
TEST(OperatorRegistrar, Test) {
paddle::framework::OperatorRegistrar<
- CosineOpComplete, paddle::framework::CosineOpProtoAndCheckerMaker>
+ paddle::framework::CosineOp,
+ paddle::framework::CosineOpProtoAndCheckerMaker>
reg("cos");
}
diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index 3cf8e8696d739e3f2894e490161b9fb5b459bc41..d1dc5fcd97b77fb7707c7d48f6eaeef140d3f306 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -633,6 +633,16 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
VLOG(3) << "expected_kernel_key:" << expected_kernel_key;
auto kernel_iter = kernels.find(expected_kernel_key);
+#ifdef PADDLE_WITH_MKLDNN
+ // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
+ if (kernel_iter == kernels.end() &&
+ expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
+ VLOG(3) << "missing MKLDNN kernel: fallbacking to PLAIN one";
+ expected_kernel_key.library_type_ = LibraryType::kPlain;
+ expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
+ kernel_iter = kernels.find(expected_kernel_key);
+ }
+#endif
if (kernel_iter == kernels.end()) {
PADDLE_THROW("op %s does not have kernel for %s", type_,
KernelTypeToString(expected_kernel_key));
diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h
index 01d750efbb8aaa35701f6caa7ec103ec21dd529e..1040eb882baea624e972faf4af3094119df72308 100644
--- a/paddle/fluid/framework/operator.h
+++ b/paddle/fluid/framework/operator.h
@@ -121,10 +121,6 @@ class OperatorBase {
//! Get all outputs variable names
virtual std::vector<std::string> OutputVars(bool has_intermediate) const;
- // Return a new operator instance, which is as same as this.
- // Use unique_ptr to prevent caller forget to delete this pointer.
- virtual std::unique_ptr<OperatorBase> Clone() const = 0;
-
protected:
std::string type_;
// NOTE: in case of OpGrad, inputs_ contains:
@@ -145,37 +141,6 @@ class OperatorBase {
const platform::Place& place) const = 0;
};
-// Macro for define a clone method.
-// If you are writing an kernel operator, `Clone` will be defined when you
-// register it. i.e. `Clone` method is not needed to define by yourself.
-#define DEFINE_OP_CLONE_METHOD(cls) \
- std::unique_ptr<::paddle::framework::OperatorBase> Clone() const final { \
- return std::unique_ptr<::paddle::framework::OperatorBase>(new cls(*this)); \
- }
-
-// Macro for define a default constructor for Operator.
-// You can also use
-// using PARENT_CLASS::PARENT_CLASS;
-// to use parent's constructor.
-#define DEFINE_OP_CONSTRUCTOR(cls, parent_cls) \
- cls(const std::string& type, \
- const ::paddle::framework::VariableNameMap& inputs, \
- const ::paddle::framework::VariableNameMap& outputs, \
- const paddle::framework::AttributeMap& attrs) \
- : parent_cls(type, inputs, outputs, attrs) {}
-
-class NOP : public OperatorBase {
- public:
- using OperatorBase::OperatorBase;
- std::unique_ptr<OperatorBase> Clone() const override {
- return std::unique_ptr<OperatorBase>(new NOP(*this));
- }
-
- private:
- void RunImpl(const Scope& scope,
- const platform::Place& place) const override {}
-};
-
class ExecutionContext {
public:
ExecutionContext(const OperatorBase& op, const Scope& scope,
diff --git a/paddle/fluid/framework/operator_test.cc b/paddle/fluid/framework/operator_test.cc
index e211c678f8d61ddb69b6c612338bfc6a0afe8cd7..ac9dd8245ad4e0e8842f219b23d3866b03fdaedb 100644
--- a/paddle/fluid/framework/operator_test.cc
+++ b/paddle/fluid/framework/operator_test.cc
@@ -247,26 +247,3 @@ TEST(OpKernel, multi_inputs) {
auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
op->Run(scope, cpu_place);
}
-
-class OperatorClone : public paddle::framework::OperatorBase {
- public:
- DEFINE_OP_CLONE_METHOD(OperatorClone);
- OperatorClone(const std::string& type,
- const paddle::framework::VariableNameMap& inputs,
- const paddle::framework::VariableNameMap& outputs,
- const paddle::framework::AttributeMap& attrs)
- : OperatorBase(type, inputs, outputs, attrs) {}
-
- private:
- void RunImpl(const paddle::framework::Scope& scope,
- const paddle::platform::Place& place) const override {}
-};
-
-TEST(Operator, Clone) {
- paddle::framework::InitDevices(true);
- OperatorClone a("ABC", paddle::framework::VariableNameMap{},
- paddle::framework::VariableNameMap{},
- paddle::framework::AttributeMap{});
- auto b = a.Clone();
- ASSERT_EQ(a.Type(), b->Type());
-}
diff --git a/paddle/fluid/framework/var_type_inference_test.cc b/paddle/fluid/framework/var_type_inference_test.cc
index 14b81ddfecb8c996ae8709910c022a074e91eb3c..7842168f603885ce7dc87d2a01dfa4f544389faa 100644
--- a/paddle/fluid/framework/var_type_inference_test.cc
+++ b/paddle/fluid/framework/var_type_inference_test.cc
@@ -22,6 +22,17 @@ limitations under the License. */
namespace paddle {
namespace framework {
+class NOP : public OperatorBase {
+ public:
+ NOP(const std::string &type, const VariableNameMap &inputs,
+ const VariableNameMap &outputs, const AttributeMap &attrs)
+ : OperatorBase(type, inputs, outputs, attrs) {}
+
+ private:
+ void RunImpl(const Scope &scope,
+ const platform::Place &place) const override {}
+};
+
class SumOpMaker : public OpProtoAndCheckerMaker {
public:
void Make() {
diff --git a/paddle/fluid/memory/detail/buddy_allocator.cc b/paddle/fluid/memory/detail/buddy_allocator.cc
index 4194ba197948b47003863196efdac1c08a7ae4f6..01a8501dd4abe73cbc71dc4c08734cae66df08ef 100644
--- a/paddle/fluid/memory/detail/buddy_allocator.cc
+++ b/paddle/fluid/memory/detail/buddy_allocator.cc
@@ -19,8 +19,9 @@ namespace paddle {
namespace memory {
namespace detail {
-BuddyAllocator::BuddyAllocator(SystemAllocator* system_allocator,
- size_t min_chunk_size, size_t max_chunk_size)
+BuddyAllocator::BuddyAllocator(
+ std::unique_ptr<SystemAllocator> system_allocator, size_t min_chunk_size,
+ size_t max_chunk_size)
: min_chunk_size_(min_chunk_size),
max_chunk_size_(max_chunk_size),
cache_(system_allocator->UseGpu()),
diff --git a/paddle/fluid/memory/detail/buddy_allocator.h b/paddle/fluid/memory/detail/buddy_allocator.h
index 2f39d774d6fb6a2bc37877eb2f8b90bebd3cda28..f0c83efc23ce39c4fc89296d672e1e55751851bf 100644
--- a/paddle/fluid/memory/detail/buddy_allocator.h
+++ b/paddle/fluid/memory/detail/buddy_allocator.h
@@ -14,6 +14,7 @@ limitations under the License. */
#pragma once
+#include <memory>
#include <mutex>  // NOLINT
#include <set>
#include <unordered_map>
@@ -32,8 +33,8 @@ namespace detail {
class BuddyAllocator {
public:
- BuddyAllocator(SystemAllocator* system_allocator, size_t min_chunk_size,
- size_t max_chunk_size);
+ BuddyAllocator(std::unique_ptr<SystemAllocator> system_allocator,
+ size_t min_chunk_size, size_t max_chunk_size);
~BuddyAllocator();
@@ -103,7 +104,7 @@ class BuddyAllocator {
private:
/*! Allocate CPU/GPU memory from system */
- SystemAllocator* system_allocator_;
+ std::unique_ptr<SystemAllocator> system_allocator_;
std::mutex mutex_;
};
diff --git a/paddle/fluid/memory/malloc.cc b/paddle/fluid/memory/malloc.cc
index bd98ed81899440a46415d30b6d74fec2dac4c155..7c800b3c164049244770ceb2070b177d8307e85e 100644
--- a/paddle/fluid/memory/malloc.cc
+++ b/paddle/fluid/memory/malloc.cc
@@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
+#include <mutex>
+
#include "paddle/fluid/memory/malloc.h"
#include "glog/logging.h"
@@ -34,12 +36,15 @@ namespace memory {
using BuddyAllocator = detail::BuddyAllocator;
BuddyAllocator* GetCPUBuddyAllocator() {
+ static std::once_flag init_flag;
static detail::BuddyAllocator* a = nullptr;
- if (a == nullptr) {
- a = new detail::BuddyAllocator(new detail::CPUAllocator,
- platform::CpuMinChunkSize(),
- platform::CpuMaxChunkSize());
- }
+
+ std::call_once(init_flag, []() {
+ a = new detail::BuddyAllocator(
+ std::unique_ptr<detail::SystemAllocator>(new detail::CPUAllocator),
+ platform::CpuMinChunkSize(), platform::CpuMaxChunkSize());
+ });
+
return a;
}
@@ -68,27 +73,33 @@ size_t Used(platform::CPUPlace place) {
#ifdef PADDLE_WITH_CUDA
BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) {
- static BuddyAllocator** as = NULL;
- if (as == NULL) {
+ static std::once_flag init_flag;
+ static detail::BuddyAllocator** a_arr = nullptr;
+
+ std::call_once(init_flag, [gpu_id]() {
int gpu_num = platform::GetCUDADeviceCount();
- as = new BuddyAllocator*[gpu_num];
- for (int gpu = 0; gpu < gpu_num; gpu++) {
- as[gpu] = nullptr;
+ PADDLE_ENFORCE(gpu_id < gpu_num, "gpu_id:%d should < gpu_num:%d", gpu_id,
+ gpu_num);
+
+ a_arr = new BuddyAllocator*[gpu_num];
+ for (int i = 0; i < gpu_num; i++) {
+ a_arr[i] = nullptr;
+ platform::SetDeviceId(i);
+ a_arr[i] = new BuddyAllocator(
+ std::unique_ptr<detail::SystemAllocator>(new detail::GPUAllocator(i)),
+ platform::GpuMinChunkSize(), platform::GpuMaxChunkSize());
+
+ VLOG(10) << "\n\nNOTE: each GPU device use "
+ << FLAGS_fraction_of_gpu_memory_to_use * 100
+ << "% of GPU memory.\n"
+ << "You can set GFlags environment variable '"
+ << "FLAGS_fraction_of_gpu_memory_to_use"
+ << "' to change the fraction of GPU usage.\n\n";
}
- }
+ });
+
platform::SetDeviceId(gpu_id);
- if (!as[gpu_id]) {
- as[gpu_id] = new BuddyAllocator(new detail::GPUAllocator(gpu_id),
- platform::GpuMinChunkSize(),
- platform::GpuMaxChunkSize());
- VLOG(10) << "\n\nNOTE: each GPU device use "
- << FLAGS_fraction_of_gpu_memory_to_use * 100
- << "% of GPU memory.\n"
- << "You can set GFlags environment variable '"
- << "FLAGS_fraction_of_gpu_memory_to_use"
- << "' to change the fraction of GPU usage.\n\n";
- }
- return as[gpu_id];
+ return a_arr[gpu_id];
}
template <>
@@ -125,12 +136,16 @@ void Free(platform::CUDAPlace place, void* p) {
}
BuddyAllocator* GetCUDAPinnedBuddyAllocator() {
- static BuddyAllocator* ba = NULL;
- if (ba == NULL) {
- ba = new BuddyAllocator(new detail::CUDAPinnedAllocator,
+ static std::once_flag init_flag;
+ static BuddyAllocator* ba = nullptr;
+
+ std::call_once(init_flag, []() {
+ ba = new BuddyAllocator(std::unique_ptr<detail::SystemAllocator>(
+ new detail::CUDAPinnedAllocator),
platform::CUDAPinnedMinChunkSize(),
platform::CUDAPinnedMaxChunkSize());
- }
+ });
+
return ba;
}
diff --git a/paddle/fluid/operators/elementwise_add_mkldnn_op.cc b/paddle/fluid/operators/elementwise_add_mkldnn_op.cc
index 3f612256840825a75f49944ab97ff957d572a863..1a5427b39241b666eeaf12b173ea00443bb5f6e4 100644
--- a/paddle/fluid/operators/elementwise_add_mkldnn_op.cc
+++ b/paddle/fluid/operators/elementwise_add_mkldnn_op.cc
@@ -85,7 +85,7 @@ class EltwiseAddMKLDNNKernel : public framework::OpKernel<T> {
"Wrong layout/format set for X tensor");
PADDLE_ENFORCE(y->layout() == DataLayout::kMKLDNN &&
y->format() != memory::format::format_undef,
- "Wrong layout/format set for X tensor");
+ "Wrong layout/format set for Y tensor");
std::vector<int> src_x_tz = framework::vectorize2int(x_dims);
std::vector<int> src_y_tz = framework::vectorize2int(y_dims);
diff --git a/paddle/fluid/operators/read_op.cc b/paddle/fluid/operators/read_op.cc
index 695d7ea83df952d9f2212cc0aaca5c90c7b47ee7..65fcce8bb019965a805ad09d50be0aba64e4f24e 100644
--- a/paddle/fluid/operators/read_op.cc
+++ b/paddle/fluid/operators/read_op.cc
@@ -92,9 +92,13 @@ class ReadOpMaker : public framework::OpProtoAndCheckerMaker {
void Make() override {
AddInput("Reader", "(ReaderHolder) The executed reader.");
AddOutput("Out", "(LoDTensor) The output data.").AsDuplicable();
- AddAttr("throw_eof_exp",
- "If set true, an exception will be thrown when the Reader "
- "yields empty (which means there is no next data).")
+ AddAttr<bool>(
+ "throw_eof_exp",
+ "If set true, an exception will be thrown when the Reader "
+ "yields empty (which means there is no next data).\n"
+ "NOTES: This flag must be true always. It will be set to false"
+ " only when the data-balance is enabled in ParallelExecutor"
+ " and it is set by ParallelExecutor instance, not users.")
.SetDefault(true);
AddComment(R"DOC(
Read Operator
diff --git a/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.cpp b/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.cpp
index 637686e443ceb740d52d42524246ae48a85d52f0..3ef0dfbfe2e5842918500a3b0706c1a55024ce46 100644
--- a/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.cpp
+++ b/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.cpp
@@ -532,6 +532,7 @@ void TrainerThread::computeThread() {
break;
}
}
+ hl_fini();
}
void TrainerThread::prefetch() {
@@ -651,6 +652,7 @@ void TrainerThread::copyGradToBufferThread() {
}
partnerThread->notifyGradientCollect(pid);
}
+ hl_fini();
}
void TrainerThread::gradCollectThread() {
@@ -693,6 +695,7 @@ void TrainerThread::gradCollectThread() {
notifyCopyGradToBuffer(pid);
}
}
+ hl_fini();
}
void TrainerThread::doCallback(int pid) {
@@ -741,6 +744,7 @@ void TrainerThread::valueDispatchThread() {
thread->notifyValueReady(pid);
}
+ hl_fini();
}
void TrainerThread::notifyValueReady(int paramId) {
diff --git a/paddle/legacy/gserver/gradientmachines/ParallelNeuralNetwork.cpp b/paddle/legacy/gserver/gradientmachines/ParallelNeuralNetwork.cpp
index 450514ca88a319b30ca3ebae669c78502087540a..33d24b5b832fe9011591606860e0f50361367790 100644
--- a/paddle/legacy/gserver/gradientmachines/ParallelNeuralNetwork.cpp
+++ b/paddle/legacy/gserver/gradientmachines/ParallelNeuralNetwork.cpp
@@ -197,6 +197,7 @@ void ParallelThread::computeThread() {
job_work.layer_->markAllInputGrad();
}
}
+ hl_fini();
}
void ParallelThread::start() {
diff --git a/python/paddle/fluid/tests/unittests/test_data_balance.py b/python/paddle/fluid/tests/unittests/test_data_balance.py
index cffa3329ac556dc77f3cb508b807cbd49bb974f7..6d810920d55ccf069ff408c553069e8f5e590271 100644
--- a/python/paddle/fluid/tests/unittests/test_data_balance.py
+++ b/python/paddle/fluid/tests/unittests/test_data_balance.py
@@ -103,8 +103,12 @@ class TestDataBalance(unittest.TestCase):
exe = fluid.Executor(place)
exe.run(startup_prog)
+ build_strategy = fluid.BuildStrategy()
+ build_strategy.enable_data_balance = True
parallel_exe = fluid.ParallelExecutor(
- use_cuda=self.use_cuda, main_program=main_prog)
+ use_cuda=self.use_cuda,
+ main_program=main_prog,
+ build_strategy=build_strategy)
if (parallel_exe.device_count > self.batch_size):
print("WARNING: Unittest TestDataBalance skipped. \
@@ -145,9 +149,12 @@ class TestDataBalance(unittest.TestCase):
place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_prog)
-
+ build_strategy = fluid.BuildStrategy()
+ build_strategy.enable_data_balance = True
parallel_exe = fluid.ParallelExecutor(
- use_cuda=self.use_cuda, main_program=main_prog)
+ use_cuda=self.use_cuda,
+ main_program=main_prog,
+ build_strategy=build_strategy)
if (parallel_exe.device_count > self.batch_size):
print("WARNING: Unittest TestDataBalance skipped. \
diff --git a/python/paddle/fluid/transpiler/inference_transpiler.py b/python/paddle/fluid/transpiler/inference_transpiler.py
index d32c69d148dfa1633ce344611ca3fe7879a234e9..b8afeae5ebd6ef7948a7c0c2775f419af461da04 100644
--- a/python/paddle/fluid/transpiler/inference_transpiler.py
+++ b/python/paddle/fluid/transpiler/inference_transpiler.py
@@ -19,7 +19,7 @@ from ..framework import Program
from ..executor import global_scope
-class InferenceTranspiler:
+class InferenceTranspiler(object):
'''
Convert the fluid program to optimized inference program.
diff --git a/python/paddle/libs/__init__.py b/python/paddle/libs/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..34d4f4d07ed0d452c1965c5f1f198230571931aa
--- /dev/null
+++ b/python/paddle/libs/__init__.py
@@ -0,0 +1,15 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Used by setup.py.in to store the thirdparty shared libraries.
diff --git a/python/setup.py.in b/python/setup.py.in
index 51380149d0b09224c02050902897f23f53600de2..a0cb39070bf7a89e3ea4cb1d31f54f919d6ff74e 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -1,14 +1,13 @@
from setuptools import setup, Distribution, Extension
import subprocess
+import os
+import re
+import shutil
class BinaryDistribution(Distribution):
def has_ext_modules(foo):
return True
-MAJOR = 0
-MINOR = 14
-PATCH = 0
RC = 0
-ISTAGED = False
@@ -20,14 +19,47 @@ def git_commit():
git_commit = 'Unknown'
return git_commit
+def _get_version_detail(idx):
+ assert idx < 3, "vesion info consists of %(major)d.%(minor)d.%(patch)d, \
+ so detail index must less than 3"
+
+ if re.match('@TAG_VERSION_REGEX@', '@PADDLE_VERSION@'):
+ version_details = '@PADDLE_VERSION@'.split('.')
+
+ if len(version_details) == 3:
+ return version_details[idx]
+
+ return 0
+
+def get_major():
+ return int(_get_version_detail(0))
+
+def get_minor():
+ return int(_get_version_detail(1))
+
+def get_patch():
+ return str(_get_version_detail(2))
+
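+# Returns True only when HEAD is exactly at the tag for PADDLE_VERSION;
+# "git describe --exact-match --tags" prints nothing to stdout otherwise.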
+def is_tagged():
+ try:
+ cmd = ['git', 'describe', '--exact-match', '--tags']
+ git_tag = subprocess.Popen(cmd, stdout = subprocess.PIPE).communicate()[0].strip()
+ except:
+ return False
+
+ if git_tag.replace('v', '') == '@PADDLE_VERSION@':
+ return True
+ else:
+ return False
+
def write_version_py(filename='paddle/version.py'):
cnt = '''
# THIS FILE IS GENERATED FROM PADDLEPADDLE SETUP.PY
#
-full_version = '%(major)d.%(minor)d.%(patch)d'
+full_version = '%(major)d.%(minor)d.%(patch)s'
major = '%(major)d'
minor = '%(minor)d'
-patch = '%(patch)d'
+patch = '%(patch)s'
rc = '%(rc)d'
istaged = %(istaged)s
commit = '%(commit)s'
@@ -49,19 +81,20 @@ def mkl():
commit = git_commit()
with open(filename, 'w') as f:
f.write(cnt % {
- 'major': MAJOR,
- 'minor': MINOR,
- 'patch': PATCH,
+ 'major': get_major(),
+ 'minor': get_minor(),
+ 'patch': get_patch(),
'rc': RC,
'version': '${PADDLE_VERSION}',
'commit': commit,
- 'istaged': ISTAGED,
+ 'istaged': is_tagged(),
'with_mkl': '@WITH_MKL@'})
write_version_py(filename='@PADDLE_BINARY_DIR@/python/paddle/version.py')
packages=['paddle',
+ 'paddle.libs',
'paddle.utils',
'paddle.dataset',
'paddle.reader',
@@ -113,12 +146,35 @@ package_dir={
}
if '${WITH_FLUID_ONLY}'== 'OFF':
package_dir['py_paddle']='${PADDLE_BINARY_DIR}/python/py_paddle'
-
-paddle_rt_lib_dir = 'lib'
-paddle_rt_libs = ['${WARPCTC_LIBRARIES}']
-if '${MKL_SHARED_LIBS}'!= '':
- paddle_rt_libs += '${MKL_SHARED_LIBS}'.split(';')
+# put all thirdparty libraries in paddle.libs
+package_data['paddle.libs']=['libwarpctc.so']
+libs_path='${PADDLE_BINARY_DIR}/python/paddle/libs'
+shutil.copy('${WARPCTC_LIBRARIES}', libs_path)
+if '${WITH_MKL}' == 'ON':
+ shutil.copy('${MKLML_LIB}', libs_path)
+ shutil.copy('${MKLML_IOMP_LIB}', libs_path)
+ package_data['paddle.libs']+=['libmklml_intel.so','libiomp5.so']
+if '${WITH_MKLDNN}' == 'ON':
+ # change rpath of libmkldnn.so.0, add $ORIGIN/ to it.
+ # The reason is that all thirdparty libraries are in the same directory,
+ # so libmkldnn.so.0 can find libmklml_intel.so and libiomp5.so.
+ command = "patchelf --set-rpath '$ORIGIN/' ${MKLDNN_SHARED_LIB}"
+ if os.system(command) != 0:
+ raise Exception("patchelf --set-rpath for libmkldnn.so.0 fails")
+ package_data['paddle.libs']+=['libmkldnn.so.0']
+ shutil.copy('${MKLDNN_SHARED_LIB}', libs_path)
+# remove unused paddle/libs/__init__.py
+os.remove(libs_path+'/__init__.py')
+package_dir['paddle.libs']=libs_path
+
+# change rpath of core.so, add $ORIGIN/../libs/ to it.
+# The reason is that libwarpctc.so, libiomp5.so etc are in paddle.libs, and
+# core.so is in paddle.fluid, thus paddle/fluid/../libs will point to the above libraries.
+# This operation will fix https://github.com/PaddlePaddle/Paddle/issues/3213
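+# (the resulting rpath can be checked with: readelf -d core.so | grep -i path)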
+command = "patchelf --set-rpath '$ORIGIN/../libs/' ${PADDLE_BINARY_DIR}/python/paddle/fluid/core.so"
+if os.system(command) != 0:
+ raise Exception("patchelf --set-rpath for core.so fails")
setup(name='${PACKAGE_NAME}',
version='${PADDLE_VERSION}',
@@ -128,6 +184,5 @@ setup(name='${PACKAGE_NAME}',
ext_modules=[Extension('_foo', ['stub.cc'])],
package_data=package_data,
package_dir=package_dir,
- scripts=paddle_bins,
- data_files=[(paddle_rt_lib_dir, paddle_rt_libs)]
+ scripts=paddle_bins
)