Commit fdecae5f, authored by qiaolongfei

Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into prefetch_on_server

+repos:
 - repo: https://github.com/Lucas-C/pre-commit-hooks.git
   sha: v1.0.1
   hooks:
@@ -25,6 +26,14 @@
     entry: bash ./.clang_format.hook -i
     language: system
     files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|proto)$
+- repo: local
+  hooks:
+  - id: cpplint-cpp-source
+    name: cpplint
+    description: Check C++ code style using cpplint.py.
+    entry: bash ./tools/codestyle/cpplint_pre_commit.hook
+    language: system
+    files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx)$
 - repo: https://github.com/PaddlePaddle/pre-commit-golang
   sha: 8337620115c25ff8333f1b1a493bd031049bd7c0
   hooks:
......
+add_custom_target(paddle_apis ALL
+  DEPENDS paddle_v2_apis paddle_fluid_apis)
+add_custom_target(paddle_docs ALL
+  DEPENDS paddle_v2_docs paddle_v2_docs_cn
+          paddle_fluid_docs paddle_fluid_docs_cn)
 add_subdirectory(v2)
 add_subdirectory(fluid)
@@ -27,6 +27,8 @@ sphinx_add_target(paddle_fluid_docs
   ${CMAKE_CURRENT_SOURCE_DIR}
   ${SPHINX_HTML_DIR_EN})
+add_dependencies(paddle_fluid_docs gen_proto_py)
 # configured documentation tools and intermediate build results
 set(BINARY_BUILD_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/_build")
@@ -47,3 +49,7 @@ sphinx_add_target(paddle_fluid_docs_cn
   ${SPHINX_CACHE_DIR_CN}
   ${CMAKE_CURRENT_SOURCE_DIR}
   ${SPHINX_HTML_DIR_CN})
+add_dependencies(paddle_fluid_docs_cn gen_proto_py)
+add_subdirectory(api)
+# configured documentation tools and intermediate build results
+set(BINARY_BUILD_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_build")
+# Sphinx cache with pickled ReST documents
+set(SPHINX_CACHE_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_doctrees")
+# HTML output director
+set(SPHINX_HTML_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/html")
+configure_file(
+  "${CMAKE_CURRENT_SOURCE_DIR}/../../templates/conf.py.en.in"
+  "${BINARY_BUILD_DIR_EN}/conf.py"
+  @ONLY)
+sphinx_add_target(paddle_fluid_apis
+  html
+  ${BINARY_BUILD_DIR_EN}
+  ${SPHINX_CACHE_DIR_EN}
+  ${CMAKE_CURRENT_SOURCE_DIR}
+  ${SPHINX_HTML_DIR_EN})
+add_dependencies(paddle_fluid_apis gen_proto_py framework_py_proto copy_paddle_pybind)
@@ -20,13 +20,15 @@ configure_file(
   "${BINARY_BUILD_DIR_EN}/conf.py"
   @ONLY)
-sphinx_add_target(paddle_docs
+sphinx_add_target(paddle_v2_docs
   html
   ${BINARY_BUILD_DIR_EN}
   ${SPHINX_CACHE_DIR_EN}
   ${CMAKE_CURRENT_SOURCE_DIR}
   ${SPHINX_HTML_DIR_EN})
+add_dependencies(paddle_v2_docs gen_proto_py)
 # configured documentation tools and intermediate build results
 set(BINARY_BUILD_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/_build")
@@ -41,11 +43,13 @@ configure_file(
   "${BINARY_BUILD_DIR_CN}/conf.py"
   @ONLY)
-sphinx_add_target(paddle_docs_cn
+sphinx_add_target(paddle_v2_docs_cn
   html
   ${BINARY_BUILD_DIR_CN}
   ${SPHINX_CACHE_DIR_CN}
   ${CMAKE_CURRENT_SOURCE_DIR}
   ${SPHINX_HTML_DIR_CN})
+add_dependencies(paddle_v2_docs_cn gen_proto_py)
 add_subdirectory(api)
@@ -12,9 +12,11 @@ configure_file(
   "${BINARY_BUILD_DIR_EN}/conf.py"
   @ONLY)
-sphinx_add_target(paddle_api_docs
+sphinx_add_target(paddle_v2_apis
   html
   ${BINARY_BUILD_DIR_EN}
   ${SPHINX_CACHE_DIR_EN}
   ${CMAKE_CURRENT_SOURCE_DIR}
   ${SPHINX_HTML_DIR_EN})
+add_dependencies(paddle_v2_apis gen_proto_py framework_py_proto copy_paddle_pybind)
@@ -2,10 +2,25 @@
 Set Command-line Parameters
 ===========================
+The implementation of deep learning algorithms has a variety of characteristics, such as the running environment, the running stage, the structure of the model, and the training strategy. PaddlePaddle lets the user set various command-line parameters flexibly, which helps control the model training or prediction process.
+In this part, we take several practical scenarios as examples and show how the relevant command-line parameters are used:
 .. toctree::
   :maxdepth: 1
   use_case_en.md
+Then, we summarize and classify the use of all command-line parameters:
+.. toctree::
+  :maxdepth: 1
   arguments_en.md
+Finally, detailed descriptions are given, explaining the properties and significance of each command-line parameter:
+.. toctree::
+  :maxdepth: 1
   detail_introduction_en.md
@@ -224,13 +224,13 @@ void AsyncGRPCServer::ShutdownQueue() {
   std::unique_lock<std::mutex> lock(cq_mutex_);
   cq_send_->Shutdown();
   cq_get_->Shutdown();
-  is_shut_down_ = true;
 }
 // This URL explains why shutdown is complicate:
 void AsyncGRPCServer::ShutDown() {
-  server_->Shutdown();
+  is_shut_down_ = true;
   ShutdownQueue();
+  server_->Shutdown();
 }
 void AsyncGRPCServer::TryToRegisterNewSendOne() {
@@ -276,14 +276,14 @@ void AsyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq,
   bool ok = false;
   while (true) {
     if (!cq->Next(&tag, &ok)) {
-      LOG(INFO) << cq_name << " get CompletionQueue shutdown!";
+      LOG(INFO) << cq_name << " CompletionQueue shutdown!";
       break;
     }
     PADDLE_ENFORCE(tag);
     // FIXME(typhoonzero): de-couple the barriers with recv_op
-    if (cq_name == "cq_get") WaitCond(1);
-    if (cq_name == "cq_send") WaitCond(0);
+    if (!is_shut_down_ && cq_name == "cq_get") WaitCond(1);
+    if (!is_shut_down_ && cq_name == "cq_send") WaitCond(0);
     RequestBase* base = (RequestBase*)tag;
     // reference:
......
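The gRPC server hunk above changes the shutdown ordering: is_shut_down_ is now published in ShutDown() before the completion queues are drained, and HandleRequest() skips its WaitCond barriers once shutdown has begun, so handler threads cannot end up waiting on a signal that will never arrive. Below is a minimal, self-contained C++ sketch of that ordering; it is not the PaddlePaddle implementation, and MiniServer, Push, and WaitBarrier are stand-ins for the real completion-queue and barrier machinery.

    #include <atomic>
    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <queue>
    #include <thread>

    class MiniServer {
     public:
      void Handle() {
        while (true) {
          int tag;
          if (!Next(&tag)) break;             // queue shut down and drained
          if (!is_shut_down_) WaitBarrier();  // skip barriers once shutdown began
          std::cout << "handled tag " << tag << "\n";
        }
      }
      void Push(int tag) {
        {
          std::lock_guard<std::mutex> lock(mu_);
          queue_.push(tag);
        }
        cv_.notify_all();
      }
      void ShutDown() {
        is_shut_down_ = true;  // step 1: handlers stop entering their barriers
        {
          std::lock_guard<std::mutex> lock(mu_);
          queue_closed_ = true;  // step 2: wake anything blocked in Next()
        }
        cv_.notify_all();
      }

     private:
      bool Next(int* tag) {
        std::unique_lock<std::mutex> lock(mu_);
        cv_.wait(lock, [this] { return queue_closed_ || !queue_.empty(); });
        if (queue_.empty()) return false;  // closed and nothing left to process
        *tag = queue_.front();
        queue_.pop();
        return true;
      }
      void WaitBarrier() { /* stand-in for WaitCond(0/1) in the real server */ }

      std::atomic<bool> is_shut_down_{false};
      bool queue_closed_ = false;
      std::mutex mu_;
      std::condition_variable cv_;
      std::queue<int> queue_;
    };

    int main() {
      MiniServer server;
      std::thread worker([&server] { server.Handle(); });
      server.Push(1);
      server.Push(2);
      server.ShutDown();
      worker.join();
      return 0;
    }

The point the hunk encodes is the order of those two steps: make the flag visible first, then wake the blocked handlers.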
@@ -11,9 +11,10 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 #pragma once
 #include <random>
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
......
@@ -13,8 +13,10 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include <unistd.h>
 #include <string>
-#include <thread>
+#include <thread>  // NOLINT
+#include <vector>
 #include "gtest/gtest.h"
 #include "paddle/fluid/framework/op_registry.h"
@@ -30,9 +32,9 @@ namespace m = paddle::operators::math;
 USE_OP(dropout);
-void Compare(f::Scope& scope, p::DeviceContext& ctx) {
+void Compare(f::Scope* scope, const p::DeviceContext& ctx) {
   // init
-  auto var = scope.Var("X");
+  auto var = scope->Var("X");
   auto tensor = var->GetMutable<f::LoDTensor>();
   tensor->Resize({10, 10});
@@ -44,12 +46,12 @@ void Compare(f::Scope& scope, p::DeviceContext& ctx) {
   TensorFromVector(init, ctx, tensor);
   auto place = ctx.GetPlace();
-  auto out_var = scope.Var("Out");
+  auto out_var = scope->Var("Out");
   auto out_tensor = out_var->GetMutable<f::LoDTensor>();
   out_tensor->Resize({10, 10});
   out_tensor->mutable_data<float>(place);  // allocate
-  auto mask_var = scope.Var("Mask");
+  auto mask_var = scope->Var("Mask");
   auto mask_tensor = mask_var->GetMutable<f::LoDTensor>();
   mask_tensor->Resize({10, 10});
   mask_tensor->mutable_data<float>(place);  // allocate
@@ -63,7 +65,7 @@ void Compare(f::Scope& scope, p::DeviceContext& ctx) {
   auto dropout_op = f::OpRegistry::CreateOp(
       "dropout", {{"X", {"X"}}}, {{"Out", {"Out"}}, {"Mask", {"Mask"}}}, attrs);
-  dropout_op->Run(scope, place);
+  dropout_op->Run(*scope, place);
   std::vector<float> out_vec;
   TensorToVector(*out_tensor, ctx, &out_vec);
@@ -81,6 +83,11 @@ void Compare(f::Scope& scope, p::DeviceContext& ctx) {
   }
 }
+// TODO(wyi): Due to
+// https://github.com/PaddlePaddle/Paddle/issues/9507, I temporarily
+// disable this test to remove the prevention of the merge of
+// unrelated PRs.
+/*
 TEST(Dropout, CPUDense) {
   f::Scope scope;
   p::CPUPlace place;
@@ -94,3 +101,4 @@ TEST(Dropout, GPUDense) {
   p::CUDADeviceContext ctx(place);
   Compare(scope, ctx);
 }
+*/
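The Compare signature in the hunk above switches from a non-const Scope& to a Scope* (with the device context taken by const reference), matching the cpplint style rule the new pre-commit hook enforces. If the temporarily disabled tests are re-enabled, their call sites would pass the scope by address; a hedged sketch of one such call site, reusing the aliases and types already present in this test file:

    // Hypothetical re-enabled test body; f::Scope, p::CPUPlace and
    // p::CPUDeviceContext are the names already used in this file.
    TEST(Dropout, CPUDense) {
      f::Scope scope;
      p::CPUPlace place;
      p::CPUDeviceContext ctx(place);
      Compare(&scope, ctx);  // scope now passed by pointer, ctx by const reference
    }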
@@ -88,7 +88,6 @@ class ListenAndServOp : public framework::OperatorBase {
   void Stop() override {
     rpc_service_->Push(LISTEN_TERMINATE_MESSAGE);
-    rpc_service_->ShutDown();
     server_thread_->join();
   }
......
@@ -214,7 +214,10 @@ class LRNOpMaker : public framework::OpProtoAndCheckerMaker {
                    "Defaults to \"NHWC\". Specify the data format of the output data, "
                    "the input will be transformed automatically. ")
         .SetDefault("AnyLayout");
-    AddAttr<bool>("is_test", "").SetDefault(false);
+    AddAttr<bool>("is_test",
+                  "Turns on memory optimization that optimizes away "
+                  "unnecessary memory allocations. Used by MKLDNN.")
+        .SetDefault(false);
     AddComment(R"DOC(
 Local Response Normalization Operator.
......
@@ -121,6 +121,10 @@ class LRNGradKernel : public framework::OpKernel<T> {
     T alpha = ctx.Attr<T>("alpha");
     T beta = ctx.Attr<T>("beta");
+    PADDLE_ENFORCE(
+        !ctx.Attr<bool>("is_test"),
+        "is_test attribute should be set to False in training phase.");
     LRNGradFunctor<DeviceContext, T> f;
     f(ctx, x, out, mid, x_g, out_g, N, C, H, W, n, alpha, beta);
   }
......
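Read together, the two LRN hunks add an is_test attribute to the op definition (so an inference-only path, e.g. the MKLDNN one, can drop intermediate allocations) and a guard in the gradient kernel that refuses to run when the attribute is set, since the backward pass relies on buffers a test-mode forward pass may no longer allocate. Below is a hedged sketch of a training-side construction that keeps the new check satisfied, borrowing the CreateOp pattern from the dropout test earlier in this commit; the "MidOut" output name and the explicit attribute assignment are illustrative assumptions, not taken from this diff.

    // Training-phase construction: is_test keeps its default (false), so the
    // forward pass retains its intermediate output and the gradient kernel's
    // PADDLE_ENFORCE(!ctx.Attr<bool>("is_test"), ...) passes.
    f::AttributeMap attrs;
    attrs["is_test"] = false;  // shown explicitly only for illustration
    auto lrn_op = f::OpRegistry::CreateOp(
        "lrn", {{"X", {"X"}}}, {{"Out", {"Out"}}, {"MidOut", {"MidOut"}}}, attrs);
    lrn_op->Run(scope, place);  // hypothetical scope/place, as in the dropout test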
@@ -122,7 +122,8 @@ void StartServerNet(bool is_sparse) {
   // sub program run in listen_and_serv_op, for simple test we use sum
   f::ProgramDesc program;
-  f::BlockDesc *optimize_block = program.MutableBlock(0);
+  const auto &root_block = program.Block(0);
+  auto *optimize_block = program.AppendBlock(root_block);
   // X for server side tensors, RX for received tensers, must be of same shape.
   AddOp("sum", {{"X", {"x0", "x1"}}}, {{"Out", {"Out"}}}, {}, optimize_block);
......
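The send/recv test hunk above stops treating the program's root block as the server-side block and instead appends a dedicated sub-block for the optimization ops, which is what listen_and_serv_op executes per iteration. A hedged sketch of the resulting wiring inside StartServerNet, reusing the names from this hunk; the "OptimizeBlock" attribute at the end is an assumption about how the rest of the test attaches the block to the op, and is not shown in this diff.

    f::ProgramDesc program;
    const auto &root_block = program.Block(0);               // block 0 stays the root
    auto *optimize_block = program.AppendBlock(root_block);  // server-side sub-block
    // The per-iteration server work (a simple sum in this test) goes into the sub-block.
    AddOp("sum", {{"X", {"x0", "x1"}}}, {{"Out", {"Out"}}}, {}, optimize_block);
    // Presumed wiring: hand the sub-block to listen_and_serv through an attribute.
    f::AttributeMap attrs;
    attrs.insert({"OptimizeBlock", optimize_block});  // assumption, not part of this hunk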
@@ -125,9 +125,8 @@ EOF
       -DWITH_AVX=${WITH_AVX:-ON} \
       -DWITH_SWIG_PY=ON \
       -DWITH_STYLE_CHECK=OFF
-    make -j `nproc` gen_proto_py framework_py_proto
-    make -j `nproc` copy_paddle_pybind
-    make -j `nproc` paddle_docs paddle_docs_cn paddle_api_docs
+    make -j `nproc` paddle_docs paddle_apis
     popd
   fi
......
@@ -7,9 +7,8 @@ cd $TRAVIS_BUILD_DIR/build
 # Compile Documentation only.
 cmake .. -DCMAKE_BUILD_TYPE=Release -DWITH_GPU=OFF -DWITH_MKL=OFF -DWITH_DOC=ON -DWITH_STYLE_CHECK=OFF
-make -j `nproc` gen_proto_py framework_py_proto
-make -j `nproc` copy_paddle_pybind
-make -j `nproc` paddle_docs paddle_docs_cn paddle_api_docs
+make -j `nproc` paddle_docs paddle_apis
 # check websites for broken links
 linkchecker doc/v2/en/html/index.html
......
+#!/bin/bash
+TOTAL_ERRORS=0
+# The trick to remove deleted files: https://stackoverflow.com/a/2413151
+for file in $(git diff --cached --name-status | awk '$1 != "D" {print $2}'); do
+  cpplint $file;
+  TOTAL_ERRORS=$(expr $TOTAL_ERRORS + $?);
+done
+exit $TOTAL_ERRORS