diff --git a/cmake/cpplint.cmake b/cmake/cpplint.cmake
index 6bbcd730e1b5ac49415cac676352e6df00eb6eb5..656e1a0803c6e389d70f37f592c3aa2e95a2bcd4 100644
--- a/cmake/cpplint.cmake
+++ b/cmake/cpplint.cmake
@@ -27,7 +27,8 @@ set(IGNORE_PATTERN
     .*cblas\\.h.*
     .*\\.pb\\.txt
     .*LtrDataProvider.*
-    .*MultiDataProvider.*)
+    .*MultiDataProvider.*
+    .*pb.*)
 
 # add_style_check_target
 #
@@ -52,14 +53,13 @@ macro(add_style_check_target TARGET_NAME)
       endif()
     endforeach()
     if(LINT MATCHES ON)
+      # cpplint code style
       get_filename_component(base_filename ${filename} NAME)
       set(CUR_GEN ${CMAKE_CURRENT_BINARY_DIR}/${base_filename}.cpplint)
-      add_custom_command(OUTPUT ${CUR_GEN}
-                         PRE_BUILD
-                         COMMAND env ${py_env} "${PYTHON_EXECUTABLE}" "${PROJ_ROOT}/paddle/scripts/cpplint.py"
-                                 "--filter=${STYLE_FILTER}"
-                                 "--write-success=${CUR_GEN}" ${filename}
-                         DEPENDS ${filename}
+      add_custom_command(TARGET ${TARGET_NAME} PRE_BUILD
+                         COMMAND "${PYTHON_EXECUTABLE}" "${PROJ_ROOT}/paddle/scripts/cpplint.py"
+                                 "--filter=${STYLE_FILTER}"
+                                 "--write-success=${CUR_GEN}" ${filename}
                          WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
     endif()
   endforeach()
diff --git a/cmake/generic.cmake b/cmake/generic.cmake
index 200e897c2aa57fdb7f1cd17a9ce3e8de3ce17a8a..e42e75c12ab1e5133f5ecbdb90ef26e3f8df5133 100644
--- a/cmake/generic.cmake
+++ b/cmake/generic.cmake
@@ -185,6 +185,10 @@ function(cc_library TARGET_NAME)
       add_dependencies(${TARGET_NAME} ${cc_library_DEPS})
       target_link_libraries(${TARGET_NAME} ${cc_library_DEPS})
     endif()
+
+    # cpplint code style
+    add_style_check_target(${TARGET_NAME} ${cc_library_SRCS})
+
   else(cc_library_SRCS)
     if (cc_library_DEPS)
       merge_static_libs(${TARGET_NAME} ${cc_library_DEPS})
diff --git a/paddle/framework/ddim.cc b/paddle/framework/ddim.cc
index 3f949a6595ea326b97ac567daf9b35a68c8cf7f8..87a3618e095c544b422746ed3f497b21f3824fbd 100644
--- a/paddle/framework/ddim.cc
+++ b/paddle/framework/ddim.cc
@@ -1,9 +1,23 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
 #include "paddle/framework/ddim.h"
 
 namespace paddle {
 namespace framework {
 
-///@cond HIDDEN
+/// @cond HIDDEN
 
 template <int i>
 Dim<i> make_dim(const int* d) {
@@ -50,7 +64,7 @@ void make_ddim(DDim& ddim, const int* dims, int n) {
   }
 }
 
-///@endcond
+/// @endcond
 
 DDim make_ddim(std::initializer_list<int> dims) {
   DDim result(make_dim(0));
@@ -64,11 +78,11 @@ DDim make_ddim(const std::vector<int>& dims) {
   return result;
 }
 
-///@cond HIDDEN
+/// @cond HIDDEN
 // XXX For some reason, putting this in an anonymous namespace causes errors
 class DynamicMutableIndexer : public boost::static_visitor<int&> {
 public:
-  DynamicMutableIndexer(int idx) : idx_(idx) {}
+  explicit DynamicMutableIndexer(int idx) : idx_(idx) {}
 
   template <int D>
   int& operator()(Dim<D>& dim) const {
@@ -81,7 +95,7 @@ class DynamicMutableIndexer : public boost::static_visitor<int&> {
 
 class DynamicConstIndexer : public boost::static_visitor<int> {
 public:
-  DynamicConstIndexer(int idx) : idx_(idx) {}
+  explicit DynamicConstIndexer(int idx) : idx_(idx) {}
 
   template <int D>
   int operator()(const Dim<D>& dim) const {
@@ -92,7 +106,7 @@ class DynamicConstIndexer : public boost::static_visitor<int> {
   int idx_;
 };
 
-///@endcond
+/// @endcond
 
 int& DDim::operator[](int idx) {
   return boost::apply_visitor(DynamicMutableIndexer(idx), var);
@@ -155,11 +169,11 @@ int get(const DDim& ddim, int idx) { return ddim[idx]; }
 
 void set(DDim& ddim, int idx, int value) { ddim[idx] = value; }
 
-///@cond HIDDEN
+/// @cond HIDDEN
 struct VectorizeVisitor : public boost::static_visitor<> {
   std::vector<int>& vector;
 
-  VectorizeVisitor(std::vector<int>& v) : vector(v) {}
+  explicit VectorizeVisitor(std::vector<int>& v) : vector(v) {}
 
   template <typename T>
   void operator()(const T& t) {
@@ -169,7 +183,7 @@
 
   void operator()(const Dim<1>& t) { vector.push_back(t.head); }
 };
-///@endcond
+/// @endcond
 
 std::vector<int> vectorize(const DDim& ddim) {
   std::vector<int> result;
@@ -187,7 +201,7 @@ ssize_t product(const DDim& ddim) {
   return result;
 }
 
-///\cond HIDDEN
+/// \cond HIDDEN
 
 struct ArityVisitor : boost::static_visitor<int> {
   template <int D>
@@ -196,15 +210,15 @@ struct ArityVisitor : boost::static_visitor<int> {
   int operator()(Dim<D>) const {
     return D;
   }
 };
 
-///\endcond
+/// \endcond
 
 int arity(const DDim& d) { return boost::apply_visitor(ArityVisitor(), d); }
 
-///\cond HIDDEN
+/// \cond HIDDEN
 struct DDimPrinter : boost::static_visitor<void> {
   std::ostream& os;
-  DDimPrinter(std::ostream& os_) : os(os_) {}
+  explicit DDimPrinter(std::ostream& os_) : os(os_) {}
   template <typename T>
   void operator()(const T& t) {
@@ -212,7 +226,7 @@
   }
 };
 
-///\endcond
+/// \endcond
 
 std::ostream& operator<<(std::ostream& os, const DDim& ddim) {
   DDimPrinter printer(os);
diff --git a/paddle/framework/ddim.h b/paddle/framework/ddim.h
index 223c4180bee45e21547364441476b27051daca56..f8714acf320952b11a44016b1dfdc4f9ca9eb632 100644
--- a/paddle/framework/ddim.h
+++ b/paddle/framework/ddim.h
@@ -27,7 +27,7 @@ struct DDim {
   DDim() : var(Dim<1>()) {}
 
   template <int D>
-  DDim(const Dim<D>& in) : var(in) {}
+  explicit DDim(const Dim<D>& in) : var(in) {}
 
   template <int D>
   DDim& operator=(const Dim<D>& in) {
diff --git a/paddle/framework/net.cc b/paddle/framework/net.cc
index 854ad8e33e9900ed4e54eceadc33c1ef36c1d116..a0e8788846d795429ba35715e60d4e3421e2f5ff 100644
--- a/paddle/framework/net.cc
+++ b/paddle/framework/net.cc
@@ -1,3 +1,19 @@
+/*
+  Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+*/
+
 #include "paddle/framework/net.h"
 
 namespace paddle {
diff --git a/paddle/framework/op_registry.cc b/paddle/framework/op_registry.cc
index 4b35e04e681b414c36cf6d9aee9e64dd68ba5da9..1d14535c50b542733663a6900a8b5f2033290ea6 100644
--- a/paddle/framework/op_registry.cc
+++ b/paddle/framework/op_registry.cc
@@ -1,3 +1,17 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
 #include <paddle/framework/op_registry.h>
 
 namespace paddle {
@@ -33,4 +47,4 @@ void AttrTypeHelper::SetAttrType<std::vector<std::string>>(AttrProto* attr) {
   attr->set_type(paddle::framework::AttrType::STRINGS);
 }
 }  // namespace framework
-}  // namespace paddle
\ No newline at end of file
+}  // namespace paddle
diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc
index 8f7adff8b3982e91a3d7f6d598cd62d5005d5f17..d065670829ccbbc209181ee87e9f1301aa648b3f 100644
--- a/paddle/framework/operator.cc
+++ b/paddle/framework/operator.cc
@@ -40,4 +40,4 @@ std::string OperatorBase::DebugString() const {
 }
 
 }  // namespace framework
-}  // namespace paddle
\ No newline at end of file
+}  // namespace paddle
diff --git a/paddle/function/GemmConvOp.cpp b/paddle/function/GemmConvOp.cpp
index a40e5d9d2e76605525f0956445fc43c693933cf8..00880effc59cc80b2761fb6a4d9f3246439afd3f 100644
--- a/paddle/function/GemmConvOp.cpp
+++ b/paddle/function/GemmConvOp.cpp
@@ -117,8 +117,7 @@ public:
     ConvFunctionBase::init(config);
   }
 
-  virtual void check(const BufferArgs& inputs,
-                     const BufferArgs& outputs) override {
+  void check(const BufferArgs& inputs, const BufferArgs& outputs) override {
     const TensorShape& input = inputs[0].shape();
     const TensorShape& filter = inputs[1].shape();
     const TensorShape& output = outputs[0].shape();
@@ -217,8 +216,7 @@ public:
     ConvFunctionBase::init(config);
   }
 
-  virtual void check(const BufferArgs& inputs,
-                     const BufferArgs& outputs) override {
+  void check(const BufferArgs& inputs, const BufferArgs& outputs) override {
     const TensorShape& output = inputs[0].shape();
     const TensorShape& filter = inputs[1].shape();
     const TensorShape& input = outputs[0].shape();
@@ -311,8 +309,7 @@ public:
     ConvFunctionBase::init(config);
   }
 
-  virtual void check(const BufferArgs& inputs,
-                     const BufferArgs& outputs) override {
+  void check(const BufferArgs& inputs, const BufferArgs& outputs) override {
     const TensorShape& output = inputs[0].shape();
     const TensorShape& input = inputs[1].shape();
     const TensorShape& filter = outputs[0].shape();
diff --git a/paddle/function/NaiveConvOp.cpp b/paddle/function/NaiveConvOp.cpp
index 4348f0f775e9442c50a3c45b9a8e6dad5c6b198d..e0692fa06d6e0c35cfa742ca3eac7fe2037b1a80 100644
--- a/paddle/function/NaiveConvOp.cpp
+++ b/paddle/function/NaiveConvOp.cpp
@@ -90,8 +90,7 @@ public:
     ConvFunctionBase::init(config);
   }
 
-  virtual void check(const BufferArgs& inputs,
-                     const BufferArgs& outputs) override {
+  void check(const BufferArgs& inputs, const BufferArgs& outputs) override {
     const TensorShape& input = inputs[0].shape();
     const TensorShape& filter = inputs[1].shape();
     const TensorShape& output = outputs[0].shape();
diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.cpp b/paddle/gserver/gradientmachines/NeuralNetwork.cpp
index 2e839f640503b8f4e390fc87d9d59960dbc37f6e..cfa80a89365af5111746eec9599d16e37532a9f7 100644
--- a/paddle/gserver/gradientmachines/NeuralNetwork.cpp
+++ b/paddle/gserver/gradientmachines/NeuralNetwork.cpp
@@ -403,7 +403,7 @@ public:
       : layerName_(layerName) {
     addEvaluator(std::move(evaluator));
   }
-  virtual void eval(const NeuralNetwork& nn) override {
+  void eval(const NeuralNetwork& nn) override {
     const LayerPtr& layer = nn.getLayer(layerName_);
     CHECK(layer) << "Nonexisted layer: " << layerName_ << " in submodel "
                  << nn.getName();
diff --git a/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp b/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp
index 9a972466d66ba1417b2c31e66dc375b3da229aa8..9ddd449de7500f5682d59469328f06971c6e83bf 100644
--- a/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp
+++ b/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp
@@ -636,7 +636,7 @@ void lenToStarts(std::vector<int>& starts) {
   }
   starts.back() = pos;
 }
-}
+}  // namespace
 
 void RecurrentGradientMachine::calcSequenceStartPositions() {
   std::vector<int> starts(commonSeqInfo_.size() + 1);
diff --git a/paddle/gserver/layers/AgentLayer.cpp b/paddle/gserver/layers/AgentLayer.cpp
index 15e7411b5fde0fa3a532394cf7d0e8477ef052d0..bdae7e623ae0472d4fe5ef3a88fc1e93bbf1e52c 100644
--- a/paddle/gserver/layers/AgentLayer.cpp
+++ b/paddle/gserver/layers/AgentLayer.cpp
@@ -124,7 +124,7 @@ void copyElements(const IVector& srcVec,
     dest[index[i]] = src[i];
   }
 }
-}
+}  // namespace
 
 void GatherAgentLayer::forwardIds(PassType passType) {
   IVectorPtr realId = realLayers_[0]->getOutputLabel();
diff --git a/paddle/memory/detail/memory_block.cc b/paddle/memory/detail/memory_block.cc
index bc67bcef0fdf9358460d72da5f443b48744c6838..fc40993208323f1f5d18103165c8835b5f829613 100644
--- a/paddle/memory/detail/memory_block.cc
+++ b/paddle/memory/detail/memory_block.cc
@@ -152,6 +152,6 @@ MemoryBlock* MemoryBlock::metadata() const {
       reinterpret_cast<const Metadata*>(this) - 1));
 }
 
-}  // detail
-}  // memory
-}  // paddle
+}  // namespace detail
+}  // namespace memory
+}  // namespace paddle
diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc
index 430ce98bfc1454856a2a4ec76078931b90e588b8..df3d57d629184d28fd42130df9b020a7b52ade72 100644
--- a/paddle/memory/memory.cc
+++ b/paddle/memory/memory.cc
@@ -17,8 +17,6 @@ limitations under the License. */
 #include "paddle/memory/detail/system_allocator.h"
 #include "paddle/platform/assert.h"
 
-#include
-
 namespace paddle {
 namespace memory {
 
diff --git a/paddle/operators/add_op.cc b/paddle/operators/add_op.cc
index 2766f0bf258ed863a4297c1e4a2be4673cbf3044..522b23cbc49f025a1ff674ce157358899d690e6d 100644
--- a/paddle/operators/add_op.cc
+++ b/paddle/operators/add_op.cc
@@ -1,3 +1,17 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
 #include <paddle/framework/op_registry.h>
 #include <paddle/framework/tensor.h>
 #include <paddle/operators/add_op.h>
@@ -36,9 +50,9 @@ The equation is: Out = X + Y
 )DOC");
   }
 };
-}  // namespace op
+}  // namespace operators
 }  // namespace paddle
 
 REGISTER_OP(add_two, paddle::operators::AddOp, paddle::operators::AddOpMaker);
 REGISTER_OP_CPU_KERNEL(
-    add_two, ::paddle::operators::AddKernel<::paddle::platform::CPUPlace>);
\ No newline at end of file
+    add_two, ::paddle::operators::AddKernel<::paddle::platform::CPUPlace>);
diff --git a/paddle/optimizer/parameter_optimizer_test.cpp b/paddle/optimizer/parameter_optimizer_test.cpp
index 4e6254d9e4dab48279b4a880695959526d30d70c..edf4ae37a9beee2911d23dd1ab23e67a18065b1b 100644
--- a/paddle/optimizer/parameter_optimizer_test.cpp
+++ b/paddle/optimizer/parameter_optimizer_test.cpp
@@ -1,3 +1,19 @@
+/*
+  Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+*/
+
 #include "parameter_optimizer.h"
 #include <cmath>
 #include <map>
@@ -5,21 +21,18 @@
 #include "gtest/gtest.h"
 #include "lr_policy.h"
 
-using namespace paddle;
-using namespace paddle::optimizer;
-
-Tensor* FillTensor(size_t size) {
-  Tensor* param = new Tensor(size);
-  Tensor& p = *param;
+paddle::optimizer::Tensor* FillTensor(size_t size) {
+  paddle::optimizer::Tensor* param = new paddle::optimizer::Tensor(size);
+  paddle::optimizer::Tensor& p = *param;
   for (size_t i = 0; i < p.size(); ++i) {
     p[i] = (float)rand() / (float)RAND_MAX;
   }
   return param;
 }
 
-Tensor* FixedTensor(size_t size) {
-  Tensor* param = new Tensor(size);
-  Tensor& p = *param;
+paddle::optimizer::Tensor* FixedTensor(size_t size) {
+  paddle::optimizer::Tensor* param = new paddle::optimizer::Tensor(size);
+  paddle::optimizer::Tensor& p = *param;
   for (size_t i = 0; i < p.size(); ++i) {
     p[i] = i;
   }
@@ -28,7 +41,8 @@ Tensor* FixedTensor(size_t size) {
 
 class OptimizerTest : public testing::Test {
 public:
-  // init tensor shape
+  virtual ~OptimizerTest() {}
+  // init paddle::optimizer::Tensor shape
   const size_t kSize = 5;
 
   virtual void SetUp() {
@@ -38,34 +52,36 @@ public:
   virtual void TearDown() {}
 
   void CreateSGD() {
-    Tensor* parameter = FixedTensor(kSize);
-    config_.set_optimizer(OptimizerConfig::SGD);
+    paddle::optimizer::Tensor* parameter = FixedTensor(kSize);
+    config_.set_optimizer(paddle::OptimizerConfig::SGD);
     config_.mutable_sgd()->set_momentum(0.0);
     config_.mutable_sgd()->set_decay(0.0);
     config_.mutable_sgd()->set_nesterov(false);
-    config_.set_lr_policy(OptimizerConfig::Const);
+    config_.set_lr_policy(paddle::OptimizerConfig::Const);
     config_.mutable_const_lr()->set_learning_rate(0.1);
     std::string str = config_.SerializeAsString();
-    ParameterOptimizer* opt = ParameterOptimizer::Create(str, parameter);
+    paddle::optimizer::ParameterOptimizer* opt =
+        paddle::optimizer::ParameterOptimizer::Create(str, parameter);
     opts_.push_back(opt);
   }
 
   void CreateAdam() {
-    Tensor* parameter = FixedTensor(kSize);
-    config_.set_optimizer(OptimizerConfig::Adam);
+    paddle::optimizer::Tensor* parameter = FixedTensor(kSize);
+    config_.set_optimizer(paddle::OptimizerConfig::Adam);
     config_.mutable_adam()->set_beta_1(0.9);
     config_.mutable_adam()->set_beta_2(0.1);
     config_.mutable_adam()->set_epsilon(1e-3);
     config_.mutable_adam()->set_decay(0.0);
-    config_.set_lr_policy(OptimizerConfig::Const);
+    config_.set_lr_policy(paddle::OptimizerConfig::Const);
     config_.mutable_const_lr()->set_learning_rate(0.1);
     std::string str = config_.SerializeAsString();
-    ParameterOptimizer* opt = ParameterOptimizer::Create(str, parameter);
+    paddle::optimizer::ParameterOptimizer* opt =
+        paddle::optimizer::ParameterOptimizer::Create(str, parameter);
     opts_.push_back(opt);
   }
 
   void TestGetWeight() {
-    Tensor* p = FixedTensor(kSize);
+    paddle::optimizer::Tensor* p = FixedTensor(kSize);
     for (size_t i = 0; i < opts_.size(); ++i) {
       int s = 0;
       float* newp = (float*)opts_[i]->get_weight(&s);
@@ -76,7 +92,7 @@ public:
   }
 
   void TestUpdate() {
-    Tensor* g = FixedTensor(kSize);
+    paddle::optimizer::Tensor* g = FixedTensor(kSize);
     for (size_t i = 0; i < opts_.size(); ++i) {
       opts_[i]->Update(g);
     }
@@ -91,8 +107,8 @@ public:
   }
 
 private:
-  std::vector<ParameterOptimizer*> opts_;
-  OptimizerConfig config_;
+  std::vector<paddle::optimizer::ParameterOptimizer*> opts_;
+  paddle::OptimizerConfig config_;
 };
 
 TEST_F(OptimizerTest, TestGetWeight) { TestGetWeight(); }
diff --git a/paddle/optimizer/serialization_test.cpp b/paddle/optimizer/serialization_test.cpp
index d2454140dc243b40ed8348578360b30894213838..e4d97cbdba545c4ba5adf5b30efd3fc9f3f744ee 100644
--- a/paddle/optimizer/serialization_test.cpp
+++ b/paddle/optimizer/serialization_test.cpp
@@ -1,19 +1,32 @@
+/*
+  Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+*/
+
 #include "serialization.h"
 #include "gtest/gtest.h"
 
-using namespace paddle;
-using namespace paddle::optimizer;
-
 TEST(TensorToProto, Case1) {
-  Tensor t(3), t1(3);
+  paddle::optimizer::Tensor t(3), t1(3);
   for (size_t i = 0; i < t.size(); ++i) {
     t[i] = i;
     t1[i] = 0;
   }
 
-  TensorProto proto;
-  TensorToProto(t, &proto);
-  ProtoToTensor(proto, &t1);
+  paddle::TensorProto proto;
+  paddle::optimizer::TensorToProto(t, &proto);
+  paddle::optimizer::ProtoToTensor(proto, &t1);
   for (size_t i = 0; i < t1.size(); ++i) {
     EXPECT_EQ(t1[i], t[i]);
   }
diff --git a/paddle/platform/cpu_info.cc b/paddle/platform/cpu_info.cc
index 1905cfeee63038e2bbcff84200d66eaabf073273..dfab391cfbe1f04bc2a998233f7e7909579ca72b 100644
--- a/paddle/platform/cpu_info.cc
+++ b/paddle/platform/cpu_info.cc
@@ -41,8 +41,8 @@ inline size_t CpuTotalPhysicalMemory() {
   if (sysctl(mib, 2, &size, &len, NULL, 0) == 0) return (size_t)size;
   return 0L;
 #else
-  long pages = sysconf(_SC_PHYS_PAGES);
-  long page_size = sysconf(_SC_PAGE_SIZE);
+  int64_t pages = sysconf(_SC_PHYS_PAGES);
+  int64_t page_size = sysconf(_SC_PAGE_SIZE);
   return pages * page_size;
 #endif
 }
diff --git a/paddle/platform/place.cc b/paddle/platform/place.cc
index 0704820aa05079401eb56814d689d6e280311edb..b31515e1f028acac885a506ff1c20479407a05e3 100644
--- a/paddle/platform/place.cc
+++ b/paddle/platform/place.cc
@@ -1,3 +1,17 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
 #include "paddle/platform/place.h"
 
 namespace paddle {
@@ -7,7 +21,7 @@ namespace detail {
 
 class PlacePrinter : public boost::static_visitor<> {
 public:
-  PlacePrinter(std::ostream &os) : os_(os) {}
+  explicit PlacePrinter(std::ostream &os) : os_(os) {}
   void operator()(const CPUPlace &) { os_ << "CPUPlace"; }
   void operator()(const GPUPlace &p) { os_ << "GPUPlace(" << p.device << ")"; }
 
diff --git a/paddle/utils/DynamicLoader.h b/paddle/utils/DynamicLoader.h
index 9b5ad21724afd7176f958619e7e10d12dc08fa49..2e5ff76a06152b6a12818f06baaeaa6a69726ba8 100644
--- a/paddle/utils/DynamicLoader.h
+++ b/paddle/utils/DynamicLoader.h
@@ -12,8 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#ifndef DYNAMIC_LOAD_H_
-#define DYNAMIC_LOAD_H_
+#pragma once
 
 #include <dlfcn.h>
 #include <memory>
@@ -59,5 +58,3 @@ void GetWarpCTCDsoHandle(void** dso_handle);
  *
  */
 void GetLapackDsoHandle(void** dso_handle);
-
-#endif  // DYNAMIC_LOAD_H_
diff --git a/paddle/utils/ThreadLocal.h b/paddle/utils/ThreadLocal.h
index b5e2862546212041a774599ec664b95e56224a07..0a27b8b97b83a9066af23039a317c437ea56777a 100644
--- a/paddle/utils/ThreadLocal.h
+++ b/paddle/utils/ThreadLocal.h
@@ -51,7 +51,7 @@ template <class T>
 class ThreadLocal {
 public:
   ThreadLocal() {
-    CHECK(pthread_key_create(&threadSpecificKey_, dataDestructor) == 0);
+    CHECK_EQ(pthread_key_create(&threadSpecificKey_, dataDestructor), 0);
   }
   ~ThreadLocal() { pthread_key_delete(threadSpecificKey_); }
 
@@ -65,7 +65,7 @@ public:
     if (!p && createLocal) {
      p = new T();
      int ret = pthread_setspecific(threadSpecificKey_, p);
-      CHECK(ret == 0);
+      CHECK_EQ(ret, 0);
     }
     return p;
   }
@@ -79,7 +79,7 @@ public:
     if (T* q = get(false)) {
       dataDestructor(q);
     }
-    CHECK(pthread_setspecific(threadSpecificKey_, p) == 0);
+    CHECK_EQ(pthread_setspecific(threadSpecificKey_, p), 0);
   }
 
   /**
@@ -112,7 +112,7 @@ private:
 template <class T>
 class ThreadLocalD {
 public:
-  ThreadLocalD() { CHECK(pthread_key_create(&threadSpecificKey_, NULL) == 0); }
+  ThreadLocalD() { CHECK_EQ(pthread_key_create(&threadSpecificKey_, NULL), 0); }
   ~ThreadLocalD() {
     pthread_key_delete(threadSpecificKey_);
     for (auto t : threadMap_) {
@@ -127,7 +127,7 @@ public:
     T* p = (T*)pthread_getspecific(threadSpecificKey_);
     if (!p) {
       p = new T();
-      CHECK(pthread_setspecific(threadSpecificKey_, p) == 0);
+      CHECK_EQ(pthread_setspecific(threadSpecificKey_, p), 0);
       updateMap(p);
     }
     return p;
@@ -141,7 +141,7 @@ public:
     if (T* q = (T*)pthread_getspecific(threadSpecificKey_)) {
       dataDestructor(q);
     }
-    CHECK(pthread_setspecific(threadSpecificKey_, p) == 0);
+    CHECK_EQ(pthread_setspecific(threadSpecificKey_, p), 0);
     updateMap(p);
   }