diff --git a/paddle/fluid/framework/details/build_strategy.cc b/paddle/fluid/framework/details/build_strategy.cc
index 04c1061536b778fa44e67504cd22846931e039c9..1e1b945f63cf480308c05ffc0f9a3b9f0b0da02b 100644
--- a/paddle/fluid/framework/details/build_strategy.cc
+++ b/paddle/fluid/framework/details/build_strategy.cc
@@ -118,7 +118,6 @@ std::unique_ptr<ir::Graph> BuildStrategy::Apply(
   std::unique_ptr<ir::Graph> graph(new ir::Graph(main_program));
 
   for (std::shared_ptr<ir::Pass> &pass : pass_builder_->AllPasses()) {
-    VLOG(5) << "run pass: " << pass->Type();
     if (pass->Type() == "multi_devices_pass") {
       pass->Erase("places");
       pass->SetNotOwned<const std::vector<platform::Place>>("places", &places);
diff --git a/paddle/fluid/framework/details/multi_devices_graph_pass.cc b/paddle/fluid/framework/details/multi_devices_graph_pass.cc
index 1bd238357a7c22ef42cfbefb13837e693668d19a..c16e3006d7646fa068d87d643e748f0fe14a9f5c 100644
--- a/paddle/fluid/framework/details/multi_devices_graph_pass.cc
+++ b/paddle/fluid/framework/details/multi_devices_graph_pass.cc
@@ -329,7 +329,6 @@ std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilder::ApplyImpl(
   std::unordered_map<std::string, int> sharded_var_device;
 
   for (ir::Node *node : sorted_ops) {
-    VLOG(5) << "op name: " << node->Op()->Type();
     if (boost::get<int>(
             node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
         static_cast<int>(OpRole::kRPC)) {
@@ -366,11 +365,9 @@ std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilder::ApplyImpl(
         // is true only for the op that scale the final scalar loss.
         // It also assumes backward op will always follow the forward op in
         // the block.
-        VLOG(5) << "this is loss scale op!";
         is_forwarding = false;
       } else {
         int op_dev_id = GetOpDeviceID(result, node, sharded_var_device);
-        VLOG(5) << "on device id: " << op_dev_id;
         if (op_dev_id != -1) {  // This op only runs on one specific device.
           CreateComputationalOp(&result, node, op_dev_id);
           for (ir::Node *n : node->outputs) {
diff --git a/paddle/fluid/framework/parallel_executor.h b/paddle/fluid/framework/parallel_executor.h
index 319701f1eb8784202bd94be559c3802784499d33..ef09b98b2aa91a9d729b94d15dbb676dde4092b6 100644
--- a/paddle/fluid/framework/parallel_executor.h
+++ b/paddle/fluid/framework/parallel_executor.h
@@ -20,8 +20,6 @@ limitations under the License. */
 #include <string>
 #include <vector>
 
-#include "ThreadPool.h"
-
 #include "paddle/fluid/framework/details/build_strategy.h"
 #include "paddle/fluid/framework/details/execution_strategy.h"
 #include "paddle/fluid/framework/executor.h"
diff --git a/paddle/fluid/framework/scope.cc b/paddle/fluid/framework/scope.cc
index 873f68e42e4fb2b839f64da001014dab8af790da..0d261dd7ccc323abddd2c3ef13f1874661a8ca75 100644
--- a/paddle/fluid/framework/scope.cc
+++ b/paddle/fluid/framework/scope.cc
@@ -58,10 +58,7 @@ int64_t GetEagerDeletionThreshold() {
                                     (static_cast<int64_t>(1) << 30));
 }
 
-Scope::~Scope() {
-  VLOG(5) << "~Scope()";
-  DropKids();
-}
+Scope::~Scope() { DropKids(); }
 
 Scope& Scope::NewScope() const {
   SCOPE_LOCK_GUARD
diff --git a/paddle/fluid/framework/threadpool.cc b/paddle/fluid/framework/threadpool.cc
index 7dc7430c55b8147a11441b9a928b4712822e3a98..d34f826c1abb99198fd4dbe9537495edff7b63af 100644
--- a/paddle/fluid/framework/threadpool.cc
+++ b/paddle/fluid/framework/threadpool.cc
@@ -48,18 +48,9 @@ void ThreadPool::Init() {
 
 ThreadPool::ThreadPool(int num_threads) : running_(true) {
   threads_.resize(num_threads);
-  for (int i = 0; i < num_threads; ++i) {
-    // for (auto& thread : threads_) {
+  for (auto& thread : threads_) {
     // TODO(Yancey1989): binding the thread on the specify CPU number
-    threads_[i].reset(
-        new std::thread(std::bind(&ThreadPool::TaskLoop, this, i)));
-    /**
-    sched_param sch;
-    int policy;
-    pthread_getschedparam(threads_[i]->native_handle(), &policy, &sch);
-    if (pthread_setschedparam(threads_[i]->native_handle(), SCHED_FIFO, &sch)) {
-      VLOG(1) << "Failed to setschedparam: " << errno;
-    }**/
+    thread.reset(new std::thread(std::bind(&ThreadPool::TaskLoop, this)));
   }
 }
 
@@ -77,7 +68,7 @@ ThreadPool::~ThreadPool() {
   }
 }
 
-void ThreadPool::TaskLoop(int i) {
+void ThreadPool::TaskLoop() {
   while (true) {
     Task task;
 
diff --git a/paddle/fluid/framework/threadpool.h b/paddle/fluid/framework/threadpool.h
index bd8c3cdee8ccbc7e110c2be0310693e2595d272b..5177b7ee029d5e01956de2ff2a8d725392e63e12 100644
--- a/paddle/fluid/framework/threadpool.h
+++ b/paddle/fluid/framework/threadpool.h
@@ -99,7 +99,7 @@ class ThreadPool {
 
   // The constructor starts threads to run TaskLoop, which retrieves
   // and runs tasks from the queue.
-  void TaskLoop(int i);
+  void TaskLoop();
 
   // Init is called by GetInstance.
   static void Init();
diff --git a/paddle/fluid/framework/threadpool_test.cc b/paddle/fluid/framework/threadpool_test.cc
index 1257a76e3e64fb7dfa0b1f1095b2e939bdae9b49..884d61e23428a0ad758946295ca9c470767e93ef 100644
--- a/paddle/fluid/framework/threadpool_test.cc
+++ b/paddle/fluid/framework/threadpool_test.cc
@@ -59,47 +59,3 @@ TEST(ThreadPool, ConcurrentRun) {
   }
   EXPECT_EQ(sum, ((n + 1) * n) / 2);
 }
-static int64_t GetTS() {
-  struct timeval tp;
-  gettimeofday(&tp, NULL);
-  return tp.tv_sec * 1000000 + tp.tv_usec;
-}
-
-void multi_call(std::function<void()> call) {
-  for (int i = 0; i < 500; ++i) {
-    call();
-  }
-}
-
-TEST(ThreadPool, PERFORMANCE) {
-  auto sum = [] {
-    int a = 0;
-    for (int i = 0; i < 1000; ++i) {
-      a += i;
-    }
-  };
-  // framework::ThreadPool *pool = new framework::ThreadPool(2);
-  int64_t start = GetTS();
-  for (int i = 0; i < 1000; ++i) {
-    // int64_t s = GetTS();
-    framework::Async(std::move(sum));
-    // pool->Run(std::move(sum));
-    // VLOG(5) << "push to pool spent : " << GetTS() - s << " (us).";
-  }
-  VLOG(5) << "pool spent: " << GetTS() - start << " (us).";
-  start = GetTS();
-  for (int i = 0; i < 1000; ++i) {
-    sum();
-  }
-  VLOG(5) << "sequence call spent: " << GetTS() - start << " (us).";
-  std::vector<std::thread> threads;
-  start = GetTS();
-  for (int i = 0; i < 2; ++i) {
-    std::thread t(multi_call, std::ref(sum));
-    threads.push_back(std::move(t));
-  }
-  for (auto& thread : threads) {
-    thread.join();
-  }
-  VLOG(5) << "two threads spent: " << GetTS() - start << " (us).";
-}
diff --git a/paddle/fluid/operators/reader/blocking_queue.h b/paddle/fluid/operators/reader/blocking_queue.h
index 10de11bfa5c16f71c131a3352b6a3d63915824e3..51b980acb5a08d431d96a3a92479dec09119c27e 100644
--- a/paddle/fluid/operators/reader/blocking_queue.h
+++ b/paddle/fluid/operators/reader/blocking_queue.h
@@ -67,12 +67,9 @@ class BlockingQueue {
   }
 
   bool Receive(T* elem) {
-    VLOG(1) << "blocking queue::Receive ...";
     std::unique_lock<std::mutex> lock(mutex_);
     receive_cv_.wait(lock, [&] { return !queue_.empty() || closed_; });
-    VLOG(1) << "queue_.empty()=" << queue_.empty();
     if (!queue_.empty()) {
-      if (elem == nullptr) VLOG(1) << "elem is nullptr";
       PADDLE_ENFORCE_NOT_NULL(elem);
       *elem = queue_.front();
       if (LIKELY(!speed_test_mode_)) {
diff --git a/paddle/fluid/operators/reader/buffered_reader.cc b/paddle/fluid/operators/reader/buffered_reader.cc
index 2d66000f1f88169a6b03217b80ea4770f27d5b55..cfa192f8e17c1bf56dc27be190e4d37341a9c7b0 100644
--- a/paddle/fluid/operators/reader/buffered_reader.cc
+++ b/paddle/fluid/operators/reader/buffered_reader.cc
@@ -82,13 +82,11 @@ void BufferedReader::StartImpl() {
 }
 
 void BufferedReader::ReadNextImpl(std::vector<framework::LoDTensor> *out) {
-  VLOG(1) << "ReadNextImpl start on place: " << place_;
   if (position_.empty()) {
     out->clear();
     return;
   }
   size_t i = position_.front().get();
-  VLOG(1) << "position front: " << i;
   position_.pop();
 
   if (i == -1UL) {
@@ -105,7 +103,6 @@ void BufferedReader::ReadNextImpl(std::vector<framework::LoDTensor> *out) {
     ReadAsync(prev_pos_);
   }
   prev_pos_ = i;
-  VLOG(1) << "success ReadNextImpl";
 }
 
 }  // namespace reader
diff --git a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc
index 924c92e0bfdf764d7d02dafb2fe724611a1a3ed0..954fec0fbcffdd7cec7a92c64dd5286f95dcb14d 100644
--- a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc
+++ b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc
@@ -25,15 +25,9 @@ class CreateDoubleBufferReaderOp : public framework::OperatorBase {
  private:
   void RunImpl(const framework::Scope& scope,
                const platform::Place& dev_place) const override {
-    VLOG(1) << "find var in scope: " << &scope;
-    auto* out_var = scope.FindVar(Output("Out"));
-    VLOG(1) << "var " << Output("Out") << " -> " << out_var;
-    auto* out = out_var->GetMutable<framework::ReaderHolder>();
-
-    // auto* out = scope.Var(Output("Out"))
-    //                 ->template GetMutable<framework::ReaderHolder>();
+    auto* out = scope.Var(Output("Out"))
+                    ->template GetMutable<framework::ReaderHolder>();
     if (out->Get() != nullptr) {
-      VLOG(1) << Output("Out") << " is not nullptr.";
       return;
     }
     const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader"))
@@ -52,11 +46,8 @@ class CreateDoubleBufferReaderOp : public framework::OperatorBase {
       sin >> num;
       place = platform::CUDAPlace(static_cast<int>(num));
     }
-    VLOG(1) << "create buffered reader on " << place;
     out->Reset(framework::MakeDecoratedReader<BufferedReader>(underlying_reader,
                                                               place, 2));
-    VLOG(1) << "Reset Buffered Reader in var: "
-            << scope.FindVar(Input("UnderlyingReader"));
   }
 };
 
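For context, the pattern that the parameterless `ThreadPool::TaskLoop()` above returns to (every worker thread runs the same loop over one shared task queue, so no per-thread index is needed) can be sketched in isolation roughly as follows. This is a simplified, hypothetical `SimplePool`, not Paddle's actual `ThreadPool`: the task type is reduced to `std::function<void()>` and error handling is omitted.

#include <condition_variable>
#include <functional>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>
#include <utility>
#include <vector>

// Hypothetical, simplified pool: workers are interchangeable, so TaskLoop()
// takes no per-thread index argument.
class SimplePool {
 public:
  explicit SimplePool(int num_threads) : running_(true) {
    threads_.resize(num_threads);
    for (auto& thread : threads_) {
      // Every worker runs the same TaskLoop(); nothing is bound per thread.
      thread.reset(new std::thread([this] { TaskLoop(); }));
    }
  }

  ~SimplePool() {
    {
      std::unique_lock<std::mutex> lock(mutex_);
      running_ = false;
    }
    scheduled_.notify_all();
    for (auto& thread : threads_) thread->join();
  }

  void Run(std::function<void()> task) {
    {
      std::unique_lock<std::mutex> lock(mutex_);
      tasks_.push(std::move(task));
    }
    scheduled_.notify_one();
  }

 private:
  void TaskLoop() {
    while (true) {
      std::function<void()> task;
      {
        std::unique_lock<std::mutex> lock(mutex_);
        scheduled_.wait(lock, [this] { return !tasks_.empty() || !running_; });
        if (!running_ && tasks_.empty()) return;  // queue drained, shut down
        task = std::move(tasks_.front());
        tasks_.pop();
      }
      task();  // run the task outside the lock
    }
  }

  std::vector<std::unique_ptr<std::thread>> threads_;
  std::queue<std::function<void()>> tasks_;
  std::mutex mutex_;
  std::condition_variable scheduled_;
  bool running_;
};

A caller would construct `SimplePool pool(4);`, submit work with `pool.Run([] { /* work */ });`, and let the destructor join the workers once the queue is drained.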