diff --git a/paddle/fluid/imperative/basic_engine.cc b/paddle/fluid/imperative/basic_engine.cc
index 990937647ac8809f55a9f5ac0f32d0bf944c4595..29ba54986801f117074a415084b3e0f10675954b 100644
--- a/paddle/fluid/imperative/basic_engine.cc
+++ b/paddle/fluid/imperative/basic_engine.cc
@@ -281,6 +281,8 @@ void BasicEngine::Execute() {
     auto& inplace_grad_name_map = shared_cur_node->InplaceGradNameMap();
 
     for (auto& cur_op : *shared_cur_node) {
+      platform::RecordEvent op_type_record_event(cur_op.Type());
+
       ++op_num;
 
       // CheckBackWardInput
diff --git a/paddle/fluid/imperative/layer.cc b/paddle/fluid/imperative/layer.cc
index 3123d4b507704ddcbbc6bf04846782f020fe0884..365dbbfa125fdaceaa900d2b1b73d28aa5a739e1 100644
--- a/paddle/fluid/imperative/layer.cc
+++ b/paddle/fluid/imperative/layer.cc
@@ -206,6 +206,7 @@ void VarBase::ClearGradient() {
         grad_t->mutable_value()->clear();
       }
     } else {
+      platform::RecordEvent record_event("ClearGradient");
       auto* grad_t =
           grad_var_->MutableVar()->GetMutable<framework::LoDTensor>();
       if (grad_t->IsInitialized()) {
diff --git a/paddle/fluid/imperative/tracer.cc b/paddle/fluid/imperative/tracer.cc
index e5d664070e1a467ef27f196e54a4056af3d2018f..1cf94c7a79ea47b3fc096259961ac161c90ecaa4 100644
--- a/paddle/fluid/imperative/tracer.cc
+++ b/paddle/fluid/imperative/tracer.cc
@@ -133,6 +133,7 @@ void Tracer::TraceOp(const std::string& type, const NameVarBaseMap& ins,
                      const NameVarBaseMap& outs, framework::AttributeMap attrs,
                      const platform::Place& place, bool trace_backward,
                      const std::map<std::string, std::string>& inplace_map) {
+  platform::RecordEvent op_type_record_event(type);
   VLOG(1) << "Trace Op: " << type;
   if (FLAGS_use_mkldnn) {
     // if both lists are empty all ops are enabled (default for
diff --git a/paddle/fluid/pybind/tensor_py.h b/paddle/fluid/pybind/tensor_py.h
index 7e60c98dc183276836fba4024b5b1d6ec3a865a5..6d1281d11f1ad6b4b3c4b63052efea4eb12ccd4e 100644
--- a/paddle/fluid/pybind/tensor_py.h
+++ b/paddle/fluid/pybind/tensor_py.h
@@ -29,6 +29,7 @@ limitations under the License. */
 #include "paddle/fluid/platform/bfloat16.h"
 #include "paddle/fluid/platform/device_context.h"
 #include "paddle/fluid/platform/float16.h"
+#include "paddle/fluid/platform/profiler.h"
 #include "pybind11/numpy.h"
 #include "pybind11/pybind11.h"
 
@@ -293,6 +294,7 @@ void SetTensorFromPyArrayT(
       auto dst = self->mutable_data<T>(place);
       paddle::platform::GpuMemcpySync(dst, array.data(), array.nbytes(),
                                       cudaMemcpyHostToDevice);
+
     } else if (paddle::platform::is_cuda_pinned_place(place)) {
       auto dst = self->mutable_data<T>(place);
       std::memcpy(dst, array.data(), array.nbytes());
@@ -706,8 +708,9 @@ inline py::array TensorToPyArray(const framework::Tensor &tensor,
                           "or double free would occur"));
 
     size_t copy_bytes = sizeof_dtype * numel;
-    paddle::platform::GpuMemcpySync(py_arr.mutable_data(), tensor_buf_ptr,
-                                    copy_bytes, cudaMemcpyDeviceToHost);
+    auto p = BOOST_GET_CONST(platform::CUDAPlace, tensor.place());
+    paddle::memory::Copy(platform::CPUPlace(), py_arr.mutable_data(), p,
+                         tensor_buf_ptr, copy_bytes, nullptr);
     return py_arr;
 #else
     PADDLE_THROW(platform::errors::PermissionDenied(
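
The recurring change in this diff is the RAII scoped-event pattern: platform::RecordEvent opens a named profiler event in its constructor and closes it in its destructor, so each traced op, backward op, and gradient clear is timed without explicit begin/end calls. Below is a minimal, self-contained sketch of that pattern; ScopedRecordEvent and its printed timings are hypothetical stand-ins for illustration, not Paddle's actual profiler implementation.

// Sketch of the RAII scoped-event pattern used by platform::RecordEvent.
// ScopedRecordEvent is a hypothetical stand-in that prints wall-clock
// durations; Paddle's real class reports to its profiler instead.
#include <chrono>
#include <iostream>
#include <string>

class ScopedRecordEvent {
 public:
  explicit ScopedRecordEvent(std::string name)
      : name_(std::move(name)), start_(std::chrono::steady_clock::now()) {}

  // The destructor runs when the enclosing scope exits (e.g. one iteration
  // of the backward-op loop), so each event's cost is attributed to the op
  // whose type named it.
  ~ScopedRecordEvent() {
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::steady_clock::now() - start_)
                  .count();
    std::cout << "[event] " << name_ << ": " << us << " us\n";
  }

 private:
  std::string name_;
  std::chrono::steady_clock::time_point start_;
};

int main() {
  const char* op_types[] = {"matmul_grad", "relu_grad"};
  for (const char* type : op_types) {
    // Mirrors the diff: one named event per op, scoped to the loop body.
    ScopedRecordEvent op_type_record_event(type);
    // ... op would execute here ...
  }
  return 0;
}

The last hunk is the one behavioral change: TensorToPyArray now performs the device-to-host copy through paddle::memory::Copy rather than calling GpuMemcpySync directly. Passing a nullptr stream keeps the copy synchronous while letting the memory layer select the correct device for the source CUDAPlace.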