// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/details/scale_loss_grad_op_handle.h"

namespace paddle {
namespace framework {
namespace details {

ScaleLossGradOpHandle::ScaleLossGradOpHandle(size_t num_dev, Scope *scope,
                                             platform::Place place)
    // Scale the loss gradient by 1/num_dev so that gradients summed across
    // devices correspond to the gradient of the device-averaged loss.
    : coeff_(static_cast<float>(1.0 / num_dev)), scope_(scope), place_(place) {}

ScaleLossGradOpHandle::~ScaleLossGradOpHandle() {}

void ScaleLossGradOpHandle::RunImpl() {
  std::string var_name = static_cast<VarHandle *>(this->outputs_[0])->name_;

  // The loss-gradient variable is a one-element float tensor; allocate it on
  // the handle's place and obtain a raw pointer to its storage.
  float *tmp = scope_->FindVar(var_name)
                   ->GetMutable<LoDTensor>()
                   ->mutable_data<float>(make_ddim({1}), place_);

  if (platform::is_cpu_place(place_)) {
    *tmp = coeff_;
  } else {
#ifdef PADDLE_WITH_CUDA
    // On GPU, write the coefficient via a host-to-device copy issued on the
    // device context's CUDA stream.
    auto stream =
        static_cast<platform::CUDADeviceContext *>(this->dev_ctx_[place_])
            ->stream();
    memory::Copy(boost::get<platform::CUDAPlace>(place_), tmp,
                 platform::CPUPlace(), &coeff_, sizeof(float), stream);
#endif
  }
}
}  // namespace details
}  // namespace framework
}  // namespace paddle
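
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the effect of RunImpl
// in isolation. In multi-device data-parallel training, each replica's loss
// gradient is seeded with 1/num_dev so that summing the per-device gradients
// yields the gradient of the averaged loss. The standalone analogue below is
// a minimal sketch that uses a hypothetical plain-float buffer in place of
// LoDTensor and mirrors only the CPU branch:
//
//   #include <cassert>
//
//   // Seed a one-element loss-gradient buffer, analogous to the CPU branch
//   // of ScaleLossGradOpHandle::RunImpl.
//   void SeedLossGrad(float *grad, size_t num_dev) {
//     *grad = static_cast<float>(1.0 / num_dev);
//   }
//
//   int main() {
//     float grad = 0.0f;
//     SeedLossGrad(&grad, 4);  // 4 devices -> each replica seeds 0.25
//     assert(grad == 0.25f);
//     return 0;
//   }
// ---------------------------------------------------------------------------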