Commit 559b0224 authored by Yi Wang, committed by GitHub

Merge pull request #3256 from wangkuiyi/cpplint_errors

Add explicit to some constructors
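Note: cpplint's runtime/explicit check flags single-argument constructors that are not marked `explicit`, because such constructors double as implicit conversions. A minimal, self-contained sketch of the pitfall this commit closes (the Meters type and Walk function are hypothetical, not part of this patch):

    #include <iostream>

    // Hypothetical type, not from this patch: if `explicit` were removed from
    // the constructor, the commented-out call below would compile via a
    // silent double -> Meters conversion.
    struct Meters {
      explicit Meters(double v) : value(v) {}
      double value;
    };

    void Walk(Meters distance) { std::cout << distance.value << " m\n"; }

    int main() {
      Walk(Meters(3.0));  // OK: the conversion is spelled out at the call site
      // Walk(3.0);       // error with explicit; compiles silently without it
      return 0;
    }

The same reasoning applies to every constructor touched below; call sites only have to change where they previously leaned on an implicit conversion.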
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 #pragma once
 #include <boost/variant.hpp>
@@ -29,7 +43,7 @@ Attribute GetAttrValue(const AttrDesc& attr_desc);
 template <typename T>
 class LargerThanChecker {
 public:
-  LargerThanChecker(T lower_bound) : lower_bound_(lower_bound) {}
+  explicit LargerThanChecker(T lower_bound) : lower_bound_(lower_bound) {}
   void operator()(T& value) const {
     PADDLE_ENFORCE(value > lower_bound_, "larger_than check fail");
   }
@@ -44,7 +58,8 @@ class LargerThanChecker {
 template <typename T>
 class DefaultValueSetter {
 public:
-  DefaultValueSetter(T default_value) : default_value_(default_value) {}
+  explicit DefaultValueSetter(T default_value)
+      : default_value_(default_value) {}
   void operator()(T& value) const { value = default_value_; }

 private:
@@ -87,7 +102,8 @@ class TypedAttrChecker {
   typedef std::function<void(T&)> ValueChecker;

 public:
-  TypedAttrChecker(const std::string& attr_name) : attr_name_(attr_name) {}
+  explicit TypedAttrChecker(const std::string& attr_name)
+      : attr_name_(attr_name) {}
   TypedAttrChecker& InEnum(const std::unordered_set<T>& range) {
     value_checkers_.push_back(EnumInContainer<T>(range));
......
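Note: for the attribute checkers above, marking the constructors explicit leaves direct initialization untouched and only outlaws silent conversions from a bare value. A rough sketch against the class shape shown in the hunk (assumes the attr_checker header is included):

    // LargerThanChecker as declared above; only the call sites are new here.
    LargerThanChecker<int> positive(0);   // still fine: direct initialization
    // LargerThanChecker<int> bad = 0;    // no longer compiles: constructor is explicit

    int v = 5;
    positive(v);  // passes; v <= 0 would trigger the "larger_than check fail" enforce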
@@ -38,10 +38,10 @@ void SegmentInputs(const std::vector<Scope*>& step_scopes,
"input link [%s] is not in scope.",
inlinks[i].external);
Tensor* input = input_var->GetMutable<Tensor>();
DDim dims = input->dims();
framework::DDim dims = input->dims();
PADDLE_ENFORCE(static_cast<size_t>(dims[0]) == seq_len,
"all the inlinks must have same length");
DDim step_dims = slice_ddim(dims, 1, dims.size());
framework::DDim step_dims = slice_ddim(dims, 1, dims.size());
for (size_t j = 0; j < seq_len; j++) {
Tensor* step_input =
step_scopes[j]->NewVar(inlinks[i].internal)->GetMutable<Tensor>();
@@ -64,13 +64,13 @@ void ConcatOutputs(const std::vector<Scope*>& step_scopes,
                    outlinks[i].external);
     Tensor* output = output_var->GetMutable<Tensor>();
     if (infer_shape_mode) {
-      DDim step_dims = step_scopes[0]
-                           ->FindVar(outlinks[i].internal)
-                           ->GetMutable<Tensor>()
-                           ->dims();
+      framework::DDim step_dims = step_scopes[0]
+                                      ->FindVar(outlinks[i].internal)
+                                      ->GetMutable<Tensor>()
+                                      ->dims();
       std::vector<int> dims_vec = vectorize(step_dims);
       dims_vec.insert(dims_vec.begin(), seq_len);
-      output->Resize(make_ddim(dims_vec));
+      output->Resize(framework::make_ddim(dims_vec));
     } else {
       output->mutable_data<float>(platform::CPUPlace());
       for (size_t j = 0; j < seq_len; j++) {
......
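Note: the DDim -> framework::DDim edits above pair with dropping the file-scope `using namespace paddle::framework;` in the next hunk; once the using-directive is gone, framework names must be qualified or imported one by one. A self-contained toy contrasting the two styles (DDim here is a stub, not the real class):

    namespace paddle {
    namespace framework {
    struct DDim {};  // stub standing in for the real framework::DDim
    }  // namespace framework

    namespace operators {
    // Style kept by this patch: fully qualify each use.
    framework::DDim MakeStepDims() { return framework::DDim{}; }

    // Narrower alternative used in the recurrent_op test below: import one name.
    using framework::DDim;
    DDim MakeDims() { return DDim{}; }
    }  // namespace operators
    }  // namespace paddle

    int main() { return 0; }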
@@ -19,8 +19,6 @@
 namespace paddle {
 namespace operators {

-using namespace paddle::framework;  // NOLINT
-
 namespace rnn {

 /**
@@ -70,7 +68,7 @@ struct ArgumentName {
 /**
  * Prepare inputs for each step net.
  */
-void SegmentInputs(const std::vector<Scope*>& step_scopes,
+void SegmentInputs(const std::vector<framework::Scope*>& step_scopes,
                    const std::vector<Link>& inlinks,
                    const size_t seq_len,
                    bool infer_shape_mode);
@@ -78,12 +76,12 @@ void SegmentInputs(const std::vector<Scope*>& step_scopes,
 /**
  * Process outputs of step nets and merge to variables.
  */
-void ConcatOutputs(const std::vector<Scope*>& step_scopes,
+void ConcatOutputs(const std::vector<framework::Scope*>& step_scopes,
                    const std::vector<Link>& outlinks,
                    const size_t seq_len,
                    bool infer_shape_mode);

-void LinkMemories(const std::vector<Scope*>& step_scopes,
+void LinkMemories(const std::vector<framework::Scope*>& step_scopes,
                   const std::vector<MemoryAttr>& memories,
                   const size_t step_id,
                   const int offset,
@@ -103,14 +101,15 @@ void InitArgument(const ArgumentName& name, Argument* arg);
 class RecurrentAlgorithm {
 public:
-  void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const;
+  void Run(const framework::Scope& scope,
+           const platform::DeviceContext& dev_ctx) const;

   void Init(std::unique_ptr<rnn::Argument> arg) { arg_ = std::move(arg); }

   /**
    * InferShape must be called before Run.
    */
-  void InferShape(const Scope& scope) const;
+  void InferShape(const framework::Scope& scope) const;

 protected:
   /*
@@ -119,13 +118,15 @@ protected:
    * NOTE the scopes are reused in both the forward and backward, so just
    * create once and expand its size if more steps need.
    */
-  void CreateScopes(const Scope& scope) const;
+  void CreateScopes(const framework::Scope& scope) const;

-  const std::vector<Scope*>& GetStepScopes(const Scope& scope) const {
-    return *scope.FindVar(arg_->step_scopes)->GetMutable<std::vector<Scope*>>();
+  const std::vector<framework::Scope*>& GetStepScopes(
+      const framework::Scope& scope) const {
+    return *scope.FindVar(arg_->step_scopes)
+                ->GetMutable<std::vector<framework::Scope*>>();
   }

-  void InitMemories(Scope* step_scopes, bool infer_shape_mode) const;
+  void InitMemories(framework::Scope* step_scopes, bool infer_shape_mode) const;

 private:
   std::unique_ptr<rnn::Argument> arg_;
@@ -146,18 +147,22 @@ class RecurrentGradientAlgorithm {
 public:
   void Init(std::unique_ptr<rnn::Argument> arg) { arg_ = std::move(arg); }

-  void Run(const Scope& scope, const platform::DeviceContext& dev_ctx) const;
+  void Run(const framework::Scope& scope,
+           const platform::DeviceContext& dev_ctx) const;

-  void LinkBootMemoryGradients(Scope* step_scopes, bool infer_shape_mode) const;
+  void LinkBootMemoryGradients(framework::Scope* step_scopes,
+                               bool infer_shape_mode) const;

   /**
    * InferShape must be called before Run.
    */
-  void InferShape(const Scope& scope) const;
+  void InferShape(const framework::Scope& scope) const;

 protected:
-  inline const std::vector<Scope*>& GetStepScopes(const Scope& scope) const {
-    return *scope.FindVar(arg_->step_scopes)->GetMutable<std::vector<Scope*>>();
+  inline const std::vector<framework::Scope*>& GetStepScopes(
+      const framework::Scope& scope) const {
+    return *scope.FindVar(arg_->step_scopes)
+                ->GetMutable<std::vector<framework::Scope*>>();
   }

 private:
@@ -165,16 +170,18 @@ private:
   mutable size_t seq_len_;
 };

-class RecurrentOp final : public OperatorBase {
+class RecurrentOp final : public framework::OperatorBase {
 public:
   void Init() override;

   /**
    * InferShape must be called before Run.
    */
-  void InferShape(const Scope& scope) const override { alg_.InferShape(scope); }
+  void InferShape(const framework::Scope& scope) const override {
+    alg_.InferShape(scope);
+  }

-  void Run(const Scope& scope,
+  void Run(const framework::Scope& scope,
            const platform::DeviceContext& dev_ctx) const override {
     alg_.Run(scope, dev_ctx);
   }
@@ -185,16 +192,18 @@ private:
   RecurrentAlgorithm alg_;
 };

-class RecurrentGradientOp final : public OperatorBase {
+class RecurrentGradientOp final : public framework::OperatorBase {
 public:
   void Init() override;

   /**
    * InferShape must be called before Run.
    */
-  void InferShape(const Scope& scope) const override { alg_.InferShape(scope); }
+  void InferShape(const framework::Scope& scope) const override {
+    alg_.InferShape(scope);
+  }

-  void Run(const Scope& scope,
+  void Run(const framework::Scope& scope,
            const platform::DeviceContext& dev_ctx) const override {
     alg_.Run(scope, dev_ctx);
   }
......
@@ -16,6 +16,7 @@
 #include <glog/logging.h>
 #include <gtest/gtest.h>
 #include "paddle/framework/ddim.h"
+#include "paddle/framework/op_registry.h"
 #include "paddle/framework/operator.h"
 #include "paddle/framework/tensor.h"
@@ -24,6 +25,9 @@
 namespace paddle {
 namespace operators {

+using framework::make_ddim;
+using framework::DDim;
+
 class RecurrentOpTest : public ::testing::Test {
 protected:
   virtual void SetUp() override {
@@ -72,7 +76,7 @@ protected:
   }

   void CreateRNNOp() {
-    OpDesc op_desc;
+    framework::OpDesc op_desc;
     op_desc.set_type("recurrent_op");
     // inlinks 0
......
@@ -55,7 +55,7 @@ class CPUDeviceContext : public DeviceContext {
 class CUDADeviceContext : public DeviceContext {
 public:
-  CUDADeviceContext(GPUPlace);  // NOLINT
+  explicit CUDADeviceContext(GPUPlace);
   virtual ~CUDADeviceContext();

   /*! \brief Wait for all operations completion in the stream. */
......
@@ -15,24 +15,28 @@ limitations under the License. */
#include "paddle/platform/device_context.h"
#include "gtest/gtest.h"
using DEVICE_GPU = Eigen::GpuDevice;
TEST(Device, Init) {
using paddle::platform::DeviceContext;
using paddle::platform::CUDADeviceContext;
using paddle::platform::GPUPlace;
int count = paddle::platform::GetDeviceCount();
for (int i = 0; i < count; i++) {
paddle::platform::DeviceContext* device_context =
new paddle::platform::CUDADeviceContext(i);
DeviceContext* device_context = new CUDADeviceContext(GPUPlace(i));
Eigen::GpuDevice* gpu_device =
device_context->template get_eigen_device<DEVICE_GPU>();
device_context->template get_eigen_device<Eigen::GpuDevice>();
ASSERT_NE(nullptr, gpu_device);
delete device_context;
}
}
TEST(Device, CUDADeviceContext) {
using paddle::platform::CUDADeviceContext;
using paddle::platform::GPUPlace;
int count = paddle::platform::GetDeviceCount();
for (int i = 0; i < count; i++) {
paddle::platform::CUDADeviceContext* device_context =
new paddle::platform::CUDADeviceContext(i);
CUDADeviceContext* device_context = new CUDADeviceContext(GPUPlace(i));
Eigen::GpuDevice* gpu_device = device_context->eigen_device();
ASSERT_NE(nullptr, gpu_device);
cudnnHandle_t cudnn_handle = device_context->cudnn_handle();
......
@@ -32,7 +32,7 @@ struct CPUPlace {
 struct GPUPlace {
   GPUPlace() : GPUPlace(0) {}
-  GPUPlace(int d) : device(d) {}  // NOLINT
+  explicit GPUPlace(int d) : device(d) {}

   // needed for variant equality comparison
   inline bool operator==(const GPUPlace &o) const { return device == o.device; }
......
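Note: the test rewrite above falls out of this hunk: with GPUPlace(int) now explicit, the old `new CUDADeviceContext(i)` would need a silent int-to-GPUPlace conversion and stops compiling. A compressed illustration with stub types (not the real classes):

    struct GPUPlace {
      explicit GPUPlace(int d) : device(d) {}
      int device;
    };

    struct CUDADeviceContext {
      explicit CUDADeviceContext(GPUPlace place) : place_(place) {}
      GPUPlace place_;
    };

    int main() {
      int i = 0;
      // CUDADeviceContext* ctx = new CUDADeviceContext(i);  // error: int -> GPUPlace is explicit
      CUDADeviceContext* ctx = new CUDADeviceContext(GPUPlace(i));  // matches the updated tests
      delete ctx;
      return 0;
    }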
@@ -39,8 +39,8 @@ public:
   // size_ is 0.
   Piece();
   Piece(const char* d, size_t n);
-  Piece(const char* d);         // NOLINT
-  Piece(const std::string& s);  // NOLINT
+  Piece(const char* d);         // NOLINT: accept C string into Piece.
+  Piece(const std::string& s);  // NOLINT: accept C++ string into Piece.

   const char* data() const { return data_; }
   size_t len() const { return size_; }
......
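Note: Piece is the one place where the single-argument constructors stay implicit on purpose, hence the NOLINT comments instead of `explicit`: a string-view-like type is only ergonomic if literals and std::string convert silently. A sketch of the intended call pattern, assuming the Piece interface above (PieceLen is a hypothetical helper, not part of this patch):

    #include <string>

    size_t PieceLen(Piece p) { return p.len(); }  // hypothetical helper

    void Demo() {
      std::string s = "hello";
      PieceLen("hello");  // relies on the implicit Piece(const char*), by design
      PieceLen(s);        // relies on the implicit Piece(const std::string&), by design
    }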