Commit b58725bd authored by liaogang

Add cpplint for *.h and cuda *.cu

Parent: dd249a50
@@ -187,7 +187,13 @@ function(cc_library TARGET_NAME)
     endif()
     # cpplint code style
-    add_style_check_target(${TARGET_NAME} ${cc_library_SRCS})
+    foreach(source_file ${cc_library_SRCS})
+      string(REGEX REPLACE "\\.[^.]*$" "" source ${source_file})
+      if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${source}.h)
+        list(APPEND cc_library_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/${source}.h)
+      endif()
+    endforeach()
+    add_style_check_target(${TARGET_NAME} ${cc_library_SRCS} ${cc_library_HEADERS})
   else(cc_library_SRCS)
     if (cc_library_DEPS)
@@ -239,6 +245,14 @@ function(nv_library TARGET_NAME)
       add_dependencies(${TARGET_NAME} ${nv_library_DEPS})
       target_link_libraries(${TARGET_NAME} ${nv_library_DEPS})
     endif()
+    # cpplint code style
+    foreach(source_file ${nv_library_SRCS})
+      string(REGEX REPLACE "\\.[^.]*$" "" source ${source_file})
+      if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${source}.h)
+        list(APPEND cc_library_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/${source}.h)
+      endif()
+    endforeach()
+    add_style_check_target(${TARGET_NAME} ${nv_library_SRCS} ${nv_library_HEADERS})
   else(nv_library_SRCS)
     if (nv_library_DEPS)
       merge_static_libs(${TARGET_NAME} ${nv_library_DEPS})
......
@@ -25,18 +25,15 @@ limitations under the License. */
 namespace paddle {
 namespace framework {
-namespace {
-typedef boost::variant<Dim<1>, Dim<2>, Dim<3>, Dim<4>, Dim<5>, Dim<6>, Dim<7>,
-                       Dim<8>, Dim<9>>
-    DDimVar;
-}
 /**
  * \brief A dynamically sized dimension.
  *
  * The number of dimensions must be between [1, 9].
  */
 struct DDim {
+  typedef boost::variant<Dim<1>, Dim<2>, Dim<3>, Dim<4>, Dim<5>, Dim<6>, Dim<7>,
+                         Dim<8>, Dim<9>>
+      DDimVar;
   DDimVar var;
   DDim() : var(Dim<1>()) {}
......
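The ddim.h hunk above moves the DDimVar typedef out of an unnamed namespace and into the DDim struct itself; cpplint discourages unnamed namespaces in header files (its build/namespaces check), and nesting the alias keeps it scoped to the one type that uses it. A minimal sketch of the resulting pattern, with std::variant standing in for the original boost::variant over Dim<1>..Dim<9> (the names below are illustrative, not from the Paddle sources):

    #include <variant>

    // The alias is nested in the struct instead of leaking from an unnamed
    // namespace at header scope, which is what cpplint objects to in headers.
    struct DDimSketch {
      using DDimVar = std::variant<int, long, double>;  // stand-in for boost::variant<Dim<1>, ..., Dim<9>>
      DDimVar var;
      DDimSketch() : var(0) {}
    };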
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 #pragma once
 #include "paddle/framework/op_proto.pb.h"
@@ -10,7 +24,7 @@ class OpRegistry;
 enum InOutType { IN, OUT };
 struct OpInOutArg {
-  OpInOutArg(const std::string& proto_name, const InOutType& type,
+  explicit OpInOutArg(const std::string& proto_name, const InOutType& type,
              bool needed_in_grad, size_t begin_idx, size_t end_idx)
       : proto_name_(proto_name),
         type_(type),
@@ -29,7 +43,7 @@ class GradOpBuilder {
   using VarIndexMap = std::unordered_map<std::string, int>;
 public:
-  GradOpBuilder(const OperatorBase& op) : op_(op) {}
+  explicit GradOpBuilder(const OperatorBase& op) : op_(op) {}
   OperatorBase* Build();
 private:
......
@@ -315,7 +315,7 @@ class OpRegistry {
   static std::unordered_map<std::string, OpProto>& protos() {
     static std::unordered_map<std::string, OpProto> protos_;
     return protos_;
-  };
+  }
   static std::unordered_map<std::string, std::string>& grad_ops() {
     static std::unordered_map<std::string, std::string> grad_ops_;
@@ -337,7 +337,7 @@ class OpRegistry {
   static std::unordered_map<std::string, OpAttrChecker>& op_checkers() {
     static std::unordered_map<std::string, OpAttrChecker> op_checkers_;
     return op_checkers_;
-  };
+  }
   static void GenerateTempVariableName(OperatorBase* op) {
     static std::atomic<size_t> gUniqId(0UL);
@@ -354,7 +354,7 @@
 template <typename OpType, typename ProtoMakerType>
 class OpRegisterHelper {
 public:
-  OpRegisterHelper(const char* op_type) {
+  explicit OpRegisterHelper(const char* op_type) {
     OpRegistry::RegisterOp<OpType, ProtoMakerType>(op_type);
   }
 };
......
@@ -280,7 +280,7 @@ class OperatorWithKernel : public OperatorBase {
     platform::Place place_;
     OpKernelKey() = default;
-    OpKernelKey(const platform::DeviceContext& dev_ctx) {
+    explicit OpKernelKey(const platform::DeviceContext& dev_ctx) {
       place_ = dev_ctx.GetPlace();
     }
......
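Most of the constructor changes in this commit (OpInOutArg, GradOpBuilder, OpKernelKey, MetadataCache, PODDeleter, CPUDeviceContext) simply add the explicit keyword, which is what cpplint's runtime/explicit check asks for on constructors that would otherwise act as implicit conversions. A small, self-contained illustration of the conversion being ruled out; the Key and Lookup names here are hypothetical, not taken from the Paddle code:

    struct Key {
      explicit Key(int id) : id_(id) {}  // without `explicit`, any int would silently convert to Key
      int id_;
    };

    void Lookup(const Key& k) { /* ... */ }

    int main() {
      // Lookup(42);    // would compile if the constructor were implicit; rejected with `explicit`
      Lookup(Key(42));  // the intended, spelled-out construction
      return 0;
    }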
@@ -442,7 +442,8 @@ DEFINE_MATRIX_UNARY_PARAMETER_OP(Clip, TWO_PARAMETER,
 template<class T>
 void BaseMatrixT<T>::clip(T p1, T p2) { applyUnary(unary::Clip<T>(p1, p2)); }
-DEFINE_MATRIX_BINARY_PARAMETER_OP(ClipDerivative, TWO_PARAMETER, a = b < p1 ? 0 : (b > p2 ? 0 : 1));
+DEFINE_MATRIX_BINARY_PARAMETER_OP(ClipDerivative, TWO_PARAMETER,
+                                  a = b < p1 ? 0 : (b > p2 ? 0 : 1));
 template<class T>
 void BaseMatrixT<T>::clipDerivative(BaseMatrixT& b, T p1, T p2) {
   applyBinary(binary::ClipDerivative<T>(p1, p2), b);
......
 add_subdirectory(detail)
-cc_library(memory SRCS memory.cc)
-cc_library(memcpy SRCS memcpy.cc DEPS device_context)
+cc_library(memory SRCS memory.h memory.cc)
+cc_library(memcpy SRCS memcpy.h memcpy.cc DEPS device_context)
 cc_library(paddle_memory
   DEPS
......
@@ -39,7 +39,7 @@ class BuddyAllocator {
 public:
   void* Alloc(size_t unaligned_size);
-  void Free(void*);
+  void Free(void* ptr);
   size_t Used();
 public:
......
@@ -33,17 +33,17 @@ namespace detail {
  */
 class MetadataCache {
 public:
-  MetadataCache(bool uses_gpu);
+  explicit MetadataCache(bool uses_gpu);
 public:
   /*! \brief Load the associated metadata for the specified memory block. */
-  Metadata load(const MemoryBlock*);
+  Metadata load(const MemoryBlock* memory_block);
   /*! \brief Store the associated metadata for the specified memory block. */
-  void store(MemoryBlock*, const Metadata&);
+  void store(MemoryBlock* memory_block, const Metadata& meta_data);
   /*! \brief Indicate that the specified metadata will no longer be used. */
-  void invalidate(MemoryBlock*);
+  void invalidate(MemoryBlock* memory_block);
 public:
   MetadataCache(const MetadataCache&) = delete;
......
@@ -68,7 +68,7 @@ class PODDeleter {
   static_assert(std::is_pod<T>::value, "T must be POD");
 public:
-  PODDeleter(Place place) : place_(place) {}
+  explicit PODDeleter(Place place) : place_(place) {}
   void operator()(T* ptr) { Free(place_, static_cast<void*>(ptr)); }
 private:
......
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 #define EIGEN_USE_GPU
 #include "paddle/framework/op_registry.h"
 #include "paddle/operators/add_op.h"
......
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 #define EIGEN_USE_GPU
 #include "paddle/operators/cross_entropy_op.h"
......
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 #include "paddle/framework/op_registry.h"
 #include "paddle/operators/fill_zeros_like_op.h"
......
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 #define EIGEN_USE_GPU
 #include "paddle/operators/mean_op.h"
......
@@ -47,7 +47,7 @@ public:
     T ig_size = (T)framework::product(IG->dims());
-    EigenVector<T>::Flatten(*IG).device(*(context.GetEigenDevice<Place>())) =
+    EigenVector<T>::Flatten(*IG).device((context.GetEigenDevice<Place>())) =
         EigenScalar<T>::From(*OG) / ig_size;
   }
 };
......
@@ -19,7 +19,7 @@
 namespace paddle {
 namespace operators {
-using namespace paddle::framework;
+using namespace paddle::framework;  // NOLINT
 namespace rnn {
@@ -94,7 +94,7 @@ void InitArgument(const ArgumentName& name, Argument* arg);
 };  // namespace rnn
 // The sequence format in RecurrentOp is Tensor<seq_len, batch_size, dim> now.
-// TODO:
+// TODO(Yan Chunwei):
 // 1. No-padding computing for sequences with indifinite length in one batch.
 // 2. Hierarchical RNN for sequence with sub-sequence.
 // 3. Internal Memory.
@@ -172,11 +172,9 @@ public:
   /**
    * InferShape must be called before Run.
    */
-  virtual void InferShape(const Scope& scope) const override {
-    alg_.InferShape(scope);
-  }
+  void InferShape(const Scope& scope) const override { alg_.InferShape(scope); }
-  virtual void Run(const Scope& scope,
+  void Run(const Scope& scope,
            const platform::DeviceContext& dev_ctx) const override {
     alg_.Run(scope, dev_ctx);
   }
@@ -194,11 +192,9 @@ public:
   /**
    * InferShape must be called before Run.
   */
-  virtual void InferShape(const Scope& scope) const override {
-    alg_.InferShape(scope);
-  }
+  void InferShape(const Scope& scope) const override { alg_.InferShape(scope); }
-  virtual void Run(const Scope& scope,
+  void Run(const Scope& scope,
            const platform::DeviceContext& dev_ctx) const override {
     alg_.Run(scope, dev_ctx);
   }
......
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 #define EIGEN_USE_GPU
 #include "paddle/operators/rowwise_add_op.h"
......
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 #define EIGEN_USE_GPU
 #include "paddle/operators/sgd_op.h"
......
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 #define EIGEN_USE_GPU
 #include "paddle/operators/sigmoid_op.h"
......
@@ -11,6 +11,7 @@
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License. */
 #include "paddle/operators/softmax_op.h"
 namespace paddle {
......
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 #define EIGEN_USE_GPU
 #include "paddle/framework/op_registry.h"
 #include "paddle/operators/softmax_op.h"
......
@@ -40,7 +40,7 @@ class DeviceContext {
 class CPUDeviceContext : public DeviceContext {
 public:
   CPUDeviceContext();
-  CPUDeviceContext(CPUPlace);
+  explicit CPUDeviceContext(CPUPlace);
   virtual ~CPUDeviceContext() {}
   Eigen::DefaultDevice* eigen_device() const;
@@ -55,7 +55,7 @@ class CPUDeviceContext : public DeviceContext {
 class CUDADeviceContext : public DeviceContext {
 public:
-  explicit CUDADeviceContext(GPUPlace);
+  CUDADeviceContext(GPUPlace);  // NOLINT
   virtual ~CUDADeviceContext();
   /*! \brief Wait for all operations completion in the stream. */
@@ -69,10 +69,10 @@ class CUDADeviceContext : public DeviceContext {
   // clang-format off
   /*! \brief Return cublas handle in the device context. */
-  cublasHandle_t cublas_handle ();
+  cublasHandle_t cublas_handle();
   /*! \brief Return cudnn handle in the device context. */
-  cudnnHandle_t cudnn_handle ();
+  cudnnHandle_t cudnn_handle();
   /*! \brief Return curand handle in the device context. */
   curandGenerator_t curand_generator();
......
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 #include <paddle/platform/dynload/cublas.h>
 namespace paddle {
......
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 #include <paddle/platform/dynload/cudnn.h>
 namespace paddle {
......
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
 #include <paddle/platform/dynload/curand.h>
 namespace paddle {
@@ -10,6 +24,7 @@ void *curand_dso_handle;
 #define DEFINE_WRAP(__name) DynLoad__##__name __name
 CURAND_RAND_ROUTINE_EACH(DEFINE_WRAP);
-}
-}
-}
\ No newline at end of file
+}  // namespace dynload
+}  // namespace platform
+}  // namespace paddle
@@ -32,7 +32,7 @@ struct CPUPlace {
 struct GPUPlace {
   GPUPlace() : GPUPlace(0) {}
-  GPUPlace(int d) : device(d) {}
+  GPUPlace(int d) : device(d) {}  // NOLINT
   // needed for variant equality comparison
   inline bool operator==(const GPUPlace &o) const { return device == o.device; }
......
@@ -39,8 +39,8 @@ public:
   // size_ is 0.
   Piece();
   Piece(const char* d, size_t n);
-  Piece(const char* d);
-  Piece(const std::string& s);
+  Piece(const char* d);  // NOLINT
+  Piece(const std::string& s);  // NOLINT
   const char* data() const { return data_; }
   size_t len() const { return size_; }
......
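Where implicit conversion is the intent, the commit keeps the converting constructor and appends // NOLINT so cpplint stays quiet on that one line: Piece converts from const char* and std::string, GPUPlace from an int device id, and CUDADeviceContext from a GPUPlace. A small sketch of the same pattern; the PieceLike and Print names below are illustrative, not from the Paddle sources:

    #include <string>

    class PieceLike {
     public:
      PieceLike(const char* d) : data_(d) {}         // NOLINT -- implicit by design
      PieceLike(const std::string& s) : data_(s) {}  // NOLINT
     private:
      std::string data_;
    };

    void Print(const PieceLike& p) { /* ... */ }

    int main() {
      Print("hello");                // compiles only because the constructor stays implicit
      Print(std::string("world"));
      return 0;
    }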