Unverified commit 9be41447, authored by chentianyu03, committed by GitHub

Copy boost optional to Paddle (#34780)

* copy boost optional.hpp to paddle

* copy boost optional.hpp to paddle

* move directories

* del fluid/utils

* modify .hpp to .h

* move directories

* modify to paddle::optional

* add modification description

* format code style for the files in paddle/utils

* format code style
Parent f1c1d9e0
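The replacement is mechanical: `boost::optional`/`boost::none` become `paddle::optional`/`paddle::none`, backed by headers copied into `paddle/utils/`. A minimal usage sketch, assuming the include paths `paddle/utils/optional.h` and `paddle/utils/none.h` shown in the hunks below (the demo function itself is hypothetical):

```cpp
#include <iostream>

#include "paddle/utils/none.h"
#include "paddle/utils/optional.h"

paddle::optional<int> ParsePositive(int v) {
  if (v > 0) return v;  // engaged: carries a value
  return paddle::none;  // disengaged: "not set"
}

int main() {
  auto a = ParsePositive(3);
  auto b = ParsePositive(-1);
  if (a != paddle::none) std::cout << *a << std::endl;  // prints 3
  std::cout << (b == paddle::none) << std::endl;        // prints 1
  return 0;
}
```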
@@ -36,8 +36,8 @@ static inline bool SeqOnlyAllReduceOps(const BuildStrategy &strategy) {
          !strategy.enable_parallel_graph_;
 }
 
-static inline void ConvertDefaultValue(boost::optional<bool> *default_value) {
-  if (*default_value == boost::none) {
+static inline void ConvertDefaultValue(paddle::optional<bool> *default_value) {
+  if (*default_value == paddle::none) {
     *default_value = true;
   }
 }
@@ -247,7 +247,7 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder {
     }
   }
 
-  void AppendPassWithCheck(const boost::optional<bool> &append_pass,
+  void AppendPassWithCheck(const paddle::optional<bool> &append_pass,
                            const std::string &pass_name) {
     AppendPassWithCheck(append_pass == true, pass_name);
   }
......
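`BuildStrategy` uses `optional<bool>` as a tri-state flag: unset (`paddle::none`), explicitly true, or explicitly false. `ConvertDefaultValue` turns "unset" into the default `true`, and a comparison such as `append_pass == true` is simply false while the flag is still unset. A minimal sketch of that behaviour, assuming the headers from this PR (the standalone demo is illustrative, not Paddle code):

```cpp
#include <cassert>

#include "paddle/utils/none.h"
#include "paddle/utils/optional.h"

static inline void ConvertDefaultValue(paddle::optional<bool> *default_value) {
  if (*default_value == paddle::none) {
    *default_value = true;  // unset -> fall back to the default
  }
}

int main() {
  paddle::optional<bool> flag{paddle::none};  // the user never set the flag
  assert(!(flag == true));  // a disengaged optional never equals a value
  ConvertDefaultValue(&flag);
  assert(flag == true);     // now explicitly true
  return 0;
}
```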
@@ -112,8 +112,8 @@ struct BuildStrategy {
   bool enable_auto_fusion_{false};
   // Fuse_all_optimizer_ops and fuse_all_reduce_ops require that gradients
   // should not be sparse types
-  boost::optional<bool> fuse_all_optimizer_ops_{false};
-  boost::optional<bool> fuse_all_reduce_ops_{boost::none};
+  paddle::optional<bool> fuse_all_optimizer_ops_{false};
+  paddle::optional<bool> fuse_all_reduce_ops_{paddle::none};
   // fuse_relu_depthwise_conv can fuse the `relu ->
   // depthwise_conv`
   bool fuse_relu_depthwise_conv_{false};
@@ -121,7 +121,7 @@ struct BuildStrategy {
   // faster. Because fusing broadcast OP equals delaying the execution of all
   // broadcast Ops, in this case, all nccl streams are used only for reduce
   // operations for a period of time.
-  boost::optional<bool> fuse_broadcast_ops_{boost::none};
+  paddle::optional<bool> fuse_broadcast_ops_{paddle::none};
   // replace batch_norm with sync_batch_norm.
   bool sync_batch_norm_{false};
@@ -135,7 +135,7 @@ struct BuildStrategy {
   // By default, memory_optimize would be opened if gc is disabled, and
   // be closed if gc is enabled.
   // Users can forcely enable/disable memory_optimize by setting True/False.
-  boost::optional<bool> memory_optimize_{boost::none};
+  paddle::optional<bool> memory_optimize_{paddle::none};
   // Turn on inplace by default.
   bool enable_inplace_{true};
......
@@ -74,11 +74,11 @@ bool IsReachable(ir::Graph* graph, Node* from, Node* to) {
 }
 
 template <typename T>
-boost::optional<T> HasAttribute(const Node& op, const std::string& attr) {
+paddle::optional<T> HasAttribute(const Node& op, const std::string& attr) {
   if (op.Op()->HasAttr(attr))
     return BOOST_GET_CONST(T, op.Op()->GetAttr(attr));
   else
-    return boost::none;
+    return paddle::none;
 }
 
 ResidualConnectionMKLDNNFusePass::ResidualConnectionMKLDNNFusePass() {
......
@@ -40,7 +40,7 @@ using GraphWithStats = std::pair<ir::Graph*, int>;
 void CorrectGraphEdges(Graph* graph, Node* from, Node* to);
 bool IsReachable(ir::Graph* graph, Node* from, Node* to);
-boost::optional<Node*> HasBias(const Node& op, const std::string& bias_name);
+paddle::optional<Node*> HasBias(const Node& op, const std::string& bias_name);
 
 class ResidualConnectionMKLDNNFusePass : public FusePassBase {
  private:
......
@@ -27,6 +27,8 @@ limitations under the License. */
 #include "paddle/fluid/framework/tensor_util.h"
 #include "paddle/fluid/memory/malloc.h"
 #include "paddle/fluid/memory/memcpy.h"
+#include "paddle/utils/none.h"
+#include "paddle/utils/optional.h"
 
 namespace paddle {
 namespace framework {
@@ -195,10 +197,10 @@ class Vector {
   std::mutex &Mutex() const { return mtx_; }
 
-  boost::optional<platform::CUDAPlace> CUDAPlace() const {
+  paddle::optional<platform::CUDAPlace> CUDAPlace() const {
     return gpu_ == nullptr
-               ? boost::none
-               : boost::optional<platform::CUDAPlace>(
+               ? paddle::none
+               : paddle::optional<platform::CUDAPlace>(
                      BOOST_GET_CONST(platform::CUDAPlace, gpu_->place()));
   }
@@ -389,7 +391,7 @@ class Vector {
     auto &mtx = m_.Data().Mutex();
     std::lock_guard<std::mutex> guard(mtx);
     auto cuda_place = m_.Data().CUDAPlace();
-    if (cuda_place == boost::none ||
+    if (cuda_place == paddle::none ||
         cuda_place == BOOST_GET(platform::CUDAPlace, place)) {
       return m_.Data().CUDAData(place);
     }
@@ -405,7 +407,7 @@
     auto &mtx = m_.Data().Mutex();
     std::lock_guard<std::mutex> guard(mtx);
     auto cuda_place = m_.Data().CUDAPlace();
-    if (cuda_place == boost::none ||
+    if (cuda_place == paddle::none ||
         cuda_place == BOOST_GET(platform::CUDAPlace, place)) {
       return m_.MutableData()->CUDAMutableData(place);
     }
......
@@ -22,6 +22,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_version_proto.h"
 #include "paddle/fluid/platform/enforce.h"
+#include "paddle/utils/none.h"
 
 namespace paddle {
 namespace framework {
@@ -42,7 +43,7 @@ using OpAttrVariantT =
                   std::vector<int32_t>,     /* AttrType::INTS */
                   std::vector<int64_t>,     /* AttrType::LONGS */
                   std::vector<std::string>, /* AttrType::STRINGS */
-                  boost::none_t             /* None */
+                  paddle::none_t            /* None */
                   >;
 
 struct OpUpdateInfo {
@@ -51,7 +52,7 @@ struct OpUpdateInfo {
 struct OpAttrInfo : OpUpdateInfo {
   OpAttrInfo(const std::string& name, const std::string& remark,
-             const OpAttrVariantT& default_value = boost::none)
+             const OpAttrVariantT& default_value = paddle::none)
       : name_{name}, default_value_{default_value}, remark_{remark} {}
 
   const std::string& name() const { return name_; }
......
@@ -161,5 +161,5 @@ REGISTER_OP_VERSION(flip)
         R"ROC(Upgrade flip, add new attr [axis] and delete attr [dims].)ROC",
         paddle::framework::compatible::OpVersionDesc()
             .NewAttr("axis", "The added attr 'axis' doesn't set default value.",
-                     boost::none)
+                     paddle::none)
             .DeleteAttr("dims", "The attr 'dims' is deleted."));
@@ -137,7 +137,7 @@ class ConcatPrimitiveFactory {
  private:
   std::vector<memory::desc> srcs_d;
   std::vector<mkldnn::memory> srcs;
-  boost::optional<mkldnn::memory> dst_mem;
+  paddle::optional<mkldnn::memory> dst_mem;
 };
 
 template <typename T>
......
@@ -893,7 +893,7 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
           fuse_residual_conn, propagation, output_shift_scale, sum_scale);
     } else {
       conv_pd = handler->AcquireConvolutionPrimitiveDescriptor(
-          src_md, weights_md, boost::none, dst_md, strides, dilations,
+          src_md, weights_md, paddle::none, dst_md, strides, dilations,
           paddings, mkldnn_engine, fuse_activation, fuse_alpha, fuse_beta,
           fuse_residual_conn, propagation, output_shift_scale, sum_scale);
     }
......
@@ -89,7 +89,8 @@ class FCPrimitiveFactory {
     // descriptor has been divided into separate cases, based on the number
     // of input dimensions.
     size_t input_dim_num = input->dims().size();
-    boost::optional<mkldnn::inner_product_forward::primitive_desc> fc_prim_desc;
+    paddle::optional<mkldnn::inner_product_forward::primitive_desc>
+        fc_prim_desc;
     memory::desc usr_weights_desc = {};
     switch (input_dim_num) {
       case 2:
@@ -545,11 +546,11 @@
  private:
   const mkldnn::engine& engine_;
-  boost::optional<memory> input_;
-  boost::optional<memory> output_;
+  paddle::optional<memory> input_;
+  paddle::optional<memory> output_;
   std::shared_ptr<memory> bias_;
   std::shared_ptr<memory> weights_;
-  boost::optional<inner_product_forward> fc_;
+  paddle::optional<inner_product_forward> fc_;
 };
 
 // Attempt to fetch cached primitive factory based on provided parameters
......
@@ -290,10 +290,10 @@ class MulPrimitiveFactory {
   }
 
   const mkldnn::engine &engine_;
-  boost::optional<memory> x_input_;
-  boost::optional<memory> y_input_;
-  boost::optional<memory> output_;
-  boost::optional<inner_product_forward> mul_;
+  paddle::optional<memory> x_input_;
+  paddle::optional<memory> y_input_;
+  paddle::optional<memory> output_;
+  paddle::optional<inner_product_forward> mul_;
   static constexpr bool is_int8_ =
       std::is_same<XT, int8_t>::value || std::is_same<XT, uint8_t>::value;
 };
......
@@ -123,7 +123,7 @@ class SeqConcatGradKernel : public framework::OpKernel<T> {
     }
 
     std::vector<framework::Tensor> sliced_x;
-    std::vector<boost::optional<framework::Tensor>> sliced_dx;
+    std::vector<paddle::optional<framework::Tensor>> sliced_dx;
 
     for (size_t i = 1; i < xs[0]->lod()[0].size(); ++i) {
       for (size_t j = 0; j < xs.size(); ++j) {
@@ -145,7 +145,7 @@ class SeqConcatGradKernel : public framework::OpKernel<T> {
         if (dx) {
          sliced_dx.emplace_back(dx->Slice(prev_lod, next_lod));
         } else {
-          sliced_dx.emplace_back(boost::none);
+          sliced_dx.emplace_back(paddle::none);
         }
       }
     }
......
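The kernel above keeps one `paddle::optional` per slice so a missing gradient slot can stay empty. A small sketch of that container pattern, with `int` standing in for `framework::Tensor` (illustrative only):

```cpp
#include <iostream>
#include <vector>

#include "paddle/utils/none.h"
#include "paddle/utils/optional.h"

int main() {
  std::vector<paddle::optional<int>> sliced;
  sliced.emplace_back(7);             // slot with a value
  sliced.emplace_back(paddle::none);  // empty slot, e.g. gradient not required
  for (const auto &s : sliced) {
    if (s == paddle::none) {
      std::cout << "skip" << std::endl;
    } else {
      std::cout << *s << std::endl;
    }
  }
  return 0;
}
```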
@@ -1426,7 +1426,7 @@ class ConvMKLDNNTemplateHandler : public MKLDNNHandler {
   std::shared_ptr<typename forward_t::primitive_desc>
   AcquireConvolutionPrimitiveDescriptor(
       const mkldnn::memory::desc& src, const mkldnn::memory::desc& weights,
-      boost::optional<const mkldnn::memory::desc&> bias,
+      paddle::optional<const mkldnn::memory::desc&> bias,
       const mkldnn::memory::desc& dst, const std::vector<int64_t>& strides,
       const std::vector<int64_t>& dilations,
       const std::vector<int64_t>& paddings, const mkldnn::engine& engine,
......
@@ -73,6 +73,7 @@ limitations under the License. */
 #include "paddle/fluid/platform/profiler.h"
 #include "paddle/fluid/pybind/cuda_streams_py.h"
 #include "paddle/fluid/pybind/io.h"
+#include "paddle/utils/none.h"
 #ifdef PADDLE_WITH_ASCEND
 #include "paddle/fluid/pybind/ascend_wrapper_py.h"
 #endif
@@ -2910,7 +2911,7 @@ All parameter, weight, gradient are variables in Paddle.
       .def_property("fuse_broadcast_ops",
                     [](const BuildStrategy &self) {
                       return self.fuse_broadcast_ops_ == true ||
-                             self.fuse_broadcast_ops_ == boost::none;
+                             self.fuse_broadcast_ops_ == paddle::none;
                     },
                     [](BuildStrategy &self, bool b) {
                       PADDLE_ENFORCE_NE(self.IsFinalized(), true,
@@ -2940,7 +2941,7 @@ All parameter, weight, gradient are variables in Paddle.
       .def_property("fuse_all_optimizer_ops",
                     [](const BuildStrategy &self) {
                       return self.fuse_all_optimizer_ops_ == true ||
-                             self.fuse_all_optimizer_ops_ == boost::none;
+                             self.fuse_all_optimizer_ops_ == paddle::none;
                     },
                     [](BuildStrategy &self, bool b) {
                       PADDLE_ENFORCE_NE(self.IsFinalized(), true,
@@ -2989,7 +2990,7 @@ All parameter, weight, gradient are variables in Paddle.
          [](BuildStrategy &self, const py::handle &value) {
            auto *py_obj = value.ptr();
            if (py_obj == nullptr || py_obj == Py_None) {
-             self.memory_optimize_ = boost::none;
+             self.memory_optimize_ = paddle::none;
            } else if (PyBool_Check(py_obj)) {
              self.memory_optimize_ = (py_obj == Py_True);
            } else {
@@ -3046,7 +3047,7 @@ All parameter, weight, gradient are variables in Paddle.
           "fuse_all_reduce_ops",
           [](const BuildStrategy &self) {
             return self.fuse_all_reduce_ops_ == true ||
-                   self.fuse_all_reduce_ops_ == boost::none;
+                   self.fuse_all_reduce_ops_ == paddle::none;
           },
           [](BuildStrategy &self, bool b) { self.fuse_all_reduce_ops_ = b; })
       .def_property("enable_backward_optimizer_op_deps",
......
@@ -44,7 +44,7 @@ namespace reader = operators::reader;
 
 // Check whether the tensor shape matches the VarDesc shape
 // Return the different shape if exists
-static boost::optional<std::vector<int64_t>> DiffTensorShapeWithVarDesc(
+static paddle::optional<std::vector<int64_t>> DiffTensorShapeWithVarDesc(
     const framework::LoDTensor &tensor, const framework::VarDesc &var_desc,
     size_t num_places) {
   auto tensor_shape = tensor.dims();
@@ -56,7 +56,7 @@ static boost::optional<std::vector<int64_t>> DiffTensorShapeWithVarDesc(
     if (desc_shape.size() != 0) {  // Tensor rank = 0 but desc does not match
       return framework::vectorize<int64_t>(tensor_shape);
     } else {
-      return boost::none;
+      return paddle::none;
     }
   }
@@ -92,7 +92,7 @@ static boost::optional<std::vector<int64_t>> DiffTensorShapeWithVarDesc(
     }
   }
 
-  return boost::none;
+  return paddle::none;
 }
 
 static const std::shared_ptr<reader::LoDTensorBlockingQueue> &GetQueue(
......
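`DiffTensorShapeWithVarDesc` shows the other common pattern: return the offending value, or `paddle::none` when there is nothing to report. A self-contained sketch of that pattern with plain `std::vector` shapes (the helper below is hypothetical, not Paddle API):

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

#include "paddle/utils/none.h"
#include "paddle/utils/optional.h"

static paddle::optional<std::vector<int64_t>> DiffShape(
    const std::vector<int64_t> &actual, const std::vector<int64_t> &expected) {
  // -1 in `expected` means "any size", mirroring how a dynamic dim is marked.
  if (actual.size() != expected.size()) return actual;
  for (size_t i = 0; i < actual.size(); ++i) {
    if (expected[i] >= 0 && actual[i] != expected[i]) return actual;
  }
  return paddle::none;  // shapes are compatible
}

int main() {
  auto diff = DiffShape({4, 3}, {-1, 3});
  std::cout << (diff == paddle::none) << std::endl;          // prints 1: no mismatch
  diff = DiffShape({4, 5}, {-1, 3});
  if (diff != paddle::none) std::cout << (*diff)[1] << std::endl;  // prints 5
  return 0;
}
```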
// This file copy from boost/any.hpp and boost version: 1.41.0
// Modified the following points:
// 1. modify namespace from boost::any to paddle::any
// 2. remove the depending boost header files
// 3. remove/modify some macro
// See http://www.boost.org/libs/any for Documentation.
......
// where: tested with BCC 5.5, MSVC 6.0, and g++ 2.95

#include <algorithm>
#include <type_traits>
#include <typeinfo>

// See boost/python/type_id.hpp
// TODO: add BOOST_TYPEID_COMPARE_BY_NAME to config.hpp
#if (defined(__GNUC__) && __GNUC__ >= 3) || defined(_AIX) || \
    (defined(__sgi) && defined(__host_mips)) ||              \
    (defined(__hpux) && defined(__HP_aCC)) ||                \
    (defined(linux) && defined(__INTEL_COMPILER) && defined(__ICC))
#define BOOST_AUX_ANY_TYPE_ID_NAME
#include <cstring>
#endif

namespace paddle {
class any {
 public:  // structors
  any() : content(0) {}

  template <typename ValueType>
  any(const ValueType &value) : content(new holder<ValueType>(value)) {}

  any(const any &other) : content(other.content ? other.content->clone() : 0) {}

  ~any() { delete content; }

 public:  // modifiers
  any &swap(any &rhs) {
    std::swap(content, rhs.content);
    return *this;
  }

  template <typename ValueType>
  any &operator=(const ValueType &rhs) {
    any(rhs).swap(*this);
    return *this;
  }

  any &operator=(any rhs) {
    rhs.swap(*this);
    return *this;
  }

 public:  // queries
  bool empty() const { return !content; }

  const std::type_info &type() const {
    return content ? content->type() : typeid(void);
  }

 public:  // types (public so any_cast can be non-friend)
  class placeholder {
   public:  // structors
    virtual ~placeholder() {}

   public:  // queries
    virtual const std::type_info &type() const = 0;

    virtual placeholder *clone() const = 0;
  };

  template <typename ValueType>
  class holder : public placeholder {
   public:  // structors
    holder(const ValueType &value) : held(value) {}

   public:  // queries
    virtual const std::type_info &type() const { return typeid(ValueType); }

    virtual placeholder *clone() const { return new holder(held); }

   public:  // representation
    ValueType held;

   private:  // intentionally left unimplemented
    holder &operator=(const holder &);
  };

 public:  // representation (public so any_cast can be non-friend)
  placeholder *content;
};

class bad_any_cast : public std::bad_cast {
 public:
  virtual const char *what() const throw() {
    return "paddle::bad_any_cast: "
           "failed conversion using paddle::any_cast";
  }
};

template <typename ValueType>
ValueType *any_cast(any *operand) {
  return operand &&
#ifdef BOOST_AUX_ANY_TYPE_ID_NAME
                 std::strcmp(operand->type().name(),
                             typeid(ValueType).name()) == 0
#else
                 operand->type() == typeid(ValueType)
#endif
             ? &static_cast<any::holder<ValueType> *>(operand->content)->held
             : 0;
}

template <typename ValueType>
inline const ValueType *any_cast(const any *operand) {
  return any_cast<ValueType>(const_cast<any *>(operand));
}

template <typename ValueType>
ValueType any_cast(any &operand) {
  typedef typename std::remove_reference<ValueType>::type nonref;

  // If 'nonref' is still reference type, it means the user has not
  // specialized 'remove_reference'.

  // Please use BOOST_BROKEN_COMPILER_TYPE_TRAITS_SPECIALIZATION macro
  // to generate specialization of remove_reference for your class
  // See type traits library documentation for details
  static_assert(!std::is_reference<nonref>::value,
                "!std::is_reference<nonref>::value");

  nonref *result = any_cast<nonref>(&operand);
  if (!result) throw bad_any_cast();
  return *result;
}

template <typename ValueType>
inline ValueType any_cast(const any &operand) {
  typedef typename std::remove_reference<ValueType>::type nonref;

  // The comment in the above version of 'any_cast' explains when this
  // assert is fired and what to do.
  static_assert(!std::is_reference<nonref>::value,
                "!std::is_reference<nonref>::value");

  return any_cast<const nonref &>(const_cast<any &>(operand));
}

// Note: The "unsafe" versions of any_cast are not part of the
// public interface and may be removed at any time. They are
// required where we know what type is stored in the any and can't
// use typeid() comparison, e.g., when our types may travel across
// different shared libraries.
template <typename ValueType>
inline ValueType *unsafe_any_cast(any *operand) {
  return &static_cast<any::holder<ValueType> *>(operand->content)->held;
}

template <typename ValueType>
inline const ValueType *unsafe_any_cast(const any *operand) {
  return unsafe_any_cast<ValueType>(const_cast<any *>(operand));
}
}
// Copyright Kevlin Henney, 2000, 2001, 2002. All rights reserved.
......
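For reference, a minimal usage sketch of the copied `any`/`any_cast`; the include path `paddle/utils/any.h` is assumed from the directory layout used elsewhere in this PR:

```cpp
#include <iostream>
#include <string>

#include "paddle/utils/any.h"

int main() {
  paddle::any box = std::string("hello");

  // Pointer form: returns a null pointer on type mismatch instead of throwing.
  std::string *s = paddle::any_cast<std::string>(&box);
  if (s) std::cout << *s << std::endl;  // prints "hello"

  try {
    paddle::any_cast<int>(box);  // wrong type: throws paddle::bad_any_cast
  } catch (const paddle::bad_any_cast &e) {
    std::cout << e.what() << std::endl;
  }
  return 0;
}
```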
// This file copy from boost/none_t.hpp and boost/none.hpp and boost version:
// 1.41.0
// Modified the following points:
// 1. modify namespace from boost::none to paddle::none
// 2. modify namespace from boost::none_t to paddle::none_t
// Copyright (C) 2003, Fernando Luis Cacciola Carballal.
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/optional for documentation.
//
// You are welcome to contact the author at:
// fernando_cacciola@hotmail.com
//
#ifndef PADDLE_NONE_17SEP2003_HPP
#define PADDLE_NONE_17SEP2003_HPP
namespace paddle {
namespace detail {
struct none_helper {};
}
typedef int detail::none_helper::*none_t;
} // namespace paddle
// NOTE: Borland users have to include this header outside any precompiled
// headers
// (bcc<=5.64 cannot include instance data in a precompiled header)
// -- * To be verified, now that there's no unnamed namespace
namespace paddle {
none_t const none = ((none_t)0);
} // namespace paddle
#endif
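The trick above: `none_t` is a pointer-to-data-member type and `none` is its null value, so it is a distinct tag that overload resolution can tell apart from any real `T`; that is how `optional` selects its disengaged state. A tiny illustrative sketch:

```cpp
#include <iostream>

#include "paddle/utils/none.h"

int main() {
  paddle::none_t tag = paddle::none;  // a null pointer-to-member of none_helper
  std::cout << static_cast<bool>(tag) << std::endl;  // prints 0: carries no value
  // Because none_t is unrelated to int/bool/etc., a constructor overload that
  // takes none_t can unambiguously mean "construct the empty optional".
  return 0;
}
```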
This diff has been collapsed.