Unverified commit 9be41447 authored by chentianyu03, committed by GitHub

Copy boost optional to Paddle (#34780)

* copy boost optional.hpp to paddle

* copy boost optional.hpp to paddle

* move directories

* del fluid/utils

* modify .hpp to .h

* move directories

* modify to paddle::optional

* add modification description

* format code style for the files in paddle/utils

* format code style
Parent f1c1d9e0
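The hunks below swap boost::optional / boost::none for the vendored paddle::optional / paddle::none (declared in paddle/utils/optional.h and paddle/utils/none.h) across framework, operator, and pybind code. A minimal sketch of the call-site pattern the hunks rely on — the helper and flag names here are made up, and it only assumes the vendored type keeps boost::optional's comparisons against a plain value and against none, which the hunks themselves exercise:

#include "paddle/utils/none.h"
#include "paddle/utils/optional.h"

// Hypothetical helper mirroring ConvertDefaultValue/AppendPassWithCheck below:
// an unset tri-state flag falls back to "enabled".
bool ResolveFlag(const paddle::optional<bool> &flag) {
  if (flag == paddle::none) {  // unset: the caller made no choice
    return true;
  }
  return flag == true;  // set: compare the held bool directly
}

paddle::optional<bool> fuse_ops = paddle::none;  // tri-state: unset / true / false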
@@ -36,8 +36,8 @@ static inline bool SeqOnlyAllReduceOps(const BuildStrategy &strategy) {
          !strategy.enable_parallel_graph_;
 }
 
-static inline void ConvertDefaultValue(boost::optional<bool> *default_value) {
-  if (*default_value == boost::none) {
+static inline void ConvertDefaultValue(paddle::optional<bool> *default_value) {
+  if (*default_value == paddle::none) {
     *default_value = true;
   }
 }
@@ -247,7 +247,7 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder {
     }
   }
 
-  void AppendPassWithCheck(const boost::optional<bool> &append_pass,
+  void AppendPassWithCheck(const paddle::optional<bool> &append_pass,
                            const std::string &pass_name) {
     AppendPassWithCheck(append_pass == true, pass_name);
   }
......
@@ -112,8 +112,8 @@ struct BuildStrategy {
   bool enable_auto_fusion_{false};
   // Fuse_all_optimizer_ops and fuse_all_reduce_ops require that gradients
   // should not be sparse types
-  boost::optional<bool> fuse_all_optimizer_ops_{false};
-  boost::optional<bool> fuse_all_reduce_ops_{boost::none};
+  paddle::optional<bool> fuse_all_optimizer_ops_{false};
+  paddle::optional<bool> fuse_all_reduce_ops_{paddle::none};
   // fuse_relu_depthwise_conv can fuse the `relu ->
   // depthwise_conv`
   bool fuse_relu_depthwise_conv_{false};
@@ -121,7 +121,7 @@ struct BuildStrategy {
   // faster. Because fusing broadcast OP equals delaying the execution of all
   // broadcast Ops, in this case, all nccl streams are used only for reduce
   // operations for a period of time.
-  boost::optional<bool> fuse_broadcast_ops_{boost::none};
+  paddle::optional<bool> fuse_broadcast_ops_{paddle::none};
   // replace batch_norm with sync_batch_norm.
   bool sync_batch_norm_{false};
@@ -135,7 +135,7 @@ struct BuildStrategy {
   // By default, memory_optimize would be opened if gc is disabled, and
   // be closed if gc is enabled.
   // Users can forcely enable/disable memory_optimize by setting True/False.
-  boost::optional<bool> memory_optimize_{boost::none};
+  paddle::optional<bool> memory_optimize_{paddle::none};
   // Turn on inplace by default.
   bool enable_inplace_{true};
......
@@ -74,11 +74,11 @@ bool IsReachable(ir::Graph* graph, Node* from, Node* to) {
 }
 
 template <typename T>
-boost::optional<T> HasAttribute(const Node& op, const std::string& attr) {
+paddle::optional<T> HasAttribute(const Node& op, const std::string& attr) {
   if (op.Op()->HasAttr(attr))
     return BOOST_GET_CONST(T, op.Op()->GetAttr(attr));
   else
-    return boost::none;
+    return paddle::none;
 }
 
 ResidualConnectionMKLDNNFusePass::ResidualConnectionMKLDNNFusePass() {
......
@@ -40,7 +40,7 @@ using GraphWithStats = std::pair<ir::Graph*, int>;
 void CorrectGraphEdges(Graph* graph, Node* from, Node* to);
 bool IsReachable(ir::Graph* graph, Node* from, Node* to);
-boost::optional<Node*> HasBias(const Node& op, const std::string& bias_name);
+paddle::optional<Node*> HasBias(const Node& op, const std::string& bias_name);
 
 class ResidualConnectionMKLDNNFusePass : public FusePassBase {
  private:
......
@@ -27,6 +27,8 @@ limitations under the License. */
 #include "paddle/fluid/framework/tensor_util.h"
 #include "paddle/fluid/memory/malloc.h"
 #include "paddle/fluid/memory/memcpy.h"
+#include "paddle/utils/none.h"
+#include "paddle/utils/optional.h"
 
 namespace paddle {
 namespace framework {
@@ -195,10 +197,10 @@ class Vector {
   std::mutex &Mutex() const { return mtx_; }
 
-  boost::optional<platform::CUDAPlace> CUDAPlace() const {
+  paddle::optional<platform::CUDAPlace> CUDAPlace() const {
     return gpu_ == nullptr
-               ? boost::none
-               : boost::optional<platform::CUDAPlace>(
+               ? paddle::none
+               : paddle::optional<platform::CUDAPlace>(
                      BOOST_GET_CONST(platform::CUDAPlace, gpu_->place()));
   }
@@ -389,7 +391,7 @@ class Vector {
     auto &mtx = m_.Data().Mutex();
     std::lock_guard<std::mutex> guard(mtx);
     auto cuda_place = m_.Data().CUDAPlace();
-    if (cuda_place == boost::none ||
+    if (cuda_place == paddle::none ||
         cuda_place == BOOST_GET(platform::CUDAPlace, place)) {
       return m_.Data().CUDAData(place);
     }
@@ -405,7 +407,7 @@ class Vector {
     auto &mtx = m_.Data().Mutex();
     std::lock_guard<std::mutex> guard(mtx);
     auto cuda_place = m_.Data().CUDAPlace();
-    if (cuda_place == boost::none ||
+    if (cuda_place == paddle::none ||
         cuda_place == BOOST_GET(platform::CUDAPlace, place)) {
       return m_.MutableData()->CUDAMutableData(place);
     }
......
@@ -22,6 +22,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/op_version_proto.h"
 #include "paddle/fluid/platform/enforce.h"
+#include "paddle/utils/none.h"
 
 namespace paddle {
 namespace framework {
@@ -42,7 +43,7 @@ using OpAttrVariantT =
                    std::vector<int32_t>,     /* AttrType::INTS */
                    std::vector<int64_t>,     /* AttrType::LONGS */
                    std::vector<std::string>, /* AttrType::STRINGS */
-                   boost::none_t             /* None */
+                   paddle::none_t            /* None */
                    >;
 
 struct OpUpdateInfo {
@@ -51,7 +52,7 @@ struct OpUpdateInfo {
 struct OpAttrInfo : OpUpdateInfo {
   OpAttrInfo(const std::string& name, const std::string& remark,
-             const OpAttrVariantT& default_value = boost::none)
+             const OpAttrVariantT& default_value = paddle::none)
       : name_{name}, default_value_{default_value}, remark_{remark} {}
 
   const std::string& name() const { return name_; }
......
@@ -161,5 +161,5 @@ REGISTER_OP_VERSION(flip)
         R"ROC(Upgrade flip, add new attr [axis] and delete attr [dims].)ROC",
         paddle::framework::compatible::OpVersionDesc()
             .NewAttr("axis", "The added attr 'axis' doesn't set default value.",
-                     boost::none)
+                     paddle::none)
             .DeleteAttr("dims", "The attr 'dims' is deleted."));
@@ -137,7 +137,7 @@ class ConcatPrimitiveFactory {
  private:
   std::vector<memory::desc> srcs_d;
   std::vector<mkldnn::memory> srcs;
-  boost::optional<mkldnn::memory> dst_mem;
+  paddle::optional<mkldnn::memory> dst_mem;
 };
 
 template <typename T>
......
@@ -893,7 +893,7 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
             fuse_residual_conn, propagation, output_shift_scale, sum_scale);
       } else {
         conv_pd = handler->AcquireConvolutionPrimitiveDescriptor(
-            src_md, weights_md, boost::none, dst_md, strides, dilations,
+            src_md, weights_md, paddle::none, dst_md, strides, dilations,
             paddings, mkldnn_engine, fuse_activation, fuse_alpha, fuse_beta,
             fuse_residual_conn, propagation, output_shift_scale, sum_scale);
       }
......
@@ -89,7 +89,8 @@ class FCPrimitiveFactory {
     // descriptor has been divided into separate cases, based on the number
     // of input dimensions.
     size_t input_dim_num = input->dims().size();
-    boost::optional<mkldnn::inner_product_forward::primitive_desc> fc_prim_desc;
+    paddle::optional<mkldnn::inner_product_forward::primitive_desc>
+        fc_prim_desc;
     memory::desc usr_weights_desc = {};
     switch (input_dim_num) {
       case 2:
@@ -545,11 +546,11 @@ class FCPrimitiveFactory {
  private:
   const mkldnn::engine& engine_;
-  boost::optional<memory> input_;
-  boost::optional<memory> output_;
+  paddle::optional<memory> input_;
+  paddle::optional<memory> output_;
   std::shared_ptr<memory> bias_;
   std::shared_ptr<memory> weights_;
-  boost::optional<inner_product_forward> fc_;
+  paddle::optional<inner_product_forward> fc_;
 };
 
 // Attempt to fetch cached primitive factory based on provided parameters
......
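The FC factory above and the Mul factory below hold their oneDNN memories and primitives in paddle::optional members so they can stay empty until the first run. A hedged sketch of that lazy-initialization pattern (the class, member, and FakePrimitive names are illustrative, not the factories' real API), using only the operations the diff itself exercises — comparison with paddle::none, assignment, and dereference:

#include "paddle/utils/none.h"
#include "paddle/utils/optional.h"

// Illustrative stand-in for an expensive-to-build oneDNN primitive.
struct FakePrimitive { int id; };

class LazyFactory {
 public:
  const FakePrimitive &Get() {
    if (prim_ == paddle::none) {  // not built yet
      prim_ = FakePrimitive{42};  // build once, cache it in the optional
    }
    return *prim_;                // dereference the cached value
  }

 private:
  paddle::optional<FakePrimitive> prim_;  // default-constructed: empty
};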
@@ -290,10 +290,10 @@ class MulPrimitiveFactory {
   }
 
   const mkldnn::engine &engine_;
-  boost::optional<memory> x_input_;
-  boost::optional<memory> y_input_;
-  boost::optional<memory> output_;
-  boost::optional<inner_product_forward> mul_;
+  paddle::optional<memory> x_input_;
+  paddle::optional<memory> y_input_;
+  paddle::optional<memory> output_;
+  paddle::optional<inner_product_forward> mul_;
   static constexpr bool is_int8_ =
       std::is_same<XT, int8_t>::value || std::is_same<XT, uint8_t>::value;
 };
......
@@ -123,7 +123,7 @@ class SeqConcatGradKernel : public framework::OpKernel<T> {
     }
 
     std::vector<framework::Tensor> sliced_x;
-    std::vector<boost::optional<framework::Tensor>> sliced_dx;
+    std::vector<paddle::optional<framework::Tensor>> sliced_dx;
 
     for (size_t i = 1; i < xs[0]->lod()[0].size(); ++i) {
       for (size_t j = 0; j < xs.size(); ++j) {
@@ -145,7 +145,7 @@ class SeqConcatGradKernel : public framework::OpKernel<T> {
         if (dx) {
          sliced_dx.emplace_back(dx->Slice(prev_lod, next_lod));
         } else {
-          sliced_dx.emplace_back(boost::none);
+          sliced_dx.emplace_back(paddle::none);
         }
       }
     }
......
@@ -1426,7 +1426,7 @@ class ConvMKLDNNTemplateHandler : public MKLDNNHandler {
   std::shared_ptr<typename forward_t::primitive_desc>
   AcquireConvolutionPrimitiveDescriptor(
       const mkldnn::memory::desc& src, const mkldnn::memory::desc& weights,
-      boost::optional<const mkldnn::memory::desc&> bias,
+      paddle::optional<const mkldnn::memory::desc&> bias,
       const mkldnn::memory::desc& dst, const std::vector<int64_t>& strides,
       const std::vector<int64_t>& dilations,
       const std::vector<int64_t>& paddings, const mkldnn::engine& engine,
......
@@ -73,6 +73,7 @@ limitations under the License. */
 #include "paddle/fluid/platform/profiler.h"
 #include "paddle/fluid/pybind/cuda_streams_py.h"
 #include "paddle/fluid/pybind/io.h"
+#include "paddle/utils/none.h"
 
 #ifdef PADDLE_WITH_ASCEND
 #include "paddle/fluid/pybind/ascend_wrapper_py.h"
 #endif
@@ -2910,7 +2911,7 @@ All parameter, weight, gradient are variables in Paddle.
       .def_property("fuse_broadcast_ops",
                     [](const BuildStrategy &self) {
                       return self.fuse_broadcast_ops_ == true ||
-                             self.fuse_broadcast_ops_ == boost::none;
+                             self.fuse_broadcast_ops_ == paddle::none;
                     },
                     [](BuildStrategy &self, bool b) {
                       PADDLE_ENFORCE_NE(self.IsFinalized(), true,
@@ -2940,7 +2941,7 @@ All parameter, weight, gradient are variables in Paddle.
       .def_property("fuse_all_optimizer_ops",
                     [](const BuildStrategy &self) {
                       return self.fuse_all_optimizer_ops_ == true ||
-                             self.fuse_all_optimizer_ops_ == boost::none;
+                             self.fuse_all_optimizer_ops_ == paddle::none;
                     },
                     [](BuildStrategy &self, bool b) {
                       PADDLE_ENFORCE_NE(self.IsFinalized(), true,
@@ -2989,7 +2990,7 @@ All parameter, weight, gradient are variables in Paddle.
          [](BuildStrategy &self, const py::handle &value) {
            auto *py_obj = value.ptr();
            if (py_obj == nullptr || py_obj == Py_None) {
-             self.memory_optimize_ = boost::none;
+             self.memory_optimize_ = paddle::none;
            } else if (PyBool_Check(py_obj)) {
              self.memory_optimize_ = (py_obj == Py_True);
            } else {
@@ -3046,7 +3047,7 @@ All parameter, weight, gradient are variables in Paddle.
           "fuse_all_reduce_ops",
           [](const BuildStrategy &self) {
             return self.fuse_all_reduce_ops_ == true ||
-                   self.fuse_all_reduce_ops_ == boost::none;
+                   self.fuse_all_reduce_ops_ == paddle::none;
           },
           [](BuildStrategy &self, bool b) { self.fuse_all_reduce_ops_ = b; })
       .def_property("enable_backward_optimizer_op_deps",
......
@@ -44,7 +44,7 @@ namespace reader = operators::reader;
 
 // Check whether the tensor shape matches the VarDesc shape
 // Return the different shape if exists
-static boost::optional<std::vector<int64_t>> DiffTensorShapeWithVarDesc(
+static paddle::optional<std::vector<int64_t>> DiffTensorShapeWithVarDesc(
     const framework::LoDTensor &tensor, const framework::VarDesc &var_desc,
     size_t num_places) {
   auto tensor_shape = tensor.dims();
@@ -56,7 +56,7 @@ static paddle::optional<std::vector<int64_t>> DiffTensorShapeWithVarDesc(
     if (desc_shape.size() != 0) {  // Tensor rank = 0 but desc does not match
       return framework::vectorize<int64_t>(tensor_shape);
     } else {
-      return boost::none;
+      return paddle::none;
     }
   }
@@ -92,7 +92,7 @@ static paddle::optional<std::vector<int64_t>> DiffTensorShapeWithVarDesc(
     }
   }
 
-  return boost::none;
+  return paddle::none;
 }
 
 static const std::shared_ptr<reader::LoDTensorBlockingQueue> &GetQueue(
......
// This file copy from boost/any.hpp and boost version: 1.41.0
// Modified the following points:
// 1. modify namespace from boost::any to paddle::any
// 2. remove the depending boost header files
// 3. remove/modify some macro

// See http://www.boost.org/libs/any for Documentation.

@@ -17,166 +17,115 @@
// where: tested with BCC 5.5, MSVC 6.0, and g++ 2.95

#include <algorithm>
#include <type_traits>
#include <typeinfo>

// See boost/python/type_id.hpp
// TODO: add BOOST_TYPEID_COMPARE_BY_NAME to config.hpp
#if (defined(__GNUC__) && __GNUC__ >= 3) || defined(_AIX) ||       \
    (defined(__sgi) && defined(__host_mips)) ||                     \
    (defined(__hpux) && defined(__HP_aCC)) ||                       \
    (defined(linux) && defined(__INTEL_COMPILER) && defined(__ICC))
#define BOOST_AUX_ANY_TYPE_ID_NAME
#include <cstring>
#endif

namespace paddle {
class any {
 public:  // structors
  any() : content(0) {}

  template <typename ValueType>
  any(const ValueType &value) : content(new holder<ValueType>(value)) {}

  any(const any &other) : content(other.content ? other.content->clone() : 0) {}

  ~any() { delete content; }

 public:  // modifiers
  any &swap(any &rhs) {
    std::swap(content, rhs.content);
    return *this;
  }

  template <typename ValueType>
  any &operator=(const ValueType &rhs) {
    any(rhs).swap(*this);
    return *this;
  }

  any &operator=(any rhs) {
    rhs.swap(*this);
    return *this;
  }

 public:  // queries
  bool empty() const { return !content; }

  const std::type_info &type() const {
    return content ? content->type() : typeid(void);
  }

 public:  // types (public so any_cast can be non-friend)
  class placeholder {
   public:  // structors
    virtual ~placeholder() {}

   public:  // queries
    virtual const std::type_info &type() const = 0;
    virtual placeholder *clone() const = 0;
  };

  template <typename ValueType>
  class holder : public placeholder {
   public:  // structors
    holder(const ValueType &value) : held(value) {}

   public:  // queries
    virtual const std::type_info &type() const { return typeid(ValueType); }
    virtual placeholder *clone() const { return new holder(held); }

   public:  // representation
    ValueType held;

   private:  // intentionally left unimplemented
    holder &operator=(const holder &);
  };

 public:  // representation (public so any_cast can be non-friend)
  placeholder *content;
};

class bad_any_cast : public std::bad_cast {
 public:
  virtual const char *what() const throw() {
    return "paddle::bad_any_cast: "
           "failed conversion using paddle::any_cast";
  }
};

template <typename ValueType>
ValueType *any_cast(any *operand) {
  return operand &&
#ifdef BOOST_AUX_ANY_TYPE_ID_NAME
                 std::strcmp(operand->type().name(),
                             typeid(ValueType).name()) == 0
#else
                 operand->type() == typeid(ValueType)
#endif
             ? &static_cast<any::holder<ValueType> *>(operand->content)->held
             : 0;
}

template <typename ValueType>
inline const ValueType *any_cast(const any *operand) {
  return any_cast<ValueType>(const_cast<any *>(operand));
}

template <typename ValueType>
ValueType any_cast(any &operand) {
  typedef typename std::remove_reference<ValueType>::type nonref;

  // If 'nonref' is still reference type, it means the user has not
@@ -185,42 +134,40 @@
  // Please use BOOST_BROKEN_COMPILER_TYPE_TRAITS_SPECIALIZATION macro
  // to generate specialization of remove_reference for your class
  // See type traits library documentation for details
  static_assert(!std::is_reference<nonref>::value,
                "!std::is_reference<nonref>::value");
  nonref *result = any_cast<nonref>(&operand);
  if (!result) throw bad_any_cast();
  return *result;
}

template <typename ValueType>
inline ValueType any_cast(const any &operand) {
  typedef typename std::remove_reference<ValueType>::type nonref;

  // The comment in the above version of 'any_cast' explains when this
  // assert is fired and what to do.
  static_assert(!std::is_reference<nonref>::value,
                "!std::is_reference<nonref>::value");
  return any_cast<const nonref &>(const_cast<any &>(operand));
}

// Note: The "unsafe" versions of any_cast are not part of the
// public interface and may be removed at any time. They are
// required where we know what type is stored in the any and can't
// use typeid() comparison, e.g., when our types may travel across
// different shared libraries.
template <typename ValueType>
inline ValueType *unsafe_any_cast(any *operand) {
  return &static_cast<any::holder<ValueType> *>(operand->content)->held;
}

template <typename ValueType>
inline const ValueType *unsafe_any_cast(const any *operand) {
  return unsafe_any_cast<ValueType>(const_cast<any *>(operand));
}
}

// Copyright Kevlin Henney, 2000, 2001, 2002. All rights reserved.
......
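A quick usage sketch for the paddle::any shown above (illustrative only, not part of the commit; the include path is assumed from the paddle/utils layout), exercising both any_cast forms:

#include <iostream>
#include <string>
#include "paddle/utils/any.h"  // path assumed from the commit description

int main() {
  paddle::any v = std::string("conv2d");  // stores a holder<std::string>
  // Pointer form: returns a null pointer when the held type does not match.
  if (std::string *s = paddle::any_cast<std::string>(&v)) {
    std::cout << *s << std::endl;
  }
  // Value/reference form: throws paddle::bad_any_cast on a type mismatch.
  try {
    int n = paddle::any_cast<int>(v);
    (void)n;
  } catch (const paddle::bad_any_cast &e) {
    std::cout << e.what() << std::endl;
  }
  return 0;
}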
// This file copy from boost/none_t.hpp and boost/none.hpp and boost version:
// 1.41.0
// Modified the following points:
// 1. modify namespace from boost::none to paddle::none
// 2. modify namespace from boost::none_t to paddle::none_t
// Copyright (C) 2003, Fernando Luis Cacciola Carballal.
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/optional for documentation.
//
// You are welcome to contact the author at:
// fernando_cacciola@hotmail.com
//
#ifndef PADDLE_NONE_17SEP2003_HPP
#define PADDLE_NONE_17SEP2003_HPP
namespace paddle {
namespace detail {
struct none_helper {};
}
typedef int detail::none_helper::*none_t;
}  // namespace paddle
// NOTE: Borland users have to include this header outside any precompiled
// headers
// (bcc<=5.64 cannot include instance data in a precompiled header)
// -- * To be verified, now that there's no unnamed namespace
namespace paddle {
none_t const none = ((none_t)0);
}  // namespace paddle
#endif
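none_t above is a pointer-to-member typedef, so paddle::none is a typed constant rather than a bare 0; that is what lets callers distinguish "no value" from a value that happens to be zero or false. A small illustrative sketch (the Describe overloads are hypothetical, not from the codebase):

#include <iostream>
#include "paddle/utils/none.h"

// Hypothetical overloads, only to show how overload resolution treats paddle::none.
void Describe(int) { std::cout << "int value\n"; }
void Describe(paddle::none_t) { std::cout << "no value (paddle::none)\n"; }

int main() {
  Describe(0);             // exact match: the int overload wins
  Describe(paddle::none);  // the none_t overload wins; never confused with 0
  return 0;
}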
This diff is collapsed.