Commit 579449b0 authored by: D dangqingqing

Update comments and revert pybind11.

Parent d2a70243
......@@ -26,7 +26,7 @@ ExternalProject_Add(
extern_pybind
${EXTERNAL_PROJECT_LOG_ARGS}
GIT_REPOSITORY "https://github.com/pybind/pybind11.git"
GIT_TAG "v2.2.1"
GIT_TAG "v2.1.1"
PREFIX ${PYBIND_SOURCE_DIR}
UPDATE_COMMAND ""
CONFIGURE_COMMAND ""
......
......@@ -21,24 +21,74 @@ limitations under the License. */
#include "paddle/framework/program_desc.h"
#include "paddle/framework/var_desc.h"
using boost::variant;
// Cast boost::variant for PyBind.
// Copy from
// https://github.com/pybind/pybind11/issues/576#issuecomment-269563199
namespace pybind11 {
namespace detail {
// Can be replaced by a generic lambda in C++14
// Visitor that converts the currently-active alternative of a boost::variant
// into a Python handle through the alternative's own pybind11 type caster.
// (Could be replaced by a generic lambda in C++14.)
struct variant_caster_visitor : public boost::static_visitor<handle> {
  return_value_policy rvp_;
  handle parent_;

  variant_caster_visitor(return_value_policy rvp, handle parent)
      : rvp_(rvp), parent_(parent) {}

  template <class Alternative>
  handle operator()(Alternative const &held) const {
    return make_caster<Alternative>::cast(held, rvp_, parent_);
  }
};
// Generic pybind11 type caster for boost::variant, specialized below for the
// concrete variant instantiation. Primary template is declared only.
template <class Variant>
struct variant_caster;

template <template <class...> class V, class... Ts>
struct variant_caster<V<Ts...>> {
  using Type = V<Ts...>;

  // Try to load `src` as alternative T. Once some alternative has already
  // loaded successfully (load_success_), later alternatives are skipped, so
  // the first convertible alternative in Ts... wins.
  template <typename T>
  typename std::enable_if<
      !std::is_same<T, boost::detail::variant::void_>::value, bool>::type
  try_load(handle src, bool convert) {
    auto caster = make_caster<T>();
    if (!load_success_ && caster.load(src, convert)) {
      load_success_ = true;
      value = cast_op<T>(caster);
      return true;
    }
    return false;
  }

  // Overload selected for boost's internal void_ padding alternatives
  // (boost::variant pads its template parameter list with void_); these can
  // never hold a value, so loading always fails.
  template <typename T>
  typename std::enable_if<std::is_same<T, boost::detail::variant::void_>::value,
                          bool>::type
  try_load(handle src, bool convert) {
    return false;
  }

  // Python -> C++: attempt each alternative in declaration order. The braced
  // init-list guarantees left-to-right evaluation of the pack expansion; the
  // leading `false` keeps the list non-empty even for an empty pack.
  bool load(handle src, bool convert) {
    auto unused = {false, try_load<Ts>(src, convert)...};
    (void)(unused);
    return load_success_;
  }

  // C++ -> Python: dispatch on the active alternative via apply_visitor.
  static handle cast(Type const &src, return_value_policy policy,
                     handle parent) {
    variant_caster_visitor visitor(policy, parent);
    return boost::apply_visitor(visitor, src);
  }

  // Declares the `value` member used by try_load and the Python-side name.
  PYBIND11_TYPE_CASTER(Type, _("Variant"));
  bool load_success_{false};
};
// Add specialization for concrete variant type: registers variant_caster as
// the pybind11 type caster for every boost::variant instantiation.
template <class... Args>
struct type_caster<boost::variant<Args...>>
    : variant_caster<boost::variant<Args...>> {};
// Tell pybind11 to use boost::apply_visitor (instead of std::visit) when
// visiting a boost::variant. NOTE(review): args are forwarding references but
// are passed on as lvalues (no std::forward) — this matches the decltype in
// the trailing return type, so call and return expressions stay consistent.
template <>
struct visit_helper<boost::variant> {
  template <typename... Args>
  static auto call(Args &&... args) -> decltype(boost::apply_visitor(args...)) {
    return boost::apply_visitor(args...);
  }
};
} // namespace detail
} // namespace pybind11
......
......@@ -13,7 +13,6 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/pybind/protobuf.h"
#include "pybind11/iostream.h"
#include <mutex> // for call_once
#include <unordered_map>
......@@ -62,8 +61,8 @@ bool IsCompileGPU() {
#endif
}
PYBIND11_MODULE(core, m) {
m.doc() = "C++ core of PaddlePaddle";
PYBIND11_PLUGIN(core) {
py::module m("core", "C++ core of PaddlePaddle");
// using framework in this function. Since it is inside a function, it will
// not cause namespace pollution.
......@@ -501,8 +500,7 @@ All parameter, weight, gradient are variables in Paddle.
m.def("enable_profiler", platform::EnableProfiler);
m.def("disable_profiler", platform::DisableProfiler);
m.def("reset_profiler", platform::ResetProfiler);
py::add_ostream_redirect(m, "ostream_redirect");
return m.ptr();
}
} // namespace pybind
} // namespace paddle
......@@ -52,22 +52,30 @@ def cuda_profiler(output_file, output_mode=None, config=None):
def reset_profiler():
    """Clear all profiling records collected so far.

    After calling this, subsequent profiling starts from an empty
    time record.
    """
    core.reset_profiler()
@contextmanager
def profiler(state, sorted_key=None):
"""The profiler interface.
Different from cuda_profiler, this function can be used to profile both CPU
and GPU program.
Different from cuda_profiler, this profiler can be used to profile both CPU
and GPU programs. By default, it records the CPU and GPU operator kernels;
if you want to profile other programs, you can refer to the profiling
tutorial to add more records.
Args:
state (string) : The profiler state, It should be 'CPU' or 'GPU'.
sorted_key (string) : If None, the profiler results will be printed
without sorting. Otherwise, the profiler results will be sorted
by this flag. This flag should be one of 'calls', 'total',
'max', 'min' or 'ave'.
The `calls` means sorting by the calling counter.
state (string) : The profiling state, It should be 'CPU' or 'GPU'.
Although users may define CPUPlace or CUDAPlace when using Fluid,
the profiler doesn't get the state based on this Place, since the
implementation is an independent part of Fluid.
sorted_key (string) : If None, the profiling results will be printed
in the order of first end time of events. Otherwise, the profiling
results will be sorted by this flag. This flag should be one
of 'calls', 'total', 'max', 'min' or 'ave'.
The `calls` means sorting by the number of calls.
The `total` means sorting by the total execution time.
The `max` means sorting by the maximum execution time.
The `min` means sorting by the minimum execution time.
......@@ -92,5 +100,6 @@ def profiler(state, sorted_key=None):
'min': core.EventSortingKey.kMin,
'ave': core.EventSortingKey.kAve,
}
with core.ostream_redirect(stdout=True, stderr=True):
# TODO(qingqing) : redirect C++ ostream to Python stream.
# with core.ostream_redirect(stdout=True, stderr=True):
core.disable_profiler(key_map[sorted_key])
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register