提交 a0aa2ec8 编写于 作者: D dzhwinter

build compile

上级 75681c0a
@@ -160,7 +160,7 @@ if(UNIX AND NOT APPLE)
   set(LINUX TRUE)
 endif(UNIX AND NOT APPLE)
-set(GPU_COMMON_FLAGS -std=c++11 ${GPU_COMMON_FLAGS})
+set(GPU_COMMON_FLAGS /std:c++14 ${GPU_COMMON_FLAGS})
 foreach(flag ${COMMON_FLAGS})
   safe_set_cflag(CMAKE_C_FLAGS ${flag})
   safe_set_cxxflag(CMAKE_CXX_FLAGS ${flag})
......
@@ -15,6 +15,7 @@
 #pragma once
 #include <algorithm>
+#include <iterator>
 #include <numeric>
 #include <sstream>
 #include <string>
@@ -25,8 +26,8 @@
 namespace paddle {
 namespace inference {
-static void split(const std::string &str, char sep, std::vector<std::string> *pieces) {
+static void split(const std::string &str, char sep,
+                  std::vector<std::string> *pieces) {
   pieces->clear();
   if (str.empty()) {
     return;
......
@@ -85,14 +85,17 @@ class CumKernel : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
   template <typename Device, typename Dim, typename X, typename Out>
   void ComputeImp(Device d, const Dim& dims, X x, Out out, int axis,
                   bool reverse, bool exclusive) const {
+    Functor func();
     if (!reverse) {
-      out.reshape(dims).device(d) = Functor()(x.reshape(dims), axis, exclusive);
+      out.reshape(dims).device(d) =
+          func.apply(x.reshape(dims), axis, exclusive);
     } else {
       std::array<bool, Dim::count> rev;
       rev.fill(false);
       rev[axis] = reverse;
       out.reshape(dims).device(d) =
-          Functor()(x.reshape(dims).reverse(rev), axis, exclusive).reverse(rev);
+          func.apply(x.reshape(dims).reverse(rev), axis, exclusive)
+              .reverse(rev);
     }
   }
 };
@@ -101,8 +104,7 @@ template <typename T>
 struct CumsumFunctor {
   using ELEMENT_TYPE = T;
   template <typename X>
-  const typename X::TensorScanSumOp operator()(X x, int axis,
-                                               bool exclusive) const {
+  const typename X::TensorScanSumOp apply(X x, int axis, bool exclusive) const {
     return x.cumsum(axis, exclusive);
   }
 };
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册