Commit 379b471e authored by dzhwinter

squash commit

Parent a0aa2ec8
@@ -169,9 +169,9 @@ set(CUDA_PROPAGATE_HOST_FLAGS OFF)
 # Release/Debug flags set by cmake. Such as -O3 -g -DNDEBUG etc.
 # So, don't set these flags here.
+#list(APPEND CUDA_NVCC_FLAGS "-std=c++14")
 if (NOT WIN32) # windows msvc2015 support c++11 natively.
-# -std=c++11 -fPIC not recoginize by msvc, -Xcompiler will be added by cmake.
+# -std=c++11 -fPIC not recognized by msvc
 list(APPEND CUDA_NVCC_FLAGS "-Xcompiler -fPIC")
 endif(NOT WIN32)
...
@@ -26,7 +26,6 @@ function(CheckCompilerCXX11Flag)
 endfunction()
 CheckCompilerCXX11Flag()
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
 # safe_set_flag
 #
@@ -136,7 +135,7 @@ else(NOT WIN32)
 set(COMMON_FLAGS
     "/w") #disable all warnings
 set(GPU_COMMON_FLAGS
-    -w) #disable all warnings
+    "") #disable all warnings
 endif(NOT WIN32)
@@ -160,7 +159,6 @@ if(UNIX AND NOT APPLE)
   set(LINUX TRUE)
 endif(UNIX AND NOT APPLE)
-set(GPU_COMMON_FLAGS /std:c++14 ${GPU_COMMON_FLAGS})
 foreach(flag ${COMMON_FLAGS})
   safe_set_cflag(CMAKE_C_FLAGS ${flag})
   safe_set_cxxflag(CMAKE_CXX_FLAGS ${flag})
...
@@ -13,11 +13,10 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #pragma once
-// for windows compile eigen with logging
+// logging.h and windows.h conflict
 #define GLOG_NO_ABBREVIATED_SEVERITIES
 #include "paddle/fluid/framework/tensor.h"
-#include <math_constants.h>
 #include "unsupported/Eigen/CXX11/Tensor"
 namespace paddle {
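Side note on the `GLOG_NO_ABBREVIATED_SEVERITIES` define whose comment this hunk rewrites: windows.h defines an `ERROR` macro that collides with the abbreviated severity constants (`google::ERROR` and friends) that glog's logging.h declares by default, so the define must appear before any transitive include of glog. A minimal sketch, assuming glog is installed (not Paddle code):

```cpp
// Must come before any include that pulls in glog's logging.h, so the
// abbreviated severity constants are never declared and cannot collide
// with the ERROR macro from windows.h.
#define GLOG_NO_ABBREVIATED_SEVERITIES
#include <glog/logging.h>

int main(int argc, char** argv) {
  (void)argc;
  google::InitGoogleLogging(argv[0]);
  // LOG(severity) uses token pasting, so it still works with the
  // abbreviated constants disabled.
  LOG(WARNING) << "glog works with abbreviated severities disabled";
  return 0;
}
```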
@@ -49,11 +48,13 @@ struct EigenTensor {
   using ConstType =
       Eigen::TensorMap<Eigen::Tensor<const T, D, MajorType, IndexType>>;
-  static Type From(Tensor& tensor, DDim dims) {
+  static Type From(Tensor& tensor, DDim dims) {  // NOLINT
     return Type(tensor.data<T>(), EigenDim<D>::From(dims));
   }
-  static Type From(Tensor& tensor) { return From(tensor, tensor.dims_); }
+  static Type From(Tensor& tensor) {  // NOLINT
+    return From(tensor, tensor.dims_);
+  }
   static ConstType From(const Tensor& tensor, DDim dims) {
     return ConstType(tensor.data<T>(), EigenDim<D>::From(dims));
@@ -67,7 +68,8 @@ struct EigenTensor {
 template <typename T, int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
 struct EigenMatrix : public EigenTensor<T, 2, MajorType, IndexType> {
-  static typename EigenMatrix::Type Reshape(Tensor& tensor, int num_col_dims) {
+  static typename EigenMatrix::Type Reshape(Tensor& tensor,  // NOLINT
+                                            int num_col_dims) {
     int rank = tensor.dims_.size();
     PADDLE_ENFORCE(num_col_dims > 0 && num_col_dims < rank,
                    "`num_col_dims` must be between (0, rank_of_tensor).");
@@ -89,11 +91,12 @@ template <typename T, int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
 struct EigenVector : public EigenTensor<T, 1, MajorType, IndexType> {
   // Flatten reshapes a Tensor into an EigenVector.
-  static typename EigenVector::Type Flatten(Tensor& tensor) {
+  static typename EigenVector::Type Flatten(Tensor& tensor) {  // NOLINT
     return EigenVector::From(tensor, {product(tensor.dims_)});
   }
-  static typename EigenVector::ConstType Flatten(const Tensor& tensor) {
+  static typename EigenVector::ConstType Flatten(
+      const Tensor& tensor) {  // NOLINT
     return EigenVector::From(tensor, {product(tensor.dims_)});
   }
 };
@@ -107,7 +110,7 @@ struct EigenScalar {
   using ConstType = Eigen::TensorMap<
       Eigen::TensorFixedSize<const T, Eigen::Sizes<>, MajorType, IndexType>>;
-  static Type From(Tensor& tensor) { return Type(tensor.data<T>()); }
+  static Type From(Tensor& tensor) { return Type(tensor.data<T>()); }  // NOLINT
   static ConstType From(const Tensor& tensor) {
     return ConstType(tensor.data<T>());
...
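The `// NOLINT` markers added across these eigen.h hunks suppress cpplint's `runtime/references` warning, which flags non-const reference parameters; the annotation silences the check for that one line while leaving the signatures unchanged. A self-contained sketch of the convention (the `Scale` function is hypothetical, not from the diff):

```cpp
#include <iostream>

// cpplint would normally flag the mutable reference parameter below
// (runtime/references); the trailing "// NOLINT" silences the check
// for exactly this line without altering the API.
void Scale(double& value, double factor) {  // NOLINT
  value *= factor;
}

int main() {
  double v = 2.0;
  Scale(v, 3.0);
  std::cout << v << std::endl;  // prints 6
  return 0;
}
```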
@@ -11,6 +11,7 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
+#include <string>
 #include "paddle/fluid/framework/ir/attention_lstm_fuse_pass.h"
 #include "paddle/fluid/framework/ir/graph_pattern_detector.h"
@@ -215,12 +216,12 @@ void PrepareLSTMWeight(const LoDTensor& W_forget_w0,
   VLOG(3) << "LSTMWeight resized to " << out->dims();
   float* out_data = out->mutable_data<float>(platform::CPUPlace());
-  std::array<const float*, 4> tensors(
-      {{W_forget_w0.data<float>(), W_input_w0.data<float>(),
-        W_output_w0.data<float>(), W_cell_w0.data<float>()}});
-  std::array<const float*, 4> tensors1(
-      {{W_forget_w1.data<float>(), W_input_w1.data<float>(),
-        W_output_w1.data<float>(), W_cell_w1.data<float>()}});
+  std::array<const float*, 4> tensors = {
+      W_forget_w0.data<float>(), W_input_w0.data<float>(),
+      W_output_w0.data<float>(), W_cell_w0.data<float>()};
+  std::array<const float*, 4> tensors1 = {
+      W_forget_w1.data<float>(), W_input_w1.data<float>(),
+      W_output_w1.data<float>(), W_cell_w1.data<float>()};
   for (int row = 0; row < D; row++) {
     for (int col = 0; col < 4; col++) {
@@ -242,9 +243,9 @@ void PrepareLSTMWeight(const LoDTensor& W_forget_w0,
 void PrepareLSTMBias(const LoDTensor& B_forget, const LoDTensor& B_input,
                      const LoDTensor& B_output, const LoDTensor& B_cell,
                      LoDTensor* out) {
-  std::array<const float*, 4> tensors(
-      {{B_forget.data<float>(), B_input.data<float>(), B_output.data<float>(),
-        B_cell.data<float>()}});
+  std::array<const float*, 4> tensors = {
+      B_forget.data<float>(), B_input.data<float>(), B_output.data<float>(),
+      B_cell.data<float>()};
   PADDLE_ENFORCE_EQ(B_forget.dims().size(), 1);
   int D = B_forget.dims()[0];
...
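Both hunks above replace `std::array` construction from a parenthesized, doubly braced argument with plain aggregate initialization. `std::array` has no user-declared constructors, so the old spelling goes through copy-initialization from a temporary and reportedly tripped up the MSVC toolchain this commit targets (an assumption based on the Windows-port context); `= { ... }` is the simplest portable spelling. A minimal sketch with illustrative contents:

```cpp
#include <array>
#include <cstdio>

int main() {
  // Portable aggregate initialization of std::array; accepted by GCC,
  // Clang, and MSVC alike. The gate names are illustrative only.
  std::array<const char*, 4> gates = {"forget", "input", "output", "cell"};
  for (const char* g : gates) {
    std::printf("%s\n", g);
  }
  return 0;
}
```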
@@ -21,6 +21,7 @@ cc_binary(inference_analyzer SRCS analyzer_main.cc DEPS analysis paddle_fluid)
 set(PYTHON_TESTS_DIR ${PADDLE_BINARY_DIR}/python/paddle/fluid/tests)
+if (NOT WIN32)
 function (inference_analysis_test TARGET)
   if(WITH_TESTING)
     set(options "")
@@ -98,3 +99,4 @@ inference_analysis_test(test_chinese_ner SRCS chinese_ner_tester.cc
     ARGS --inference_model_dir=${PYTHON_TESTS_DIR}/book/word2vec.inference.model
          --infer_model=${CHINESE_NER_INSTALL_DIR}/model
          --infer_data=${CHINESE_NER_INSTALL_DIR}/data.txt)
+endif(NOT WIN32)
@@ -87,7 +87,7 @@ function(op_library TARGET)
   if (WIN32)
     # no nccl, no avx instructions ops.
     foreach(windows_unsupport_op "nccl_op" "gen_nccl_id_op" "warpctc_op" "hierarchical_sigmoid_op"
-      "crf_decoding_op" "select_op" "lstmp_op" "gru_op" "lstm_op" "fusion_lstm_op")
+      "crf_decoding_op" "select_op" "lstmp_op" "gru_op" "lstm_op" "fusion_lstm_op" "cumsum_op")
       if ("${TARGET}" STREQUAL "${windows_unsupport_op}")
         return()
       endif()
...
@@ -85,17 +85,14 @@ class CumKernel : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
   template <typename Device, typename Dim, typename X, typename Out>
   void ComputeImp(Device d, const Dim& dims, X x, Out out, int axis,
                   bool reverse, bool exclusive) const {
-    Functor func();
     if (!reverse) {
-      out.reshape(dims).device(d) =
-          func.apply(x.reshape(dims), axis, exclusive);
+      out.reshape(dims).device(d) = Functor()(x.reshape(dims), axis, exclusive);
     } else {
       std::array<bool, Dim::count> rev;
       rev.fill(false);
       rev[axis] = reverse;
       out.reshape(dims).device(d) =
-          func.apply(x.reshape(dims).reverse(rev), axis, exclusive)
-              .reverse(rev);
+          Functor()(x.reshape(dims).reverse(rev), axis, exclusive).reverse(rev);
     }
   }
 };
@@ -104,7 +101,8 @@ template <typename T>
 struct CumsumFunctor {
   using ELEMENT_TYPE = T;
   template <typename X>
-  const typename X::TensorScanSumOp apply(X x, int axis, bool exclusive) const {
+  const typename X::TensorScanSumOp operator()(X x, int axis,
+                                               bool exclusive) const {
    return x.cumsum(axis, exclusive);
   }
 };
...
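The two cum_op.h hunks above sidestep C++'s "most vexing parse": `Functor func();` declares a function named `func` returning `Functor`, not a default-constructed object, so member access like `func.apply(...)` does not do what the author intended. The rewrite invokes a temporary, `Functor()(...)`, and renames `apply` to `operator()` so the functor is directly callable. A standalone illustration (the `Square` type is hypothetical):

```cpp
#include <iostream>

struct Square {
  int operator()(int x) const { return x * x; }
};

int main() {
  // Square s();  // most vexing parse: declares a function, not an object,
  //              // so a later s(3) would fail to compile.
  Square s{};     // braces unambiguously define an object
  std::cout << s(3) << "\n";         // 9, via a named instance
  std::cout << Square()(4) << "\n";  // 16, via a temporary, as in the diff
  return 0;
}
```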
@@ -13,6 +13,15 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include "paddle/fluid/operators/math/math_function.h"
+#ifdef PADDLE_WITH_MKLML
+#include "paddle/fluid/platform/dynload/mklml.h"
+#endif
+#ifdef PADDLE_USE_OPENBLAS
+#include <cblas.h>
+#endif
 #include <vector>
 #include "paddle/fluid/framework/data_type.h"
 #include "paddle/fluid/operators/math/math_function_impl.h"
...
@@ -13,18 +13,6 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #pragma once
-#ifdef PADDLE_WITH_MKLML
-#include "paddle/fluid/platform/dynload/mklml.h"
-#endif
-#ifdef PADDLE_USE_OPENBLAS
-#include <cblas.h>
-// remove typedef in openblas
-#undef FLOAT
-#undef INT
-#undef SIZE
-#endif
 #include <cmath>
 #include <vector>
...
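The two math_function hunks above move the MKLML/OpenBLAS backend includes, and the `#undef FLOAT/INT/SIZE` workaround for macros leaked by OpenBLAS headers, out of the widely included math_function.h and into the implementation file, so the macro surgery no longer affects every translation unit that uses the header. A minimal sketch of the same pattern under the same build guards (the `dot` helper is hypothetical, not Paddle code):

```cpp
// Backend headers stay in the .cc, behind the build-system guards, and the
// public interface stays clean of their macro side effects.
#include <cstddef>
#include <vector>

#ifdef PADDLE_WITH_MKLML
#include "paddle/fluid/platform/dynload/mklml.h"
#endif
#ifdef PADDLE_USE_OPENBLAS
#include <cblas.h>
#endif

float dot(const std::vector<float>& a, const std::vector<float>& b) {
#ifdef PADDLE_USE_OPENBLAS
  // Delegate to the BLAS backend when it is available.
  return cblas_sdot(static_cast<int>(a.size()), a.data(), 1, b.data(), 1);
#else
  // Portable fallback so the sketch builds without any backend.
  float s = 0.0f;
  for (std::size_t i = 0; i < a.size(); ++i) s += a[i] * b[i];
  return s;
#endif
}

int main() { return dot({1, 2}, {3, 4}) == 11.0f ? 0 : 1; }
```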
@@ -16,6 +16,7 @@ limitations under the License. */
 #include <vector>
 #include "paddle/fluid/operators/math/math_function.h"
+#include "paddle/fluid/operators/math/math_function_impl.h"
 #include "paddle/fluid/operators/math/selected_rows_functor.h"
 #include "paddle/fluid/platform/cuda_primitives.h"
...
@@ -16,13 +16,12 @@ limitations under the License. */
 #include "paddle/fluid/operators/math/math_function.h"
 #include "paddle/fluid/operators/math/sequence_pooling.h"
 #include "paddle/fluid/platform/cuda_primitives.h"
+#include "paddle/fluid/platform/macros.h"
 namespace paddle {
 namespace operators {
 namespace math {
-#define FLT_MAX __FLT_MAX__
 template <typename T>
 struct MaxPoolFunctor {
   HOSTDEVICE void operator()(const T* input, const size_t start,
...
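The last hunk replaces a local `#define FLT_MAX __FLT_MAX__` with an include of paddle/fluid/platform/macros.h. `__FLT_MAX__` is a GCC/Clang builtin that MSVC does not provide, and redefining `FLT_MAX` risks colliding with the definition in `<cfloat>`; centralizing the workaround in one platform header (or using `std::numeric_limits`) avoids both problems. A small sketch of the portable spelling:

```cpp
#include <cfloat>
#include <iostream>
#include <limits>

int main() {
  // FLT_MAX already comes from <cfloat>; std::numeric_limits names the
  // same value without any compiler-specific builtin such as __FLT_MAX__.
  static_assert(std::numeric_limits<float>::max() == FLT_MAX,
                "both spellings denote the same constant");
  std::cout << FLT_MAX << std::endl;
  return 0;
}
```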