From 3158b4b37a7743239030a331de56f9c227d14adf Mon Sep 17 00:00:00 2001
From: Yang Yu
Date: Thu, 28 Dec 2017 17:50:29 +0800
Subject: [PATCH] Update tensor_util

---
 paddle/framework/CMakeLists.txt      |  6 ++-
 paddle/framework/tensor_util.cc      | 10 +++--
 paddle/framework/tensor_util.h       |  3 ++
 paddle/framework/tensor_util_test.cu | 57 ++++++++++++++++++++++++++++
 4 files changed, 72 insertions(+), 4 deletions(-)
 create mode 100644 paddle/framework/tensor_util_test.cu

diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt
index 2af10a996c1..46dce7d1d27 100644
--- a/paddle/framework/CMakeLists.txt
+++ b/paddle/framework/CMakeLists.txt
@@ -12,7 +12,11 @@ else()
 endif ()
 
 cc_test(tensor_test SRCS tensor_test.cc DEPS tensor)
-cc_test(tensor_util_test SRCS tensor_util_test.cc DEPS tensor)
+if (WITH_GPU)
+  nv_test(tensor_util_test SRCS tensor_util_test.cc tensor_util_test.cu DEPS tensor)
+else()
+  cc_test(tensor_util_test SRCS tensor_util_test.cc DEPS tensor)
+endif()
 
 cc_test(eigen_test SRCS eigen_test.cc DEPS tensor)
 
diff --git a/paddle/framework/tensor_util.cc b/paddle/framework/tensor_util.cc
index 293c65a0654..7efc649d0bc 100644
--- a/paddle/framework/tensor_util.cc
+++ b/paddle/framework/tensor_util.cc
@@ -31,6 +31,7 @@ struct AnyDTypeVisitor {
   void operator()() const {
     auto t = EigenVector<T>::Flatten(tensor_);
     auto o = EigenScalar<bool>::From(*out_);
+    // return true if any element of predicate_(t) is true.
     o.device(*ctx_.eigen_device()) = predicate_(t).any();
   }
 };
@@ -66,9 +67,10 @@ struct AnyVisitor : public boost::static_visitor<bool> {
     framework::Tensor tmp;
     tmp.Resize({1});
     tmp.mutable_data<bool>(cpu);
-    platform::DeviceContextPool::Instance().Get(gpu)->Wait();
-    CopyFrom(out, cpu, &tmp);
-    platform::DeviceContextPool::Instance().Get(gpu)->Wait();
+    auto gpuctx = platform::DeviceContextPool::Instance().Get(gpu);
+    gpuctx->Wait();
+    CopyFrom(out, cpu, *gpuctx, &tmp);
+    gpuctx->Wait();
     return GetResult(tmp, cpu);
   }
 
@@ -89,6 +91,7 @@ struct HasNANPredicate {
   template <typename T>
   auto operator()(const T& eigen_vec) const
       -> decltype(std::declval<T>().isnan()) {
+    // Cast eigen_vector to a vector of bool; an element is true if it is NaN.
     return eigen_vec.isnan();
   }
 };
@@ -102,6 +105,7 @@ struct HasInfPredicate {
   template <typename T>
   auto operator()(const T& eigen_vec) const
       -> decltype(std::declval<T>().isinf()) {
+    // Cast eigen_vector to a vector of bool; an element is true if it is Inf.
     return eigen_vec.isinf();
   }
 };
diff --git a/paddle/framework/tensor_util.h b/paddle/framework/tensor_util.h
index e71d8e5672a..784170dae3f 100644
--- a/paddle/framework/tensor_util.h
+++ b/paddle/framework/tensor_util.h
@@ -208,7 +208,10 @@ inline void CopyToVector(const Tensor& src, std::vector<T>* dst) {
                src_ptr, size);
 }
 
+// Returns true if a tensor contains NAN, i.e., Not A Number.
 extern bool HasNAN(const framework::Tensor& tensor);
+
+// Returns true if a tensor contains Inf, i.e., Infinity.
 extern bool HasInf(const framework::Tensor& tensor);
 
 } // namespace framework
diff --git a/paddle/framework/tensor_util_test.cu b/paddle/framework/tensor_util_test.cu
new file mode 100644
index 00000000000..ebd35fdf6c2
--- /dev/null
+++ b/paddle/framework/tensor_util_test.cu
@@ -0,0 +1,57 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#include "gtest/gtest.h"
+#include "paddle/framework/tensor_util.h"
+#include "paddle/platform/device_context.h"
+#include "paddle/platform/place.h"
+
+namespace paddle {
+namespace framework {
+
+static __global__ void FillNAN(float* buf) {
+  buf[0] = 0.0;
+  buf[1] = 0.1;
+  buf[2] = NAN;
+}
+static __global__ void FillInf(float* buf) {
+  buf[0] = 0.0;
+  buf[1] = INFINITY;
+  buf[2] = 0.5;
+}
+
+TEST(HasNAN, GPU) {
+  Tensor tensor;
+  platform::CUDAPlace gpu(0);
+  auto& pool = platform::DeviceContextPool::Instance();
+  auto* cuda_ctx = pool.GetByPlace(gpu);
+  float* buf = tensor.mutable_data<float>({3}, gpu);
+  FillNAN<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
+  cuda_ctx->Wait();
+  ASSERT_TRUE(HasNAN(tensor));
+}
+
+TEST(HasInf, GPU) {
+  Tensor tensor;
+  platform::CUDAPlace gpu(0);
+  auto& pool = platform::DeviceContextPool::Instance();
+  auto* cuda_ctx = pool.GetByPlace(gpu);
+  float* buf = tensor.mutable_data<float>({3}, gpu);
+  FillInf<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
+  cuda_ctx->Wait();
+  ASSERT_TRUE(HasInf(tensor));
+}
+
+} // namespace framework
+} // namespace paddle
--
GitLab
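Note: the CMake change above keeps building the existing CPU test (tensor_util_test.cc), whose contents are not part of this patch. Below is a minimal CPU-side sketch of how HasNAN/HasInf could be exercised, mirroring the GPU tests above; it is illustrative only (the actual tensor_util_test.cc may differ) and assumes the same Tensor::mutable_data<float>({3}, place) call used in the new .cu tests.

#include <cmath>  // NAN and INFINITY macros

#include "gtest/gtest.h"
#include "paddle/framework/tensor_util.h"
#include "paddle/platform/place.h"

namespace paddle {
namespace framework {

TEST(HasNAN, CPU) {
  Tensor src;
  platform::CPUPlace cpu;
  float* buf = src.mutable_data<float>({3}, cpu);
  buf[0] = 0.0;
  buf[1] = NAN;  // inject a NaN; HasNAN should report true
  buf[2] = 0.5;
  ASSERT_TRUE(HasNAN(src));
}

TEST(HasInf, CPU) {
  Tensor src;
  platform::CPUPlace cpu;
  float* buf = src.mutable_data<float>({3}, cpu);
  buf[0] = 0.0;
  buf[1] = INFINITY;  // inject an Inf; HasInf should report true
  buf[2] = 0.5;
  ASSERT_TRUE(HasInf(src));
}

} // namespace framework
} // namespace paddle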