/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */

#include "gtest/gtest.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/place.h"

namespace paddle {
namespace framework {

// Writes three float values into device memory so that buf[2] is NaN.
// Intended to be launched with a single thread (<<<1, 1>>>); only
// indices 0..2 are touched, so |buf| must hold at least 3 floats.
static __global__ void FillNAN(float* buf) {
  buf[0] = 0.0f;
  buf[1] = 0.1f;
  buf[2] = NAN;  // quiet NaN from <math.h>
}
// Writes three float values into device memory so that buf[0] is +Inf.
// Intended to be launched with a single thread (<<<1, 1>>>); only
// indices 0..2 are touched, so |buf| must hold at least 3 floats.
static __global__ void FillInf(float* buf) {
  buf[0] = INFINITY;  // positive infinity from <math.h>
  buf[1] = 0.1f;
  buf[2] = 0.2f;
}

// float16 overload: fills buf[0..2] so that buf[2] holds a NaN.
// The raw bit pattern 0x7fff (all-ones exponent, non-zero mantissa) is a
// NaN in IEEE binary16; it is written directly through the .x storage
// field. Intended to be launched with a single thread (<<<1, 1>>>).
static __global__ void FillNAN(platform::float16* buf) {
  buf[0] = 0.0;
  buf[1] = 0.1;
  buf[2].x = 0x7fff;  // NaN bit pattern for IEEE binary16
}

// float16 overload: fills buf[0..2] so that buf[1] holds +Inf.
// The raw bit pattern 0x7c00 (all-ones exponent, zero mantissa) is
// positive infinity in IEEE binary16; it is written directly through the
// .x storage field. Intended to be launched with a single thread.
static __global__ void FillInf(platform::float16* buf) {
  buf[0] = 0.0;
  buf[1].x = 0x7c00;  // +Inf bit pattern for IEEE binary16
  buf[2] = 0.5;
}

// Writes three finite float values into device memory; used as the
// negative case for the NaN/Inf detection tests. Intended to be launched
// with a single thread (<<<1, 1>>>); |buf| must hold at least 3 floats.
static __global__ void FillFinite(float* buf) {
  buf[0] = 0.0f;
  buf[1] = 0.1f;
  buf[2] = 0.2f;
}

// float16 overload: writes three finite values; used as the negative case
// for the NaN/Inf detection tests. Intended to be launched with a single
// thread (<<<1, 1>>>); |buf| must hold at least 3 elements.
static __global__ void FillFinite(platform::float16* buf) {
  buf[0] = 0.0;
  buf[1] = 0.1;
  buf[2] = 0.2;
}

// Verifies TensorContainsNAN on GPU tensors for both float and float16
// element types: a kernel writes a NaN into device memory, the stream is
// synchronized, and the host-side check must report true.
TEST(TensorContainsNAN, GPU) {
  paddle::platform::CUDAPlace gpu(0);
  auto& pool = paddle::platform::DeviceContextPool::Instance();
  auto* cuda_ctx = pool.GetByPlace(gpu);
  {
    Tensor tensor;
    float* buf = tensor.mutable_data<float>({3}, gpu);
#ifdef PADDLE_WITH_HIP
    hipLaunchKernelGGL(FillNAN, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf);
#else
    FillNAN<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
#endif
    // Kernel launches are asynchronous; wait before inspecting the data.
    cuda_ctx->Wait();
    ASSERT_TRUE(TensorContainsNAN(tensor));
  }
  {
    Tensor tensor;
    paddle::platform::float16* buf =
        tensor.mutable_data<paddle::platform::float16>({3}, gpu);
#ifdef PADDLE_WITH_HIP
    hipLaunchKernelGGL(FillNAN, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf);
#else
    FillNAN<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
#endif
    cuda_ctx->Wait();
    ASSERT_TRUE(TensorContainsNAN(tensor));
  }
}

// Verifies TensorContainsInf on GPU tensors for both float and float16
// element types: a kernel writes an Inf into device memory, the stream is
// synchronized, and the host-side check must report true.
TEST(TensorContainsInf, GPU) {
  paddle::platform::CUDAPlace gpu(0);
  auto& pool = paddle::platform::DeviceContextPool::Instance();
  auto* cuda_ctx = pool.GetByPlace(gpu);
  {
    Tensor tensor;
    float* buf = tensor.mutable_data<float>({3}, gpu);
#ifdef PADDLE_WITH_HIP
    hipLaunchKernelGGL(FillInf, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf);
#else
    FillInf<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
#endif
    // Kernel launches are asynchronous; wait before inspecting the data.
    cuda_ctx->Wait();
    ASSERT_TRUE(TensorContainsInf(tensor));
  }
  {
    Tensor tensor;
    paddle::platform::float16* buf =
        tensor.mutable_data<paddle::platform::float16>({3}, gpu);
#ifdef PADDLE_WITH_HIP
    hipLaunchKernelGGL(FillInf, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf);
#else
    FillInf<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
#endif
    cuda_ctx->Wait();
    ASSERT_TRUE(TensorContainsInf(tensor));
  }
}

// Verifies TensorIsfinite on GPU tensors: it must report false when the
// tensor contains an Inf or a NaN and true when every element is finite,
// for both float and float16 element types.
TEST(TensorIsfinite, GPU) {
  paddle::platform::CUDAPlace gpu(0);
  using paddle::platform::float16;
  auto& pool = paddle::platform::DeviceContextPool::Instance();
  auto* cuda_ctx = pool.GetByPlace(gpu);
  // contains inf
  {
    Tensor tensor;
    float* buf = tensor.mutable_data<float>({3}, gpu);
#ifdef PADDLE_WITH_HIP
    hipLaunchKernelGGL(FillInf, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf);
#else
    FillInf<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
#endif
    // Kernel launches are asynchronous; wait before inspecting the data.
    cuda_ctx->Wait();
    EXPECT_TRUE(!TensorIsfinite(tensor));
  }
  {
    Tensor tensor;
    float16* buf = tensor.mutable_data<float16>({3}, gpu);
#ifdef PADDLE_WITH_HIP
    hipLaunchKernelGGL(FillInf, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf);
#else
    FillInf<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
#endif
    cuda_ctx->Wait();
    EXPECT_TRUE(!TensorIsfinite(tensor));
  }
  // contains nan
  {
    Tensor tensor;
    float* buf = tensor.mutable_data<float>({3}, gpu);
#ifdef PADDLE_WITH_HIP
    hipLaunchKernelGGL(FillNAN, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf);
#else
    FillNAN<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
#endif
    cuda_ctx->Wait();
    EXPECT_TRUE(!TensorIsfinite(tensor));
  }
  {
    Tensor tensor;
    float16* buf = tensor.mutable_data<float16>({3}, gpu);
#ifdef PADDLE_WITH_HIP
    hipLaunchKernelGGL(FillNAN, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf);
#else
    FillNAN<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
#endif
    cuda_ctx->Wait();
    EXPECT_TRUE(!TensorIsfinite(tensor));
  }
  // all element are finite
  {
    Tensor tensor;
    float* buf = tensor.mutable_data<float>({3}, gpu);
#ifdef PADDLE_WITH_HIP
    hipLaunchKernelGGL(FillFinite, dim3(1), dim3(1), 0, cuda_ctx->stream(),
                       buf);
#else
    FillFinite<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
#endif
    cuda_ctx->Wait();
    EXPECT_TRUE(TensorIsfinite(tensor));
  }
  {
    Tensor tensor;
    float16* buf = tensor.mutable_data<float16>({3}, gpu);
#ifdef PADDLE_WITH_HIP
    hipLaunchKernelGGL(FillFinite, dim3(1), dim3(1), 0, cuda_ctx->stream(),
                       buf);
#else
    FillFinite<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
#endif
    cuda_ctx->Wait();
    EXPECT_TRUE(TensorIsfinite(tensor));
  }
}

// Verifies the out-parameter overload TensorContainsInf(tensor, &out):
// the boolean result is produced in a device-side tensor, copied back to
// the host with TensorCopy, and must read true for both float and
// float16 inputs containing an Inf.
TEST(TensorContainsInf, GPUWithoutWait) {
  paddle::platform::CUDAPlace gpu(0);
  auto& pool = paddle::platform::DeviceContextPool::Instance();
  auto* cuda_ctx = pool.GetByPlace(gpu);
  {
    Tensor tensor, out;
    float* buf = tensor.mutable_data<float>({3}, gpu);
#ifdef PADDLE_WITH_HIP
    hipLaunchKernelGGL(FillInf, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf);
#else
    FillInf<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
#endif
    cuda_ctx->Wait();
    TensorContainsInf(tensor, &out);
    platform::CPUPlace cpu;
    Tensor tmp;
    TensorCopy(out, cpu, *cuda_ctx, &tmp);
    // TensorCopy on a stream is asynchronous; wait before reading tmp.
    cuda_ctx->Wait();
    ASSERT_EQ(tmp.data<bool>()[0], true);
  }
  {
    Tensor tensor, out;
    paddle::platform::float16* buf =
        tensor.mutable_data<paddle::platform::float16>({3}, gpu);
#ifdef PADDLE_WITH_HIP
    hipLaunchKernelGGL(FillInf, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf);
#else
    FillInf<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
#endif
    cuda_ctx->Wait();
    TensorContainsInf(tensor, &out);
    platform::CPUPlace cpu;
    Tensor tmp;
    TensorCopy(out, cpu, *cuda_ctx, &tmp);
    cuda_ctx->Wait();
    ASSERT_EQ(tmp.data<bool>()[0], true);
  }
}

// Verifies the out-parameter overload TensorContainsNAN(tensor, &out):
// the boolean result is produced in a device-side tensor, copied back to
// the host with TensorCopy, and must read true for both float and
// float16 inputs containing a NaN.
TEST(TensorContainsNAN, GPUWithoutWait) {
  paddle::platform::CUDAPlace gpu(0);
  auto& pool = paddle::platform::DeviceContextPool::Instance();
  auto* cuda_ctx = pool.GetByPlace(gpu);
  {
    Tensor tensor, out;
    float* buf = tensor.mutable_data<float>({3}, gpu);
#ifdef PADDLE_WITH_HIP
    hipLaunchKernelGGL(FillNAN, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf);
#else
    FillNAN<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
#endif
    cuda_ctx->Wait();
    TensorContainsNAN(tensor, &out);
    platform::CPUPlace cpu;
    Tensor tmp;
    TensorCopy(out, cpu, *cuda_ctx, &tmp);
    // TensorCopy on a stream is asynchronous; wait before reading tmp.
    cuda_ctx->Wait();
    ASSERT_EQ(tmp.data<bool>()[0], true);
  }
  {
    Tensor tensor, out;
    paddle::platform::float16* buf =
        tensor.mutable_data<paddle::platform::float16>({3}, gpu);
#ifdef PADDLE_WITH_HIP
    hipLaunchKernelGGL(FillNAN, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf);
#else
    FillNAN<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
#endif
    cuda_ctx->Wait();
    TensorContainsNAN(tensor, &out);
    platform::CPUPlace cpu;
    Tensor tmp;
    TensorCopy(out, cpu, *cuda_ctx, &tmp);
    cuda_ctx->Wait();
    ASSERT_EQ(tmp.data<bool>()[0], true);
  }
}

// Verifies the out-parameter overload TensorIsfinite(tensor, &out): the
// boolean result is produced in a device-side tensor and copied back to
// the host with TensorCopy. Expects false for Inf/NaN inputs and true
// when every element is finite.
TEST(TensorIsfinite, GPUWithoutWait) {
  paddle::platform::CUDAPlace gpu(0);
  auto& pool = paddle::platform::DeviceContextPool::Instance();
  auto* cuda_ctx = pool.GetByPlace(gpu);
  {
    Tensor tensor, out;
    float* buf = tensor.mutable_data<float>({3}, gpu);
#ifdef PADDLE_WITH_HIP
    hipLaunchKernelGGL(FillInf, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf);
#else
    FillInf<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
#endif
    cuda_ctx->Wait();
    TensorIsfinite(tensor, &out);
    platform::CPUPlace cpu;
    Tensor tmp;
    TensorCopy(out, cpu, *cuda_ctx, &tmp);
    // TensorCopy on a stream is asynchronous; wait before reading tmp.
    cuda_ctx->Wait();
    EXPECT_EQ(tmp.data<bool>()[0], false);
  }
  {
    Tensor tensor, out;
    float* buf = tensor.mutable_data<float>({3}, gpu);
#ifdef PADDLE_WITH_HIP
    hipLaunchKernelGGL(FillNAN, dim3(1), dim3(1), 0, cuda_ctx->stream(), buf);
#else
    FillNAN<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
#endif
    cuda_ctx->Wait();
    TensorIsfinite(tensor, &out);
    platform::CPUPlace cpu;
    Tensor tmp;
    TensorCopy(out, cpu, *cuda_ctx, &tmp);
    cuda_ctx->Wait();
    EXPECT_EQ(tmp.data<bool>()[0], false);
  }
  {
    Tensor tensor, out;
    float* buf = tensor.mutable_data<float>({3}, gpu);
#ifdef PADDLE_WITH_HIP
    hipLaunchKernelGGL(FillFinite, dim3(1), dim3(1), 0, cuda_ctx->stream(),
                       buf);
#else
    FillFinite<<<1, 1, 0, cuda_ctx->stream()>>>(buf);
#endif
    cuda_ctx->Wait();
    TensorIsfinite(tensor, &out);
    platform::CPUPlace cpu;
    Tensor tmp;
    TensorCopy(out, cpu, *cuda_ctx, &tmp);
    cuda_ctx->Wait();
    EXPECT_EQ(tmp.data<bool>()[0], true);
  }
}

}  // namespace framework
}  // namespace paddle