diff --git a/paddle/fluid/operators/bernoulli_op.cu b/paddle/fluid/operators/bernoulli_op.cu
index 5bdf20afe2006aec23d4e8fa2a1e79080f6ba7d2..dde4dd2567b793ffde16207515613aa1e0a6d4be 100644
--- a/paddle/fluid/operators/bernoulli_op.cu
+++ b/paddle/fluid/operators/bernoulli_op.cu
@@ -61,16 +61,16 @@ class BernoulliOpKernel
         BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace()).GetDeviceId();
     auto gen_cuda = framework::GetDefaultCUDAGenerator(device_id);
     auto seed_offset = gen_cuda->IncrementOffset(1);
-    int gen_offset = size * seed_offset.second;
+    int64_t gen_offset = size * seed_offset.second;
     platform::Transform<platform::CUDADeviceContext> trans;
-    thrust::counting_iterator<unsigned int> index_sequence_begin(0);
+    thrust::counting_iterator<int64_t> index_sequence_begin(0);
     auto* context =
         static_cast<const platform::CUDADeviceContext*>(&ctx.device_context());
     trans(*context, index_sequence_begin, index_sequence_begin + size, in_data,
           out_data,
-          BernoulliCudaFunctor<T>(static_cast<unsigned int>(seed_offset.first),
-                                  static_cast<unsigned int>(gen_offset)));
+          BernoulliCudaFunctor<T>(static_cast<int64_t>(seed_offset.first),
+                                  static_cast<int64_t>(gen_offset)));
   }
 };
diff --git a/paddle/fluid/operators/gaussian_random_op.cu b/paddle/fluid/operators/gaussian_random_op.cu
index 453ae20656f1d63bc9a7b088ef6785cf03270c59..2ea432db6c7f07d4017179dc862e340a59f19a1f 100644
--- a/paddle/fluid/operators/gaussian_random_op.cu
+++ b/paddle/fluid/operators/gaussian_random_op.cu
@@ -59,7 +59,7 @@ class GPUGaussianRandomKernel : public framework::OpKernel<T> {
     }
     T mean = static_cast<T>(context.Attr<float>("mean"));
     T std = static_cast<T>(context.Attr<float>("std"));
-    thrust::counting_iterator<unsigned int> index_sequence_begin(0);
+    thrust::counting_iterator<int64_t> index_sequence_begin(0);
     auto shape = GetShape(context);
     tensor->Resize(shape);
     T* data = tensor->mutable_data<T>(context.GetPlace());
@@ -72,7 +72,7 @@ class GPUGaussianRandomKernel : public framework::OpKernel<T> {
     if (gen_cuda->GetIsInitPy() && seed_flag) {
       auto seed_offset = gen_cuda->IncrementOffset(1);
-      int gen_offset = size * seed_offset.second;
+      int64_t gen_offset = size * seed_offset.second;
       thrust::transform(
           index_sequence_begin, index_sequence_begin + size,
           thrust::device_ptr<T>(data),
@@ -100,7 +100,7 @@ class GPUGaussianRandomBatchSizeLikeKernel : public framework::OpKernel<T> {
     }
     T mean = static_cast<T>(context.Attr<float>("mean"));
     T std = static_cast<T>(context.Attr<float>("std"));
-    thrust::counting_iterator<unsigned int> index_sequence_begin(0);
+    thrust::counting_iterator<int64_t> index_sequence_begin(0);
     int64_t size = tensor->numel();
     int device_id =
@@ -109,7 +109,7 @@ class GPUGaussianRandomBatchSizeLikeKernel : public framework::OpKernel<T> {
     if (gen_cuda->GetIsInitPy() && seed_flag) {
       auto seed_offset = gen_cuda->IncrementOffset(1);
-      int gen_offset = size * seed_offset.second;
+      int64_t gen_offset = size * seed_offset.second;
       thrust::transform(index_sequence_begin, index_sequence_begin + size,
                         thrust::device_ptr<T>(data),
                         GaussianGenerator<T>(mean, std, seed_offset.first,
diff --git a/paddle/fluid/operators/gumbel_softmax_op.cu b/paddle/fluid/operators/gumbel_softmax_op.cu
index 6b6290d4af29f73216311ab41722a17adc237d11..d3edf72449537908612534a1d83b733fe2416b1e 100644
--- a/paddle/fluid/operators/gumbel_softmax_op.cu
+++ b/paddle/fluid/operators/gumbel_softmax_op.cu
@@ -129,7 +129,7 @@ struct GumbleNoiseGenerator {
     int64_t size = size_to_axis * size_from_axis;
     T* random_data =
        random_tensor.mutable_data<T>({size}, platform::CUDAPlace());
-    thrust::counting_iterator<unsigned int> index_sequence_begin(0);
+    thrust::counting_iterator<int64_t> index_sequence_begin(0);
     // generate gumbel noise
     int device_id =
@@ -137,7 +137,7 @@ struct GumbleNoiseGenerator {
     auto gen_cuda = framework::GetDefaultCUDAGenerator(device_id);
     if (gen_cuda->GetIsInitPy()) {
       auto seed_offset = gen_cuda->IncrementOffset(1);
-      int gen_offset = size * seed_offset.second;
+      int64_t gen_offset = size * seed_offset.second;
       thrust::transform(
           index_sequence_begin, index_sequence_begin + size,
           thrust::device_ptr<T>(random_data),
diff --git a/paddle/fluid/operators/multinomial_op.cu b/paddle/fluid/operators/multinomial_op.cu
index 1e52cf36f69c8c72462e9e8e9d36743bc5be6551..a07cae8d3dabc98d22ff2423a605915e8260a802 100644
--- a/paddle/fluid/operators/multinomial_op.cu
+++ b/paddle/fluid/operators/multinomial_op.cu
@@ -239,7 +239,7 @@ class MultinomialOpKernel
     auto* rng_data = rng_data_tensor.mutable_data<T>(
         {num_distributions, num_samples}, ctx.GetPlace());
-    thrust::counting_iterator<unsigned int> index_sequence_begin(0);
+    thrust::counting_iterator<int64_t> index_sequence_begin(0);
     platform::Transform<platform::CUDADeviceContext> trans;
     auto* context =
         static_cast<const platform::CUDADeviceContext*>(&ctx.device_context());
diff --git a/paddle/fluid/operators/truncated_gaussian_random_op.cu b/paddle/fluid/operators/truncated_gaussian_random_op.cu
index 1f25a8807589232beec890e2833d437746bb3fc0..aaed8e5b62584d7c9eb89b6a38811ffd8fa1b2c9 100644
--- a/paddle/fluid/operators/truncated_gaussian_random_op.cu
+++ b/paddle/fluid/operators/truncated_gaussian_random_op.cu
@@ -97,7 +97,7 @@ class GPUTruncatedGaussianRandomKernel : public framework::OpKernel<T> {
     }
     T mean = static_cast<T>(context.Attr<float>("mean"));
     T std = static_cast<T>(context.Attr<float>("std"));
-    thrust::counting_iterator<unsigned int> index_sequence_begin(0);
+    thrust::counting_iterator<int64_t> index_sequence_begin(0);
     int64_t size = tensor->numel();
     int device_id =
@@ -106,7 +106,7 @@ class GPUTruncatedGaussianRandomKernel : public framework::OpKernel<T> {
     if (gen_cuda->GetIsInitPy() && seed_flag) {
       auto seed_offset = gen_cuda->IncrementOffset(1);
-      int gen_offset = size * seed_offset.second;
+      int64_t gen_offset = size * seed_offset.second;
       thrust::transform(
           index_sequence_begin, index_sequence_begin + size,
           thrust::device_ptr<T>(data),
diff --git a/paddle/fluid/operators/uniform_random_inplace_op.cu b/paddle/fluid/operators/uniform_random_inplace_op.cu
index 119788dc85cb26542caf13448b23dcb9449d6acf..bf82af865a1ebea23e219649984fefd4406b26e7 100644
--- a/paddle/fluid/operators/uniform_random_inplace_op.cu
+++ b/paddle/fluid/operators/uniform_random_inplace_op.cu
@@ -118,14 +118,14 @@ class GPUUniformRandomInplaceKernel : public framework::OpKernel<T> {
     unsigned int diag_step =
         static_cast<unsigned int>(ctx.Attr<int>("diag_step"));
     T diag_val = static_cast<T>(ctx.Attr<float>("diag_val"));
-    thrust::counting_iterator<unsigned int> index_sequence_begin(0);
+    thrust::counting_iterator<int64_t> index_sequence_begin(0);
     int64_t size = tensor->numel();
     int device_id =
         BOOST_GET_CONST(platform::CUDAPlace, ctx.GetPlace()).GetDeviceId();
     auto gen_cuda = framework::GetDefaultCUDAGenerator(device_id);
     if (gen_cuda->GetIsInitPy() && seed_flag) {
       auto seed_offset = gen_cuda->IncrementOffset(1);
-      int gen_offset = size * seed_offset.second;
+      int64_t gen_offset = size * seed_offset.second;
       thrust::transform(
           index_sequence_begin, index_sequence_begin + size,
           thrust::device_ptr<T>(data),
diff --git a/paddle/fluid/operators/uniform_random_op.cu b/paddle/fluid/operators/uniform_random_op.cu
index ceb13a3dda41df0a3177d3291ad409b673a4c20c..97288b2b1fa7c143419f8b0f68bd53c8da697bee 100644
--- a/paddle/fluid/operators/uniform_random_op.cu
+++ b/paddle/fluid/operators/uniform_random_op.cu
@@ -139,14 +139,14 @@ class GPUUniformRandomKernel : public framework::OpKernel<T> {
     unsigned int diag_step =
         static_cast<unsigned int>(context.Attr<int>("diag_step"));
     T diag_val = static_cast<T>(context.Attr<float>("diag_val"));
-    thrust::counting_iterator<unsigned int> index_sequence_begin(0);
+    thrust::counting_iterator<int64_t> index_sequence_begin(0);
     int64_t size = tensor->numel();
     int device_id =
         BOOST_GET_CONST(platform::CUDAPlace, context.GetPlace()).GetDeviceId();
     auto gen_cuda = framework::GetDefaultCUDAGenerator(device_id);
     if (gen_cuda->GetIsInitPy() && seed_flag) {
       auto seed_offset = gen_cuda->IncrementOffset(1);
-      int gen_offset = size * seed_offset.second;
+      int64_t gen_offset = size * seed_offset.second;
       thrust::transform(
           index_sequence_begin, index_sequence_begin + size,
           thrust::device_ptr<T>(data),
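
Background on the change: `size` is a 64-bit element count and `seed_offset.second` is the per-call increment of the random generator's offset, so their product is computed in 64-bit arithmetic; storing it in an `int` (or casting it to a 32-bit type for the functor) silently truncates it for large tensors, which is what widening `gen_offset` and the `thrust::counting_iterator` index type to `int64_t` avoids. The following standalone sketch is not part of the patch and uses made-up numbers; it only illustrates the narrowing behavior being removed.

// overflow_sketch.cc -- illustrative only, values are hypothetical
#include <cstdint>
#include <cstdio>

int main() {
  int64_t size = 3000000000LL;  // element count of a large tensor (> INT_MAX)
  int64_t increment = 4;        // stand-in for seed_offset.second

  // Old behavior: the 64-bit product is narrowed into a 32-bit int.
  int narrow_offset = static_cast<int>(size * increment);
  // New behavior: the product is kept in 64 bits.
  int64_t wide_offset = size * increment;

  std::printf("narrowed to int: %d\n", narrow_offset);   // truncated value
  std::printf("kept as int64_t: %lld\n",
              static_cast<long long>(wide_offset));      // 12000000000
  return 0;
}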