Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
magicwindyyd
mindspore
提交
779e27b9
M
mindspore
项目概览
magicwindyyd
/
mindspore
与 Fork 源项目一致
Fork自
MindSpore / mindspore
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
M
mindspore
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
779e27b9
编写于
8月 29, 2020
作者:
B
baihuawei
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
support categorical log_prob
上级
bc726ce2
变更
6
隐藏空白更改
内联
并排
Showing
6 changed files
with
191 additions
and
13 deletions
+191
-13
mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/range_gpu_kernel.cc
...rc/backend/kernel_compiler/gpu/arrays/range_gpu_kernel.cc
+26
-0
mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/range_gpu_kernel.h
...src/backend/kernel_compiler/gpu/arrays/range_gpu_kernel.h
+89
-0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/range_impl.cu
...ccsrc/backend/kernel_compiler/gpu/cuda_impl/range_impl.cu
+39
-0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/range_impl.cuh
...csrc/backend/kernel_compiler/gpu/cuda_impl/range_impl.cuh
+23
-0
mindspore/nn/probability/distribution/categorical.py
mindspore/nn/probability/distribution/categorical.py
+12
-11
mindspore/ops/operations/random_ops.py
mindspore/ops/operations/random_ops.py
+2
-2
未找到文件。
mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/range_gpu_kernel.cc
0 → 100644
浏览文件 @
779e27b9
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/arrays/range_gpu_kernel.h"
namespace mindspore {
namespace kernel {
// Register the Range GPU kernel for the two element types it supports; the
// trailing template argument selects the T used by RangeGPUKernel<T>.
MS_REG_GPU_KERNEL_ONE(Range, KernelAttr().AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumberTypeFloat32),
                      RangeGPUKernel, float)
MS_REG_GPU_KERNEL_ONE(Range, KernelAttr().AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32),
                      RangeGPUKernel, int)
}  // namespace kernel
}  // namespace mindspore
mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/range_gpu_kernel.h
0 → 100644
浏览文件 @
779e27b9
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_RANGE_GPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_RANGE_GPU_KERNEL_H_
#include <vector>
#include "backend/kernel_compiler/gpu/gpu_kernel.h"
#include "backend/kernel_compiler/gpu/gpu_kernel_factory.h"
#include "backend/kernel_compiler/gpu/cuda_impl/range_impl.cuh"
namespace mindspore {
namespace kernel {
// GPU kernel for the Range op: for every element of the input tensor it writes
// output[i] = input[i] * delta + start (see CalRange in range_impl.cu).  The
// `limit` attribute is read and forwarded to the device code unchanged.
template <typename T>
class RangeGPUKernel : public GpuKernel {
 public:
  RangeGPUKernel() : input_size_(0), output_size_(0), start_(0.), limit_(1.), delta_(1.) {}
  ~RangeGPUKernel() = default;

  const std::vector<size_t> &GetInputSizeList() const override { return input_size_list_; }
  const std::vector<size_t> &GetOutputSizeList() const override { return output_size_list_; }
  const std::vector<size_t> &GetWorkspaceSizeList() const override { return workspace_size_list_; }

  // Launches the CUDA implementation on the stream supplied by the runtime.
  // Always reports success; the element count is derived from the byte size
  // computed in Init().
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace,
              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
    T *input_addr = GetDeviceAddress<T>(inputs, 0);
    T *output_addr = GetDeviceAddress<T>(outputs, 0);
    const int element_count = SizeToInt(input_size_ / sizeof(T));
    CalRange(element_count, start_, limit_, delta_, input_addr, output_addr,
             reinterpret_cast<cudaStream_t>(stream_ptr));
    return true;
  }

  // Validates the node (exactly one input, one output), caches the input byte
  // size and the start/limit/delta attributes, and fills the size lists.
  // Returns false (after logging) on arity mismatch.
  bool Init(const CNodePtr &kernel_node) override {
    size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node);
    if (input_num != 1) {
      MS_LOG(ERROR) << "Input number is " << input_num << ", but Range needs 1 input.";
      return false;
    }
    size_t output_num = AnfAlgo::GetOutputTensorNum(kernel_node);
    if (output_num != 1) {
      MS_LOG(ERROR) << "Output number is " << output_num << ", but Range needs 1 output.";
      return false;
    }
    auto input_shape = AnfAlgo::GetInputDeviceShape(kernel_node, 0);
    input_size_ = 1;
    for (auto dim : input_shape) {
      input_size_ *= dim;
    }
    input_size_ *= sizeof(T);
    // Range is element-wise, so the output occupies exactly as many bytes.
    output_size_ = input_size_;
    start_ = GetAttr<float>(kernel_node, "start");
    limit_ = GetAttr<float>(kernel_node, "limit");
    delta_ = GetAttr<float>(kernel_node, "delta");
    InitSizeLists();
    return true;
  }

 protected:
  void InitSizeLists() override {
    input_size_list_.push_back(input_size_);
    output_size_list_.push_back(output_size_);
  }

 private:
  std::vector<size_t> input_size_list_;
  std::vector<size_t> output_size_list_;
  std::vector<size_t> workspace_size_list_;  // never populated: the kernel needs no workspace
  size_t input_size_;   // input tensor size in bytes
  size_t output_size_;  // output tensor size in bytes (== input_size_)
  float start_;
  float limit_;
  float delta_;
};
}  // namespace kernel
}  // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_RANGE_GPU_KERNEL_H_
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/range_impl.cu
0 → 100644
浏览文件 @
779e27b9
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda_runtime.h>
#include "range_impl.cuh"
#include "runtime/device/gpu/cuda_common.h"
// Device kernel: writes output[pos] = input[pos] * delta + start for every
// pos in [0, size).  Uses a grid-stride loop so any launch geometry covers
// all elements.  `limit` is accepted to mirror the host wrapper's signature
// but is not used in the computation.
template <typename T>
__global__ void Range(const int size, const float start, const float limit, const float delta, const T *input,
                      T *output) {
  for (int pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
    output[pos] = input[pos] * delta + start;
  }
}
// Host-side launcher: runs the Range kernel over `size` elements on the given
// CUDA stream.  Block/grid dimensions come from the project-wide GET_BLOCKS /
// GET_THREADS helpers; no dynamic shared memory is requested.
template <typename T>
void CalRange(const int size, const float start, const float limit, const float delta, const T *input, T *output,
              cudaStream_t cuda_stream) {
  Range<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, start, limit, delta, input, output);
  return;
}

// Explicit instantiations for the element types registered in
// range_gpu_kernel.cc (float32 and int32).
template void CalRange<float>(const int size, const float start, const float limit, const float delta,
                              const float *input, float *output, cudaStream_t cuda_stream);
template void CalRange<int>(const int size, const float start, const float limit, const float delta,
                            const int *input, int *output, cudaStream_t cuda_stream);
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/range_impl.cuh
0 → 100644
浏览文件 @
779e27b9
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_RANGE_IMPL_CUH_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_RANGE_IMPL_CUH_
// Launches the Range GPU computation: output[i] = input[i] * delta + start
// for i in [0, size), asynchronously on `cuda_stream`.  Defined (and
// explicitly instantiated for float and int) in range_impl.cu.
template <typename T>
void CalRange(const int size, const float start, const float limit, const float delta, const T *input, T *output,
              cudaStream_t cuda_stream);
#endif // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_RANGE_IMPL_CUH
mindspore/nn/probability/distribution/categorical.py
浏览文件 @
779e27b9
...
...
@@ -13,8 +13,8 @@
# limitations under the License.
# ============================================================================
"""Categorical Distribution"""
import
numpy
as
np
from
mindspore.ops
import
operations
as
P
import
mindspore.nn
as
nn
from
mindspore.common
import
dtype
as
mstype
from
.distribution
import
Distribution
from
._utils.utils
import
logits_to_probs
,
probs_to_logits
,
check_type
,
check_tensor_type
,
cast_to_tensor
,
raise_probs_logits_error
...
...
@@ -119,17 +119,19 @@ class Categorical(Distribution):
"""
return
self
.
_probs
def
_sample
(
self
,
sample_shape
=
(
1
,
)):
def
_sample
(
self
,
sample_shape
=
()):
"""
Sampling.
Args:
sample_shape (tuple): shape of the sample. Default: (
1,
).
sample_shape (tuple): shape of the sample. Default: ().
Returns:
Tensor, shape is shape(probs)[:-1] + sample_shape
"""
self
.
checktuple
(
sample_shape
,
'shape'
)
if
sample_shape
==
():
sample_shape
=
(
1
,)
num_sample
=
1
for
i
in
sample_shape
:
num_sample
*=
i
...
...
@@ -184,16 +186,15 @@ class Categorical(Distribution):
if
value
is
not
None
:
check_tensor_type
(
"value"
,
value
,
[
mstype
.
float32
,
bool
,
mstype
.
int32
])
value
=
self
.
expandim
(
self
.
cast
(
value
,
mstype
.
float32
),
-
1
)
index
=
cast_to_tensor
(
np
.
arange
(
self
.
shape
(
value
)[
0
]).
astype
(
np
.
float32
))
index
=
self
.
expandim
(
index
,
-
1
)
logits
=
self
.
_logits
if
self
.
_logits
.
dim
()
==
1
else
self
.
expandim
(
self
.
_logits
,
0
)
broad_shape
=
self
.
_broad_cast_shape
(
value
,
logits
)
broad_shape
=
self
.
_broad_cast_shape
(
value
,
self
.
_logits
)
broad
=
P
.
BroadcastTo
(
broad_shape
)
value
=
broad
(
value
)[...,
:
1
]
index
=
broad
(
index
)[...,
:
1
]
logits_pmf
=
self
.
reshape
(
broad
(
self
.
_logits
),
(
-
1
,
broad_shape
[
-
1
]))
value
=
self
.
reshape
(
broad
(
value
)[...,
:
1
],
(
-
1
,
1
))
index
=
nn
.
Range
(
0.
,
self
.
shape
(
value
)[
0
],
1
)()
index
=
self
.
reshape
(
index
,
(
-
1
,
1
))
value
=
self
.
concat
((
index
,
value
))
value
=
self
.
cast
(
value
,
mstype
.
int32
)
return
self
.
gather
(
logits
,
value
)
return
self
.
reshape
(
self
.
gather
(
logits_pmf
,
value
),
broad_shape
[:
-
1
]
)
return
None
def
_entropy
(
self
):
...
...
@@ -211,7 +212,7 @@ class Categorical(Distribution):
Enumerate categories.
"""
num_events
=
self
.
_num_events
values
=
cast_to_tensor
(
np
.
arange
(
num_events
).
astype
(
np
.
int32
),
mstype
.
float32
)
values
=
nn
.
Range
(
0.
,
num_events
,
1
)(
)
values
=
self
.
reshape
(
values
,
(
num_events
,
1
))
if
expand
:
values
=
P
.
BroadcastTo
((
num_events
,
self
.
_batch_shape
))(
values
)
...
...
mindspore/ops/operations/random_ops.py
浏览文件 @
779e27b9
...
...
@@ -450,8 +450,8 @@ class Multinomial(PrimitiveWithInfer):
Examples:
>>> input = Tensor([0., 9., 4., 0.], mstype.float32)
>>> multinomial = P.Multinomial(seed=10)
>>> output = multinomial(input, 2
, True
)
>>> multinomial = P.Multinomial(
replacement=True,
seed=10)
>>> output = multinomial(input, 2)
"""
@
prim_attr_register
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录