机器未来 / Paddle (forked from PaddlePaddle / Paddle)

Unverified commit b465bb0d
Authored by Kaipeng Deng on Apr 09, 2020; committed by GitHub on Apr 09, 2020.
fix adaptive_pool2d/pool3d error message. test=develop (#23658)
Parent: 97b09687
Showing 8 changed files with 207 additions and 79 deletions (+207, -79).
Changed files:
  paddle/fluid/operators/grid_sampler_cudnn_op.cu.cc   +16  -9
  paddle/fluid/operators/grid_sampler_op.cc            +39  -15
  paddle/fluid/operators/kldiv_loss_op.cc              +34  -17
  paddle/fluid/operators/spectral_norm_op.cc           +56  -23
  paddle/fluid/operators/temporal_shift_op.cc          +31  -12
  paddle/fluid/operators/temporal_shift_op.cu          +3   -2
  python/paddle/fluid/layers/loss.py                   +5   -1
  python/paddle/fluid/layers/nn.py                     +23  -0
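Across the C++ operators below, the pattern is consistent: bare PADDLE_ENFORCE(cond, "msg") checks become PADDLE_ENFORCE_EQ/GE/LE/GT/LT checks against an explicit expected value, with a typed platform::errors::NotFound or InvalidArgument message that also reports the value actually received. As a rough, framework-free sketch of why that helps (a hypothetical EnforceEq helper, not Paddle's real macro):

// Standalone sketch only: mimics the shape of PADDLE_ENFORCE_EQ plus
// platform::errors::InvalidArgument, without depending on Paddle.
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <string>

template <typename T>
void EnforceEq(const T& actual, const T& expected, const std::string& what) {
  if (actual == expected) return;  // check passed, nothing to report
  std::ostringstream msg;
  msg << what << ", but received " << actual << " (expected " << expected << ")";
  throw std::invalid_argument(msg.str());
}

int main() {
  int x_rank = 3;  // pretend the user passed a 3-D tensor
  try {
    // The old style would only say "should be 4-D Tensor."; this style also
    // surfaces the rank that was actually seen, as the new messages do.
    EnforceEq(x_rank, 4, "Input(X) of GridSampleOp should be 4-D Tensor");
  } catch (const std::invalid_argument& e) {
    std::cerr << e.what() << "\n";
  }
  return 0;
}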
paddle/fluid/operators/grid_sampler_cudnn_op.cu.cc

@@ -30,8 +30,9 @@ template <typename T>
 class CUDNNGridSampleOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
-                   "It must use CUDAPlace");
+    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
+                      platform::errors::InvalidArgument(
+                          "It must use CUDAPlace when using CUDA Kernel"));
     auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
     auto handle = dev_ctx.cudnn_handle();
     auto* input = ctx.Input<Tensor>("X");
@@ -59,10 +60,13 @@ class CUDNNGridSampleOpKernel : public framework::OpKernel<T> {
     cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor<T>(
         DataLayout::kNCHW, framework::vectorize<int>(output->dims()));

-    PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSpatialTfSamplerForward(
-        handle, cudnn_st_desc, CudnnDataType<T>::kOne(), cudnn_input_desc,
-        input_data, grid_data, CudnnDataType<T>::kZero(), cudnn_output_desc,
-        output_data));
+    PADDLE_ENFORCE_CUDA_SUCCESS(
+        platform::dynload::cudnnSpatialTfSamplerForward(
+            handle, cudnn_st_desc, CudnnDataType<T>::kOne(), cudnn_input_desc,
+            input_data, grid_data, CudnnDataType<T>::kZero(),
+            cudnn_output_desc, output_data),
+        platform::errors::InvalidArgument(
+            "cudnnSpatialTfSamplerForward in Op(grid_sampler) failed"));
   }
 };
@@ -70,8 +74,9 @@ template <typename T>
 class CUDNNGridSampleGradOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
-                   "It must use CUDAPlace");
+    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
+                      platform::errors::InvalidArgument(
+                          "It must use CUDAPlace when using CUDA Kernel"));
     auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
     auto handle = dev_ctx.cudnn_handle();
     auto* input = ctx.Input<Tensor>("X");
@@ -117,7 +122,9 @@ class CUDNNGridSampleGradOpKernel : public framework::OpKernel<T> {
             input_data, CudnnDataType<T>::kZero(), cudnn_input_grad_desc,
             input_grad_data, CudnnDataType<T>::kOne(), cudnn_output_grad_desc,
             output_grad_data, grid_data, CudnnDataType<T>::kZero(),
-            grid_grad_data));
+            grid_grad_data),
+        platform::errors::InvalidArgument(
+            "cudnnSpatialTfSamplerBackward in Op(grid_sampler) failed"));
   }
 };
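The cuDNN calls in grid_sampler_cudnn_op.cu.cc were already wrapped in PADDLE_ENFORCE_CUDA_SUCCESS; what the patch adds is a message naming the exact call and op that failed. A minimal, CUDA-free sketch of the same idea (hypothetical CheckStatus helper, plain int standing in for a cudnnStatus_t):

#include <iostream>
#include <stdexcept>
#include <string>

// Treat 0 as success; on failure, attach the name of the failing call so the
// log points at the exact cuDNN invocation rather than a bare status code.
void CheckStatus(int status, const std::string& call_site) {
  if (status == 0) return;
  throw std::runtime_error(call_site + " failed with status " +
                           std::to_string(status));
}

int main() {
  // Stand-in for platform::dynload::cudnnSpatialTfSamplerForward(...).
  int status = 3;
  try {
    CheckStatus(status, "cudnnSpatialTfSamplerForward in Op(grid_sampler)");
  } catch (const std::runtime_error& e) {
    std::cerr << e.what() << "\n";
  }
  return 0;
}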
paddle/fluid/operators/grid_sampler_op.cc

@@ -28,31 +28,55 @@ class GridSampleOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of GridSampleOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Grid"),
-                   "Input(Grid) of GridSampleOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Output"),
-                   "Output(Output) of GridSampleOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                      platform::errors::NotFound(
+                          "Input(X) of GridSampleOp should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasInput("Grid"), true,
+                      platform::errors::NotFound(
+                          "Input(Grid) of GridSampleOp should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Output"), true,
+                      platform::errors::NotFound(
+                          "Output(Output) of GridSampleOp should not be null."));

     auto x_dims = ctx->GetInputDim("X");
     auto grid_dims = ctx->GetInputDim("Grid");
-    PADDLE_ENFORCE(x_dims.size() == 4,
-                   "Input(X) of GridSampleOp should be 4-D Tensor.");
-    PADDLE_ENFORCE(grid_dims.size() == 4,
-                   "Input(Grid) of GridSampleOp should be 4-D Tensor.");
+    PADDLE_ENFORCE_EQ(x_dims.size(), 4,
+                      platform::errors::InvalidArgument(
+                          "Input(X) of GridSampleOp should be 4-D Tensor, but "
+                          "received X dimension size(%d)",
+                          x_dims.size()));
+    PADDLE_ENFORCE_EQ(grid_dims.size(), 4,
+                      platform::errors::InvalidArgument(
+                          "Input(Grid) of GridSampleOp should be 4-D Tensor, "
+                          "but received X dimension size(%d)",
+                          grid_dims.size()));
     if (ctx->IsRuntime() || grid_dims[3] > 0) {
-      PADDLE_ENFORCE(grid_dims[3] == 2, "Input(Grid) dims[3] should be 2.");
+      PADDLE_ENFORCE_EQ(
+          grid_dims[3], 2,
+          platform::errors::InvalidArgument(
+              "Input(Grid) dimension[3] should be 2, but received %d",
+              grid_dims[3]));
     }
     if (ctx->IsRuntime()) {
-      PADDLE_ENFORCE_EQ(grid_dims[0], x_dims[0],
-                        "Input(X) and Input(Grid) dims[0] should be equal.");
+      PADDLE_ENFORCE_EQ(
+          grid_dims[0], x_dims[0],
+          platform::errors::InvalidArgument(
+              "Input(X) and Input(Grid) dimension[0] should be equal, but "
+              "received X dimension[0](%d) != Grid dimension[0](%d)",
+              x_dims[0], grid_dims[0]));
       PADDLE_ENFORCE_EQ(
           grid_dims[1], x_dims[2],
-          "Input(X) dims[2] and Input(Grid) dims[1] should be equal.");
+          platform::errors::InvalidArgument(
+              "Input(X) dims[2] and Input(Grid) dims[1] should be equal, but "
+              "received X dimension[2](%d) != Grid dimension[1](%d)",
+              x_dims[2], grid_dims[1]));
       PADDLE_ENFORCE_EQ(
           grid_dims[2], x_dims[3],
-          "Input(X) dims[3] and Input(Grid) dims[2] should be equal.");
+          platform::errors::InvalidArgument(
+              "Input(X) dims[3] and Input(Grid) dims[2] should be equal, but "
+              "received X dimension[3](%d) != Grid dimension[2](%d)",
+              x_dims[3], grid_dims[2]));
     }

     ctx->SetOutputDim("Output", x_dims);
paddle/fluid/operators/kldiv_loss_op.cc

@@ -23,30 +23,42 @@ class KLDivLossOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of KLDivLossOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Target"),
-                   "Input(Target) of KLDivLossOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Loss"),
-                   "Output(Loss) of KLDivLossOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                      platform::errors::NotFound(
+                          "Input(X) of KLDivLossOp should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasInput("Target"), true,
+                      platform::errors::NotFound(
+                          "Input(Target) of KLDivLossOp should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Loss"), true,
+                      platform::errors::NotFound(
+                          "Output(Loss) of KLDivLossOp should not be null."));

     auto dim_x = ctx->GetInputDim("X");
     auto dim_target = ctx->GetInputDim("Target");
     PADDLE_ENFORCE_EQ(dim_x.size(), dim_target.size(),
-                      "Input(X) rank and Input(Target) rank should be same.");
+                      platform::errors::InvalidArgument(
+                          "Input(X) rank and Input(Target) rank should be "
+                          "same, but received X rank(%d) != Target rank(%d)",
+                          dim_x.size(), dim_target.size()));
     for (int i = 0; i < dim_x.size(); i++) {
       if (ctx->IsRuntime() || (dim_x[i] > 0 && dim_target[i] > 0)) {
-        PADDLE_ENFORCE_EQ(dim_x[i], dim_target[i],
-                          "Input(X) and Input(Target) should in same shape.");
+        PADDLE_ENFORCE_EQ(
+            dim_x[i], dim_target[i],
+            platform::errors::InvalidArgument(
+                "Input(X) and Input(Target) should in same shape. but received "
+                "X dimension[%d](%d) != Target dimension[%d](%d)",
+                i, dim_x[i], i, dim_target[i]));
       }
     }

     auto reduction = ctx->Attrs().Get<std::string>("reduction");

-    PADDLE_ENFORCE(
-        "mean" == reduction || "sum" == reduction ||
-            "batchmean" == reduction || "none" == reduction,
-        "Attr(reduction) can only be 'none'|'batchmean'|'sum'|'mean'.");
+    auto reduction_valid = "mean" == reduction || "sum" == reduction ||
+                           "batchmean" == reduction || "none" == reduction;
+    PADDLE_ENFORCE_EQ(
+        reduction_valid, true,
+        platform::errors::InvalidArgument(
+            "Attr(reduction) can only be 'none'|'batchmean'|'sum'|'mean'."));

     if ("none" == reduction) {
       ctx->SetOutputDim("Loss", dim_x);
@@ -123,10 +135,15 @@ class KLDivLossOpGrad : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput("Target"), "Input(Target) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Loss")),
-                   "Input(Loss@GRAD) should not be null");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                      platform::errors::NotFound("Input(X) should not be null"));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("Target"), true,
+        platform::errors::NotFound("Input(Target) should not be null"));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput(framework::GradVarName("Loss")), true,
+        platform::errors::NotFound("Input(Loss@GRAD) should not be null"));
     auto dim_x = ctx->GetInputDim("X");
     if (ctx->HasOutput(framework::GradVarName("X"))) {
       ctx->SetOutputDim(framework::GradVarName("X"), dim_x);
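A small readability move in KLDivLossOp: the compound reduction check is first hoisted into a named reduction_valid boolean and only then enforced as an equality against true. A hedged standalone sketch of the same shape (plain C++ instead of the PADDLE_ENFORCE_EQ macro):

#include <iostream>
#include <string>

int main() {
  std::string reduction = "total";  // an invalid value, for illustration
  // Name the compound condition first, then check it once; this mirrors the
  // reduction_valid refactor in kldiv_loss_op.cc.
  bool reduction_valid = reduction == "mean" || reduction == "sum" ||
                         reduction == "batchmean" || reduction == "none";
  if (!reduction_valid) {
    std::cerr << "Attr(reduction) can only be 'none'|'batchmean'|'sum'|'mean', "
              << "but received '" << reduction << "'\n";
    return 1;
  }
  return 0;
}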
paddle/fluid/operators/spectral_norm_op.cc

@@ -26,26 +26,45 @@ class SpectralNormOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Weight"),
-                   "Input(Weight) of SpectralNormOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("U"),
-                   "Input(U) of SpectralNormOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("V"),
-                   "Input(V) of SpectralNormOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SpectralNormOp should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("Weight"), true,
+        platform::errors::NotFound(
+            "Input(Weight) of SpectralNormOp should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasInput("U"), true,
+                      platform::errors::NotFound(
+                          "Input(U) of SpectralNormOp should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasInput("V"), true,
+                      platform::errors::NotFound(
+                          "Input(V) of SpectralNormOp should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
+                      platform::errors::NotFound(
+                          "Output(Out) of SpectralNormOp should not be null."));

     auto dim_weight = ctx->GetInputDim("Weight");
     auto rank_weight = dim_weight.size();
-    PADDLE_ENFORCE(rank_weight >= 2 && rank_weight <= 5,
-                   "The rank of Input(Weights) can only be 2, 3,"
-                   "4, 5 for fc, conv1d, conv2d, conv3d layers.");
+    PADDLE_ENFORCE_GE(rank_weight, 2,
+                      platform::errors::InvalidArgument(
+                          "The rank of Input(Weights) should be greater equal "
+                          "than 2, but received Weight rank(%d)",
+                          rank_weight));
+    PADDLE_ENFORCE_LE(rank_weight, 5,
+                      platform::errors::InvalidArgument(
+                          "The rank of Input(Weights) should be less equal "
+                          "than 5, but received Weight rank(%d)",
+                          rank_weight));

     int dim = ctx->Attrs().Get<int>("dim");
     int power_iters = ctx->Attrs().Get<int>("power_iters");
-    PADDLE_ENFORCE(dim == 0 || dim == 1, "Attr(dim) can only be 0 or 1");
-    PADDLE_ENFORCE(power_iters >= 0,
-                   "Attr(power_iters) should be larger equal then 0");
+    auto dim_valid = dim == 0 || dim == 1;
+    PADDLE_ENFORCE_EQ(
+        dim_valid, true,
+        platform::errors::InvalidArgument(
+            "Attr(dim) can only be 0 or 1, but received %d", dim));
+    PADDLE_ENFORCE_GE(
+        power_iters, 0,
+        platform::errors::InvalidArgument(
+            "Attr(power_iters) should be greater equal then 0, but received %d",
+            power_iters));

     int h = dim_weight[dim];
     int w = 1;
@@ -59,15 +78,22 @@ class SpectralNormOp : public framework::OperatorWithKernel {
     if (ctx->IsRuntime() || (dim_u[0] > 0 && h > 0)) {
       PADDLE_ENFORCE_EQ(dim_u[0], h,
-                        "Input(U) dims[0] should be equal to "
-                        "Input(Weight) dims[Attr(dim)]");
+                        platform::errors::InvalidArgument(
+                            "Input(U) dimension[0] should be equal to "
+                            "Input(Weight) dimension[Attr(dim)], but received "
+                            "U dimension[0](%d) != Weight dimension[%d](%d)",
+                            dim_u[0], dim, h));
     }
     if (ctx->IsRuntime() || (dim_v[0] > 0 && w > 0)) {
       PADDLE_ENFORCE_EQ(
           dim_v[0], w,
-          "Input(V) dims[0] should be equal to "
-          "the product of Input(Weight) dims except dims[Attr(dim)]");
+          platform::errors::InvalidArgument(
+              "Input(V) dimension[0] should be equal to the product of "
+              "Input(Weight) dimension except dimension[Attr(dim)], but "
+              "received V dimension[0](%d) != product of Input(Weight) "
+              "dimension(%d)",
+              dim_v[0], w));
     }

     ctx->SetOutputDim("Out", dim_weight);
@@ -194,11 +220,18 @@ class SpectralNormOpGrad : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Weight"), "Input(Weight) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput("U"), "Input(U) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput("V"), "Input(V) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("Weight"), true,
+        platform::errors::NotFound("Input(Weight) should not be null"));
+    PADDLE_ENFORCE_EQ(ctx->HasInput("U"), true,
+                      platform::errors::NotFound("Input(U) should not be null"));
+    PADDLE_ENFORCE_EQ(ctx->HasInput("V"), true,
+                      platform::errors::NotFound("Input(V) should not be null"));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput(framework::GradVarName("Out")), true,
+        platform::errors::NotFound("Input(Out@GRAD) should not be null"));

     auto dim_x = ctx->GetInputDim("Weight");
     if (ctx->HasOutput(framework::GradVarName("Weight"))) {
       ctx->SetOutputDim(framework::GradVarName("Weight"), dim_x);
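SpectralNormOp's weight-rank check is also split: instead of a single rank_weight >= 2 && rank_weight <= 5 condition, the new code enforces the lower and upper bounds separately, so a failure reports which bound was violated and what rank arrived. A rough standalone sketch (hypothetical EnforceGe/EnforceLe helpers in place of PADDLE_ENFORCE_GE/LE):

#include <iostream>
#include <stdexcept>
#include <string>

// Each bound gets its own check, so the message can name the violated bound.
void EnforceGe(int value, int lower, const std::string& what) {
  if (value < lower)
    throw std::invalid_argument(what + " should be >= " + std::to_string(lower) +
                                ", but received " + std::to_string(value));
}

void EnforceLe(int value, int upper, const std::string& what) {
  if (value > upper)
    throw std::invalid_argument(what + " should be <= " + std::to_string(upper) +
                                ", but received " + std::to_string(value));
}

int main() {
  int rank_weight = 6;  // e.g. a 6-D weight, outside the supported 2..5 range
  try {
    EnforceGe(rank_weight, 2, "The rank of Input(Weights)");
    EnforceLe(rank_weight, 5, "The rank of Input(Weights)");
  } catch (const std::invalid_argument& e) {
    std::cerr << e.what() << "\n";  // reports the violated bound and the rank
    return 1;
  }
  return 0;
}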
paddle/fluid/operators/temporal_shift_op.cc

@@ -27,26 +27,45 @@ class TemporalShiftOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of TemporalShiftOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) of TemporalShiftOp should not be null.");
+                      platform::errors::NotFound(
+                          "Input(X) of TemporalShiftOp should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
+                      platform::errors::NotFound(
+                          "Output(Out) of TemporalShiftOp should not be null."));

     auto dim_x = ctx->GetInputDim("X");
     PADDLE_ENFORCE_EQ(dim_x.size(), 4,
-                      "Input(X) rank should be 4 in shape of [N*T, C, H, W].");
+                      platform::errors::InvalidArgument(
+                          "Input(X) rank should be 4 in shape of [N*T, C, H, "
+                          "W], but received X rank(%d)",
+                          dim_x.size()));

     int seg_num = ctx->Attrs().Get<int>("seg_num");
     float shift_ratio = ctx->Attrs().Get<float>("shift_ratio");
-    PADDLE_ENFORCE_GT(seg_num, 0, "Attr(seg_num) should be greater than 0.");
-    PADDLE_ENFORCE_GT(shift_ratio, 0.,
-                      "Attr(shift_ratio) should be greater than 0");
-    PADDLE_ENFORCE_LT(shift_ratio, 0.5,
-                      "Attr(shift_ratio) should be less than 0.5");
+    PADDLE_ENFORCE_GT(
+        seg_num, 0,
+        platform::errors::InvalidArgument(
+            "Attr(seg_num) should be greater than 0, but received %d",
+            seg_num));
+    PADDLE_ENFORCE_GT(
+        shift_ratio, 0.,
+        platform::errors::InvalidArgument(
+            "Attr(shift_ratio) should be greater than 0, but received %d",
+            shift_ratio));
+    PADDLE_ENFORCE_LT(
+        shift_ratio, 0.5,
+        platform::errors::InvalidArgument(
+            "Attr(shift_ratio) should be less than 0.5, but received %d",
+            shift_ratio));

     if (ctx->IsRuntime()) {
-      PADDLE_ENFORCE_EQ(
-          dim_x[0] % seg_num, 0,
-          "Input(X) dims[0] should be divided exactly by Attr(seg_num).");
+      PADDLE_ENFORCE_EQ(
+          dim_x[0] % seg_num, 0,
+          platform::errors::InvalidArgument(
+              "Input(X) dimension[0] should be divided exactly "
+              "by Attr(seg_num), but received X dimension[0](%d) "
+              "mod seg_num(%d) != 0",
+              dim_x[0], seg_num));
     }

     ctx->SetOutputDim("Out", dim_x);
paddle/fluid/operators/temporal_shift_op.cu

@@ -90,8 +90,9 @@ template <typename T>
 class TemporalShiftOpCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
-                   "This kernel only runs on GPU device.");
+    PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
+                      platform::errors::InvalidArgument(
+                          "This kernel only runs on GPU device."));
     auto* input = ctx.Input<Tensor>("X");
     auto* output = ctx.Output<Tensor>("Out");
     int t = ctx.Attr<int>("seg_num");
python/paddle/fluid/layers/loss.py

@@ -21,7 +21,7 @@ from .layer_function_generator import templatedoc
 from ..layer_helper import LayerHelper
 from ..framework import Variable, in_dygraph_mode
 from .. import core
-from ..data_feeder import check_variable_and_dtype
+from ..data_feeder import check_variable_and_dtype, check_type
 from ..param_attr import ParamAttr
 from ..initializer import NumpyArrayInitializer, Constant
 from .. import core
@@ -1580,6 +1580,10 @@ def kldiv_loss(x, target, reduction='mean', name=None):
             loss = fluid.layers.kldiv_loss(x=x, target=target, reduction='batchmean')
     """
     helper = LayerHelper('kldiv_loss', **locals())
+    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'kldiv_loss')
+    check_variable_and_dtype(target, 'target', ['float32', 'float64'],
+                             'kldiv_loss')
+    check_type(reduction, 'reduction', str, 'kldiv_loss')
     loss = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='kldiv_loss',
python/paddle/fluid/layers/nn.py

@@ -2361,6 +2361,12 @@ def adaptive_pool2d(input,
               pool_size=[3, 3],
               pool_type='max')
     """
+    check_variable_and_dtype(
+        input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
+        'adaptive_pool2d')
+    check_type(pool_type, 'pool_type', str, 'adaptive_pool2d')
+    check_type(pool_size, 'pool_size', (int, list, tuple), 'adaptive_pool2d')
+    check_type(require_index, 'require_index', bool, 'adaptive_pool2d')
     if pool_type not in ["max", "avg"]:
         raise ValueError(
             "Unknown pool_type: '%s'. It can only be 'max' or 'avg'.",
@@ -2516,6 +2522,12 @@ def adaptive_pool3d(input,
               pool_size=[3, 3, 3],
               pool_type='max')
     """
+    check_variable_and_dtype(
+        input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
+        'adaptive_pool3d')
+    check_type(pool_type, 'pool_type', str, 'adaptive_pool3d')
+    check_type(pool_size, 'pool_size', (int, list, tuple), 'adaptive_pool3d')
+    check_type(require_index, 'require_index', bool, 'adaptive_pool3d')
     if pool_type not in ["max", "avg"]:
         raise ValueError(
             "Unknown pool_type: '%s'. It can only be 'max' or 'avg'.",
@@ -3568,6 +3580,11 @@ def spectral_norm(weight, dim=0, power_iters=1, eps=1e-12, name=None):
             x = fluid.layers.spectral_norm(weight=weight, dim=1, power_iters=2)
     """
     helper = LayerHelper('spectral_norm', **locals())
+    check_variable_and_dtype(weight, 'weight', ['float32', 'float64'],
+                             'spectral_norm')
+    check_type(dim, 'dim', int, 'spectral_norm')
+    check_type(power_iters, 'power_iters', int, 'spectral_norm')
+    check_type(eps, 'eps', float, 'spectral_norm')
     dtype = weight.dtype

     # create intput and parameters
@@ -12246,6 +12263,9 @@ def grid_sampler(x, grid, name=None):
     """
     helper = LayerHelper("grid_sampler", **locals())
+    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'grid_sampler')
+    check_variable_and_dtype(grid, 'grid', ['float32', 'float64'],
+                             'grid_sampler')
     if not isinstance(x, Variable):
         return ValueError("The x should be a Variable")
@@ -12601,6 +12621,9 @@ def temporal_shift(x, seg_num, shift_ratio=0.25, name=None):
             out = fluid.layers.temporal_shift(x=input, seg_num=2, shift_ratio=0.2)
     """
     helper = LayerHelper("temporal_shift", **locals())
+    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'temporal_shift')
+    check_type(seg_num, 'seg_num', int, 'temporal_shift')
+    check_type(shift_ratio, 'shift_ratio', float, 'temporal_shift')
     out = helper.create_variable_for_type_inference(dtype=x.dtype)