Commit 3e7ce583

Authored May 28, 2018 by fengjiayi
Parent 85a41df3

Showing 4 changed files with 210 additions and 166 deletions (+210 −166)
paddle/fluid/operators/random_crop_op.cc    +32 −16
paddle/fluid/operators/random_crop_op.h    +88 −80
python/paddle/fluid/layers/nn.py    +56 −70
python/paddle/fluid/tests/unittests/test_random_crop_op.py    +34 −0
paddle/fluid/operators/random_crop_op.cc

@@ -12,36 +12,52 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

 #include "paddle/fluid/operators/random_crop_op.h"
+#include <vector>

 namespace paddle {
 namespace operators {

+class RandomCropOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<framework::LoDTensor>("X")->type()),
+        ctx.device_context());
+  }
+};
+
 class RandomCropOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
     AddInput("X", "");
-    AddOutput("Y", "");
+    AddOutput("Out", "");
     AddInput("Seed", "");
     AddOutput("SeedOut", "").AsDispensable();
     AddAttr<std::vector<int>>("shape", "");
     AddComment("");
   }
 };

 class RandomCropOpInferShape : public framework::InferShapeBase {
  public:
-  void operator()(framework::InferShapeContext* context) const override {
-    auto shape = context->Attrs().Get<std::vector<int>>("shape");
-    auto x_dim = context->GetInputDim("X");
-    PADDLE_ENFORCE_EQ(x_dim.size(), static_cast<int64_t>(shape.size()));
-    for (size_t i = 0; i < shape.size(); ++i) {
-      if (shape[i] == -1) {
-        shape[i] = static_cast<int>(x_dim[i]);
-      } else {
-        PADDLE_ENFORCE_GE(x_dim[i], shape[i]);
-      }
-    }
-    context->SetOutputDim("Y", framework::make_ddim(shape));
-    context->SetOutputDim("SeedOut", framework::make_ddim({1}));
+  void operator()(framework::InferShapeContext* ctx) const override {
+    auto seed_dim = ctx->GetInputDim("Seed");
+    PADDLE_ENFORCE(seed_dim.size() == 1 && seed_dim[0] == 1);
+    auto shape = ctx->Attrs().Get<std::vector<int>>("shape");
+    auto x_dim = ctx->GetInputDim("X");
+    PADDLE_ENFORCE_GT(x_dim.size(), static_cast<int64_t>(shape.size()));
+    auto out_dim = framework::vectorize2int(x_dim);
+    for (size_t i = 1; i <= shape.size(); ++i) {
+      size_t x_i = x_dim.size() - i;
+      size_t shape_i = shape.size() - i;
+      PADDLE_ENFORCE_GE(x_dim[x_i], shape[shape_i]);
+      out_dim[x_i] = shape[shape_i];
+    }
+    ctx->SetOutputDim("Out", framework::make_ddim(out_dim));
+    ctx->SetOutputDim("SeedOut", framework::make_ddim({1}));
   }
 };
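Note: the new InferShape pins only the trailing output dimensions to the
`shape` attribute and passes the leading (batch-size) dimensions of X
through unchanged. A minimal Python sketch of that rule, mirroring
RandomCropOpInferShape (the helper name is illustrative, not a Paddle API):

    def infer_random_crop_out_dim(x_dim, shape):
        # PADDLE_ENFORCE_GT: `shape` must be strictly shorter than x_dim,
        # so at least one leading dim is left as a batch-size dim.
        assert len(x_dim) > len(shape)
        out_dim = list(x_dim)
        # Walk the trailing dims; each cropped extent must fit inside X.
        for i in range(1, len(shape) + 1):
            assert x_dim[-i] >= shape[-i]
            out_dim[-i] = shape[-i]
        return out_dim

    print(infer_random_crop_out_dim([1, 10, 15], [5, 5]))  # [1, 5, 5]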
@@ -50,8 +66,8 @@ class RandomCropOpInferShape : public framework::InferShapeBase {

 namespace ops = paddle::operators;
 namespace f = paddle::framework;
-REGISTER_OPERATOR(random_crop, f::OperatorWithKernel, ops::RandomCropOpMaker,
-                  ops::RandomCropOpInferShape);
+REGISTER_OPERATOR(random_crop, ops::RandomCropOp, ops::RandomCropOpMaker,
+                  ops::RandomCropOpInferShape, f::EmptyGradOpMaker);

 template <typename T>
 using Kernel = ops::RandomCropKernel<paddle::platform::CPUDeviceContext, T>;
paddle/fluid/operators/random_crop_op.h
@@ -14,11 +14,14 @@
 #pragma once

+#include <vector>
+
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/detail/safe_ref.h"
 #include "paddle/fluid/platform/device_context.h"
 #include "paddle/fluid/platform/for_range.h"
-#include "thrust/random.h"
+#ifdef PADDLE_WITH_CUDA
+#include <thrust/random.h>
+#endif

 namespace paddle {
 namespace operators {
@@ -34,6 +37,7 @@ struct Random<platform::CPUDeviceContext> {
   using UniformIntDist = std::uniform_int_distribution<T>;
 };

+#ifdef PADDLE_WITH_CUDA
 template <>
 struct Random<platform::CUDADeviceContext> {
   using Engine = thrust::minstd_rand;
@@ -41,29 +45,31 @@ struct Random<platform::CUDADeviceContext> {
   template <typename T>
   using UniformIntDist = thrust::uniform_int_distribution<T>;
 };
+#endif

 template <typename T>
-HOSTDEVICE inline void RandomCropImpl(const T* x, size_t* x_dim, T* out,
-                                      size_t* out_dim, int i, int rank,
-                                      int64_t prod_x_remain,
-                                      int64_t prod_out_remain, size_t* offset) {
-  size_t x_length = x_dim[rank];
-  size_t out_length = out_dim[rank];
-  int64_t x_stride = prod_x_remain / x_length;
-  int64_t out_stride = prod_out_remain / out_length;
-  size_t offset_i = offset[i];
-  if (x_stride == 1 && out_stride == 1) {
-    // In the final stage, copy from offset.
+HOSTDEVICE inline void StridedMemcpy(const T* x, const size_t* x_dims, T* out,
+                                     const size_t* out_dims, int i, int rank,
+                                     size_t prod_x_remain,
+                                     size_t prod_out_remain,
+                                     const size_t* offsets) {
+  size_t x_dim_i = x_dims[i];
+  size_t out_dim_i = out_dims[i];
+  size_t x_stride = prod_x_remain / x_dim_i;
+  size_t out_stride = prod_out_remain / out_dim_i;
+  size_t offset_i = offsets[i];
+
+  if (i == rank - 1) {
+    PADDLE_ENFORCE(x_stride == 1 && out_stride == 1);
     x += offset_i;
-    for (size_t i = 0; i < out_length; ++i) {
+    for (size_t j = 0; j < out_dim_i; ++j) {
       *out++ = *x++;
     }
   } else {
     x += offset_i * x_stride;
-    for (size_t i = 0; i < out_length; ++i) {
-      RandomCropImpl<T>(x, x_dim, out, out_dim, i + 1, rank, x_stride,
-                        out_stride, offset);
+    for (size_t j = 0; j < x_dim_i; ++j) {
+      StridedMemcpy<T>(x, x_dims, out, out_dims, i + 1, rank, x_stride,
+                       out_stride, offsets);
       x += x_stride;
       out += out_stride;
     }
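What StridedMemcpy computes is an n-dimensional window copy: starting from
per-dimension offsets it recurses over the dimensions and, at the innermost
one, copies a contiguous run. A rough numpy equivalent for intuition (a
sketch, not the Paddle implementation):

    import numpy as np

    def window_copy(x, out_shape, offsets):
        # The recursion over dimensions in StridedMemcpy reduces to taking
        # the box that starts at `offsets` with extent `out_shape`.
        box = tuple(slice(o, o + n) for o, n in zip(offsets, out_shape))
        return x[box].copy()

    x = np.arange(150, dtype=np.float32).reshape(10, 15)
    print(window_copy(x, (5, 5), (2, 3)))  # the 5x5 window at row 2, col 3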
@@ -74,94 +80,96 @@ template <typename DeviceContext, typename T>
 struct RandomCropFunctor {
   const T* x_;
   T* out_;
-  size_t x_dim_[9];
-  size_t out_dim_[9];
-  size_t prod_same_dim_;
-  size_t prod_x_dim_;
-  size_t prod_out_dim_;
-  int num_same_dim_;
+  size_t x_dims_[9];
+  size_t out_dims_[9];
+  int num_batchsize_dims_;
   int rank_;
   int64_t seed_;

-  RandomCropFunctor(const T* x, T* out, int64_t seed)
+  size_t prod_x_dims_;
+  size_t prod_out_dims_;
+  size_t prod_batchsize_dims_;
+  size_t prod_x_ins_dims_;
+  size_t prod_out_ins_dims_;
+
+  RandomCropFunctor(const T* x, T* out, const framework::DDim& x_dims,
+                    const framework::DDim& out_dims, int num_batchsize_dims,
+                    int64_t seed)
       : x_(x),
         out_(out),
-        prod_same_dim_(1),
-        prod_x_dim_(1),
-        prod_out_dim_(1),
+        num_batchsize_dims_(num_batchsize_dims),
+        rank_(x_dims.size()),
         seed_(seed) {
-    std::fill(x_dim_, x_dim_ + sizeof(x_dim_) / sizeof(size_t), 0);
-    std::fill(out_dim_, out_dim_ + sizeof(out_dim_) / sizeof(size_t), 0);
+    PADDLE_ENFORCE_EQ(x_dims.size(), out_dims.size());
+    PADDLE_ENFORCE_GT(rank_, num_batchsize_dims_);
+    prod_batchsize_dims_ = 1;
+    prod_x_ins_dims_ = 1;
+    prod_out_ins_dims_ = 1;
+    for (size_t i = 0; i < rank_; ++i) {
+      size_t x_dim_i = x_dims[i];
+      size_t out_dim_i = out_dims[i];
+      x_dims_[i] = x_dim_i;
+      out_dims_[i] = out_dim_i;
+      if (i < num_batchsize_dims_) {
+        PADDLE_ENFORCE_EQ(x_dim_i, out_dim_i);
+        prod_batchsize_dims_ *= x_dim_i;
+      } else {
+        prod_x_ins_dims_ *= x_dim_i;
+        prod_out_ins_dims_ *= out_dim_i;
+      }
+    }
+    prod_x_dims_ = prod_batchsize_dims_ * prod_x_ins_dims_;
+    prod_out_dims_ = prod_batchsize_dims_ * prod_out_ins_dims_;
   }

-  HOSTDEVICE void operator()(size_t i) {
+  HOSTDEVICE void operator()(size_t ins_idx) {
     typename Random<DeviceContext>::Engine engine(seed_);
-    engine.discard(i * (rank_ - num_same_dim_));
-    int64_t prod_x_unsame = (prod_x_dim_ / prod_same_dim_);
-    int64_t prod_out_unsame = (prod_out_dim_ / prod_same_dim_);
-    const T* x = x_ + i * prod_x_unsame;
-    T* out = out_ + i * prod_out_unsame;
-    size_t offset[9];
-    for (int i = num_same_dim_; i < rank_; ++i) {
+    engine.discard(ins_idx * (rank_ - num_batchsize_dims_));
+    size_t offsets[9];
+    for (int i = num_batchsize_dims_; i < rank_; ++i) {
       typename Random<DeviceContext>::template UniformIntDist<size_t> dist(
-          0, x_dim_[i] - out_dim_[i]);
-      offset[i] = dist(engine);
+          0, x_dims_[i] - out_dims_[i]);
+      offsets[i] = dist(engine);
     }
-    RandomCropImpl<T>(x, x_dim_, out, out_dim_, num_same_dim_, rank_,
-                      prod_x_unsame, prod_out_unsame, offset);
+
+    const T* x = x_ + ins_idx * prod_x_ins_dims_;
+    T* out = out_ + ins_idx * prod_out_ins_dims_;
+    StridedMemcpy<T>(x, x_dims_ + num_batchsize_dims_, out,
+                     out_dims_ + num_batchsize_dims_, 0,
+                     rank_ - num_batchsize_dims_, prod_x_ins_dims_,
+                     prod_out_ins_dims_, offsets);
   }
 };

 template <typename DeviceContext, typename T>
 class RandomCropKernel : public framework::OpKernel<T> {
  public:
-  virtual void Compute(const framework::ExecutionContext& context) const {
-    int64_t seed =
-        *context.Input<framework::LoDTensor>("Seed")->data<int64_t>();
-    auto& x = detail::Ref(context.Input<framework::LoDTensor>("X"));
-    auto& out = detail::Ref(context.Output<framework::LoDTensor>("Out"));
-    RandomCropFunctor<DeviceContext, T> functor{
-        x.data<T>(), out.mutable_data<T>(context.GetPlace()), seed};
-    auto& out_dim = out.dims();
-    auto& x_dim = x.dims();
-    auto rank = x_dim.size();
-    while (rank-- > 0) {
-      functor.x_dim_[rank] = x_dim[rank];
-      functor.out_dim_[rank] = out_dim[rank];
-      functor.prod_x_dim_ *= x_dim[rank];
-      functor.prod_out_dim_ *= out_dim[rank];
-      if (x_dim[rank] != out_dim[rank]) {
-        PADDLE_ENFORCE_EQ(functor.prod_same_dim_, 1);
-        functor.num_same_dim_ = rank;
-      } else {
-        functor.prod_same_dim_ *= out_dim[rank];
-      }
-    }
-    functor.rank_ = x_dim.size();
+  virtual void Compute(const framework::ExecutionContext& ctx) const {
+    int64_t seed =
+        *ctx.Input<framework::LoDTensor>("Seed")->data<int64_t>();
+    auto shape = ctx.Attr<std::vector<int>>("shape");
+    auto& x = detail::Ref(ctx.Input<framework::LoDTensor>("X"));
+    auto& out = detail::Ref(ctx.Output<framework::LoDTensor>("Out"));
+    int num_batchsize_dims = x.dims().size() - shape.size();
+    RandomCropFunctor<DeviceContext, T> functor(
+        x.data<T>(), out.mutable_data<T>(ctx.GetPlace()), x.dims(),
+        out.dims(), num_batchsize_dims, seed);
     platform::ForRange<DeviceContext> for_range(
-        context.template device_context<DeviceContext>(),
-        functor.prod_same_dim_);
+        ctx.template device_context<DeviceContext>(),
+        functor.prod_batchsize_dims_);
     for_range(functor);
     Random<platform::CPUDeviceContext>::Engine engine(seed);
-    engine.discard(functor.prod_same_dim_ *
-                   (functor.rank_ - functor.num_same_dim_));
-    *context.Output<framework::LoDTensor>("SeedOut")->mutable_data<int64_t>(
+    engine.discard(functor.prod_batchsize_dims_ *
+                   (functor.rank_ - functor.num_batchsize_dims_));
+    *ctx.Output<framework::LoDTensor>("SeedOut")->mutable_data<int64_t>(
         platform::CPUPlace()) = engine();
   }
 };

 // TODO(fengjiayi): Backward of random crop op

 }  // namespace operators
 }  // namespace paddle
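The offset-drawing scheme above is deterministic per instance: every call to
operator() re-seeds the engine and discards the draws that belong to earlier
instances, so the result does not depend on the order in which ForRange
schedules instances, and Compute advances a CPU engine past all draws to
produce the chained seed written to SeedOut. A small Python sketch of the
idea (the standard-library RNG stands in for minstd_rand; illustrative only,
not bit-compatible with the op):

    import random

    def crop_offsets(seed, ins_idx, x_dims, out_dims, num_batchsize_dims):
        rank = len(x_dims)
        num_crop_dims = rank - num_batchsize_dims
        eng = random.Random(seed)
        # Stand-in for engine.discard(ins_idx * num_crop_dims): skip the
        # draws belonging to instances 0 .. ins_idx - 1.
        for _ in range(ins_idx * num_crop_dims):
            eng.random()
        return [eng.randint(0, x_dims[d] - out_dims[d])
                for d in range(num_batchsize_dims, rank)]

    # Reproducible per (seed, instance index), whatever the schedule:
    print(crop_offsets(10, 0, [4, 10, 15], [4, 5, 5], 1))
    print(crop_offsets(10, 1, [4, 10, 15], [4, 5, 5], 1))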
python/paddle/fluid/layers/nn.py
@@ -24,64 +24,19 @@ from tensor import concat
 import utils

 __all__ = [
-    'fc',
-    'embedding',
-    'dynamic_lstm',
-    'dynamic_lstmp',
-    'dynamic_gru',
-    'gru_unit',
-    'linear_chain_crf',
-    'crf_decoding',
-    'cos_sim',
-    'cross_entropy',
-    'square_error_cost',
-    'chunk_eval',
-    'sequence_conv',
-    'conv2d',
-    'sequence_pool',
-    'sequence_softmax',
-    'softmax',
-    'pool2d',
-    'batch_norm',
-    'beam_search_decode',
-    'conv2d_transpose',
-    'sequence_expand',
-    'lstm_unit',
-    'reduce_sum',
-    'reduce_mean',
-    'reduce_max',
-    'reduce_min',
-    'reduce_prod',
-    'sequence_first_step',
-    'sequence_last_step',
-    'dropout',
-    'split',
-    'ctc_greedy_decoder',
-    'edit_distance',
-    'l2_normalize',
-    'matmul',
-    'topk',
-    'warpctc',
-    'sequence_reshape',
-    'transpose',
-    'im2sequence',
-    'nce',
-    'beam_search',
-    'row_conv',
-    'multiplex',
-    'layer_norm',
-    'softmax_with_cross_entropy',
-    'smooth_l1',
-    'one_hot',
-    'autoincreased_step_counter',
-    'reshape',
-    'lod_reset',
-    'lrn',
-    'pad',
-    'label_smooth',
-    'roi_pool',
-    'dice_loss',
-    'bilinear_interp',
+    'fc', 'embedding', 'dynamic_lstm', 'dynamic_lstmp', 'dynamic_gru',
+    'gru_unit', 'linear_chain_crf', 'crf_decoding', 'cos_sim',
+    'cross_entropy', 'square_error_cost', 'chunk_eval', 'sequence_conv',
+    'conv2d', 'sequence_pool', 'sequence_softmax', 'softmax', 'pool2d',
+    'batch_norm', 'beam_search_decode', 'conv2d_transpose',
+    'sequence_expand', 'lstm_unit', 'reduce_sum', 'reduce_mean',
+    'reduce_max', 'reduce_min', 'reduce_prod', 'sequence_first_step',
+    'sequence_last_step', 'dropout', 'split', 'ctc_greedy_decoder',
+    'edit_distance', 'l2_normalize', 'matmul', 'topk', 'warpctc',
+    'sequence_reshape', 'transpose', 'im2sequence', 'nce', 'beam_search',
+    'row_conv', 'multiplex', 'layer_norm', 'softmax_with_cross_entropy',
+    'smooth_l1', 'one_hot', 'autoincreased_step_counter', 'reshape',
+    'lod_reset', 'lrn', 'pad', 'label_smooth', 'roi_pool', 'dice_loss',
+    'bilinear_interp', 'random_crop'
 ]
@@ -154,7 +109,8 @@ def fc(input,
     Examples:
         .. code-block:: python

-          data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
+          data = fluid.layers.data(
+              name="data", shape=[32, 32], dtype="float32")
           fc = fluid.layers.fc(input=data, size=1000, act="tanh")
     """
@@ -349,7 +305,8 @@ def dynamic_lstm(input,
         cell_activation(str): The activation for cell output. Choices = ["sigmoid",
                               "tanh", "relu", "identity"], default "tanh".
         candidate_activation(str): The activation for candidate hidden state.
-                              Choices = ["sigmoid", "tanh", "relu", "identity"],
+                              Choices = ["sigmoid", "tanh",
+                                         "relu", "identity"],
                               default "tanh".
         dtype(str): Data type. Choices = ["float32", "float64"], default "float32".
         name(str|None): A name for this layer(optional). If set None, the layer
@@ -516,10 +473,12 @@ def dynamic_lstmp(input,
         cell_activation(str): The activation for cell output. Choices = ["sigmoid",
                               "tanh", "relu", "identity"], default "tanh".
         candidate_activation(str): The activation for candidate hidden state.
-                              Choices = ["sigmoid", "tanh", "relu", "identity"],
+                              Choices = ["sigmoid", "tanh",
+                                         "relu", "identity"],
                               default "tanh".
         proj_activation(str): The activation for projection output.
-                              Choices = ["sigmoid", "tanh", "relu", "identity"],
+                              Choices = ["sigmoid", "tanh",
+                                         "relu", "identity"],
                               default "tanh".
         dtype(str): Data type. Choices = ["float32", "float64"], default "float32".
         name(str|None): A name for this layer(optional). If set None, the layer
@@ -2171,7 +2130,8 @@ def reduce_mean(input, dim=None, keep_dim=False, name=None):
         fluid.layers.reduce_mean(x)  # [0.4375]
         fluid.layers.reduce_mean(x, dim=0)  # [0.15, 0.25, 0.55, 0.8]
         fluid.layers.reduce_mean(x, dim=-1)  # [0.475, 0.4]
-        fluid.layers.reduce_mean(x, dim=1, keep_dim=True)  # [[0.475], [0.4]]
+        fluid.layers.reduce_mean(
+            x, dim=1, keep_dim=True)  # [[0.475], [0.4]]

         # x is a Tensor variable with shape [2, 2, 2] and elements as below:
         #      [[[1.0, 2.0], [3.0, 4.0]],
@@ -2390,7 +2350,8 @@ def split(input, num_or_sections, dim=-1, name=None):
         x0.shape  # [3, 3, 5]
         x1.shape  # [3, 3, 5]
         x2.shape  # [3, 3, 5]
-        x0, x1, x2 = fluid.layers.split(x, num_or_sections=[2, 3, 4], dim=1)
+        x0, x1, x2 = fluid.layers.split(
+            x, num_or_sections=[2, 3, 4], dim=1)
         x0.shape  # [3, 2, 5]
         x1.shape  # [3, 3, 5]
         x2.shape  # [3, 4, 5]
@@ -3300,7 +3261,8 @@ def softmax_with_cross_entropy(logits, label, soft_label=False):
         data = fluid.layers.data(name='data', shape=[128], dtype='float32')
         label = fluid.layers.data(name='label', shape=[1], dtype='int64')
         fc = fluid.layers.fc(input=data, size=100)
-        out = fluid.layers.softmax_with_cross_entropy(logits=fc, label=label)
+        out = fluid.layers.softmax_with_cross_entropy(
+            logits=fc, label=label)
     """
     helper = LayerHelper('softmax_with_cross_entropy', **locals())
     softmax = helper.create_tmp_variable(dtype=logits.dtype)
@@ -3347,7 +3309,8 @@ def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None):
     .. code-block:: python

         data = fluid.layers.data(name='data', shape=[128], dtype='float32')
-        label = fluid.layers.data(name='label', shape=[100], dtype='float32')
+        label = fluid.layers.data(
+            name='label', shape=[100], dtype='float32')
         fc = fluid.layers.fc(input=data, size=100)
         out = fluid.layers.smooth_l1(x=fc, y=label)
     """
@@ -3669,7 +3632,8 @@ def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None):
     Examples:
         .. code-block:: python

-          data = fluid.layers.data(name="data", shape=[3, 112, 112], dtype="float32")
+          data = fluid.layers.data(
+              name="data", shape=[3, 112, 112], dtype="float32")
           lrn = fluid.layers.lrn(input=data)
     """
     helper = LayerHelper('lrn', **locals())
@@ -3922,10 +3886,10 @@ def bilinear_interp(input, out_h, out_w, name=None):
     Bilinear interpolation is an extension of linear interpolation for
     interpolating functions of two variables (e.g. H-direction and
     W-direction in this layer) on a rectilinear 2D grid.

     For details, please refer to Wikipedia:
     https://en.wikipedia.org/wiki/Bilinear_interpolation

     Args:
         input (Variable): The input tensor of bilinear interpolation,
                           This is a 4-D tensor of the shape
@@ -3938,7 +3902,7 @@ def bilinear_interp(input, out_h, out_w, name=None):
     Returns:
         out (Variable): The output is a 4-D tensor of the shape
                         (num_batches, channls, out_h, out_w).

     Examples:
         .. code-block:: python
@@ -3954,3 +3918,25 @@ def bilinear_interp(input, out_h, out_w, name=None):
         attrs={"out_h": out_h,
                "out_w": out_w})
     return out
+
+
+def random_crop(input, shape, seed=0):
+    helper = LayerHelper("random_crop", **locals())
+    dtype = helper.input_dtype()
+    out = helper.create_tmp_variable(dtype)
+    if isinstance(seed, int):
+        seed = helper.create_global_variable(
+            persistable=True, shape=[1], dtype="int32")
+        helper.set_variable_initializer(
+            var=seed, initializer=Constant(value=seed))
+    elif not isinstance(seed, Variable):
+        raise ValueError("'seed' must be a Variable or an int.")
+    seed_out = helper.create_tmp_variable(dtype="int32")
+    helper.append_op(
+        type="random_crop",
+        inputs={"X": input, "Seed": seed},
+        outputs={"Out": out, "SeedOut": seed_out},
+        attrs={"shape": shape})
+    return out
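For reference, a hypothetical call site for the new layer (variable names
are illustrative; per the op definition, the dimensions of `input` that
`shape` does not cover are treated as batch-size dimensions, and each
instance gets its own random window):

    import paddle.fluid as fluid

    img = fluid.layers.data(name="img", shape=[3, 256, 256], dtype="float32")
    # Crop every instance to an independent 3 x 224 x 224 window; the
    # updated RNG state is chained through SeedOut for the next batch.
    cropped = fluid.layers.random_crop(img, shape=[3, 224, 224], seed=10)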
python/paddle/fluid/tests/unittests/test_random_crop_op.py (new file, 0 → 100644)
@@ -0,0 +1,34 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import numpy as np
+import paddle.fluid.core as core
+from op_test import OpTest
+
+
+class TestRandomCropOp(OpTest):
+    def setUp(self):
+        to_crop = np.random.random((1, 10, 15)).astype("float32")
+        self.op_type = "random_crop"
+        self.inputs = {'X': to_crop, 'Seed': np.array([10])}
+        self.outputs = {'Out': np.array([1, 2, 3]), 'SeedOut': np.array([2])}
+        self.attrs = {'shape': [5, 5]}
+
+    def test_check_output(self):
+        self.check_output()
+
+
+if __name__ == "__main__":
+    unittest.main()