Commit 9b6c5397
Authored April 02, 2018 by _青葱

Merge branch develop

Parent: 19b4a2a5
Showing 18 changed files with 529 additions and 170 deletions (+529 −170)
- .travis.yml (+1 −1)
- paddle/fluid/operators/detail/CMakeLists.txt (+2 −1)
- paddle/fluid/operators/detail/grpc_client.cc (+2 −1)
- paddle/fluid/operators/detail/grpc_server.cc (+60 −1)
- paddle/fluid/operators/detail/grpc_server.h (+15 −0)
- paddle/fluid/operators/detail/grpc_server_test.cc (+51 −0)
- paddle/fluid/operators/detail/grpc_service.h (+3 −0)
- paddle/fluid/operators/detail/send_recv.proto (+2 −0)
- paddle/fluid/operators/reshape_op.cc (+57 −73)
- paddle/fluid/operators/reshape_op.h (+120 −7)
- python/paddle/fluid/executor.py (+18 −55)
- python/paddle/fluid/layers/detection.py (+10 −11)
- python/paddle/fluid/layers/nn.py (+98 −0)
- python/paddle/fluid/layers/ops.py (+0 −1)
- python/paddle/fluid/tests/unittests/op_test.py (+4 −4)
- python/paddle/fluid/tests/unittests/test_mine_hard_examples_op.py (+0 −0, mode change)
- python/paddle/fluid/tests/unittests/test_reshape_op.py (+86 −15)
- python/paddle/fluid/tests/unittests/test_target_assign_op.py (+0 −0, mode change)
.travis.yml

```diff
@@ -34,7 +34,7 @@ addons:
     - automake
     - libtool
     - ccache
-  ssh_known_hosts: 52.76.173.135
+  ssh_known_hosts: 13.229.163.131
 before_install:
   - if [[ "$JOB" == "check_style" ]]; then sudo ln -s /usr/bin/clang-format-3.8 /usr/bin/clang-format; fi
   # Paddle is using protobuf 3.1 currently. Protobuf 3.2 breaks the compatibility. So we specify the python
```
paddle/fluid/operators/detail/CMakeLists.txt

```diff
@@ -2,7 +2,8 @@ if(WITH_DISTRIBUTE)
   grpc_library(sendrecvop_grpc SRCS bytebuffer_stream.cc sendrecvop_utils.cc grpc_client.cc
       grpc_server.cc variable_response.cc PROTO send_recv.proto DEPS lod_tensor selected_rows)
   set(DISTRIBUTE_COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor")
-  set_source_files_properties(serde_test.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS})
+  set_source_files_properties(serde_test.cc grpc_server_test PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS})
   cc_test(serde_test SRCS serde_test.cc variable_response.cc DEPS grpc++_unsecure grpc_unsecure gpr
       cares zlib protobuf sendrecvop_grpc)
+  cc_test(grpc_server_test SRCS grpc_server_test.cc DEPS sendrecvop_grpc grpc++_unsecure grpc_unsecure gpr cares zlib protobuf)
 endif()
```
paddle/fluid/operators/detail/grpc_client.cc

```diff
@@ -150,7 +150,8 @@ bool RPCClient::AsyncPrefetchVariable(const std::string& ep,
     s->response_call_back_ = ProcGetResponse;

     auto call = s->stub_g_.PrepareUnaryCall(
-        s->context_.get(), "/sendrecv.SendRecvService/GetVariable", req, &cq_);
+        s->context_.get(), "/sendrecv.SendRecvService/PrefetchVariable", req,
+        &cq_);
     call->StartCall();
     call->Finish(&s->reply_, &s->status_, (void*)s);
   });
```
paddle/fluid/operators/detail/grpc_server.cc

```diff
@@ -128,6 +128,47 @@ class RequestGet final : public RequestBase {
   SimpleBlockQueue<MessageWithName>* queue_;
 };

+class RequestPrefetch final : public RequestBase {
+ public:
+  explicit RequestPrefetch(GrpcService::AsyncService* service,
+                           ::grpc::ServerCompletionQueue* cq,
+                           framework::Scope* scope,
+                           const platform::DeviceContext* dev_ctx,
+                           framework::Executor* executor,
+                           framework::ProgramDesc* program, int blkid)
+      : RequestBase(service, cq, dev_ctx),
+        responder_(&ctx_),
+        scope_(scope),
+        executor_(executor),
+        program_(program),
+        blkid_(blkid) {
+    int method_id = static_cast<int>(detail::GrpcMethod::kPrefetchVariable);
+    service_->RequestAsyncUnary(method_id, &ctx_, &request_, &responder_, cq_,
+                                cq_, this);
+  }
+
+  virtual ~RequestPrefetch() {}
+
+  virtual std::string GetReqName() { return request_.varname(); }
+
+  virtual void Process() {
+    // prefetch process...
+    ::grpc::ByteBuffer reply;
+    // TODO(Yancey1989): execute the Block which containers prefetch ops
+
+    responder_.Finish(reply, ::grpc::Status::OK, this);
+    status_ = FINISH;
+  }
+
+ protected:
+  sendrecv::VariableMessage request_;
+  ServerAsyncResponseWriter<::grpc::ByteBuffer> responder_;
+  framework::Scope* scope_;
+  framework::Executor* executor_;
+  framework::ProgramDesc* program_;
+  int blkid_;
+};
+
 void AsyncGRPCServer::WaitClientGet(int count) {
   int fetch_barriers = 0;
   while (fetch_barriers < count) {
@@ -147,6 +188,7 @@ void AsyncGRPCServer::RunSyncUpdate() {
   cq_send_ = builder.AddCompletionQueue();
   cq_get_ = builder.AddCompletionQueue();
+  cq_prefetch_ = builder.AddCompletionQueue();

   server_ = builder.BuildAndStart();
   LOG(INFO) << "Server listening on " << address_ << std::endl;
@@ -155,6 +197,8 @@ void AsyncGRPCServer::RunSyncUpdate() {
       std::bind(&AsyncGRPCServer::TryToRegisterNewSendOne, this);
   std::function<void()> get_register =
       std::bind(&AsyncGRPCServer::TryToRegisterNewGetOne, this);
+  std::function<void()> prefetch_register =
+      std::bind(&AsyncGRPCServer::TryToRegisterNewPrefetchOne, this);

   t_send_.reset(
       new std::thread(std::bind(&AsyncGRPCServer::HandleRequest, this,
@@ -163,11 +207,14 @@ void AsyncGRPCServer::RunSyncUpdate() {
   t_get_.reset(
       new std::thread(std::bind(&AsyncGRPCServer::HandleRequest, this,
                                 cq_get_.get(), "cq_get", get_register)));
+  t_prefetch_.reset(new std::thread(
+      std::bind(&AsyncGRPCServer::HandleRequest, this, cq_prefetch_.get(),
+                "cq_prefetch", prefetch_register)));
   // wait server
   server_->Wait();
   t_send_->join();
   t_get_->join();
+  t_prefetch_->join();
 }

 void AsyncGRPCServer::ShutdownQueue() {
@@ -203,6 +250,18 @@ void AsyncGRPCServer::TryToRegisterNewGetOne() {
   VLOG(4) << "Create RequestGet status:" << get->Status();
 }

+void AsyncGRPCServer::TryToRegisterNewPrefetchOne() {
+  std::unique_lock<std::mutex> lock(cq_mutex_);
+  if (is_shut_down_) {
+    return;
+  }
+  RequestPrefetch* prefetch =
+      new RequestPrefetch(&service_, cq_prefetch_.get(), scope_, dev_ctx_,
+                          executor_, program_, prefetch_blk_id_);
+
+  VLOG(4) << "Create RequestPrefetch status:" << prefetch->Status();
+}
+
 // FIXME(typhoonzero): change cq_name to enum.
 void AsyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq,
                                     std::string cq_name,
```
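The server-side pattern added here is one dedicated worker thread per gRPC completion queue, with the new prefetch queue handled exactly like the existing send and get queues. As a language-neutral illustration only (plain Python threads and queues, not Paddle or gRPC code), the control flow looks roughly like this:

```python
import queue
import threading

def handle_requests(cq, name):
    # Analogue of AsyncGRPCServer::HandleRequest: drain one queue forever.
    while True:
        item = cq.get()
        if item is None:  # shutdown sentinel, analogous to ShutdownQueue()
            break
        print(name, "processed", item)

# Three queues, three workers: cq_send / cq_get / cq_prefetch.
queues = {"cq_send": queue.Queue(), "cq_get": queue.Queue(),
          "cq_prefetch": queue.Queue()}
threads = [threading.Thread(target=handle_requests, args=(q, n))
           for n, q in queues.items()]
for t in threads:
    t.start()

queues["cq_prefetch"].put("PrefetchVariable RPC")
for q in queues.values():
    q.put(None)   # shut every queue down
for t in threads:
    t.join()      # analogous to t_send_->join(); t_get_->join(); t_prefetch_->join();
```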
paddle/fluid/operators/detail/grpc_server.h

```diff
@@ -17,7 +17,9 @@ limitations under the License. */
 #include <grpc++/grpc++.h>
 #include <thread>

+#include "paddle/fluid/framework/executor.h"
 #include "paddle/fluid/framework/lod_tensor.h"
+#include "paddle/fluid/framework/program_desc.h"
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/framework/selected_rows.h"
 #include "paddle/fluid/framework/var_type.h"
@@ -53,6 +55,12 @@ class AsyncGRPCServer final {
   void SetDevCtx(const platform::DeviceContext* dev_ctx) { dev_ctx_ = dev_ctx; }

+  void SetProgram(framework::ProgramDesc* program) { program_ = program; }
+
+  void SetPrefetchBlkdId(int blkid) { prefetch_blk_id_ = blkid; }
+
+  void SetExecutor(framework::Executor* executor) { executor_ = executor; }
+
   const ReceivedMessage Get() { return this->var_recv_queue_.Pop(); }

   void Push(const std::string& msg_name) {
@@ -66,6 +74,7 @@ class AsyncGRPCServer final {
                      std::function<void()> TryToRegisterNewOne);
   void TryToRegisterNewSendOne();
   void TryToRegisterNewGetOne();
+  void TryToRegisterNewPrefetchOne();
   void ShutdownQueue();

  private:
@@ -73,6 +82,7 @@ class AsyncGRPCServer final {
   volatile bool is_shut_down_ = false;
   std::unique_ptr<::grpc::ServerCompletionQueue> cq_send_;
   std::unique_ptr<::grpc::ServerCompletionQueue> cq_get_;
+  std::unique_ptr<::grpc::ServerCompletionQueue> cq_prefetch_;

   GrpcService::AsyncService service_;
   std::unique_ptr<::grpc::Server> server_;
@@ -92,6 +102,11 @@ class AsyncGRPCServer final {
   std::unique_ptr<std::thread> t_send_;
   std::unique_ptr<std::thread> t_get_;
+  std::unique_ptr<std::thread> t_prefetch_;
+
+  int prefetch_blk_id_;
+  framework::ProgramDesc* program_;
+  framework::Executor* executor_;
 };

 };  // namespace detail
```
paddle/fluid/operators/detail/grpc_server_test.cc (new file, 0 → 100644)

```cpp
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <unistd.h>
#include <string>
#include <thread>

#include "gtest/gtest.h"
#include "paddle/fluid/operators/detail/grpc_client.h"
#include "paddle/fluid/operators/detail/grpc_server.h"

namespace framework = paddle::framework;
namespace platform = paddle::platform;
namespace detail = paddle::operators::detail;

std::unique_ptr<detail::AsyncGRPCServer> rpc_service_;

void StartServer(const std::string& endpoint) {
  rpc_service_.reset(new detail::AsyncGRPCServer(endpoint));
}

TEST(PREFETCH, CPU) {
  // start up a server instance backend
  // TODO(Yancey1989): Need to start a server with optimize blocks and
  // prefetch blocks.
  std::thread server_thread(StartServer, "127.0.0.1:8889");
  framework::Scope scope;
  platform::CPUPlace place;
  platform::CPUDeviceContext ctx(place);
  // create var on local scope
  std::string var_name("tmp_0");
  auto var = scope.Var(var_name);
  auto tensor = var->GetMutable<framework::LoDTensor>();
  tensor->Resize({10, 10});

  detail::RPCClient client;
  client.AsyncPrefetchVariable("127.0.0.1:8889", ctx, scope, var_name, "");
  server_thread.join();
  rpc_service_.reset(nullptr);
}
```
paddle/fluid/operators/detail/grpc_service.h

```diff
@@ -76,6 +76,7 @@ namespace detail {
 enum class GrpcMethod {
   kSendVariable,
   kGetVariable,
+  kPrefetchVariable,
 };

 static const int kGrpcNumMethods =
@@ -87,6 +88,8 @@ inline const char* GrpcMethodName(GrpcMethod id) {
       return "/sendrecv.SendRecvService/SendVariable";
     case GrpcMethod::kGetVariable:
       return "/sendrecv.SendRecvService/GetVariable";
+    case GrpcMethod::kPrefetchVariable:
+      return "/sendrecv.SendREcvService/PrefetchVariable";
   }

   // Shouldn't be reached.
```
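Each GrpcMethod value maps to a fully qualified RPC path. Note that the string added for kPrefetchVariable spells the service as "SendREcvService", while grpc_client.cc calls "/sendrecv.SendRecvService/PrefetchVariable"; this looks like a casing typo in the commit. A minimal illustrative mirror of the mapping (plain Python, not part of the codebase):

```python
# Mirror of GrpcMethodName(); the kPrefetchVariable entry reproduces the
# commit's casing verbatim to highlight the client/server mismatch.
GRPC_METHOD_NAMES = {
    "kSendVariable": "/sendrecv.SendRecvService/SendVariable",
    "kGetVariable": "/sendrecv.SendRecvService/GetVariable",
    "kPrefetchVariable": "/sendrecv.SendREcvService/PrefetchVariable",  # sic
}

client_path = "/sendrecv.SendRecvService/PrefetchVariable"  # used by grpc_client.cc
assert GRPC_METHOD_NAMES["kPrefetchVariable"] != client_path  # the mismatch
```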
paddle/fluid/operators/detail/send_recv.proto

```diff
@@ -21,6 +21,8 @@ service SendRecvService {
   rpc SendVariable(VariableMessage) returns (VoidMessage) {}
   // Argument VariableMessage for GetVariable should only contain varname.
   rpc GetVariable(VariableMessage) returns (VariableMessage) {}
+  // Prefetch variable by Ids
+  rpc PrefetchVariable(VariableMessage) returns (VariableMessage) {}
 }

 // VariableMessage is serialized paddle variable message.
```
paddle/fluid/operators/reshape_op.cc

```diff
@@ -17,90 +17,66 @@ limitations under the License. */
 namespace paddle {
 namespace operators {

-class ReshapeOp : public framework::OperatorWithKernel {
- public:
-  ReshapeOp(const std::string &type, const framework::VariableNameMap &inputs,
-            const framework::VariableNameMap &outputs,
-            const framework::AttributeMap &attrs)
-      : OperatorWithKernel(type, inputs, outputs, attrs) {}
-
-  void InferShape(framework::InferShapeContext *ctx) const override {
-    // input check
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of ReshapeOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of ReshapeOp should not be null.");
-
-    auto shape = ctx->Attrs().Get<std::vector<int>>("shape");
-    PADDLE_ENFORCE(shape.size() > 0, "Attr(shape) shouldn't be empty.");
-    auto x_dims = ctx->GetInputDim("X");
-
-    std::vector<size_t> neg_dims_idx;
-    // set some dimension to -1 if it is unknown
-    const int unknown_size = -1;
-    for (size_t i = 0; i < shape.size(); ++i) {
-      PADDLE_ENFORCE(shape[i] > 0 || shape[i] == unknown_size,
-                     "Each dimension of Attr(shape) must be positive or %d.",
-                     unknown_size);
-      if (shape[i] == unknown_size) {
-        neg_dims_idx.push_back(i);
-        PADDLE_ENFORCE(neg_dims_idx.size() <= 1,
-                       "Only one dimension of Attr(shape) can be unknown.");
-      }
-    }
-
-    int64_t capacity =
-        std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>());
-    int64_t in_size = framework::product(x_dims);
-    if (neg_dims_idx.size() == 1) {
-      // dim infer
-      shape[neg_dims_idx[0]] = in_size / (-capacity);
-      // recalculate capacity
-      capacity = shape[neg_dims_idx[0]] * (-capacity);
-    }
-    // capacity check
-    PADDLE_ENFORCE(capacity == in_size,
-                   "The size of Input(X) mismatches with Attr(shape).");
-    // resize output
-    std::vector<int64_t> shape_int64(shape.size(), 0);
-    std::transform(shape.begin(), shape.end(), shape_int64.begin(),
-                   [](int a) { return static_cast<int64_t>(a); });
-    auto out_dims = framework::make_ddim(shape_int64);
-    ctx->SetOutputDim("Out", out_dims);
-    if (shape[0] == x_dims[0]) {
-      // Only pass LoD when the first dimension is equal between
-      // output and input.
-      ctx->ShareLoD("X", /*->*/ "Out");
-    }
-  }
-};
-
 class ReshapeOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   ReshapeOpMaker(OpProto *proto, OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("X", "The input tensor of reshape operator.");
-    AddOutput("Out", "The output tensor of reshape operator.");
-    AddAttr<std::vector<int>>("shape",
-                              "(vector<int>) "
-                              "Target shape of reshape operator.");
-    AddAttr<bool>("inplace",
-                  "Change the source tensor's shape without copy memory.")
-        .SetDefault(true);
+    AddInput("X", "(Tensor). The input tensor of reshape operator.");
+    AddInput("Shape",
+             "(Tensor<int32>, optional). If provided, reshape according to "
+             "this given shape. That is to say it has a higher priority than "
+             "the shape attribute, while the shape attribute still should be "
+             "set correctly to gurantee shape inference in compile time.")
+        .AsDispensable();
+    AddOutput("Out", "(Tensor). The output tensor of reshape operator.");
+    AddAttr<std::vector<int>>(
+        "shape", "(std::vector<int>) Target shape of reshape operator.");
+    AddAttr<bool>("inplace",
+                  "(default: false) Change the source tensor's shape without "
+                  "memory copy. When Attr(inplace) is set true, the output "
+                  "tensor shares memory with Input(X), otherwise, a new output "
+                  "tensor is created, and its data are copied from Input(x).")
+        .SetDefault(false);
     AddComment(R"DOC(
 Reshape Operator.

-Reshape Input(X) into the shape specified by Attr(shape).
+Reshape Input(X) into the shape specified by Attr(shape) or Input(Shape). The
+data in Input(X) are unchanged.
+
+Examples:
+
+1. Given a 3-D tensor Input(X) with a shape [2, 4, 6], and the target shape
+specified by Attr(shape) is [6, 8], the reshape operator will transform Input(X)
+into a 2-D tensor with shape [6, 8] and leaving Input(X)'s data unchanged.
+
+2. Given a 3-D tensor Input(X) with a shape [2, 4, 6], and the target shape
+specified by Attr(shape) is [2, 3, -1, 2], the reshape operator will transform
+Input(X) into a 4-D tensor with shape [2, 3, 4, 2] and leaving Input(X)'s data
+unchanged. In this case, one and only dimension of Attr(shape) can be set to -1,
+the value of this dimension is inferred from the total element number of
+Input(X) and remaining dimensions.
+
+3. Given a 3-D tensor Input(X) with a shape [2, 4, 6], and the target shape
+specified by Attr(shape) is [-1, 0, 3, 2], the reshape operator will transform
+Input(X) into a 4-D tensor with shape [2, 4, 3, 2] and leaving Input(X)'s data
+unchanged. In this case, besides -1, 0 means the actual dimension value is going
+to be copied from the corresponding dimension of Input(X).
+
+Note:
+
+1. One and only one dimension in Attr(shape) can be set -1. In this case,
+the actual dimension value will be infered from the total element number of
+Input(X) and remaining dimensions.
+
+2. More than one dimensions in Attr(shape) can be set to 0, which means the real
+dimension value will be copied from Input(X) at runtime. Note that the index of
+0 can not exceed Rank(X). For example, Input(X) is a 3-D tensor with shape
+[2, 3, 4], Attr(shape) = [2, 3, 2, 0] is an invalid input.
+
+3. Input(Shape) has a higher priority than Attr(shape) if it is provided, while
+Attr(shape) still should be set correctly to gurantee shape inference in
+compile-time.

-An example:
-Given a 2-D tensor X with 2 rows and 2 columns : [[1, 2], [3, 4]]
-
-and target shape = [1, 4], the reshape operator will transform
-the tensor X into a 2-D tensor: [[1, 2, 3, 4]]
-
-One dimension in the target shape can be set -1, representing that its
-size is unknown. In this case, the real dimension will be infered from
-the original shape of Input(X) and other dimensions in the target shape.
 )DOC");
   }
 };
@@ -119,6 +95,14 @@ class ReshapeGradOp : public framework::OperatorWithKernel {
                    "Input(Out@GRAD) shouldn't be null.");
     ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
   }
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext &ctx) const override {
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<framework::LoDTensor>("X")->type()),
+        ctx.device_context());
+  }
 };

 }  // namespace operators
```
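The -1/0 rules in the new DOC string can be summarized with a small numpy sketch. The helper name infer_target_shape is illustrative only, not part of the operator:

```python
import numpy as np

def infer_target_shape(in_shape, shape):
    # 0 copies the corresponding input dimension; a single -1 is inferred
    # from the total element count, per the operator's documentation.
    out = [in_shape[i] if s == 0 else s for i, s in enumerate(shape)]
    if -1 in out:
        known = int(np.prod([d for d in out if d != -1]))
        out[out.index(-1)] = int(np.prod(in_shape)) // known
    return out

print(infer_target_shape([2, 4, 6], [6, 8]))         # [6, 8]
print(infer_target_shape([2, 4, 6], [2, 3, -1, 2]))  # [2, 3, 4, 2]
print(infer_target_shape([2, 4, 6], [-1, 0, 3, 2]))  # [2, 4, 3, 2]
```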
paddle/fluid/operators/reshape_op.h

```diff
@@ -20,17 +20,129 @@ limitations under the License. */
 namespace paddle {
 namespace operators {

+class ReshapeOp : public framework::OperatorWithKernel {
+ public:
+  ReshapeOp(const std::string &type, const framework::VariableNameMap &inputs,
+            const framework::VariableNameMap &outputs,
+            const framework::AttributeMap &attrs)
+      : OperatorWithKernel(type, inputs, outputs, attrs) {}
+
+  void InferShape(framework::InferShapeContext *ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("X"),
+                   "Input(X) of ReshapeOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("Out"),
+                   "Output(Out) of ReshapeOp should not be null.");
+
+    const std::vector<int> &shape = ctx->Attrs().Get<std::vector<int>>("shape");
+    PADDLE_ENFORCE(!shape.empty(),
+                   "The shape information must be set by Attr(shape).");
+
+    if (ctx->HasInput("Shape") && ctx->IsRuntime()) {
+      // If true, set the shape of Output(Out) according to Input(Shape) in
+      // ReshapeKernel with ExecutionContext. Also check LoD in ReshapeKernel.
+      ctx->ShareLoD("X", /*->*/ "Out");
+      return;
+    }
+
+    auto x_dims = ctx->GetInputDim("X");
+    auto out_dims = ValidateShape(shape, x_dims);
+    ctx->SetOutputDim("Out", out_dims);
+    if (x_dims[0] == out_dims[0]) {
+      // Only pass LoD when the first dimension of output and Input(X)
+      // are the same.
+      ctx->ShareLoD("X", /*->*/ "Out");
+    }
+  }
+
+  static framework::DDim ValidateShape(const std::vector<int> shape,
+                                       const framework::DDim &in_dims) {
+    const int64_t in_size = framework::product(in_dims);
+    // only one dimension canbe set to -1, whose size will be automatically
+    // infered.
+    const int64_t unk_dim_val = -1;
+    const int64_t copy_dim_val = 0;
+
+    std::vector<int64_t> output_shape(shape.size(), 0);
+    int64_t capacity = 1;
+    int unk_dim_idx = -1;
+    for (size_t i = 0; i < shape.size(); ++i) {
+      if (shape[i] == unk_dim_val) {
+        PADDLE_ENFORCE(
+            unk_dim_idx == -1,
+            "Only one input dimension of Attr(shape) can be unknown.");
+        unk_dim_idx = i;
+      } else if (shape[i] == copy_dim_val) {
+        PADDLE_ENFORCE(
+            static_cast<int>(i) < in_dims.size(),
+            "The index of dimension to copy from input shape must be less "
+            "than the size of input shape.");
+      } else {
+        PADDLE_ENFORCE(
+            shape[i] > 0,
+            "Each input dimension of Attr(shape) must not be negtive except "
+            "one unknown dimension.");
+      }
+
+      capacity *= (shape[i] ? shape[i] : in_dims[i]);
+      output_shape[i] =
+          (shape[i] ? static_cast<int64_t>(shape[i]) : in_dims[i]);
+    }
+
+    if (unk_dim_idx != -1) {
+      output_shape[unk_dim_idx] = -in_size / capacity;
+      PADDLE_ENFORCE_EQ(output_shape[unk_dim_idx] * capacity, -in_size,
+                        "Invalid shape is given.");
+    } else {
+      PADDLE_ENFORCE_EQ(capacity, in_size, "Invalid shape is given.");
+    }
+    return framework::make_ddim(output_shape);
+  }
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext &ctx) const override {
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<framework::LoDTensor>("X")->type()),
+        ctx.device_context());
+  }
+};
+
 template <typename DeviceContext, typename T>
 class ReshapeKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext &ctx) const {
-    auto *out = ctx.Output<framework::Tensor>("Out");
-    auto *in = ctx.Input<framework::Tensor>("X");
+    auto *out = ctx.Output<framework::LoDTensor>("Out");
+    auto *in = ctx.Input<framework::LoDTensor>("X");
+    auto *shape_tensor = ctx.Input<framework::LoDTensor>("Shape");
+
+    framework::DDim out_dims = out->dims();
+    if (shape_tensor) {
+      auto *shape_data = shape_tensor->data<int>();
+      if (platform::is_gpu_place(ctx.GetPlace())) {
+        framework::Tensor cpu_shape_tensor;
+        TensorCopy(*shape_tensor, platform::CPUPlace(), ctx.device_context(),
+                   &cpu_shape_tensor);
+        shape_data = cpu_shape_tensor.data<int>();
+      }
+      auto shape =
+          std::vector<int>(shape_data, shape_data + shape_tensor->numel());
+      out_dims = ReshapeOp::ValidateShape(shape, in->dims());
+    }
+    if (!in->lod().empty()) {
+      PADDLE_ENFORCE_EQ(
+          out_dims[0], in->dims()[0],
+          "Reshape operator cannot reshape an input sequence batch "
+          "into an output sequence batch that has a different "
+          "number of time steps. Please consider using "
+          "sequence_reshape op.");
+    }
+
     bool inplace = ctx.Attr<bool>("inplace");
-    auto out_dims = out->dims();
+    out->Resize(out_dims);
     if (!inplace) {
       out->mutable_data<T>(ctx.GetPlace());
       framework::TensorCopy(*in, ctx.GetPlace(), ctx.device_context(), out);
+      // TensorCopy will resize to in_dims.
       out->Resize(out_dims);
     } else {
       out->ShareDataWith(*in);
@@ -42,9 +154,10 @@ class ReshapeKernel : public framework::OpKernel<T> {
 template <typename DeviceContext, typename T>
 class ReshapeGradKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext &ctx) const {
     auto *d_out = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
     auto *d_x = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
     d_x->mutable_data<T>(ctx.GetPlace());
     bool inplace = ctx.Attr<bool>("inplace");
```
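One subtlety in the new kernel is the LoD guard: when Input(X) carries LoD, the reshape must preserve the leading (time-step) dimension. A hedged sketch of that check in plain Python, where check_lod_preserved is a made-up name for illustration:

```python
def check_lod_preserved(in_shape, out_shape, has_lod):
    # Mirrors the kernel's PADDLE_ENFORCE_EQ(out_dims[0], in->dims()[0]):
    # a LoD-carrying batch may not change its number of time steps.
    if has_lod and out_shape[0] != in_shape[0]:
        raise ValueError("cannot reshape a sequence batch to a different "
                         "number of time steps; use sequence_reshape instead")

check_lod_preserved([4, 6], [4, 3, 2], has_lod=True)   # ok: first dim kept
check_lod_preserved([4, 6], [6, 4], has_lod=False)     # ok: no LoD attached
```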
python/paddle/fluid/executor.py

```diff
@@ -48,8 +48,7 @@ def as_numpy(tensor):
     assert isinstance(tensor, core.LoDTensor)
     lod = tensor.lod()
     if len(lod) > 0:
-        raise RuntimeError(
-            "Some of your fetched tensors hold LoD information. \
+        raise RuntimeError("Some of your featched tensors hold LoD information. \
             They can not be completely cast to Python ndarray. \
             Please set the parameter 'return_numpy' as 'False' to \
             return LoDTensor itself directly.")
@@ -180,60 +179,24 @@ def get_program_cache_key(feed, fetch_list):

 class Executor(object):
-    def __init__(self, places):
-        if not isinstance(places, list) and not isinstance(places, tuple):
-            places = [places]
-
-        act_places = []
-        for each in places:
-            p = core.Place()
-            p.set_place(each)
-            act_places.append(p)
-
-        # TODO(dzhwinter) : only use the first place
-        self.executor = core.Executor(act_places[0])
-        self.places = places
+    def __init__(self, place):
+        self.place = place
+        p = core.Place()
+        p.set_place(place)
+        self.executor = core.Executor(p)
         self.program_caches = dict()

-    def aslodtensor(self, data):
-        def accumulate(data):
-            if not isinstance(data, list):
-                return 1
-            return sum([accumulate(sub) for sub in data])
-
-        def parselod(data):
-            seq_lens = [accumulate(seq) for seq in data]
-            cur_len = 0
-            lod = [cur_len]
-            for l in seq_lens:
-                cur_len += l
-                lod.append(cur_len)
-            return lod
-
-        assert len(self.places) != 0
-        if not isinstance(data, list):
-            # pure tensor case
-            tensor = core.LoDTensor()
-            tensor.set(data, self.places[0])
-            return tensor
-        else:
-            raise RuntimeError("Current implementation lacks unittests")
-            # lodtensor case
-            lod = []
-            if not isinstance(data[0], list):
-                lod.append(parselod(data))
-                flattened_data = np.concatenate(data, axis=0).astype("int64")
-            else:
-                while isinstance(data[0], list):
-                    lod.append(parselod(seq))
-                    flattened_data = [item for seq in data for item in seq]
-                    data = flattened_data
-                flattened_data = np.concatenate(data, axis=0).astype("int64")
-            flattened_data = flattened_data.reshape([len(flattened_data), 1])
-            tensor = core.LoDTensor()
-            tensor.set(flattened_data, self.places[0])
-            tensor.set_lod(lod)
-            return tensor
+    def as_lodtensor(self, data):
+        if isinstance(data, list):
+            raise RuntimeError("Some of your feed data hold LoD information. \
+                They can not be completely cast from a list of Python \
+                ndarray to LoDTensor. Please convert data to LoDTensor \
+                directly before feeding the data.\
+                ")
+        # single tensor case
+        tensor = core.LoDTensor()
+        tensor.set(data, self.place)
+        return tensor

     def _get_program_cache(self, program_cache_key):
         return self.program_caches.get(program_cache_key, None)
@@ -293,7 +256,7 @@ class Executor(object):
                 feed_target_name = op.desc.output('Out')[0]
                 cur_feed = feed[feed_target_name]
                 if not isinstance(cur_feed, core.LoDTensor):
-                    cur_feed = self.aslodtensor(cur_feed)
+                    cur_feed = self.as_lodtensor(cur_feed)
                 idx = op.desc.attr('col')
                 core.set_feed_variable(scope, cur_feed, feed_var_name, idx)
             else:
```
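The removed aslodtensor path built offset-style LoD from nested Python lists; the new as_lodtensor simply rejects list inputs and handles the single-tensor case. For reference, the offset computation the old parselod helper performed can be sketched as follows (to_lod_offsets is an illustrative name):

```python
def to_lod_offsets(seq_lens):
    # Cumulative offsets: sequences of lengths 1 and 3 become [0, 1, 4],
    # the same offset form shown in the one_hot docstring (X.lod = [[0, 1, 4]]).
    lod = [0]
    for length in seq_lens:
        lod.append(lod[-1] + length)
    return lod

print(to_lod_offsets([1, 3]))  # [0, 1, 4]
```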
python/paddle/fluid/layers/detection.py

```diff
@@ -19,7 +19,6 @@ from layer_function_generator import generate_layer_fn
 from layer_function_generator import autodoc
 from ..layer_helper import LayerHelper
 import tensor
-import ops
 import nn
 import math
@@ -58,7 +57,7 @@ def detection_output(loc,
     This operation is to get the detection results by performing following
     two steps:
     1. Decode input bounding box predictions according to the prior boxes.
     2. Get the final detection results by applying multi-class non maximum
        suppression (NMS).
@@ -130,9 +129,9 @@ def detection_output(loc,
         target_box=loc,
         code_type='decode_center_size')

     old_shape = scores.shape
-    scores = ops.reshape(x=scores, shape=(-1, old_shape[-1]))
+    scores = nn.reshape(x=scores, shape=(-1, old_shape[-1]))
     scores = nn.softmax(input=scores)
-    scores = ops.reshape(x=scores, shape=old_shape)
+    scores = nn.reshape(x=scores, shape=old_shape)
     scores = nn.transpose(scores, perm=[0, 2, 1])
     scores.stop_gradient = True
     nmsed_outs = helper.create_tmp_variable(dtype=decoded_box.dtype)
@@ -463,7 +462,7 @@ def ssd_loss(location,
     num, num_prior, num_class = confidence.shape

     def __reshape_to_2d(var):
-        return ops.reshape(x=var, shape=[-1, var.shape[-1]])
+        return nn.reshape(x=var, shape=[-1, var.shape[-1]])

     # 1. Find matched boundding box by prior box.
     #   1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
@@ -474,7 +473,7 @@ def ssd_loss(location,
     # 2. Compute confidence for mining hard examples
     # 2.1. Get the target label based on matched indices
-    gt_label = ops.reshape(x=gt_label, shape=gt_label.shape + (1, ))
+    gt_label = nn.reshape(x=gt_label, shape=gt_label.shape + (1, ))
     gt_label.stop_gradient = True
     target_label, _ = target_assign(
         gt_label, matched_indices, mismatch_value=background_label)
@@ -487,7 +486,7 @@ def ssd_loss(location,
     conf_loss = nn.softmax_with_cross_entropy(confidence, target_label)
     # 3. Mining hard examples
-    conf_loss = ops.reshape(x=conf_loss, shape=(num, num_prior))
+    conf_loss = nn.reshape(x=conf_loss, shape=(num, num_prior))
     conf_loss.stop_gradient = True
     neg_indices = helper.create_tmp_variable(dtype='int32')
     dtype = matched_indices.dtype
@@ -556,7 +555,7 @@ def ssd_loss(location,
     # 5.3 Compute overall weighted loss.
     loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
     # reshape to [N, Np], N is the batch size and Np is the prior box number.
-    loss = ops.reshape(x=loss, shape=[-1, num_prior])
+    loss = nn.reshape(x=loss, shape=[-1, num_prior])
     loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
     if normalize:
         normalizer = nn.reduce_sum(target_loc_weight)
@@ -709,7 +708,7 @@ def multi_box_head(inputs,
             new_shape = [
                 -1, reduce(lambda x, y: x * y, input.shape[axis:len(input.shape)])
             ]
-            out = ops.reshape(x=input, shape=new_shape)
+            out = nn.reshape(x=input, shape=new_shape)
             return out

     def _is_list_or_tuple_(data):
@@ -803,7 +802,7 @@ def multi_box_head(inputs,
             mbox_loc.shape[0],
             mbox_loc.shape[1] * mbox_loc.shape[2] * mbox_loc.shape[3] / 4, 4
         ]
-        mbox_loc_flatten = ops.reshape(mbox_loc, shape=new_shape)
+        mbox_loc_flatten = nn.reshape(mbox_loc, shape=new_shape)
         mbox_locs.append(mbox_loc_flatten)

         # get conf
@@ -819,7 +818,7 @@ def multi_box_head(inputs,
             conf_loc.shape[0],
             conf_loc.shape[1] * conf_loc.shape[2] * conf_loc.shape[3] /
             num_classes, num_classes
         ]
-        conf_loc_flatten = ops.reshape(conf_loc, shape=new_shape)
+        conf_loc_flatten = nn.reshape(conf_loc, shape=new_shape)
         mbox_confs.append(conf_loc_flatten)

     if len(box_results) == 1:
```
python/paddle/fluid/layers/nn.py

```diff
@@ -73,6 +73,7 @@ __all__ = [
     'smooth_l1',
     'one_hot',
     'autoincreased_step_counter',
+    'reshape',
     'lod_reset',
     'lrn',
 ]
@@ -3265,6 +3266,8 @@ def one_hot(input, depth):
         The one-hot tensor or LodTensor, same as input.

     Examples:
+        .. code-block:: python
+
         X is a LoDTensor:
           X.lod = [[0, 1, 4]]
           X.shape = [4, 1]
@@ -3319,6 +3322,101 @@ def autoincreased_step_counter(counter_name=None, begin=1, step=1):
     return counter


+def reshape(x, shape, actual_shape=None, act=None, inplace=True, name=None):
+    """
+    Gives a new shape to the input Tensor without changing its data.
+
+    The target shape can be given by :attr:`shape` or :attr:`actual_shape`.
+    :attr:`shape` is a list of integer while :attr:`actual_shape` is a tensor
+    variable. :attr:`actual_shape` has a higher priority than :attr:`shape`
+    if it is provided, while :attr:`shape` still should be set correctly to
+    gurantee shape inference in compile-time.
+
+    Some tricks exist when specifying the target shape.
+
+    1. -1 means the value of this dimension is inferred from the total element
+    number of x and remaining dimensions. Thus one and only one dimension can
+    be set -1.
+
+    2. 0 means the actual dimension value is going to be copied from the
+    corresponding dimension of x. The indice of 0s in shape can not exceed
+    Rank(X).
+
+    Here are some examples to explain it.
+
+    1. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
+    is [6, 8], the reshape operator will transform x into a 2-D tensor with
+    shape [6, 8] and leaving x's data unchanged.
+
+    2. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
+    specified is [2, 3, -1, 2], the reshape operator will transform x into a
+    4-D tensor with shape [2, 3, 4, 2] and leaving x's data unchanged. In this
+    case, one dimension of the target shape is set to -1, the value of this
+    dimension is inferred from the total element number of x and remaining
+    dimensions.
+
+    3. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
+    is [-1, 0, 3, 2], the reshape operator will transform x into a 4-D tensor
+    with shape [2, 4, 3, 2] and leaving x's data unchanged. In this case,
+    besides -1, 0 means the actual dimension value is going to be copied from
+    the corresponding dimension of x.
+
+    Args:
+        input(variable): The input tensor.
+        shape(list): The new shape. At most one dimension of the new shape can
+                     be -1.
+        actual_shape(variable): An optional input. If provided, reshape
+                                according to this given shape rather than
+                                :attr:`shape` specifying shape. That is to
+                                say :attr:`actual_shape` has a higher priority
+                                than :attr:`shape`.
+        act (str): The non-linear activation to be applied to output variable.
+        inplace(bool): If this flag is set true, a new output tensor is created
+                       whose data is copied from input x, otherwise the output
+                       shares data with input without copying.
+
+    Returns(variable): The output tensor.
+
+    Examples:
+        .. code-block:: python
+
+            data = fluid.layers.data(
+                name='data', shape=[2, 4, 6], dtype='float32')
+            reshaped = fluid.layers.reshape(
+                x=data, shape=[-1, 0, 3, 2], act='tanh', inplace=True)
+    """
+
+    if not (isinstance(shape, list) or isinstance(shape, tuple)):
+        raise ValueError("Input shape must be a python lsit or tuple.")
+
+    # Validate the shape
+    unk_dim_idx = -1
+    for dim_idx, dim_size in enumerate(shape):
+        if dim_size == -1:
+            assert unk_dim_idx == -1, (
+                "Only one dimension in shape can be unknown.")
+            unk_dim_idx = dim_idx
+        elif dim_size == 0:
+            assert dim_idx < len(x.shape), (
+                "The indice of 0s in shape can not exceed Rank(X).")
+        else:
+            assert dim_size > 0, (
+                "Each dimension size given in shape must not be negtive "
+                "except one unknown dimension.")
+
+    helper = LayerHelper("reshape", **locals())
+    reshaped = helper.create_tmp_variable(dtype=x.dtype)
+    helper.append_op(
+        type="reshape",
+        inputs={"X": x,
+                "Shape": actual_shape}
+        if isinstance(actual_shape, Variable) else {"X": x},
+        attrs={"shape": shape,
+               "inplace": inplace},
+        outputs={"Out": reshaped})
+
+    return helper.append_activation(reshaped)
+
+
 def lod_reset(x, y=None, target_lod=None):
     """
     LoD Reset Operator. Set LoD of **x** to a new one specified by **y** or
```
python/paddle/fluid/layers/ops.py

```diff
@@ -49,7 +49,6 @@ __activations__ = [
 __all__ = [
     'mean',
     'mul',
-    'reshape',
     'scale',
     'sigmoid_cross_entropy_with_logits',
     'elementwise_add',
```
python/paddle/fluid/tests/unittests/op_test.py

```diff
@@ -334,7 +334,7 @@ class OpTest(unittest.TestCase):
                 np.allclose(
                     actual_t, expect_t, atol=atol),
                 "Output (" + out_name + ") has diff at " + str(place) +
-                str(actual_t) + str(expect_t))
+                str(actual_t) + "\n" + str(expect_t))
             if isinstance(expect, tuple):
                 self.assertListEqual(actual.lod(), expect[1],
                                      "Output (" + out_name +
@@ -568,6 +568,6 @@ class OpTest(unittest.TestCase):
         fetch_list = [g for p, g in param_grad_list]
         executor = Executor(place)
         return map(np.array,
                    executor.run(prog, feed_dict, fetch_list,
                                 return_numpy=False))
```
python/paddle/fluid/tests/unittests/test_mine_hard_examples_op.py (100755 → 100644)

File mode changed from 100755 to 100644.
python/paddle/fluid/tests/unittests/test_reshape_op.py

```diff
@@ -14,15 +14,19 @@
 import unittest
 import numpy as np
 from op_test import OpTest


 class TestReshapeOp(OpTest):
     def setUp(self):
+        ori_shape = (2, 25)
+        new_shape = (5, 10)
+
         self.op_type = "reshape"
-        self.inputs = {'X': np.random.random((10, 20)).astype("float32")}
-        self.attrs = {'shape': [10 * 20]}
-        self.outputs = {'Out': self.inputs['X'].reshape(self.attrs['shape'])}
+        self.inputs = {"X": np.random.random(ori_shape).astype("float32")}
+        self.attrs = {"shape": new_shape, "inplace": False}
+        self.outputs = {"Out": self.inputs["X"].reshape(new_shape)}

     def test_check_output(self):
         self.check_output()
@@ -31,12 +35,33 @@ class TestReshapeOp(OpTest):
         self.check_grad(["X"], "Out")


-class TestReshapeOpDimInfer(OpTest):
+class TestReshapeOpDimInfer1(OpTest):
     def setUp(self):
+        ori_shape = (5, 10)
+        new_shape = (5, -1, 5)
+
         self.op_type = "reshape"
-        self.inputs = {'X': np.random.random((10, 20)).astype("float32")}
-        self.attrs = {'shape': [4, -1, 5]}
-        self.outputs = {'Out': self.inputs['X'].reshape(self.attrs['shape'])}
+        self.inputs = {"X": np.random.random(ori_shape).astype("float32")}
+        self.attrs = {"shape": new_shape, "inplace": False}
+        self.outputs = {"Out": self.inputs["X"].reshape(self.attrs["shape"])}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(["X"], "Out")
+
+
+class TestReshapeOpDimInfer2(OpTest):
+    def setUp(self):
+        ori_shape = (2, 2, 6)
+        new_shape = (2, 0, 3, -1)
+        infered_shape = (2, 2, 3, -1)
+
+        self.op_type = "reshape"
+        self.inputs = {"X": np.random.random(ori_shape).astype("float32")}
+        self.attrs = {"shape": new_shape, "inplace": False}
+        self.outputs = {"Out": self.inputs["X"].reshape(infered_shape)}

     def test_check_output(self):
         self.check_output()
@@ -47,10 +72,30 @@ class TestReshapeOpDimInfer(OpTest):
 class TestReshapeOpInplace(OpTest):
     def setUp(self):
+        ori_shape = (2, 25)
+        new_shape = (5, 10)
+
+        self.op_type = "reshape"
+        self.inputs = {"X": np.random.random(ori_shape).astype("float32")}
+        self.attrs = {"shape": new_shape}
+        self.outputs = {"Out": self.inputs["X"].reshape(new_shape)}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(["X"], "Out")
+
+
+class TestReshapeOpDimInferInplace1(OpTest):
+    def setUp(self):
+        ori_shape = (5, 10)
+        new_shape = (5, -1, 5)
+
         self.op_type = "reshape"
-        self.inputs = {'X': np.random.random((10, 20)).astype("float32")}
-        self.attrs = {'shape': [10 * 20], 'inplace': True}
-        self.outputs = {'Out': self.inputs['X'].reshape(self.attrs['shape'])}
+        self.inputs = {"X": np.random.random(ori_shape).astype("float32")}
+        self.attrs = {"shape": new_shape}
+        self.outputs = {"Out": self.inputs["X"].reshape(new_shape)}

     def test_check_output(self):
         self.check_output()
@@ -59,12 +104,38 @@ class TestReshapeOpInplace(OpTest):
         self.check_grad(["X"], "Out")


-class TestReshapeOpDimInferInplace(OpTest):
+class TestReshapeOpDimInferInplace2(OpTest):
     def setUp(self):
+        ori_shape = (2, 2, 6)
+        new_shape = (2, 0, 3, -1)
+        infered_shape = (2, 2, 3, -1)
+
+        self.op_type = "reshape"
+        self.inputs = {"X": np.random.random(ori_shape).astype("float32")}
+        self.attrs = {"shape": new_shape}
+        self.outputs = {"Out": self.inputs["X"].reshape(infered_shape)}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(["X"], "Out")
+
+
+class TestReshapeOpWithInputShape(OpTest):
+    def setUp(self):
+        ori_shape = (6, 5)
+        new_shape = (0, -1, 5)
+        actual_shape = (2, 3, 5)
+
         self.op_type = "reshape"
-        self.inputs = {'X': np.random.random((10, 20)).astype("float32")}
-        self.attrs = {'shape': [4, -1, 5], 'inplace': True}
-        self.outputs = {'Out': self.inputs['X'].reshape(self.attrs['shape'])}
+        self.inputs = {
+            "X": np.random.random(ori_shape).astype("float32"),
+            "Shape": np.array(actual_shape, dtype="int32")
+        }
+        self.attrs = {"shape": new_shape}
+        self.outputs = {"Out": self.inputs["X"].reshape(actual_shape)}

     def test_check_output(self):
         self.check_output()
@@ -73,5 +144,5 @@ class TestReshapeOpDimInferInplace(OpTest):
         self.check_grad(["X"], "Out")


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
```
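The tests lean on the fact that numpy's own ndarray.reshape resolves a single -1, which is why infered_shape = (2, 2, 3, -1) can be passed straight through when computing the expected output:

```python
import numpy as np

x = np.random.random((2, 2, 6)).astype("float32")
print(x.reshape((2, 2, 3, -1)).shape)  # (2, 2, 3, 2): numpy infers the -1
```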
python/paddle/fluid/tests/unittests/test_target_assign_op.py (100755 → 100644)

File mode changed from 100755 to 100644.