BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 312b7786
Authored Nov 26, 2018 by Qiao Longfei

clean code

Parent 2b6c0c09
Showing 4 changed files with 3 additions and 390 deletions (+3 -390):

  paddle/fluid/operators/distributed_ops/lookup_remote_table_op.cc   +0 -114
  paddle/fluid/operators/distributed_ops/lookup_remote_table_op.h    +0 -274
  paddle/fluid/operators/lookup_table_op.cc                          +1 -1
  paddle/fluid/operators/lookup_table_op.h                           +2 -1
paddle/fluid/operators/distributed_ops/lookup_remote_table_op.cc (deleted, 100644 → 0)
```cpp
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/distributed_ops/lookup_remote_table_op.h"
#include "paddle/fluid/framework/var_type_inference.h"

namespace paddle {
namespace operators {

class LookupRemoteTableOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("W"),
                   "Input(W) of LookupRemoteTableOp should not be null.");
    PADDLE_ENFORCE(ctx->HasInput("Ids"),
                   "Input(Ids) of LookupRemoteTableOp should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of LookupRemoteTableOp should not be null.");

    auto table_dims = ctx->GetInputDim("W");
    auto ids_dims = ctx->GetInputDim("Ids");
    int ids_rank = ids_dims.size();

    PADDLE_ENFORCE_EQ(table_dims.size(), 2);
    PADDLE_ENFORCE_EQ(ids_dims[ids_rank - 1], 1,
                      "The last dimension of the 'Ids' tensor must be 1.");

    auto output_dims =
        framework::vectorize(framework::slice_ddim(ids_dims, 0, ids_rank - 1));
    output_dims.push_back(table_dims[1]);
    ctx->SetOutputDim("Out", framework::make_ddim(output_dims));

    if (ctx->GetOutputsVarType("Out")[0] ==
        framework::proto::VarType::LOD_TENSOR) {
      ctx->ShareLoD("Ids", /*->*/ "Out");
    }
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    auto data_type = framework::GetDataTypeOfVar(ctx.InputVar("W"));
    return framework::OpKernelType(data_type, ctx.device_context());
  }
};

class LookupRemoteTableOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("W",
             "(Tensor) The input represents embedding tensors, "
             "which is a learnable parameter.");
    AddInput("Ids",
             "An input with type int32 or int64 "
             "contains the ids to be looked up in W. "
             "The last dimension size must be 1.");
    AddOutput("Out", "The lookup results, which have the same type as W.");
    AddAttr<std::vector<int64_t>>("height_sections",
                                  "Height for each output SelectedRows.")
        .SetDefault(std::vector<int64_t>({}));
    AddAttr<int>("trainer_id", "trainer id from 0 ~ worker_num.").SetDefault(0);
    AddAttr<std::vector<std::string>>(
        "epmap",
        "(string vector, default 127.0.0.1:6164)"
        "Server endpoints in the order of input variables for mapping")
        .SetDefault({"127.0.0.1:6164"});
    AddAttr<int64_t>("padding_idx",
                     "(int64, default -1) "
                     "If the value is -1, it makes no effect to lookup. "
                     "Otherwise the given value indicates padding the output "
                     "with zeros whenever lookup encounters it in Ids.")
        .SetDefault(kNoPadding);
    // NOTE(minqiyang): grad_inplace is a temporal attribute,
    // please do NOT set this attribute in python layer.
    AddAttr<bool>("grad_inplace",
                  "(boolean, default false) "
                  "If the grad op reuses the input's variable.")
        .SetDefault(false);
    AddComment(R"DOC(
Lookup Remote Table Operator.

This operator is used to perform lookups on the parameter W,
then concatenates the results into a dense tensor.

The input Ids can carry the LoD (Level of Details) information,
or not. And the output only shares the LoD information with input Ids.

)DOC");
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

REGISTER_OPERATOR(lookup_remote_table, ops::LookupRemoteTableOp,
                  paddle::framework::EmptyGradOpMaker,
                  ops::LookupRemoteTableOpMaker);

REGISTER_OP_CPU_KERNEL(lookup_remote_table,
                       ops::LookupRemoteTableKernel<float>,
                       ops::LookupRemoteTableKernel<double>);
```
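For intuition, the shape rule InferShape enforces above is: Ids of shape [d0, ..., dk, 1] looked up in W of shape [height, width] yields Out of shape [d0, ..., dk, width]. A minimal standalone sketch of that rule, with a hypothetical LookupOutputDims helper and plain int64_t vectors standing in for framework::DDim:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for the InferShape logic above: strip the trailing
// 1 from the Ids shape and append the embedding width table_dims[1].
std::vector<int64_t> LookupOutputDims(const std::vector<int64_t>& ids_dims,
                                      const std::vector<int64_t>& table_dims) {
  assert(table_dims.size() == 2);  // W must be a 2-D table
  assert(ids_dims.back() == 1);    // last dimension of Ids must be 1
  std::vector<int64_t> out(ids_dims.begin(), ids_dims.end() - 1);
  out.push_back(table_dims[1]);
  return out;
}

int main() {
  // Ids: [32, 1], W: [10000, 64]  ->  Out: [32, 64]
  auto out = LookupOutputDims({32, 1}, {10000, 64});
  assert((out == std::vector<int64_t>{32, 64}));
  return 0;
}
```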
paddle/fluid/operators/distributed_ops/lookup_remote_table_op.h (deleted, 100644 → 0)
```cpp
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <future>  // NOLINT
#include <ostream>
#include <set>
#include <string>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/detail/macros.h"
#include "paddle/fluid/operators/distributed_ops/send_recv_util.h"
#include "paddle/fluid/operators/math/blas.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using SelectedRows = framework::SelectedRows;
using DDim = framework::DDim;

constexpr int64_t kNoPadding = -1;

inline size_t GetSectionIndex(int64_t id,
                              const std::vector<int64_t>& abs_sections) {
  for (size_t i = 1; i < abs_sections.size(); ++i) {
    if (id < abs_sections[i]) {
      return i - 1;
    }
  }
  return abs_sections.size() - 1;
}

inline std::vector<int64_t> ToAbsoluteSection(
    const std::vector<int64_t>& height_sections) {
  std::vector<int64_t> abs_sections;
  abs_sections.resize(height_sections.size());
  abs_sections[0] = 0;
  for (size_t i = 1; i < height_sections.size(); ++i) {
    abs_sections[i] = height_sections[i - 1] + abs_sections[i - 1];
  }
  return abs_sections;
}

inline std::vector<std::vector<int64_t>> SplitIds(
    const std::string& id_name, const std::vector<int64_t>& height_section,
    framework::Scope* scope) {
  auto& id_tensor = scope->Var(id_name)->Get<framework::LoDTensor>();
  auto* id_data = id_tensor.data<int64_t>();
  std::set<int64_t> all_ids;
  for (size_t i = 0; i < id_tensor.numel(); ++i) {
    all_ids.insert(id_data[i]);
  }
  auto abs_sections = ToAbsoluteSection(height_section);
  std::vector<std::vector<int64_t>> splited_ids;
  splited_ids.resize(height_section.size() + 1);
  for (auto& id : all_ids) {
    auto section_index = GetSectionIndex(id, abs_sections);
    splited_ids[section_index].push_back(id - abs_sections[section_index]);
  }
  return splited_ids;
}

inline void SplitIdsIntoMultipleVarsBySection(
    const std::string& id_name, const std::vector<std::string>& in_var_names,
    const std::vector<int64_t>& height_section,
    const std::vector<std::vector<int64_t>>& splited_ids,
    framework::Scope* scope) {
  PADDLE_ENFORCE_EQ(in_var_names.size(), height_section.size() + 1, "");

  auto place = platform::CPUPlace();

  for (size_t i = 0; i < in_var_names.size(); ++i) {
    auto* id_tensor =
        scope->Var(in_var_names[i])->GetMutable<framework::LoDTensor>();
    auto& ids = splited_ids[i];
    if (!ids.empty()) {
      auto* id_tensor_data = id_tensor->mutable_data<int64_t>(
          framework::make_ddim({static_cast<int64_t>(ids.size()), 1}), place);
      memcpy(id_tensor_data, ids.data(), sizeof(int64_t) * ids.size());
    }
  }
}

inline void MergeMultipleVarsIntoOnBySection(
    const std::string& id_name, const std::string& out_name,
    const std::vector<std::string>& out_var_names,
    const std::vector<int64_t>& height_section,
    const std::vector<std::vector<int64_t>>& splited_ids,
    const framework::ExecutionContext& context, framework::Scope* scope) {
  PADDLE_ENFORCE_EQ(out_var_names.size(), height_section.size() + 1, "");

  auto cpu_place = platform::CPUPlace();

  auto abs_sections = ToAbsoluteSection(height_section);
  auto& id_tensor = scope->Var(id_name)->Get<framework::LoDTensor>();
  auto* id_data = id_tensor.data<int64_t>();
  std::unordered_map<int64_t, std::vector<size_t>> id_to_offset;
  for (size_t i = 0; i < id_tensor.numel(); ++i) {
    id_to_offset[id_data[i]].push_back(i);
  }

  auto* out_tensor = scope->Var(out_name)->GetMutable<framework::LoDTensor>();
  auto* out_tensor_data = out_tensor->mutable_data<float>(context.GetPlace());

  for (size_t section_idx = 0; section_idx < out_var_names.size();
       ++section_idx) {
    auto& ids_in_this_section = splited_ids[section_idx];
    auto& prefetch_out_var =
        scope->Var(out_var_names[section_idx])->Get<framework::LoDTensor>();
    const auto* out_var_data = prefetch_out_var.data<float>();
    auto& dims = prefetch_out_var.dims();

    PADDLE_ENFORCE_EQ(dims.size(), 2, "");
    PADDLE_ENFORCE_EQ(ids_in_this_section.size(), dims[0]);

    auto row_numel = dims[1];

    for (size_t i = 0; i < dims[0]; ++i) {
      auto id = ids_in_this_section[i];
      auto origin_id = id + abs_sections[section_idx];
      auto& offsets = id_to_offset[origin_id];
      for (auto& offset : offsets) {
        // should support GPU tensor
        memory::Copy(cpu_place, out_tensor_data + offset * row_numel,
                     cpu_place, out_var_data + i * row_numel,
                     sizeof(float) * row_numel);
      }
    }
  }
}

void prefetch(const std::string& id_name, const std::string& out_name,
              const std::string& table_name,
              const std::vector<std::string>& epmap,
              const std::vector<int64_t>& height_sections,
              const framework::ExecutionContext& context) {
  auto& local_scope = context.scope().NewScope();

  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto& ctx = *pool.Get(context.GetPlace());

  distributed::RPCClient* rpc_client =
      distributed::RPCClient::GetInstance<RPCCLIENT_T>(
          context.Attr<int>("trainer_id"));

  std::vector<std::string> in_var_names;
  std::vector<std::string> out_var_names;
  for (size_t i = 0; i < epmap.size(); ++i) {
    in_var_names.push_back(id_name + "@" + epmap[i]);
    out_var_names.push_back(out_name + "@" + epmap[i]);
  }

  auto splited_ids = SplitIds(id_name, height_sections, &local_scope);
  SplitIdsIntoMultipleVarsBySection(id_name, in_var_names, height_sections,
                                    splited_ids, &local_scope);

  // create output var in local scope
  for (auto& name : out_var_names) {
    local_scope.Var(name)->GetMutable<framework::LoDTensor>();
  }

  std::vector<distributed::VarHandlePtr> rets;
  for (size_t i = 0; i < in_var_names.size(); i++) {
    if (NeedSend(local_scope, in_var_names[i])) {
      VLOG(30) << "sending " << in_var_names[i] << " to " << epmap[i]
               << " to get " << out_var_names[i] << " back";
      rets.push_back(rpc_client->AsyncPrefetchVar(
          epmap[i], ctx, local_scope, in_var_names[i], out_var_names[i]));
    } else {
      VLOG(30) << "don't send non-initialized variable: " << out_var_names[i];
    }
  }
  for (size_t i = 0; i < rets.size(); i++) {
    PADDLE_ENFORCE(rets[i]->Wait(), "internal error in RPCClient");
  }

  MergeMultipleVarsIntoOnBySection(id_name, out_name, out_var_names,
                                   height_sections, splited_ids, context,
                                   &local_scope);

  context.scope().DeleteScope(&local_scope);
}

template <typename T>
class LookupRemoteTableKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    std::string id_name = context.Inputs("Ids").front();
    auto* ids_t = context.Input<LoDTensor>("Ids");  // int tensor

    std::string out_name = context.Outputs("Out").front();
    auto* output_t = context.Output<LoDTensor>("Out");  // float tensor

    std::string table_name = context.Inputs("W").front();
    auto* table_var = context.InputVar("W");

    int64_t padding_idx = context.Attr<int64_t>("padding_idx");
    int64_t* ids = const_cast<int64_t*>(ids_t->data<int64_t>());
    int64_t ids_numel = ids_t->numel();

    auto epmap = context.Attr<std::vector<std::string>>("epmap");
    auto height_sections =
        context.Attr<std::vector<int64_t>>("height_sections");

    auto& local_scope = context.scope().NewScope();

    platform::DeviceContextPool& pool =
        platform::DeviceContextPool::Instance();
    auto& ctx = *pool.Get(context.GetPlace());

    distributed::RPCClient* rpc_client =
        distributed::RPCClient::GetInstance<RPCCLIENT_T>(
            context.Attr<int>("trainer_id"));

    std::vector<std::string> in_var_names;
    std::vector<std::string> out_var_names;
    for (size_t i = 0; i < epmap.size(); ++i) {
      in_var_names.push_back(id_name + "@" + epmap[i]);
      out_var_names.push_back(out_name + "@" + epmap[i]);
    }

    auto splited_ids = SplitIds(id_name, height_sections, &local_scope);
    SplitIdsIntoMultipleVarsBySection(id_name, in_var_names, height_sections,
                                      splited_ids, &local_scope);

    // create output var in local scope
    for (auto& name : out_var_names) {
      local_scope.Var(name)->GetMutable<framework::LoDTensor>();
    }

    std::vector<distributed::VarHandlePtr> rets;
    for (size_t i = 0; i < in_var_names.size(); i++) {
      if (NeedSend(local_scope, in_var_names[i])) {
        VLOG(30) << "sending " << in_var_names[i] << " to " << epmap[i]
                 << " to get " << out_var_names[i] << " back";
        rets.push_back(rpc_client->AsyncPrefetchVar(
            epmap[i], ctx, local_scope, in_var_names[i], out_var_names[i]));
      } else {
        VLOG(30) << "don't send non-initialized variable: "
                 << out_var_names[i];
      }
    }
    for (size_t i = 0; i < rets.size(); i++) {
      PADDLE_ENFORCE(rets[i]->Wait(), "internal error in RPCClient");
    }

    MergeMultipleVarsIntoOnBySection(id_name, out_name, out_var_names,
                                     height_sections, splited_ids, context,
                                     &local_scope);

    context.scope().DeleteScope(&local_scope);
  }
};

}  // namespace operators
}  // namespace paddle
```
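The sharding arithmetic in GetSectionIndex and ToAbsoluteSection above is easiest to verify with concrete numbers. Below is a self-contained sketch, copying the two helpers without the Scope/LoDTensor plumbing and assuming a 10-row table split across three parameter servers:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Copies of the header's helpers, minus the framework plumbing.
inline std::vector<int64_t> ToAbsoluteSection(
    const std::vector<int64_t>& height_sections) {
  std::vector<int64_t> abs_sections(height_sections.size());
  abs_sections[0] = 0;
  for (size_t i = 1; i < height_sections.size(); ++i) {
    abs_sections[i] = height_sections[i - 1] + abs_sections[i - 1];
  }
  return abs_sections;
}

inline size_t GetSectionIndex(int64_t id,
                              const std::vector<int64_t>& abs_sections) {
  for (size_t i = 1; i < abs_sections.size(); ++i) {
    if (id < abs_sections[i]) return i - 1;
  }
  return abs_sections.size() - 1;
}

int main() {
  // Rows [0,4) on server 0, [4,7) on server 1, [7,10) on server 2.
  std::vector<int64_t> height_sections = {4, 3, 3};
  auto abs = ToAbsoluteSection(height_sections);
  assert((abs == std::vector<int64_t>{0, 4, 7}));

  // Global row 5 lives on shard 1, at local offset 5 - 4 = 1: the same
  // "id - abs_sections[section_index]" rebasing done in SplitIds.
  size_t shard = GetSectionIndex(5, abs);
  assert(shard == 1);
  assert(5 - abs[shard] == 1);
  return 0;
}
```

MergeMultipleVarsIntoOnBySection inverts this rebasing: it adds abs_sections[section_idx] back to each local id to recover the original id, then scatters the fetched row into every position of Out where that id occurred.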
paddle/fluid/operators/lookup_table_op.cc
```diff
@@ -98,7 +98,7 @@ class LookupTableOpMaker : public framework::OpProtoAndCheckerMaker {
         "epmap",
         "(string vector, default 127.0.0.1:6164)"
         "Server endpoints in the order of input variables for mapping")
-        .SetDefault({"127.0.0.1:6164"});
+        .SetDefault({});
     AddComment(R"DOC(
 Lookup Table Operator.
```
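This default previously planted a placeholder 127.0.0.1:6164 endpoint on every lookup_table; clearing it means an unconfigured operator carries no endpoints at all, which matches the kernel change in the next file that gates the remote path on an explicit attribute rather than on a non-empty epmap.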
paddle/fluid/operators/lookup_table_op.h
```diff
@@ -51,10 +51,11 @@ class LookupTableKernel : public framework::OpKernel<T> {
     auto out_name = context.Outputs("Out").front();
     auto table_name = context.Inputs("W").front();
     auto epmap = context.Attr<std::vector<std::string>>("epmap");
+    auto remote_prefetch = context.Attr<bool>("remote_prefetch");
     auto height_sections =
         context.Attr<std::vector<int64_t>>("height_sections");
-    if (!epmap.empty()) {
+    if (remote_prefetch) {
       // if epmap is not empty, then the parameter will be fetched from
       // remote parameter server
 #ifdef PADDLE_WITH_DISTRIBUTE
```
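Taken together with the lookup_table_op.cc change above, the remote/local decision becomes explicit: the kernel now branches on the remote_prefetch attribute instead of inferring intent from whether epmap happens to be non-empty, and the prefetch path itself is still compiled in only under PADDLE_WITH_DISTRIBUTE.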