Commit 511b6e23
Authored Sep 06, 2017 by QI JUN; committed via GitHub on Sep 06, 2017
Merge pull request #3900 from QiJune/dim_int64
make dim int to int64_t
Parents: b3afe30d, 52f2bc1f
Showing 12 changed files with 105 additions and 92 deletions (+105 -92)
paddle/framework/ddim.cc  +32 -32
paddle/framework/ddim.h  +10 -10
paddle/framework/ddim_test.cc  +2 -2
paddle/framework/dim.h  +35 -32
paddle/framework/dim_test.cu  +3 -3
paddle/framework/eigen.h  +1 -1
paddle/framework/tensor_impl.h  +2 -2
paddle/operators/gaussian_random_op.cc  +8 -3
paddle/operators/rnn/recurrent_op_utils.cc  +1 -1
paddle/operators/uniform_random_op.cc  +8 -3
paddle/pybind/pybind.cc  +1 -1
paddle/pybind/tensor_py.h  +2 -2
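The change is mechanical but wide: every place that stores, indexes, or multiplies dimension extents moves from int (or ssize_t/size_t) to int64_t, so shapes whose element count exceeds 2^31 - 1 can be represented. A self-contained illustration of the motivation; the 65536 x 65536 shape is made up, only the overflow behaviour matters:

// Illustrative only: why dimension arithmetic wants 64-bit integers.
// The 65536 x 65536 shape is hypothetical.
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  std::vector<int64_t> dims = {65536, 65536};  // 2^32 elements

  int64_t count64 = 1;
  for (int64_t d : dims) count64 *= d;

  // Truncating the same count to 32 bits loses the value entirely.
  int32_t count32 = static_cast<int32_t>(count64);

  std::cout << "int64_t element count: " << count64 << "\n";  // 4294967296
  std::cout << "truncated to int32_t:  " << count32 << "\n";  // 0
  return 0;
}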
paddle/framework/ddim.cc

@@ -21,16 +21,16 @@ namespace framework {
 /// @cond HIDDEN

 template <int i>
-Dim<i> make_dim(const int* d) {
+Dim<i> make_dim(const int64_t* d) {
   return Dim<i>(*d, make_dim<i - 1>(d + 1));
 }

 template <>
-Dim<1> make_dim<1>(const int* d) {
+Dim<1> make_dim<1>(const int64_t* d) {
   return Dim<1>(*d);
 }

-void make_ddim(DDim& ddim, const int* dims, int n) {
+void make_ddim(DDim& ddim, const int64_t* dims, int n) {
   switch (n) {
     case 1:
       ddim = make_dim<1>(dims);

@@ -67,13 +67,13 @@ void make_ddim(DDim& ddim, const int* dims, int n) {
 /// @endcond

-DDim make_ddim(std::initializer_list<int> dims) {
+DDim make_ddim(std::initializer_list<int64_t> dims) {
   DDim result(make_dim(0));
   make_ddim(result, dims.begin(), dims.size());
   return result;
 }

-DDim make_ddim(const std::vector<int>& dims) {
+DDim make_ddim(const std::vector<int64_t>& dims) {
   DDim result(make_dim(0));
   make_ddim(result, &dims[0], dims.size());
   return result;

@@ -81,12 +81,12 @@ DDim make_ddim(const std::vector<int>& dims) {
 /// @cond HIDDEN
 // XXX For some reason, putting this in an anonymous namespace causes errors
-class DynamicMutableIndexer : public boost::static_visitor<int&> {
+class DynamicMutableIndexer : public boost::static_visitor<int64_t&> {
  public:
   explicit DynamicMutableIndexer(int idx) : idx_(idx) {}

   template <int D>
-  int& operator()(Dim<D>& dim) const {
+  int64_t& operator()(Dim<D>& dim) const {
     return dim[idx_];
   }

@@ -94,12 +94,12 @@ class DynamicMutableIndexer : public boost::static_visitor<int&> {
   int idx_;
 };

-class DynamicConstIndexer : public boost::static_visitor<int> {
+class DynamicConstIndexer : public boost::static_visitor<int64_t> {
  public:
   explicit DynamicConstIndexer(int idx) : idx_(idx) {}

   template <int D>
-  int operator()(const Dim<D>& dim) const {
+  int64_t operator()(const Dim<D>& dim) const {
     return dim[idx_];
   }

@@ -109,22 +109,22 @@ class DynamicConstIndexer : public boost::static_visitor<int> {
 /// @endcond

-int& DDim::operator[](int idx) {
+int64_t& DDim::operator[](int idx) {
   return boost::apply_visitor(DynamicMutableIndexer(idx), var);
 }

-int DDim::operator[](int idx) const {
+int64_t DDim::operator[](int idx) const {
   return boost::apply_visitor(DynamicConstIndexer(idx), var);
 }

-ssize_t DDim::size() const { return arity(*this); }
+int64_t DDim::size() const { return arity(*this); }

 bool DDim::operator==(DDim d) const {
   if (var.which() != d.getVar().which()) {
     return false;
   } else {
-    std::vector<int> v1 = vectorize(*this);
-    std::vector<int> v2 = vectorize(d);
+    std::vector<int64_t> v1 = vectorize(*this);
+    std::vector<int64_t> v2 = vectorize(d);

     for (unsigned int i = 0; i < v1.size(); i++) {
       if (v1[i] != v2[i]) {

@@ -139,10 +139,10 @@ bool DDim::operator==(DDim d) const {
 bool DDim::operator!=(DDim d) const { return !(*this == d); }

 DDim DDim::operator+(DDim d) const {
-  std::vector<int> v1 = vectorize(*this);
-  std::vector<int> v2 = vectorize(d);
+  std::vector<int64_t> v1 = vectorize(*this);
+  std::vector<int64_t> v2 = vectorize(d);

-  std::vector<int> v3;
+  std::vector<int64_t> v3;

   assert(v1.size() == v2.size());

@@ -154,10 +154,10 @@ DDim DDim::operator+(DDim d) const {
 }

 DDim DDim::operator*(DDim d) const {
-  std::vector<int> v1 = vectorize(*this);
-  std::vector<int> v2 = vectorize(d);
+  std::vector<int64_t> v1 = vectorize(*this);
+  std::vector<int64_t> v2 = vectorize(d);

-  std::vector<int> v3;
+  std::vector<int64_t> v3;

   assert(v1.size() == v2.size());

@@ -168,15 +168,15 @@ DDim DDim::operator*(DDim d) const {
   return make_ddim(v3);
 }

-int get(const DDim& ddim, int idx) { return ddim[idx]; }
+int64_t get(const DDim& ddim, int idx) { return ddim[idx]; }

 void set(DDim& ddim, int idx, int value) { ddim[idx] = value; }

 /// @cond HIDDEN
 struct VectorizeVisitor : public boost::static_visitor<> {
-  std::vector<int>& vector;
+  std::vector<int64_t>& vector;

-  explicit VectorizeVisitor(std::vector<int>& v) : vector(v) {}
+  explicit VectorizeVisitor(std::vector<int64_t>& v) : vector(v) {}

   template <typename T>
   void operator()(const T& t) {

@@ -188,31 +188,31 @@ struct VectorizeVisitor : public boost::static_visitor<> {
 };
 /// @endcond

-std::vector<int> vectorize(const DDim& ddim) {
-  std::vector<int> result;
+std::vector<int64_t> vectorize(const DDim& ddim) {
+  std::vector<int64_t> result;
   VectorizeVisitor visitor(result);
   boost::apply_visitor(visitor, ddim);
   return result;
 }

-struct ProductVisitor : public boost::static_visitor<ssize_t> {
+struct ProductVisitor : public boost::static_visitor<int64_t> {
   template <int D>
-  ssize_t operator()(const Dim<D>& dim) {
+  int64_t operator()(const Dim<D>& dim) {
     return product(dim);
   }
 };

-ssize_t product(const DDim& ddim) {
+int64_t product(const DDim& ddim) {
   ProductVisitor visitor;
   return boost::apply_visitor(visitor, ddim);
 }

 struct SliceVectorizeVisitor : public boost::static_visitor<> {
-  std::vector<int>& vector;
+  std::vector<int64_t>& vector;
   int begin;
   int end;

-  SliceVectorizeVisitor(std::vector<int>& v, int b, int e)
+  SliceVectorizeVisitor(std::vector<int64_t>& v, int b, int e)
       : vector(v), begin(b), end(e) {
     PADDLE_ENFORCE(begin < end,
                    "Begin index must be less than end index in ddim slice.");

@@ -240,7 +240,7 @@ struct SliceVectorizeVisitor : public boost::static_visitor<> {
 };

 DDim slice_ddim(const DDim& dim, int begin, int end) {
-  std::vector<int> vec;
+  std::vector<int64_t> vec;
   vec.reserve(end - begin);
   SliceVectorizeVisitor visitor(vec, begin, end);
   boost::apply_visitor(visitor, dim);

@@ -280,7 +280,7 @@ std::ostream& operator<<(std::ostream& os, const DDim& ddim) {
   return os;
 }

-DDim::DDim(std::initializer_list<int> init_list) {
+DDim::DDim(std::initializer_list<int64_t> init_list) {
   *this = make_ddim(init_list);
 }
 }  // namespace framework
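DDim stores one of several Dim<N> alternatives in a boost::variant and dispatches operator[], vectorize, and product through boost::static_visitor, which is why the visitors above switch their result types to int64_t. A rough self-contained analogue of that dispatch, rebuilt on C++17 std::variant so it compiles outside Paddle; Dim2, Dim3, and get_extent are hypothetical stand-ins, not Paddle identifiers:

// Sketch of visitor-based dynamic indexing in the style of DynamicConstIndexer,
// using std::variant instead of boost::variant for a standalone example.
#include <array>
#include <cstdint>
#include <iostream>
#include <variant>

using Dim2 = std::array<int64_t, 2>;
using Dim3 = std::array<int64_t, 3>;
using DimVariant = std::variant<Dim2, Dim3>;

// Return the idx-th extent as int64_t, whatever the static rank of the
// stored alternative is.
int64_t get_extent(const DimVariant& var, int idx) {
  return std::visit([idx](const auto& dim) -> int64_t { return dim[idx]; },
                    var);
}

int main() {
  DimVariant shape = Dim3{9, 1, 5};
  std::cout << get_extent(shape, 0) << "\n";  // 9
  std::cout << get_extent(shape, 2) << "\n";  // 5
  return 0;
}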
paddle/framework/ddim.h

@@ -40,7 +40,7 @@ struct DDim {
   template <int D>
   explicit DDim(const Dim<D>& in) : var(in) {}

-  /*implicit*/ DDim(std::initializer_list<int> init_list);
+  /*implicit*/ DDim(std::initializer_list<int64_t> init_list);

   template <int D>
   DDim& operator=(const Dim<D>& in) {

@@ -48,8 +48,8 @@ struct DDim {
     return *this;
   }

-  int& operator[](int idx);
-  int operator[](int idx) const;
+  int64_t& operator[](int idx);
+  int64_t operator[](int idx) const;

   template <typename Visitor>
   typename Visitor::result_type apply_visitor(Visitor& visitor) {

@@ -71,15 +71,15 @@ struct DDim {
   DDim operator*(DDim d) const;

-  ssize_t size() const;
+  int64_t size() const;
 };

 /**
- * \brief Make a DDim from std::vector<int>
+ * \brief Make a DDim from std::vector<int64_t>
  *
  * \param dims An vector of ints. Must be sized between [1, 9]
  */
-DDim make_ddim(const std::vector<int>& dims);
+DDim make_ddim(const std::vector<int64_t>& dims);

 /**
  * \brief Make a DDim from an initializer list

@@ -87,14 +87,14 @@ DDim make_ddim(const std::vector<int>& dims);
  * \param dims An initializer list of ints. Must be sized between [1, 9]
  *
  */
-DDim make_ddim(std::initializer_list<int> dims);
+DDim make_ddim(std::initializer_list<int64_t> dims);

-int get(const DDim& dim, int idx);
+int64_t get(const DDim& dim, int idx);
 void set(DDim& dim, int idx, int val);

-std::vector<int> vectorize(const DDim& ddim);
+std::vector<int64_t> vectorize(const DDim& ddim);

-ssize_t product(const DDim& ddim);
+int64_t product(const DDim& ddim);

 /**
  * \brief Slice a ddim
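With the header updated, callers pass and receive int64_t end to end. A minimal usage sketch, assuming it is compiled inside the Paddle source tree so "paddle/framework/ddim.h" resolves (the values mirror the ones used in ddim_test.cc below):

// Usage sketch of the updated DDim API, limited to functions visible in
// this diff. Assumes the Paddle source tree for the include path.
#include <cstdint>
#include <vector>

#include "paddle/framework/ddim.h"

void ddim_example() {
  namespace f = paddle::framework;

  // Dimension containers are now int64_t-based end to end.
  std::vector<int64_t> vec({9, 1, 5});
  f::DDim ddim = f::make_ddim(vec);

  int64_t n = f::product(ddim);              // 45, returned as int64_t
  std::vector<int64_t> back = f::vectorize(ddim);
  int64_t first = f::get(ddim, 0);           // 9

  (void)n;
  (void)back;
  (void)first;
}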
paddle/framework/ddim_test.cc

@@ -12,7 +12,7 @@ TEST(DDim, Equality) {
   EXPECT_EQ(ddim[2], 5);

   // construct a DDim from a vector
-  std::vector<int> vec({9, 1, 5});
+  std::vector<int64_t> vec({9, 1, 5});
   paddle::framework::DDim vddim = paddle::framework::make_ddim(vec);
   EXPECT_EQ(ddim[0], 9);
   EXPECT_EQ(ddim[1], 1);

@@ -25,7 +25,7 @@ TEST(DDim, Equality) {
   EXPECT_EQ(paddle::framework::get(ddim, 0), 6);

   // vectorize a DDim
-  std::vector<int> res_vec = paddle::framework::vectorize(vddim);
+  std::vector<int64_t> res_vec = paddle::framework::vectorize(vddim);
   EXPECT_EQ(res_vec[0], 9);
   EXPECT_EQ(res_vec[1], 1);
   EXPECT_EQ(res_vec[2], 5);
paddle/framework/dim.h

@@ -17,13 +17,13 @@ struct Dim {
   static constexpr int dimensions = i;

   template <typename... Args>
-  HOSTDEVICE Dim(int _head, Args... _tail) : head(_head), tail(_tail...) {
+  HOSTDEVICE Dim(int64_t _head, Args... _tail) : head(_head), tail(_tail...) {
     static_assert(sizeof...(_tail) == i - 1,
                   "Dim initialized with the wrong number of parameters");
   }

   HOSTDEVICE
-  Dim(int _head, const Dim<i - 1>& _tail) : head(_head), tail(_tail) {}
+  Dim(int64_t _head, const Dim<i - 1>& _tail) : head(_head), tail(_tail) {}

   HOSTDEVICE
   Dim() : head(0), tail() {}

@@ -31,12 +31,12 @@ struct Dim {
   /** Construct a Dim from a linear index and size. Uses Fortran order
    * indexing. */
   HOSTDEVICE
-  Dim(int idx, const Dim<i>& size)
+  Dim(int64_t idx, const Dim<i>& size)
       : head(idx % size.head), tail(idx / size.head, size.tail) {}

   /** Construct a Dim with each dimension set to the given index */
   HOSTDEVICE
-  Dim(int idx) : head(idx), tail(idx) {}
+  Dim(int64_t idx) : head(idx), tail(idx) {}

   HOSTDEVICE
   bool operator==(const Dim<i>& o) const {

@@ -47,13 +47,13 @@ struct Dim {
   bool operator!=(const Dim<i>& o) const { return !(*this == o); }

   HOSTDEVICE
-  int& operator[](int idx);
+  int64_t& operator[](int idx);
   HOSTDEVICE
-  int operator[](int idx) const;
+  int64_t operator[](int idx) const;

   HOST std::string to_string() const;

-  int head;
+  int64_t head;
   Dim<i - 1> tail;
 };

@@ -63,7 +63,7 @@ struct Dim<1> {
   static constexpr int dimensions = 1;

   HOSTDEVICE
-  Dim(int _head) : head(_head) {}
+  Dim(int64_t _head) : head(_head) {}

   HOSTDEVICE
   Dim() : head(0) {}

@@ -86,11 +86,11 @@ struct Dim<1> {
   bool operator!=(const Dim<1>& o) const { return !(*this == o); }

   HOSTDEVICE
-  int& operator[](int idx);
+  int64_t& operator[](int idx);
   HOSTDEVICE
-  int operator[](int idx) const;
+  int64_t operator[](int idx) const;

-  int head;
+  int64_t head;
 };

 namespace {

@@ -100,12 +100,12 @@ template <int i>
 struct DimGetter {
   // Return a copy if Dim is const
   template <typename D>
-  HOSTDEVICE static int impl(const D& d) {
+  HOSTDEVICE static int64_t impl(const D& d) {
     return DimGetter<i - 1>::impl(d.tail);
   }
   // Return a reference if Dim is mutable
   template <typename D>
-  HOSTDEVICE static int& impl(D& d) {
+  HOSTDEVICE static int64_t& impl(D& d) {
     return DimGetter<i - 1>::impl(d.tail);
   }
 };

@@ -115,18 +115,18 @@ template <>
 struct DimGetter<0> {
   // Return a copy if Dim is const
   template <typename D>
-  HOSTDEVICE static int impl(const D& d) {
+  HOSTDEVICE static int64_t impl(const D& d) {
     return d.head;
   }
   // Return a reference if Dim is mutable
   template <typename D>
-  HOSTDEVICE static int& impl(D& d) {
+  HOSTDEVICE static int64_t& impl(D& d) {
     return d.head;
   }
 };

 template <int D>
-HOSTDEVICE int& indexer(Dim<D>& dim, int idx) {
+HOSTDEVICE int64_t& indexer(Dim<D>& dim, int idx) {
 #ifndef __CUDA_ARCH__
   if (idx < 0) {
     throw std::invalid_argument("Tried to access a negative dimension");

@@ -141,7 +141,7 @@ HOSTDEVICE int& indexer(Dim<D>& dim, int idx) {
 }

 template <>
-HOSTDEVICE int& indexer<1>(Dim<1>& dim, int idx) {
+HOSTDEVICE int64_t& indexer<1>(Dim<1>& dim, int idx) {
 #ifndef __CUDA_ARCH__
   if (idx != 0) {
     throw std::invalid_argument("Invalid index");

@@ -153,7 +153,7 @@ HOSTDEVICE int& indexer<1>(Dim<1>& dim, int idx) {
 }

 template <int D>
-HOSTDEVICE int indexer(const Dim<D>& dim, int idx) {
+HOSTDEVICE int64_t indexer(const Dim<D>& dim, int idx) {
 #ifndef __CUDA_ARCH__
   if (idx < 0) {
     throw std::invalid_argument("Tried to access a negative dimension");

@@ -168,7 +168,7 @@ HOSTDEVICE int indexer(const Dim<D>& dim, int idx) {
 }

 template <>
-HOSTDEVICE int indexer<1>(const Dim<1>& dim, int idx) {
+HOSTDEVICE int64_t indexer<1>(const Dim<1>& dim, int idx) {
 #ifndef __CUDA_ARCH__
   if (idx != 0) {
     throw std::invalid_argument("Invalid index");

@@ -182,73 +182,76 @@ HOSTDEVICE int indexer<1>(const Dim<1>& dim, int idx) {
 }
 }  // namespace

 // Static access to constant Dim
 template <int i, int l>
-HOSTDEVICE int get(const Dim<l>& d) {
+HOSTDEVICE int64_t get(const Dim<l>& d) {
   return DimGetter<i>::impl(d);
 }

 // Static access to mutable Dim
 template <int i, int l>
-HOSTDEVICE int& get(Dim<l>& d) {
+HOSTDEVICE int64_t& get(Dim<l>& d) {
   return DimGetter<i>::impl(d);
 }

 // Dynamic access to constant Dim
 template <int l>
-HOSTDEVICE int Dim<l>::operator[](int i) const {
+HOSTDEVICE int64_t Dim<l>::operator[](int i) const {
   return indexer(*this, i);
 }

 // Dynamic access to mutable Dim
 template <int l>
-HOSTDEVICE int& Dim<l>::operator[](int i) {
+HOSTDEVICE int64_t& Dim<l>::operator[](int i) {
   return indexer(*this, i);
 }

 // Dynamic access to constant Dim
-inline HOSTDEVICE int Dim<1>::operator[](int i) const {
+inline HOSTDEVICE int64_t Dim<1>::operator[](int i) const {
   return indexer(*this, i);
 }

 // Dynamic access to mutable Dim
-inline HOSTDEVICE int& Dim<1>::operator[](int i) { return indexer(*this, i); }
+inline HOSTDEVICE int64_t& Dim<1>::operator[](int i) {
+  return indexer(*this, i);
+}

 // Dynamic access to constant Dim
 // without std::enable_if will try to instantiate this on get<0>(d)
 template <int l>
-HOSTDEVICE typename std::enable_if<(l > 0), int>::type get(const Dim<l>& d, int i) {
+HOSTDEVICE typename std::enable_if<(l > 0), int64_t>::type get(const Dim<l>& d, int i) {
   return d[i];
 }

 // Dynamic access to mutable Dim
 template <int l>
-HOSTDEVICE typename std::enable_if<(l > 0), int&>::type get(Dim<l>& d, int i) {
+HOSTDEVICE typename std::enable_if<(l > 0), int64_t&>::type get(Dim<l>& d, int i) {
   return d[i];
 }

 // Dot product of two dims
 template <int i>
-HOSTDEVICE int linearize(const Dim<i>& a, const Dim<i>& b) {
+HOSTDEVICE int64_t linearize(const Dim<i>& a, const Dim<i>& b) {
   return a.head * b.head + linearize(a.tail, b.tail);
 }

 // Base case dot product of two Dims
 // Notice it is inline because it is no longer a template
 template <>
-HOSTDEVICE inline int linearize(const Dim<1>& a, const Dim<1>& b) {
+HOSTDEVICE inline int64_t linearize(const Dim<1>& a, const Dim<1>& b) {
   return a.head * b.head;
 }

 // Product of a Dim
 template <int i>
-HOSTDEVICE int product(const Dim<i>& a, int prod = 1) {
+HOSTDEVICE int64_t product(const Dim<i>& a, int prod = 1) {
   return prod * a.head * product(a.tail);
 }

 // Base case product of a Dim
 // Notice it is inline because it is no longer a template
 template <>
-HOSTDEVICE inline int product(const Dim<1>& a, int prod) {
+HOSTDEVICE inline int64_t product(const Dim<1>& a, int prod) {
   return prod * a.head;
 }
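Dim<N> stores its extents as an int64_t head plus a recursive tail, so the accessors above change in lock-step. A usage sketch for the updated accessors, again assuming it is compiled inside the Paddle source tree so "paddle/framework/dim.h" resolves:

// Usage sketch for Dim<N> after the change: static get<i>(), dynamic
// operator[], and product() all yield int64_t now.
#include <cstdint>

#include "paddle/framework/dim.h"

void dim_example() {
  namespace f = paddle::framework;

  auto d = f::make_dim(5, 6);   // Dim<2> with int64_t extents
  int64_t a = f::get<0>(d);     // 5, compile-time index
  int64_t b = d[1];             // 6, run-time index
  int64_t n = f::product(d);    // 30

  (void)a;
  (void)b;
  (void)n;
}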
paddle/framework/dim_test.cu

@@ -8,7 +8,7 @@ __global__ void test(paddle::framework::Dim<2>* o) {
   o[0] = paddle::framework::make_dim(5, 6);
 }

-__global__ void dyn_idx_gpu(int* o) {
+__global__ void dyn_idx_gpu(int64_t* o) {
   auto d = paddle::framework::make_dim(5, 6);
   o[0] = d[1];
 }

@@ -47,9 +47,9 @@ TEST(Dim, Equality) {
   EXPECT_EQ(b[1], 11);

   // dynamic access on GPU
-  thrust::device_vector<int> r(1);
+  thrust::device_vector<int64_t> r(1);
   dyn_idx_gpu<<<1, 1>>>(thrust::raw_pointer_cast(r.data()));
-  int res = r[0];
+  int64_t res = r[0];
   EXPECT_EQ(res, 6);

   // ex_prefix_mul
paddle/framework/eigen.h

@@ -28,7 +28,7 @@ struct EigenDim {
   static Type From(const DDim& dims) {
     PADDLE_ENFORCE(arity(dims) == D, "D must match arity(DDim)");
     Type ret;
-    for (int d = 0; d < arity(dims); d++) {
+    for (int64_t d = 0; d < arity(dims); d++) {
       ret[d] = dims[d];
     }
     return ret;
paddle/framework/tensor_impl.h

@@ -58,7 +58,7 @@ inline T* Tensor::mutable_data(platform::Place place) {
                  "Tensor's numel must be larger than zero to call "
                  "Tensor::mutable_data. Call Tensor::set_dim first.");
   /* some versions of boost::variant don't have operator!= */
-  size_t size = product(dims_) * sizeof(T);
+  int64_t size = product(dims_) * sizeof(T);
   if (holder_ == nullptr || !(holder_->place() == place) ||
       holder_->size() < size + offset_) {
     if (platform::is_cpu_place(place)) {

@@ -131,7 +131,7 @@ inline Tensor Tensor::Slice(const int& begin_idx, const int& end_idx) const {
   PADDLE_ENFORCE_LT(begin_idx, end_idx,
                     "Begin index must be less than end index.");
   PADDLE_ENFORCE_NE(dims_[0], 1, "Can not slice a tensor with dims_[0] = 1.");
-  int base = product(dims_) / dims_[0];
+  size_t base = product(dims_) / dims_[0];
   Tensor dst;
   dst.holder_ = holder_;
   DDim dst_dims = dims_;
paddle/operators/gaussian_random_op.cc

@@ -31,8 +31,8 @@ class CPUGaussianRandomKernel : public framework::OpKernel {
     }
     engine.seed(seed);
     std::normal_distribution<T> dist(mean, std);
-    ssize_t size = framework::product(tensor->dims());
-    for (ssize_t i = 0; i < size; ++i) {
+    int64_t size = framework::product(tensor->dims());
+    for (int64_t i = 0; i < size; ++i) {
       data[i] = dist(engine);
     }
   }

@@ -46,9 +46,14 @@ class GaussianRandomOp : public framework::OperatorWithKernel {
   void InferShape(const framework::InferShapeContext& context) const override {
     auto* tensor = context.Output<framework::Tensor>("Out");
     auto dims = GetAttr<std::vector<int>>("dims");
+    std::vector<int64_t> temp;
+    temp.reserve(dims.size());
+    for (auto dim : dims) {
+      temp.push_back(static_cast<int64_t>(dim));
+    }
     PADDLE_ENFORCE(dims.size() > 0UL,
                    "dims can be one int or array. dims must be set.");
-    tensor->Resize(framework::make_ddim(dims));
+    tensor->Resize(framework::make_ddim(temp));
   }
 };
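The "dims" attribute stays a std::vector<int> on the attribute side, so InferShape widens it element by element before calling make_ddim, as the hunk above shows. A standalone sketch of that widening step; std::transform is used here as an equivalent alternative to the explicit loop in the patch:

// Widening a vector<int> attribute to vector<int64_t> before building a DDim.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <vector>

std::vector<int64_t> widen(const std::vector<int>& dims) {
  std::vector<int64_t> widened;
  widened.reserve(dims.size());
  std::transform(dims.begin(), dims.end(), std::back_inserter(widened),
                 [](int d) { return static_cast<int64_t>(d); });
  return widened;
}

int main() {
  std::vector<int64_t> w = widen({32, 784});
  std::cout << w[0] << " " << w[1] << "\n";  // 32 784
  return 0;
}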
paddle/operators/rnn/recurrent_op_utils.cc

@@ -61,7 +61,7 @@ void ConcatOutputs(const std::vector<Scope*>& step_scopes,
     PADDLE_ENFORCE(step_scope_var != nullptr, "%s not in scope",
                    outlinks[i].internal);
     f::DDim step_dims = step_scope_var->template GetMutable<Tensor>()->dims();
-    std::vector<int> dims_vec = vectorize(step_dims);
+    std::vector<int64_t> dims_vec = vectorize(step_dims);
     dims_vec.insert(dims_vec.begin(), seq_len);
     output->Resize(f::make_ddim(dims_vec));
   } else {
paddle/operators/uniform_random_op.cc

@@ -35,8 +35,8 @@ class CPUUniformRandomKernel : public framework::OpKernel {
     std::uniform_real_distribution<T> dist(
         static_cast<T>(context.GetAttr<float>("min")),
         static_cast<T>(context.GetAttr<float>("max")));
-    ssize_t size = framework::product(tensor->dims());
-    for (ssize_t i = 0; i < size; ++i) {
+    int64_t size = framework::product(tensor->dims());
+    for (int64_t i = 0; i < size; ++i) {
       data[i] = dist(engine);
     }
   }

@@ -52,7 +52,12 @@ class UniformRandomOp : public framework::OperatorWithKernel {
                    "uniform_random's min must less then max");
     auto* tensor = ctx.Output<framework::Tensor>("Out");
     auto dims = GetAttr<std::vector<int>>("dims");
-    tensor->Resize(framework::make_ddim(dims));
+    std::vector<int64_t> temp;
+    temp.reserve(dims.size());
+    for (auto dim : dims) {
+      temp.push_back(static_cast<int64_t>(dim));
+    }
+    tensor->Resize(framework::make_ddim(temp));
   }
 };
paddle/pybind/pybind.cc

@@ -76,7 +76,7 @@ PYBIND11_PLUGIN(core) {
       .def("get_dims",
           [](const Tensor& self) { return vectorize(self.dims()); })
      .def("set_dims",
-          [](Tensor& self, const std::vector<int>& dim) {
+          [](Tensor& self, const std::vector<int64_t>& dim) {
            self.Resize(make_ddim(dim));
          })
      .def("alloc_float",
paddle/pybind/tensor_py.h

@@ -85,7 +85,7 @@ void PyCPUTensorSetFromArray(
     framework::Tensor& self,
     py::array_t<T, py::array::c_style | py::array::forcecast> array,
     paddle::platform::CPUPlace& place) {
-  std::vector<int> dims;
+  std::vector<int64_t> dims;
   dims.reserve(array.ndim());
   for (size_t i = 0; i < array.ndim(); ++i) {
     dims.push_back((int)array.shape()[i]);

@@ -102,7 +102,7 @@ void PyCUDATensorSetFromArray(
     framework::Tensor& self,
     py::array_t<T, py::array::c_style | py::array::forcecast> array,
     paddle::platform::GPUPlace& place) {
-  std::vector<int> dims;
+  std::vector<int64_t> dims;
   dims.reserve(array.ndim());
   for (size_t i = 0; i < array.ndim(); ++i) {
     dims.push_back((int)array.shape()[i]);