Commit c5c80516
Authored Jan 04, 2017 by hedaoyuan
add BufferArg

Parent: 0c4be7e6
Showing 4 changed files with 436 additions and 0 deletions (+436 -0)
paddle/function/BufferArg.cpp       +43   -0
paddle/function/BufferArg.h         +260  -0
paddle/function/BufferArgTest.cpp   +128  -0
paddle/function/TensorType.h        +5    -0
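The files below introduce BufferArg as the argument type for Function and BufferArgs as the container that carries a mixed list of such arguments. As a quick orientation, here is a minimal, self-contained sketch of that container pattern: concrete argument types are stored type-erased behind a shared pointer to a common base and read back by index. The names ArgBase, MatrixArg, VectorArg, and ArgList are hypothetical stand-ins for illustration, not part of the commit.

// Sketch only (stand-in names): the BufferArgs pattern of storing mixed
// argument types type-erased behind a shared pointer to a common base.
#include <cstddef>
#include <iostream>
#include <memory>
#include <vector>

class ArgBase {
public:
  virtual ~ArgBase() {}
  virtual std::size_t ndims() const = 0;
};

class MatrixArg : public ArgBase {
public:
  std::size_t ndims() const { return 2; }
};

class VectorArg : public ArgBase {
public:
  std::size_t ndims() const { return 1; }
};

class ArgList {
public:
  // Templated addArg copies the concrete argument into shared storage,
  // like BufferArgs::addArg(const Tensor& arg).
  template <typename Arg>
  void addArg(const Arg& arg) {
    args_.push_back(std::make_shared<Arg>(arg));
  }
  std::size_t size() const { return args_.size(); }
  const ArgBase& operator[](std::size_t i) const { return *args_[i]; }

private:
  std::vector<std::shared_ptr<ArgBase>> args_;
};

int main() {
  ArgList args;
  args.addArg(MatrixArg());
  args.addArg(VectorArg());
  // Prints "2 2 1": two arguments, a 2-D one and a 1-D one.
  std::cout << args.size() << " " << args[0].ndims() << " " << args[1].ndims() << "\n";
  return 0;
}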
paddle/function/BufferArg.cpp
new file mode 100644
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <glog/logging.h>
#include "BufferArg.h"

namespace paddle {

const SequenceArg& BufferArg::sequence() const {
  // CHECK_EQ(bufferType_, TENSOR_SEQUENCE_DATA);
  return dynamic_cast<const SequenceArg&>(*this);
}

const SparseMatrixArg& BufferArg::sparse() const {
  // CHECK_EQ(bufferType_, TENSOR_SPARSE);
  return dynamic_cast<const SparseMatrixArg&>(*this);
}

void BufferArgs::addArg(const Matrix& arg, const TensorShape& shape) {
  args_.push_back(std::make_shared<BufferArg>(arg, shape));
}

void BufferArgs::addArg(const CpuSparseMatrix& arg) {
  args_.push_back(std::make_shared<SparseMatrixArg>(arg));
}

void BufferArgs::addArg(const GpuSparseMatrix& arg) {
  args_.push_back(std::make_shared<SparseMatrixArg>(arg));
}

}  // namespace paddle
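BufferArg::sequence() and BufferArg::sparse() above downcast *this with dynamic_cast; the stricter CHECK_EQ on bufferType_ is still commented out in this commit. The sketch below shows the same accessor pattern in isolation, using hypothetical names (ArgBase, SparseView); dynamic_cast on a reference throws std::bad_cast when the dynamic type does not match.

// Sketch only (stand-in names): the downcast-accessor pattern used by
// BufferArg::sequence() and BufferArg::sparse().
#include <cstddef>
#include <iostream>

class SparseView;  // forward declaration; the accessor is defined after it,
                   // just as BufferArg::sparse() is defined in this .cpp.

class ArgBase {
public:
  virtual ~ArgBase() {}
  const SparseView& sparse() const;  // typed view of *this
};

class SparseView : public ArgBase {
public:
  explicit SparseView(std::size_t nnz) : nnz_(nnz) {}
  std::size_t nnz() const { return nnz_; }

private:
  std::size_t nnz_;
};

// dynamic_cast on a reference throws std::bad_cast when *this is not
// actually a SparseView.
const SparseView& ArgBase::sparse() const {
  return dynamic_cast<const SparseView&>(*this);
}

int main() {
  SparseView s(50);
  const ArgBase& arg = s;  // held through the base type, like BufferArgPtr
  std::cout << arg.sparse().nnz() << "\n";  // prints 50
  return 0;
}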
paddle/function/BufferArg.h
new file mode 100644
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include <glog/logging.h>
#include "TensorShape.h"
#include "TensorType.h"
#include "paddle/math/CpuSparseMatrix.h"
#include "paddle/math/Matrix.h"
#include "paddle/math/SparseMatrix.h"

namespace paddle {

enum BufferType {
  TENSOR_NORMAL = 0,
  TENSOR_SEQUENCE_ID = 1,
  TENSOR_SEQUENCE_DATA = 2,
  TENSOR_SPARSE = 3
};

enum SparseDataType {
  SPARSE_NO_VALUE = 0,  // do not need value pointer, all values are 1
  SPARSE_FLOAT_VALUE = 1
};

enum SparseDataFormat { SPARSE_CSR_FORMAT = 0, SPARSE_CSC_FORMAT = 1 };

/**
 * BufferArg used as the argument type for Function.
 */
class BufferArg;
class SequenceArg;
class SparseMatrixArg;
typedef std::shared_ptr<BufferArg> BufferArgPtr;

class BufferArgs {
public:
  BufferArgs() {}
  size_t size() const { return args_.size(); }

  // add argument into BufferArgs
  template <typename Tensor>
  void addArg(const Tensor& arg) {
    args_.push_back(std::make_shared<BufferArg>(arg));
  }

  void addArg(const Matrix& arg, const TensorShape& shape);

  void addArg(const CpuSparseMatrix& arg);
  void addArg(const GpuSparseMatrix& arg);

  // get argument
  const BufferArg& operator[](size_t num) const {
    CHECK_LT(num, args_.size());
    return *args_[num];
  }

private:
  std::vector<BufferArgPtr> args_;
};

// an array of arbitrary dimensions
class BufferArg {
public:
  BufferArg(void* buf, ValueType valueType, const TensorShape& shape)
      : buf_(buf), valueType_(valueType), shape_(shape) {}

  BufferArg(void* buf, ValueType valueType)
      : buf_(buf), valueType_(valueType) {}

  BufferArg(const Matrix& matrix)
      : buf_((void*)matrix.getData()),
        valueType_(DataType<real>::value),
        shape_(2) {
    shape_.setDim(0, matrix.getHeight());
    shape_.setDim(1, matrix.getWidth());
  }

  BufferArg(const Matrix& matrix, const TensorShape& shape)
      : buf_((void*)matrix.getData()),
        valueType_(DataType<real>::value),
        shape_(shape) {
    CHECK_EQ(matrix.getElementCnt(), shape.getElements());
  }

  BufferArg(const Vector& vector)
      : buf_((void*)vector.getData()),
        valueType_(DataType<real>::value),
        shape_(1) {
    shape_.setDim(0, vector.getSize());
  }

  BufferArg(const IVector& vector)
      : buf_((void*)vector.getData()),
        valueType_(VALUE_TYPE_INT32),
        shape_(1) {
    shape_.setDim(0, vector.getSize());
  }

  template <DeviceType DType>
  typename Tensor<real, DType>::Matrix matrix() const {
    CHECK(buf_);
    CHECK(valueType_ == DataType<real>::value);
    // CHECK(deviceType_ == DType);
    CHECK_EQ(2, shape_.ndims());
    return typename Tensor<real, DType>::Matrix(
        reinterpret_cast<real*>(buf_), shape_[0], shape_[1]);
  }

  template <typename VType, DeviceType DType>
  typename Tensor<VType, DType>::Vector vector() const {
    CHECK(buf_);
    CHECK(valueType_ == DataType<VType>::value);
    // CHECK(deviceType_ == DType);
    CHECK_EQ(1, shape_.ndims());
    return typename Tensor<VType, DType>::Vector(
        shape_[0], reinterpret_cast<VType*>(buf_));
  }

  virtual ~BufferArg() {}

  template <typename T>
  T* data() const {
    return reinterpret_cast<T*>(buf_);
  }

  void* data() const { return buf_; }
  ValueType valueType() const { return valueType_; }
  BufferType bufferType() const { return bufferType_; }
  const TensorShape& shape() const { return shape_; }

  const SequenceArg& sequence() const;
  const SparseMatrixArg& sparse() const;

protected:
  void* buf_;
  ValueType valueType_;
  TensorShape shape_;
  BufferType bufferType_;
  // leading dimensions. The size is dims_.size()
  // Dims lds_;
};

// sequence start positions in a mini-batch of sequences
// shape_.ndims() == 1
// valueType_ = int32
// if a < b then value_.buf_[a] < value_.buf_[b]
class SequenceIdArg : public BufferArg {
public:
  SequenceIdArg(void* buf, const TensorShape& shape)
      : BufferArg(buf, VALUE_TYPE_INT32, shape) {
    CHECK_EQ(shape_.ndims(), 1);
    numSeqs_ = shape_[0] - 1;
  }

  SequenceIdArg(const IVector& vector) : BufferArg(vector) {
    numSeqs_ = shape_[0] - 1;
  }

  ~SequenceIdArg() {}

  size_t numSeqs() const { return numSeqs_; }

private:
  size_t numSeqs_;
};

// sequence data
class SequenceArg : public BufferArg {
public:
  SequenceArg(void* buf,
              ValueType valueType,
              const TensorShape& shape,
              const SequenceIdArg& startPositions)
      : BufferArg(buf, valueType, shape), startPositions_(startPositions) {}

  SequenceArg(const Matrix& matrix, const IVector& vector)
      : BufferArg(matrix), startPositions_(vector) {}

  ~SequenceArg() {}

  void* getIdBuf() const { return startPositions_.data(); }
  size_t numSeqs() const { return startPositions_.numSeqs(); }

private:
  SequenceIdArg startPositions_;
};

// sparse matrix
// valueType_ == float or double
// shape_.ndims() == 2
class SparseMatrixArg : public BufferArg {
public:
  SparseMatrixArg(void* buf,
                  ValueType valueType,
                  const TensorShape& shape,
                  const BufferArg& row,
                  const BufferArg& col,
                  size_t nnz,
                  SparseDataFormat format,
                  SparseDataType type)
      : BufferArg(buf, valueType, shape),
        row_(row),
        col_(col),
        nnz_(nnz),
        format_(format),
        type_(type) {
    CHECK((valueType == VALUE_TYPE_FLOAT) || (valueType == VALUE_TYPE_DOUBLE));
    CHECK_EQ(shape_.ndims(), 2);
    CHECK_EQ(row_.shape().ndims(), 1);
    CHECK_EQ(col_.shape().ndims(), 1);
    if (format == SPARSE_CSR_FORMAT) {
      CHECK_EQ(nnz, col.shape()[0]);
    } else if (format == SPARSE_CSC_FORMAT) {
      CHECK_EQ(nnz, row.shape()[0]);
    }
  }

  SparseMatrixArg(const CpuSparseMatrix& sparse)
      : BufferArg(sparse),
        row_((void*)sparse.getRows(), VALUE_TYPE_INT32),
        col_((void*)sparse.getCols(), VALUE_TYPE_INT32) {}

  SparseMatrixArg(const GpuSparseMatrix& sparse)
      : BufferArg(sparse),
        row_((void*)sparse.getRows(), VALUE_TYPE_INT32),
        col_((void*)sparse.getCols(), VALUE_TYPE_INT32) {}

  ~SparseMatrixArg() {}

  void* getRowBuf() const { return row_.data(); }
  void* getColBuf() const { return col_.data(); }

  size_t nnz() const { return nnz_; }

  SparseDataFormat dataFormat() const { return format_; }

  SparseDataType dataType() const { return type_; }

private:
  BufferArg row_;
  BufferArg col_;
  size_t nnz_;
  SparseDataFormat format_;
  SparseDataType type_;
};

}  // namespace paddle
}
void
addArg
(
const
Matrix
&
arg
,
const
TensorShape
&
shape
);
void
addArg
(
const
CpuSparseMatrix
&
arg
);
void
addArg
(
const
GpuSparseMatrix
&
arg
);
// get argument
const
BufferArg
&
operator
[](
size_t
num
)
const
{
CHECK_LT
(
num
,
args_
.
size
());
return
*
args_
[
num
];
}
private:
std
::
vector
<
BufferArgPtr
>
args_
;
};
// an array of arbitrary dimensions
class
BufferArg
{
public:
BufferArg
(
void
*
buf
,
ValueType
valueType
,
const
TensorShape
&
shape
)
:
buf_
(
buf
),
valueType_
(
valueType
),
shape_
(
shape
)
{}
BufferArg
(
void
*
buf
,
ValueType
valueType
)
:
buf_
(
buf
),
valueType_
(
valueType
)
{}
BufferArg
(
const
Matrix
&
matrix
)
:
buf_
((
void
*
)
matrix
.
getData
()),
valueType_
(
DataType
<
real
>::
value
),
shape_
(
2
)
{
shape_
.
setDim
(
0
,
matrix
.
getHeight
());
shape_
.
setDim
(
1
,
matrix
.
getWidth
());
}
BufferArg
(
const
Matrix
&
matrix
,
const
TensorShape
&
shape
)
:
buf_
((
void
*
)
matrix
.
getData
()),
valueType_
(
DataType
<
real
>::
value
),
shape_
(
shape
)
{
CHECK_EQ
(
matrix
.
getElementCnt
(),
shape
.
getElements
());
}
BufferArg
(
const
Vector
&
vector
)
:
buf_
((
void
*
)
vector
.
getData
()),
valueType_
(
DataType
<
real
>::
value
),
shape_
(
1
)
{
shape_
.
setDim
(
0
,
vector
.
getSize
());
}
BufferArg
(
const
IVector
&
vector
)
:
buf_
((
void
*
)
vector
.
getData
()),
valueType_
(
VALUE_TYPE_INT32
),
shape_
(
1
)
{
shape_
.
setDim
(
0
,
vector
.
getSize
());
}
template
<
DeviceType
DType
>
typename
Tensor
<
real
,
DType
>::
Matrix
matrix
()
const
{
CHECK
(
buf_
);
CHECK
(
valueType_
==
DataType
<
real
>::
value
);
// CHECK(deviceType_ == DType);
CHECK_EQ
(
2
,
shape_
.
ndims
());
return
typename
Tensor
<
real
,
DType
>::
Matrix
(
reinterpret_cast
<
real
*>
(
buf_
),
shape_
[
0
],
shape_
[
1
]);
}
template
<
typename
VType
,
DeviceType
DType
>
typename
Tensor
<
VType
,
DType
>::
Vector
vector
()
const
{
CHECK
(
buf_
);
CHECK
(
valueType_
==
DataType
<
VType
>::
value
);
// CHECK(deviceType_ == DType);
CHECK_EQ
(
1
,
shape_
.
ndims
());
return
typename
Tensor
<
VType
,
DType
>::
Vector
(
shape_
[
0
],
reinterpret_cast
<
VType
*>
(
buf_
));
}
virtual
~
BufferArg
()
{}
template
<
typename
T
>
T
*
data
()
const
{
return
reinterpret_cast
<
T
*>
(
buf_
);
}
void
*
data
()
const
{
return
buf_
;
}
ValueType
valueType
()
const
{
return
valueType_
;
}
BufferType
bufferType
()
const
{
return
bufferType_
;
}
const
TensorShape
&
shape
()
const
{
return
shape_
;
}
const
SequenceArg
&
sequence
()
const
;
const
SparseMatrixArg
&
sparse
()
const
;
protected:
void
*
buf_
;
ValueType
valueType_
;
TensorShape
shape_
;
BufferType
bufferType_
;
// leading dimensions. The size is dims_.size()
// Dims lds_;
};
// sequence start positions in a mini-batch of sequences
// shape_.ndims() == 1
// valueType_ = int32
// if a < b than value_.buf_[a] < value_.buf_[b]
class
SequenceIdArg
:
public
BufferArg
{
public:
SequenceIdArg
(
void
*
buf
,
const
TensorShape
&
shape
)
:
BufferArg
(
buf
,
VALUE_TYPE_INT32
,
shape
)
{
CHECK_EQ
(
shape_
.
ndims
(),
1
);
numSeqs_
=
shape_
[
0
]
-
1
;
}
SequenceIdArg
(
const
IVector
&
vector
)
:
BufferArg
(
vector
)
{
numSeqs_
=
shape_
[
0
]
-
1
;
}
~
SequenceIdArg
()
{}
size_t
numSeqs
()
const
{
return
numSeqs_
;
}
private:
size_t
numSeqs_
;
};
// sequence data
class
SequenceArg
:
public
BufferArg
{
public:
SequenceArg
(
void
*
buf
,
ValueType
valueType
,
const
TensorShape
&
shape
,
const
SequenceIdArg
&
startPositions
)
:
BufferArg
(
buf
,
valueType
,
shape
),
startPositions_
(
startPositions
)
{}
SequenceArg
(
const
Matrix
&
matrix
,
const
IVector
&
vector
)
:
BufferArg
(
matrix
),
startPositions_
(
vector
)
{}
~
SequenceArg
()
{}
void
*
getIdBuf
()
const
{
return
startPositions_
.
data
();
}
size_t
numSeqs
()
const
{
return
startPositions_
.
numSeqs
();
}
private:
SequenceIdArg
startPositions_
;
};
// sparse matrix
// valueType_ == float or double
// shape_.ndims() == 2
class
SparseMatrixArg
:
public
BufferArg
{
public:
SparseMatrixArg
(
void
*
buf
,
ValueType
valueType
,
const
TensorShape
&
shape
,
const
BufferArg
&
row
,
const
BufferArg
&
col
,
size_t
nnz
,
SparseDataFormat
format
,
SparseDataType
type
)
:
BufferArg
(
buf
,
valueType
,
shape
),
row_
(
row
),
col_
(
col
),
nnz_
(
nnz
),
format_
(
format
),
type_
(
type
)
{
CHECK
((
valueType
==
VALUE_TYPE_FLOAT
)
||
(
valueType
==
VALUE_TYPE_DOUBLE
));
CHECK_EQ
(
shape_
.
ndims
(),
2
);
CHECK_EQ
(
row_
.
shape
().
ndims
(),
1
);
CHECK_EQ
(
col_
.
shape
().
ndims
(),
1
);
if
(
format
==
SPARSE_CSR_FORMAT
)
{
CHECK_EQ
(
nnz
,
col
.
shape
()[
0
]);
}
else
if
(
format
==
SPARSE_CSC_FORMAT
)
{
CHECK_EQ
(
nnz
,
row
.
shape
()[
0
]);
}
}
SparseMatrixArg
(
const
CpuSparseMatrix
&
sparse
)
:
BufferArg
(
sparse
),
row_
((
void
*
)
sparse
.
getRows
(),
VALUE_TYPE_INT32
),
col_
((
void
*
)
sparse
.
getCols
(),
VALUE_TYPE_INT32
)
{}
SparseMatrixArg
(
const
GpuSparseMatrix
&
sparse
)
:
BufferArg
(
sparse
),
row_
((
void
*
)
sparse
.
getRows
(),
VALUE_TYPE_INT32
),
col_
((
void
*
)
sparse
.
getCols
(),
VALUE_TYPE_INT32
)
{}
~
SparseMatrixArg
()
{}
void
*
getRowBuf
()
const
{
return
row_
.
data
();
}
void
*
getColBuf
()
const
{
return
col_
.
data
();
}
size_t
nnz
()
const
{
return
nnz_
;
}
SparseDataFormat
dataFormat
()
const
{
return
format_
;
}
SparseDataType
dataType
()
const
{
return
type_
;
}
private:
BufferArg
row_
;
BufferArg
col_
;
size_t
nnz_
;
SparseDataFormat
format_
;
SparseDataType
type_
;
};
}
// namespace paddle
paddle/function/BufferArgTest.cpp
new file mode 100644
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "BufferArg.h"
#include <gtest/gtest.h>
#include "paddle/math/MemoryHandle.h"
namespace
paddle
{
TEST
(
BufferTest
,
BufferArg
)
{
TensorShape
shape
({
8
,
10
});
CpuMemoryHandle
memory
(
shape
.
getElements
()
*
sizeOfValuType
(
VALUE_TYPE_FLOAT
));
BufferArg
buffer
(
memory
.
getBuf
(),
VALUE_TYPE_FLOAT
,
shape
);
EXPECT_EQ
(
buffer
.
data
(),
memory
.
getBuf
());
}
TEST
(
BufferTest
,
SequenceIdArg
)
{
TensorShape
shape
({
10
});
CpuMemoryHandle
memory
(
shape
.
getElements
()
*
sizeOfValuType
(
VALUE_TYPE_INT32
));
SequenceIdArg
buffer
(
memory
.
getBuf
(),
shape
);
EXPECT_EQ
(
buffer
.
data
(),
memory
.
getBuf
());
EXPECT_EQ
(
buffer
.
numSeqs
(),
9
);
}
TEST
(
BufferTest
,
asArgument
)
{
MatrixPtr
matrix
=
Matrix
::
create
(
100
,
200
);
VectorPtr
vector
=
Vector
::
create
(
100
,
false
);
CpuSparseMatrix
sparse
(
200
,
300
,
50
);
// prepare arguments
BufferArgs
argments
;
argments
.
addArg
(
*
matrix
);
argments
.
addArg
(
*
vector
);
argments
.
addArg
(
sparse
);
// function
auto
function
=
[
=
](
const
BufferArgs
&
inputs
)
{
EXPECT_EQ
(
inputs
.
size
(),
3
);
// check inputs[0]
EXPECT_EQ
(
inputs
[
0
].
shape
().
ndims
(),
2
);
EXPECT_EQ
(
inputs
[
0
].
shape
()[
0
],
100
);
EXPECT_EQ
(
inputs
[
0
].
shape
()[
1
],
200
);
EXPECT_EQ
(
inputs
[
0
].
data
(),
matrix
->
getData
());
EXPECT_EQ
(
inputs
[
0
].
matrix
<
DEVICE_TYPE_CPU
>
().
getHeight
(),
matrix
->
getHeight
());
EXPECT_EQ
(
inputs
[
0
].
matrix
<
DEVICE_TYPE_CPU
>
().
getWidth
(),
matrix
->
getWidth
());
EXPECT_EQ
(
inputs
[
0
].
matrix
<
DEVICE_TYPE_CPU
>
().
getData
(),
matrix
->
getData
());
// check inputs[1]
EXPECT_EQ
(
inputs
[
1
].
shape
().
ndims
(),
1
);
EXPECT_EQ
(
inputs
[
1
].
shape
()[
0
],
100
);
EXPECT_EQ
(
inputs
[
1
].
data
(),
vector
->
getData
());
CpuVector
inVector
=
inputs
[
1
].
vector
<
real
,
DEVICE_TYPE_CPU
>
();
EXPECT_EQ
(
inVector
.
getSize
(),
vector
->
getSize
());
EXPECT_EQ
(
inVector
.
getData
(),
vector
->
getData
());
// check inputs[2]
EXPECT_EQ
(
inputs
[
2
].
shape
().
ndims
(),
2
);
EXPECT_EQ
(
inputs
[
2
].
shape
()[
0
],
200
);
EXPECT_EQ
(
inputs
[
2
].
shape
()[
1
],
300
);
EXPECT_EQ
(
inputs
[
2
].
data
(),
sparse
.
getData
());
// CHECK_EQ(inputs[2].sparse().nnz(), 50);
// CHECK_EQ(inputs[2].sparse().dataFormat(), SPARSE_CSR_FORMAT);
// CHECK_EQ(inputs[2].sparse().dataType(), SPARSE_FLOAT_VALUE);
EXPECT_EQ
(
inputs
[
2
].
sparse
().
getRowBuf
(),
sparse
.
getRows
());
EXPECT_EQ
(
inputs
[
2
].
sparse
().
getColBuf
(),
sparse
.
getCols
());
};
// call function
function
(
argments
);
}
template
<
DeviceType
DType
>
void
FunctionApi
(
typename
Tensor
<
real
,
DType
>::
Matrix
&
output
,
const
typename
Tensor
<
real
,
DType
>::
Matrix
&
input
);
template
<
>
void
FunctionApi
<
DEVICE_TYPE_CPU
>
(
CpuMatrix
&
output
,
const
CpuMatrix
&
input
)
{
EXPECT_EQ
(
output
.
getHeight
(),
100
);
EXPECT_EQ
(
output
.
getWidth
(),
200
);
}
template
<
>
void
FunctionApi
<
DEVICE_TYPE_GPU
>
(
GpuMatrix
&
output
,
const
GpuMatrix
&
input
)
{
EXPECT_EQ
(
output
.
getHeight
(),
10
);
EXPECT_EQ
(
output
.
getWidth
(),
20
);
}
template
<
DeviceType
DType
>
void
Function
(
const
BufferArgs
&
arguments
)
{
auto
input
=
arguments
[
0
].
matrix
<
DType
>
();
auto
output
=
arguments
[
1
].
matrix
<
DType
>
();
FunctionApi
<
DType
>
(
output
,
input
);
}
TEST
(
BufferTest
,
Function
)
{
CpuMatrix
cpuInput
=
CpuMatrix
(
100
,
200
);
CpuMatrix
cpuOutput
=
CpuMatrix
(
100
,
200
);
BufferArgs
cpuArgments
;
cpuArgments
.
addArg
(
cpuInput
);
cpuArgments
.
addArg
(
cpuOutput
);
Function
<
DEVICE_TYPE_CPU
>
(
cpuArgments
);
GpuMatrix
gpuInput
=
GpuMatrix
(
10
,
20
);
GpuMatrix
gpuOutput
=
GpuMatrix
(
10
,
20
);
BufferArgs
gpuArgments
;
gpuArgments
.
addArg
(
gpuInput
);
gpuArgments
.
addArg
(
gpuOutput
);
Function
<
DEVICE_TYPE_GPU
>
(
gpuArgments
);
}
}
// namespace paddle
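TEST(BufferTest, Function) above shows the intended dispatch style: one Function template parameterized on DeviceType reads its inputs back as Tensor<real, DType>::Matrix, so the same body specializes to CpuMatrix or GpuMatrix. The sketch below reproduces just that compile-time type-selection idea with hypothetical names (TensorSketch, CpuMat, GpuMat); it is not Paddle's Tensor definition.

// Sketch only (stand-in names): compile-time device dispatch in the spirit of
// Tensor<real, DType>::Matrix as used by Function<DType> in the test above.
#include <iostream>

enum DeviceTypeSketch { DEVICE_CPU, DEVICE_GPU };

struct CpuMat { const char* name() const { return "cpu"; } };
struct GpuMat { const char* name() const { return "gpu"; } };

// Type selector: maps a device tag to its concrete matrix type.
template <DeviceTypeSketch D> struct TensorSketch;
template <> struct TensorSketch<DEVICE_CPU> { typedef CpuMat Matrix; };
template <> struct TensorSketch<DEVICE_GPU> { typedef GpuMat Matrix; };

// One templated body; the concrete matrix type is chosen at compile time.
template <DeviceTypeSketch D>
void Run(const typename TensorSketch<D>::Matrix& m) {
  std::cout << m.name() << "\n";
}

int main() {
  CpuMat c;
  GpuMat g;
  Run<DEVICE_CPU>(c);  // prints "cpu"
  Run<DEVICE_GPU>(g);  // prints "gpu"
  return 0;
}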
paddle/function/TensorType.h
@@ -57,6 +57,11 @@ struct DataType<double> {
   static const ValueType value = VALUE_TYPE_DOUBLE;
 };
 
+template <>
+struct DataType<int> {
+  static const ValueType value = VALUE_TYPE_INT32;
+};
+
 namespace detail {
 
 template <typename VType, DeviceType Device>
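The hunk above adds a DataType<int> specialization so that int-typed buffers map to VALUE_TYPE_INT32, matching checks such as CHECK(valueType_ == DataType<VType>::value) in BufferArg.h. Below is a self-contained sketch of this type-to-enum trait pattern; DataTypeSketch and the enum values shown are stand-ins, not Paddle's actual definitions.

// Sketch only (stand-in names and enum values): the type-to-enum trait
// pattern that the new DataType<int> specialization extends.
#include <iostream>

enum ValueType { VALUE_TYPE_INT32 = 0, VALUE_TYPE_FLOAT = 1, VALUE_TYPE_DOUBLE = 2 };

template <typename T>
struct DataTypeSketch;  // primary template intentionally undefined

template <>
struct DataTypeSketch<float> {
  static const ValueType value = VALUE_TYPE_FLOAT;
};

template <>
struct DataTypeSketch<double> {
  static const ValueType value = VALUE_TYPE_DOUBLE;
};

// The commit's addition, mirrored here: int-typed buffers (e.g. sequence ids)
// now have a ValueType mapping just like float and double.
template <>
struct DataTypeSketch<int> {
  static const ValueType value = VALUE_TYPE_INT32;
};

int main() {
  const int intCode = DataTypeSketch<int>::value;      // 0
  const int floatCode = DataTypeSketch<float>::value;  // 1
  std::cout << intCode << " " << floatCode << "\n";    // prints "0 1"
  return 0;
}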