机器未来 / Paddle
Forked from PaddlePaddle / Paddle
Commit 68156c88
Authored by hedaoyuan on Jan 05, 2017

Modify the argument type of Function

Parent: c5c80516
Showing 4 changed files with 56 additions and 97 deletions
paddle/function/CrossMapNormalOp.cpp           +32  -36
paddle/function/Function.h                      +4  -49
paddle/gserver/layers/NormProjectionLayer.cpp  +19  -11
paddle/gserver/layers/NormProjectionLayer.h     +1   -1
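In short, the Function API's argument containers change type: the old Arguments (a std::vector of the Tensor struct removed from Function.h below, little more than a raw real* buffer plus a Dims vector) give way to BufferArgs, presumably declared in the newly included BufferArg.h. A condensed view of the resulting FunctionBase::calc signature, distilled from the Function.h hunk further down; everything else in the class is omitted:

// Condensed from the paddle/function/Function.h hunk in this commit;
// members unrelated to the signature change are omitted.
class BufferArgs;  // new argument container (from the added "BufferArg.h" include)

class FunctionBase {
public:
  // Previously:
  //   virtual void calc(const Arguments& inputs,
  //                     const Arguments& outputs,
  //                     const Arguments& inouts) {}
  virtual void calc(const BufferArgs& inputs,
                    const BufferArgs& outputs,
                    const BufferArgs& inouts) {}
};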
paddle/function/CrossMapNormalOp.cpp
@@ -125,27 +125,25 @@ public:
     pow_ = config.get<real>("pow");
   }
 
-  void calc(const Arguments& inputs,
-            const Arguments& outputs,
-            const Arguments& inouts) override {
+  void calc(const BufferArgs& inputs,
+            const BufferArgs& outputs,
+            const BufferArgs& inouts) override {
     CHECK_EQ(1, inputs.size());
     CHECK_EQ(2, outputs.size());
     CHECK_EQ(0, inouts.size());
 
-    CHECK_EQ(inputs[0].dims_.size(), 4);
-    for (size_t i = 0; i < inputs[0].dims_.size(); i++) {
-      CHECK_EQ(inputs[0].dims_[i], outputs[0].dims_[i]);
-      CHECK_EQ(inputs[0].dims_[i], outputs[1].dims_[i]);
-    }
+    CHECK_EQ(inputs[0].shape().ndims(), 4);
+    CHECK(inputs[0].shape() == outputs[0].shape());
+    CHECK(inputs[0].shape() == outputs[1].shape());
 
-    size_t samples = inputs[0].dims_[0];
-    size_t channels = inputs[0].dims_[1];
-    size_t height = inputs[0].dims_[2];
-    size_t width = inputs[0].dims_[3];
+    size_t samples = inputs[0].shape()[0];
+    size_t channels = inputs[0].shape()[1];
+    size_t height = inputs[0].shape()[2];
+    size_t width = inputs[0].shape()[3];
 
-    CrossMapNormal<Device>(outputs[0].getData(),
-                           outputs[1].getData(),
-                           inputs[0].getData(),
+    CrossMapNormal<Device>(outputs[0].data<real>(),
+                           outputs[1].data<real>(),
+                           inputs[0].data<real>(),
                            samples,
                            channels,
                            height,
...
@@ -177,31 +175,29 @@ public:
     pow_ = config.get<real>("pow");
   }
 
-  void calc(const Arguments& inputs,
-            const Arguments& outputs,
-            const Arguments& inouts) override {
+  void calc(const BufferArgs& inputs,
+            const BufferArgs& outputs,
+            const BufferArgs& inouts) override {
     CHECK_EQ(4, inputs.size());
     CHECK_EQ(1, outputs.size());
     CHECK_EQ(0, inouts.size());
 
-    CHECK_EQ(inputs[0].dims_.size(), 4);
-    for (size_t i = 0; i < inputs[0].dims_.size(); i++) {
-      CHECK_EQ(inputs[0].dims_[i], inputs[1].dims_[i]);
-      CHECK_EQ(inputs[0].dims_[i], inputs[2].dims_[i]);
-      CHECK_EQ(inputs[0].dims_[i], inputs[3].dims_[i]);
-      CHECK_EQ(inputs[0].dims_[i], outputs[0].dims_[i]);
-    }
+    CHECK_EQ(inputs[0].shape().ndims(), 4);
+    CHECK(inputs[0].shape() == inputs[1].shape());
+    CHECK(inputs[0].shape() == inputs[2].shape());
+    CHECK(inputs[0].shape() == inputs[3].shape());
+    CHECK(inputs[0].shape() == outputs[0].shape());
 
-    size_t samples = inputs[0].dims_[0];
-    size_t channels = inputs[0].dims_[1];
-    size_t height = inputs[0].dims_[2];
-    size_t width = inputs[0].dims_[3];
+    size_t samples = inputs[0].shape()[0];
+    size_t channels = inputs[0].shape()[1];
+    size_t height = inputs[0].shape()[2];
+    size_t width = inputs[0].shape()[3];
 
-    CrossMapNormalGrad<Device>(outputs[0].getData(),
-                               inputs[0].getData(),
-                               inputs[1].getData(),
-                               inputs[2].getData(),
-                               inputs[3].getData(),
+    CrossMapNormalGrad<Device>(outputs[0].data<real>(),
+                               inputs[0].data<real>(),
+                               inputs[1].data<real>(),
+                               inputs[2].data<real>(),
+                               inputs[3].data<real>(),
                                samples,
                                channels,
                                height,
...
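The validation logic above is the most visible effect of the new argument type: the old code had to walk the dims_ vector and compare dimensions one by one, while a BufferArg exposes a shape object that can be compared as a whole. A minimal standalone sketch of that pattern, using a hypothetical Shape class as a stand-in for Paddle's TensorShape:

// Standalone sketch; Shape is a hypothetical stand-in, not Paddle's TensorShape.
#include <cassert>
#include <cstddef>
#include <initializer_list>
#include <iostream>
#include <vector>

class Shape {
public:
  Shape(std::initializer_list<std::size_t> dims) : dims_(dims) {}
  std::size_t ndims() const { return dims_.size(); }
  std::size_t operator[](std::size_t i) const { return dims_[i]; }
  // Whole-shape comparison is what lets a loop of per-dimension checks
  // collapse into a single CHECK(a == b).
  bool operator==(const Shape& other) const { return dims_ == other.dims_; }

private:
  std::vector<std::size_t> dims_;
};

int main() {
  Shape in{8, 64, 32, 32};   // samples, channels, height, width
  Shape out{8, 64, 32, 32};

  // Old style: check the rank, then every dimension in a loop.
  assert(in.ndims() == 4);
  for (std::size_t i = 0; i < in.ndims(); ++i) {
    assert(in[i] == out[i]);
  }

  // New style: one comparison of the whole shape.
  assert(in == out);

  std::cout << "samples=" << in[0] << " channels=" << in[1]
            << " height=" << in[2] << " width=" << in[3] << "\n";
  return 0;
}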
paddle/function/Function.h
@@ -16,57 +16,12 @@ limitations under the License. */
 
 #include <map>
 #include <vector>
+#include "BufferArg.h"
 #include "paddle/math/Matrix.h"
 #include "paddle/utils/ClassRegistrar.h"
 
 namespace paddle {
 
-enum DeviceType {
-  DEVICE_TYPE_UNSPECIFIED = 0,
-  DEVICE_TYPE_CPU = 1,
-  DEVICE_TYPE_GPU = 2,
-};
-
-template <DeviceType Device>
-struct MatrixT;
-
-template <>
-struct MatrixT<DEVICE_TYPE_CPU> {
-  using type = CpuMatrix;
-};
-
-template <>
-struct MatrixT<DEVICE_TYPE_GPU> {
-  using type = GpuMatrix;
-};
-
-template <DeviceType Device>
-struct SequenceT;
-
-template <>
-struct SequenceT<DEVICE_TYPE_CPU> {
-  using type = CpuIVector;
-};
-
-template <>
-struct SequenceT<DEVICE_TYPE_GPU> {
-  using type = GpuIVector;
-};
-
-typedef std::vector<size_t> Dims;
-
-class Tensor {
-public:
-  Tensor(real* data, const Dims& dim) : buf_(data), dims_(dim) {}
-
-  real* getData() const { return buf_; }
-
-  real* buf_;
-  Dims dims_;
-};
-
-typedef std::vector<Tensor> Arguments;
-
 class FuncConfig {
 public:
   union value {
...
@@ -92,9 +47,9 @@ public:
 
   virtual void init(const FuncConfig& config) {}
 
-  virtual void calc(const Arguments& inputs,
-                    const Arguments& outputs,
-                    const Arguments& inouts) {}
+  virtual void calc(const BufferArgs& inputs,
+                    const BufferArgs& outputs,
+                    const BufferArgs& inouts) {}
 
   static ClassRegistrar<FunctionBase> funcRegistrar_;
 };
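Besides the container type, the way element data is reached also changes: the removed Tensor struct held an untyped real* that call sites read through getData(), whereas the new call sites ask for data<real>(), naming the element type at the point of use. A minimal standalone sketch of that accessor style; this BufferArg is a hypothetical stand-in inferred only from the calls visible in this commit (data<real>() and shape()), not the contents of the real BufferArg.h:

// Hypothetical stand-in inferred from usage in this commit; the real class
// lives in paddle/function/BufferArg.h.
#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

using real = float;

class BufferArg {
public:
  BufferArg(void* buf, std::vector<std::size_t> shape)
      : buf_(buf), shape_(std::move(shape)) {}

  // Typed accessor: the element type is named at the point of use,
  // where the removed Tensor::getData() could only ever return real*.
  template <typename T>
  T* data() const { return reinterpret_cast<T*>(buf_); }

  const std::vector<std::size_t>& shape() const { return shape_; }

private:
  void* buf_;
  std::vector<std::size_t> shape_;
};

int main() {
  std::vector<real> storage(8 * 64, 0.5f);
  BufferArg arg(storage.data(), {8, 64});

  std::cout << "ndims=" << arg.shape().size()
            << " first=" << arg.data<real>()[0] << "\n";
  return 0;
}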
paddle/gserver/layers/NormProjectionLayer.cpp
@@ -71,11 +71,16 @@ void CMRProjectionNormLayer::forward(PassType passType) {
   Matrix::resizeOrCreate(denoms_, batchSize, size, /* trans */ false, useGpu_);
 
-  dims_ = {batchSize, channels_, imgSizeH_, imgSizeW_};
-  forward_[0]->calc({Tensor(input->getData(), dims_)},
-                    {Tensor(outV->getData(), dims_),
-                     Tensor(denoms_->getData(), dims_)},
-                    {});
+  shape_ = TensorShape({batchSize, channels_, imgSizeH_, imgSizeW_});
+
+  BufferArgs inputs;
+  BufferArgs outputs;
+  BufferArgs inouts;
+  inputs.addArg(*input, shape_);
+  outputs.addArg(*outV, shape_);
+  outputs.addArg(*denoms_, shape_);
+
+  forward_[0]->calc(inputs, outputs, inouts);
 }
 
 void CMRProjectionNormLayer::backward(const UpdateCallback& callback) {
...
@@ -90,11 +95,14 @@ void CMRProjectionNormLayer::backward(const UpdateCallback& callback) {
   MatrixPtr localOutV = getOutputValue();
   MatrixPtr preOutV = inputLayers_[0]->getOutputValue();
 
-  backward_[0]->calc({Tensor(preOutV->getData(), dims_),
-                      Tensor(localOutV->getData(), dims_),
-                      Tensor(localGrad->getData(), dims_),
-                      Tensor(denoms_->getData(), dims_)},
-                     {Tensor(preOutGrad->getData(), dims_)},
-                     {});
+  BufferArgs inputs;
+  BufferArgs outputs;
+  BufferArgs inouts;
+  inputs.addArg(*preOutV, shape_);
+  inputs.addArg(*localOutV, shape_);
+  inputs.addArg(*localGrad, shape_);
+  inputs.addArg(*denoms_, shape_);
+  outputs.addArg(*preOutGrad, shape_);
+
+  backward_[0]->calc(inputs, outputs, inouts);
 }
 }  // namespace paddle
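On the layer side the change is purely a calling convention: instead of passing braced lists of Tensor(getData(), dims_) objects straight into calc, the caller now fills named BufferArgs bundles with addArg and makes a single call. A small standalone sketch of that pattern; Args, Buffer and Shape are simplified hypothetical stand-ins, not Paddle's BufferArgs, Matrix or TensorShape:

// Standalone sketch of the new call-site pattern with stand-in types.
#include <cstddef>
#include <iostream>
#include <vector>

struct Buffer {
  std::vector<float> data;
};

struct Shape {
  std::vector<std::size_t> dims;
};

// Simplified stand-in for BufferArgs: collects (buffer, shape) pairs.
class Args {
public:
  void addArg(Buffer& buf, const Shape& shape) {
    bufs_.push_back(&buf);
    shapes_.push_back(&shape);
  }
  std::size_t size() const { return bufs_.size(); }

private:
  std::vector<Buffer*> bufs_;
  std::vector<const Shape*> shapes_;
};

// Stands in for forward_[0]->calc(inputs, outputs, inouts).
void calc(const Args& inputs, const Args& outputs, const Args& inouts) {
  std::cout << inputs.size() << " input(s), " << outputs.size()
            << " output(s), " << inouts.size() << " inout(s)\n";
}

int main() {
  Buffer input, outV, denoms;
  Shape shape{{2, 3, 4, 4}};  // batchSize, channels, imgSizeH, imgSizeW

  // New convention: fill named bundles with addArg, then make one call,
  // instead of passing braced lists of Tensor(getData(), dims_) inline.
  Args inputs, outputs, inouts;
  inputs.addArg(input, shape);
  outputs.addArg(outV, shape);
  outputs.addArg(denoms, shape);

  calc(inputs, outputs, inouts);
  return 0;
}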
paddle/gserver/layers/NormProjectionLayer.h
@@ -41,6 +41,6 @@ public:
   void backward(const UpdateCallback& callback = nullptr);
 
 protected:
-  Dims dims_;
+  TensorShape shape_;
 };
 }  // namespace paddle