Project: Crayon鑫 / Paddle (fork of PaddlePaddle / Paddle, in sync with the upstream source project)
Unverified commit 2fd728a9: add new dot op (#23418)
Authored on Apr 12, 2020 by liuwei1031; committed by GitHub on Apr 12, 2020.
Parent: cdbe5707
Showing 10 changed files with 527 additions and 5 deletions (+527, -5).
Changed files:

paddle/fluid/framework/ddim.cc (+10, -0)
paddle/fluid/framework/ddim.h (+3, -0)
paddle/fluid/operators/dot_op.cc (+160, -0)
paddle/fluid/operators/dot_op.cu (+28, -0)
paddle/fluid/operators/dot_op.h (+168, -0)
python/paddle/__init__.py (+1, -1)
python/paddle/fluid/tests/unittests/test_dot_op.py (+105, -0)
python/paddle/fluid/tests/unittests/white_list/no_grad_set_white_list.py (+1, -0)
python/paddle/tensor/__init__.py (+1, -2)
python/paddle/tensor/linalg.py (+50, -2)
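For orientation: the new dot operator computes an inner product over the last axis. For 1-D inputs it returns a one-element tensor; for 2-D inputs it returns a per-row dot product of shape [N, 1], since InferShape (below) sets the last output dimension to 1. A minimal NumPy sketch of these semantics, for illustration only (not part of the commit):

import numpy as np

def dot_reference(x, y):
    # Reduce the last axis, keeping it as size 1, mirroring the op's contract.
    return np.sum(x * y, axis=-1, keepdims=True)

v = np.array([1., 3.])
w = np.array([2., 5.])
print(dot_reference(v, w))    # [17.]  (1-D case)

a = np.array([[1., 3.], [3., 5.]])
b = np.array([[2., 5.], [6., 8.]])
print(dot_reference(a, b))    # [[17.], [58.]]  (row-wise case)

These are the same values the new TestDygraph test below asserts.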
paddle/fluid/framework/ddim.cc
@@ -48,6 +48,16 @@ bool DDim::operator==(const DDim& d) const {
 bool DDim::operator!=(const DDim& d) const { return !(*this == d); }
 
+std::string DDim::to_str() const {
+  std::stringstream ss;
+  ss << '[';
+  if (rank_ > 0) ss << dim_[0];
+  for (int i = 1; i < rank_; ++i) ss << ", " << dim_[i];
+  ss << ']';
+  return ss.str();
+}
+
 struct ProductVisitor {
   template <int D>
   inline int64_t operator()(const Dim<D>& dim) {
paddle/fluid/framework/ddim.h
@@ -16,6 +16,7 @@ limitations under the License. */
 #include <initializer_list>
 #include <stdexcept>
+#include <string>
 #include <vector>
 
 #include "paddle/fluid/framework/dim.h"
@@ -123,6 +124,8 @@ class DDim {
   inline int size() const { return rank_; }
 
+  std::string to_str() const;
+
  private:
   template <int D>
   inline Dim<D>& UnsafeCast() {
paddle/fluid/operators/dot_op.cc (new file, mode 100644)

// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/dot_op.h"

namespace paddle {
namespace operators {

class DotOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE_EQ(true, ctx->HasInput("X"),
                      platform::errors::PreconditionNotMet(
                          "Input(X) of DotOp should not be null."));
    PADDLE_ENFORCE_EQ(true, ctx->HasInput("Y"),
                      platform::errors::PreconditionNotMet(
                          "Input(Y) of DotOp should not be null."));
    PADDLE_ENFORCE_EQ(true, ctx->HasOutput("Out"),
                      platform::errors::PreconditionNotMet(
                          "Output(Out) of DotOp should not be null."));

    auto x_dims = ctx->GetInputDim("X");
    auto x_rank = (size_t)x_dims.size();
    PADDLE_ENFORCE_EQ(true, 1 == x_rank || 2 == x_rank,
                      platform::errors::PreconditionNotMet(
                          "ShapeError: The dimensions of input tensor X (%s) "
                          "should be 1 or 2",
                          x_dims.to_str()));

    auto y_dims = ctx->GetInputDim("Y");
    PADDLE_ENFORCE_EQ(
        true, x_rank == (size_t)y_dims.size(),
        platform::errors::PreconditionNotMet(
            "ShapeError: The shape of input tensor Y: %s should match with "
            "input tensor X: %s",
            y_dims.to_str(), x_dims.to_str()));
    bool shape_match = true;
    for (size_t i = 0; i < x_rank; ++i) {
      if (x_dims[i] != y_dims[i]) {
        shape_match = false;
        break;
      }
    }

    PADDLE_ENFORCE_EQ(true, shape_match,
                      platform::errors::PreconditionNotMet(
                          "ShapeError: The shape of input tensor X: %s should "
                          "be exactly the same "
                          "with input tensor Y: %s",
                          x_dims.to_str(), y_dims.to_str()));
    auto dims = vectorize(x_dims);
    dims[dims.size() - 1] = 1;
    ctx->SetOutputDim("Out", framework::make_ddim(dims));
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(
        OperatorWithKernel::IndicateVarDataType(ctx, "X"), ctx.GetPlace());
  }
};

class DotOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() final {
    AddInput("X", "(Tensor) The first input tensor. ");
    AddInput("Y", "(Tensor) The second input tensor. ");
    AddOutput("Out", "(Tensor) The result tensor.");
    AddComment("");
  }
};

class DotGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE_EQ(
        true, ctx->HasInput("X"),
        platform::errors::PreconditionNotMet("Input(X) should not be null."));
    PADDLE_ENFORCE_EQ(
        true, ctx->HasInput("Y"),
        platform::errors::PreconditionNotMet("Input(Y) should not be null."));
    PADDLE_ENFORCE_EQ(true, ctx->HasInput(framework::GradVarName("Out")),
                      platform::errors::PreconditionNotMet(
                          "Input(Out@GRAD) should not be null."));

    auto x_grad_name = framework::GradVarName("X");
    auto y_grad_name = framework::GradVarName("Y");
    if (ctx->HasOutput(x_grad_name)) {
      ctx->ShareDim("X", /*->*/ x_grad_name);
      ctx->ShareLoD("X", /*->*/ x_grad_name);
    }
    if (ctx->HasOutput(y_grad_name)) {
      ctx->ShareDim("Y", /*->*/ y_grad_name);
      ctx->ShareLoD("Y", /*->*/ y_grad_name);
    }
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    return framework::OpKernelType(OperatorWithKernel::IndicateVarDataType(
                                       ctx, framework::GradVarName("Out")),
                                   ctx.GetPlace());
  }
};

template <typename T>
class DotOpGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("dot_grad");

    op->SetInput("X", this->Input("X"));
    op->SetInput("Y", this->Input("Y"));
    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    op->SetAttrMap(this->Attrs());
    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    op->SetOutput(framework::GradVarName("Y"), this->InputGrad("Y"));
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

REGISTER_OPERATOR(dot, ops::DotOp, ops::DotOpMaker,
                  ops::DotOpGradMaker<paddle::framework::OpDesc>,
                  ops::DotOpGradMaker<paddle::imperative::OpBase>);

REGISTER_OPERATOR(dot_grad, ops::DotGradOp);

REGISTER_OP_CPU_KERNEL(
    dot, ops::DotKernel<paddle::platform::CPUDeviceContext, float>,
    ops::DotKernel<paddle::platform::CPUDeviceContext, double>,
    ops::DotKernel<paddle::platform::CPUDeviceContext, int>,
    ops::DotKernel<paddle::platform::CPUDeviceContext, int64_t>);
REGISTER_OP_CPU_KERNEL(
    dot_grad, ops::DotGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::DotGradKernel<paddle::platform::CPUDeviceContext, double>,
    ops::DotGradKernel<paddle::platform::CPUDeviceContext, int>,
    ops::DotGradKernel<paddle::platform::CPUDeviceContext, int64_t>);
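The dot_grad kernels registered above implement the standard gradient rule for an inner product: given upstream gradient dout, dX = Y * dout and dY = X * dout, with dout broadcast back over the reduced axis. A quick NumPy finite-difference check of that rule (an illustrative sketch, not part of the commit):

import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(0.1, 1.0, 11)
y = rng.uniform(1.0, 3.0, 11)
dout = 1.0  # upstream gradient of the one-element output

# Analytic gradients, as computed by DotGradKernel.
dx = y * dout
dy = x * dout

# Finite-difference check on one coordinate of x.
eps = 1e-6
x_pert = x.copy()
x_pert[3] += eps
numeric = (np.dot(x_pert, y) - np.dot(x, y)) / eps
assert abs(numeric - dx[3]) < 1e-4
print("gradient check passed:", numeric, "~", dx[3])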
paddle/fluid/operators/dot_op.cu (new file, mode 100644)

// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/dot_op.h"

namespace ops = paddle::operators;
namespace plat = paddle::platform;

REGISTER_OP_CUDA_KERNEL(dot, ops::DotKernel<plat::CUDADeviceContext, float>,
                        ops::DotKernel<plat::CUDADeviceContext, double>,
                        ops::DotKernel<plat::CUDADeviceContext, int>,
                        ops::DotKernel<plat::CUDADeviceContext, int64_t>);
REGISTER_OP_CUDA_KERNEL(
    dot_grad, ops::DotGradKernel<plat::CUDADeviceContext, float>,
    ops::DotGradKernel<plat::CUDADeviceContext, double>,
    ops::DotGradKernel<plat::CUDADeviceContext, int>,
    ops::DotGradKernel<plat::CUDADeviceContext, int64_t>);
paddle/fluid/operators/dot_op.h (new file, mode 100644)

// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;

template <typename DeviceContext, typename T>
class DotKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* tensor_x = ctx.Input<Tensor>("X");
    auto* tensor_y = ctx.Input<Tensor>("Y");
    auto* tensor_out = ctx.Output<Tensor>("Out");
    tensor_out->mutable_data<T>(ctx.GetPlace());

#ifdef __NVCC__
    if (1 == tensor_out->dims().size()) {
      auto out = framework::EigenScalar<T>::From(*tensor_out);
      auto x = framework::EigenVector<T>::Flatten(*tensor_x);
      auto y = framework::EigenVector<T>::Flatten(*tensor_y);

      auto& dev = *ctx.template device_context<DeviceContext>().eigen_device();
      out.device(dev) = (x * y).sum();
    } else {
      auto out = EigenMatrix<T>::From(*tensor_out);
      auto x = EigenMatrix<T>::From(*tensor_x);
      auto y = EigenMatrix<T>::From(*tensor_y);

      auto& dev = *ctx.template device_context<DeviceContext>().eigen_device();
      out.device(dev) = (x * y).sum(Eigen::DSizes<int, 1>(1));
    }
#else
    const auto* data_x = tensor_x->data<T>();
    const auto* data_y = tensor_y->data<T>();
    auto* data_out = tensor_out->data<T>();

    auto x_dims = tensor_x->dims();
    auto step = x_dims[x_dims.size() - 1];
    int size = static_cast<int>(framework::product(x_dims));

    for (int ind = -1, j = 0; j < size; ++j) {
      if (j % step == 0) {
        ++ind;
        data_out[ind] = data_x[j] * data_y[j];
      } else {
        data_out[ind] += data_x[j] * data_y[j];
      }
    }
#endif
  }
};

template <typename DeviceContext, typename T>
class DotGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* tensor_x = ctx.Input<Tensor>("X");
    auto* tensor_y = ctx.Input<Tensor>("Y");
    auto* tensor_dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
    auto* tensor_dx = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto* tensor_dy = ctx.Output<Tensor>(framework::GradVarName("Y"));

    if (tensor_dx) tensor_dx->mutable_data<T>(ctx.GetPlace());
    if (tensor_dy) tensor_dy->mutable_data<T>(ctx.GetPlace());

#ifdef __NVCC__
    if (1 == tensor_dout->dims().size()) {
      auto dout = framework::EigenVector<T>::Flatten(*tensor_dout);

      if (tensor_dx) {
        auto y = framework::EigenVector<T>::Flatten(*tensor_y);
        auto dx = framework::EigenVector<T>::Flatten(*tensor_dx);
        auto& dev =
            *ctx.template device_context<DeviceContext>().eigen_device();
        Eigen::DSizes<int, 1> size(tensor_dx->numel());
        dx.device(dev) = y * dout.broadcast(size);
      }

      if (tensor_dy) {
        auto x = framework::EigenVector<T>::Flatten(*tensor_x);
        auto dy = framework::EigenVector<T>::Flatten(*tensor_dy);
        auto& dev =
            *ctx.template device_context<DeviceContext>().eigen_device();
        Eigen::DSizes<int, 1> size(tensor_dy->numel());
        dy.device(dev) = x * dout.broadcast(size);
      }
    } else {
      auto dout = EigenMatrix<T>::From(*tensor_dout);

      if (tensor_dx) {
        tensor_dx->mutable_data<T>(ctx.GetPlace());
        auto y = EigenMatrix<T>::From(*tensor_y);
        auto dx = EigenMatrix<T>::From(*tensor_dx);
        auto& dev =
            *ctx.template device_context<DeviceContext>().eigen_device();
        Eigen::DSizes<int, 2> size(1, tensor_dx->dims()[1]);
        dx.device(dev) = y * dout.broadcast(size);
      }

      if (tensor_dy) {
        tensor_dy->mutable_data<T>(ctx.GetPlace());
        auto x = EigenMatrix<T>::From(*tensor_x);
        auto dy = EigenMatrix<T>::From(*tensor_dy);
        auto& dev =
            *ctx.template device_context<DeviceContext>().eigen_device();
        Eigen::DSizes<int, 2> size(1, tensor_dy->dims()[1]);
        dy.device(dev) = x * dout.broadcast(size);
      }
    }
#else
    const auto* data_dout = tensor_dout->data<T>();

    if (tensor_dx) {
      auto* data_dx = tensor_dx->mutable_data<T>(ctx.GetPlace());
      const auto* data_y = tensor_y->data<T>();
      const framework::DDim& dim = tensor_x->dims();
      size_t N = static_cast<size_t>(framework::product(dim));

      auto step = dim[dim.size() - 1];

      int s = -1;
      for (size_t i = 0; i < N; ++i) {
        if (0 == i % step) ++s;
        data_dx[i] = data_y[i] * data_dout[s];
      }
    }

    if (tensor_dy) {
      auto* data_dy = tensor_dy->mutable_data<T>(ctx.GetPlace());
      const auto* data_x = tensor_x->data<T>();
      const framework::DDim& dim = tensor_y->dims();
      size_t N = static_cast<size_t>(framework::product(dim));

      auto step = dim[dim.size() - 1];

      int s = -1;
      for (size_t i = 0; i < N; ++i) {
        if (0 == i % step) ++s;
        data_dy[i] = data_x[i] * data_dout[s];
      }
    }
#endif
  }
};

}  // namespace operators
}  // namespace paddle
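The #else (CPU) branch of DotKernel walks the flattened inputs once, opening a new output slot every step = last_dim elements and accumulating products into it. A NumPy re-implementation of that loop, shown only to make the indexing concrete (assumed equivalent, not from the commit):

import numpy as np

def dot_cpu_like(x, y):
    # One pass over the flattened inputs; one output element per run of
    # `step` consecutive elements, i.e. per row of the last axis.
    step = x.shape[-1]
    flat_x, flat_y = x.ravel(), y.ravel()
    out = np.empty(flat_x.size // step)
    ind = -1
    for j in range(flat_x.size):
        if j % step == 0:
            ind += 1
            out[ind] = flat_x[j] * flat_y[j]
        else:
            out[ind] += flat_x[j] * flat_y[j]
    return out

a = np.arange(6.0).reshape(2, 3)
b = np.ones((2, 3))
print(dot_cpu_like(a, b))        # [ 3. 12.]
print(np.sum(a * b, axis=-1))    # same reduction, vectorized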
python/paddle/__init__.py
@@ -148,7 +148,7 @@ from .tensor.math import addmm  #DEFINE_ALIAS
 # from .tensor.io import save  #DEFINE_ALIAS
 # from .tensor.io import load  #DEFINE_ALIAS
 from .tensor.linalg import matmul  #DEFINE_ALIAS
-# from .tensor.linalg import dot  #DEFINE_ALIAS
+from .tensor.linalg import dot  #DEFINE_ALIAS
 # from .tensor.linalg import einsum  #DEFINE_ALIAS
 # from .tensor.linalg import morm  #DEFINE_ALIAS
 # from .tensor.linalg import transpose  #DEFINE_ALIAS
python/paddle/fluid/tests/unittests/test_dot_op.py (new file, mode 100644)

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import paddle
import paddle.fluid as fluid
import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci
from paddle.fluid.op import Operator
from paddle.fluid import compiler, Program, program_guard


class DotOp(OpTest):
    def setUp(self):
        self.op_type = "dot"
        self.init_dtype()
        self.init_input_output()

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(self.x),
            'Y': OpTest.np_dtype_to_fluid_dtype(self.y)
        }
        self.outputs = {'Out': self.out}
        self.attrs = {}

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X', 'Y'], 'Out')

    def test_check_grad_ingore_x(self):
        self.check_grad(['Y'], 'Out', no_grad_set=set("X"))

    def test_check_grad_ingore_y(self):
        self.check_grad(['X'], 'Out', no_grad_set=set('Y'))

    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1, [121]).astype(self.dtype)
        self.y = np.random.uniform(1, 3, [121]).astype(self.dtype)
        self.out = np.dot(self.x, self.y)

    def init_dtype(self):
        self.dtype = np.float64


class DotOpBatch(DotOp):
    def init_input_output(self):
        self.x = np.random.uniform(0.1, 1,
                                   [132]).astype(self.dtype).reshape([11, 12])
        self.y = np.random.uniform(1, 3,
                                   [132]).astype(self.dtype).reshape([11, 12])
        self.out = np.sum(self.x * self.y, axis=1).reshape([11, 1])


class TestDotOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):

            # the input dtype of dot must be float32, float64, int32 or int64;
            # an unsupported dtype such as uint8 should raise
            x1 = fluid.layers.data(name='x1', shape=[120], dtype="uint8")
            y1 = fluid.layers.data(name='y1', shape=[120], dtype="uint8")
            self.assertRaises(Exception, paddle.dot, x1, y1)

            x2 = fluid.layers.data(name='x2', shape=[2, 3], dtype="float32")
            y2 = fluid.layers.data(name='y2', shape=[2, 3], dtype="float32")
            self.assertRaises(Exception, paddle.dot, x2, y2)

            x3 = fluid.layers.data(name='x3', shape=[3], dtype="float32")
            y3 = fluid.layers.data(name='y3', shape=[2, 3], dtype="float32")
            self.assertRaises(Exception, paddle.dot, x2, y3)


class TestDygraph(unittest.TestCase):
    def test_dygraph(self):
        with fluid.dygraph.guard():
            x1 = fluid.dygraph.to_variable(np.array([1, 3]).astype(np.float32))
            y1 = fluid.dygraph.to_variable(np.array([2, 5]).astype(np.float32))
            self.assertTrue(
                np.allclose(paddle.dot(x1, y1).numpy(), np.array([17])))

            x1 = fluid.dygraph.to_variable(
                np.array([[1, 3], [3, 5]]).astype(np.float32))
            y1 = fluid.dygraph.to_variable(
                np.array([[2, 5], [6, 8]]).astype(np.float32))
            self.assertTrue(
                np.array_equal(
                    paddle.dot(x1, y1).numpy(), np.array([[17], [58]])))


if __name__ == '__main__':
    unittest.main()
python/paddle/fluid/tests/unittests/white_list/no_grad_set_white_list.py
@@ -34,6 +34,7 @@ NEED_TO_FIX_OP_LIST = [
     'deformable_conv_v1',
     'depthwise_conv2d',
     'depthwise_conv2d_transpose',
+    'dot',
     'elementwise_add',
     'elementwise_div',
     'elementwise_max',
python/paddle/tensor/__init__.py
@@ -123,7 +123,7 @@ from .math import addmm  #DEFINE_ALIAS
 # from .io import save  #DEFINE_ALIAS
 # from .io import load  #DEFINE_ALIAS
 from .linalg import matmul  #DEFINE_ALIAS
-# from .linalg import dot  #DEFINE_ALIAS
+from .linalg import dot  #DEFINE_ALIAS
 # from .linalg import einsum  #DEFINE_ALIAS
 # from .linalg import morm  #DEFINE_ALIAS
 # from .linalg import transpose  #DEFINE_ALIAS
@@ -131,7 +131,6 @@ from .linalg import dist  #DEFINE_ALIAS
 # from .linalg import t  #DEFINE_ALIAS
 # from .linalg import cross  #DEFINE_ALIAS
 # from .linalg import cholesky  #DEFINE_ALIAS
-# from .linalg import dot  #DEFINE_ALIAS
 # from .manipulation import cast  #DEFINE_ALIAS
 # from .manipulation import concat  #DEFINE_ALIAS
 # from .manipulation import expand  #DEFINE_ALIAS
python/paddle/tensor/linalg.py
@@ -16,10 +16,9 @@ from ..fluid.layer_helper import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype, check_type
 from ..fluid.framework import in_dygraph_mode
 
-# TODO: define functions of linear algebra
 __all__ = [
     'matmul',
-    #       'dot',
+    'dot',
     # 'einsum',
     # 'morm',
     # 'transpose',
@@ -234,3 +233,52 @@ def dist(x, y, p=2):
     helper.append_op(
         type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs)
     return out
+
+
+def dot(x, y, name=None):
+    """
+    This operator calculates the inner product for vectors.
+
+    .. note::
+       Only supports 1-D Tensor (vector).
+
+    Parameters:
+        x(Variable): 1-D ``Tensor`` or ``LoDTensor``. Its datatype should be ``float32``, ``float64``, ``int32``, ``int64``
+        y(Variable): 1-D ``Tensor`` or ``LoDTensor``. Its datatype should be ``float32``, ``float64``, ``int32``, ``int64``
+        name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`
+
+    Examples:
+
+    .. code-block:: python
+
+        import paddle
+        import paddle.fluid as fluid
+        import numpy as np
+
+        with fluid.dygraph.guard():
+            x = fluid.dygraph.to_variable(np.random.uniform(0.1, 1, [10]).astype(np.float32))
+            y = fluid.dygraph.to_variable(np.random.uniform(1, 3, [10]).astype(np.float32))
+            z = paddle.dot(x, y)
+            print(z.numpy())
+
+    """
+    op_type = 'dot'
+    assert x is not None, 'x cannot be None in {}'.format(op_type)
+    assert y is not None, 'y cannot be None in {}'.format(op_type)
+
+    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
+                             op_type)
+    check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'],
+                             op_type)
+
+    helper = LayerHelper(op_type, **locals())
+    if name is None:
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+    else:
+        out = helper.create_variable(
+            name=name, dtype=x.dtype, persistable=False)
+    helper.append_op(
+        type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out})
+    return out
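The docstring example covers dygraph mode only, but since TestDotOpError builds graph variables with fluid.layers.data, the API is also usable in static-graph programs. A hedged usage sketch against the 2020-era fluid API; the variable names and feed values here are illustrative, not from the commit:

import numpy as np
import paddle
import paddle.fluid as fluid

main_prog, startup_prog = fluid.Program(), fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    x = fluid.layers.data(name='x', shape=[10], dtype='float32',
                          append_batch_size=False)
    y = fluid.layers.data(name='y', shape=[10], dtype='float32',
                          append_batch_size=False)
    z = paddle.dot(x, y)  # one-element tensor: the inner product

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_prog)
out, = exe.run(main_prog,
               feed={'x': np.ones(10, dtype=np.float32),
                     'y': np.full(10, 2.0, dtype=np.float32)},
               fetch_list=[z])
print(out)  # [20.]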