PaddlePaddle / Paddle: commit 918aeb71
Unverified commit 918aeb71, authored on Jun 16, 2021 by ronnywang and committed via GitHub on Jun 17, 2021.
Add atan2 op and test (#33067)
* add atan2_op
* fix
Parent: b0984c7c
Showing 7 changed files with 528 additions and 0 deletions.
paddle/fluid/operators/atan2_op.cc                    +138  -0
paddle/fluid/operators/atan2_op.cu                     +31  -0
paddle/fluid/operators/atan2_op.h                     +168  -0
python/paddle/__init__.py                               +2  -0
python/paddle/fluid/tests/unittests/test_atan2_op.py  +132  -0
python/paddle/tensor/__init__.py                        +1  -0
python/paddle/tensor/math.py                           +56  -0
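Taken together, these files wire a new paddle.atan2 API through the framework: a C++ operator with CPU and CUDA kernels, the Python binding, and unit tests. As a reading aid, here is a minimal usage sketch mirroring the docstring example added in python/paddle/tensor/math.py; it assumes a Paddle build that includes this commit:

import paddle

# Element-wise, quadrant-aware arctangent of y/x.
y = paddle.to_tensor([-1.0, 1.0, 1.0, -1.0])
x = paddle.to_tensor([-1.0, -1.0, 1.0, 1.0])
out = paddle.atan2(y, x)
print(out.numpy())  # approximately [-2.3562, 2.3562, 0.7854, -0.7854]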
paddle/fluid/operators/atan2_op.cc
(new file, mode 100644)
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/operators/atan2_op.h"
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
namespace paddle {
namespace operators {

class Atan2Op : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X1"), "Input", "X1", "atan2");
    OP_INOUT_CHECK(ctx->HasInput("X2"), "Input", "X2", "atan2");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "atan2");

    auto in_dims = ctx->GetInputDim("X1");
    ctx->SetOutputDim("Out", in_dims);
  }
};

class Atan2OpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X1", "(Tensor), The input tensor of atan2 op.");
    AddInput("X2", "(Tensor), The input tensor of atan2 op.");
    AddOutput("Out", "(Tensor), The output tensor of atan2 op.");
    AddComment(R"DOC(
Atan2 Operator.

This operator is used to perform elementwise atan2 for input $X1$, $X2$.

$$out = atan2(x1, x2)$$
)DOC");
  }
};

class Atan2GradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X1"), "Input", "X1", "Atan2Grad");
    OP_INOUT_CHECK(ctx->HasInput("X2"), "Input", "X2", "Atan2Grad");
    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
                   "Out@Grad", "Atan2Grad");

    auto x1_grad_name = framework::GradVarName("X1");
    auto x2_grad_name = framework::GradVarName("X2");
    auto dout_dims = ctx->GetInputDim(framework::GradVarName("Out"));

    if (ctx->HasOutput(x1_grad_name)) {
      ctx->SetOutputDim(framework::GradVarName("X1"), dout_dims);
    }
    if (ctx->HasOutput(x2_grad_name)) {
      ctx->SetOutputDim(framework::GradVarName("X2"), dout_dims);
    }
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    auto dtype = OperatorWithKernel::IndicateVarDataType(ctx, "X1");
    return framework::OpKernelType(dtype, ctx.GetPlace());
  }
};

template <typename T>
class Atan2GradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

  void Apply(GradOpPtr<T> retv) const override {
    retv->SetType("atan2_grad");
    retv->SetInput("X1", this->Input("X1"));
    retv->SetInput("X2", this->Input("X2"));
    retv->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    retv->SetAttrMap(this->Attrs());
    retv->SetOutput(framework::GradVarName("X1"), this->InputGrad("X1"));
    retv->SetOutput(framework::GradVarName("X2"), this->InputGrad("X2"));
  }
};

class Atan2OpVarTypeInference : public framework::VarTypeInference {
 public:
  void operator()(framework::InferVarTypeContext* ctx) const override {
    auto type = ctx->GetInputDataType("X1");
    if (ctx->GetInputDataType("X1") == framework::proto::VarType::INT32 ||
        ctx->GetInputDataType("X1") == framework::proto::VarType::INT64 ||
        ctx->GetInputDataType("X2") == framework::proto::VarType::INT32 ||
        ctx->GetInputDataType("X2") == framework::proto::VarType::INT64) {
      type = framework::proto::VarType::FP64;
    }
    ctx->SetOutputDataType("Out", type);
  }
};
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(atan2, ops::Atan2Op, ops::Atan2OpMaker,
                  ops::Atan2GradMaker<paddle::framework::OpDesc>,
                  ops::Atan2GradMaker<paddle::imperative::OpBase>,
                  ops::Atan2OpVarTypeInference);
REGISTER_OPERATOR(atan2_grad, ops::Atan2GradOp);

REGISTER_OP_CPU_KERNEL(
    atan2, ops::Atan2Kernel<paddle::platform::CPUDeviceContext, int32_t>,
    ops::Atan2Kernel<paddle::platform::CPUDeviceContext, int64_t>,
    ops::Atan2Kernel<paddle::platform::CPUDeviceContext, float>,
    ops::Atan2Kernel<paddle::platform::CPUDeviceContext, double>,
    ops::Atan2Kernel<paddle::platform::CPUDeviceContext,
                     paddle::platform::float16>);

REGISTER_OP_CPU_KERNEL(
    atan2_grad,
    ops::Atan2GradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::Atan2GradKernel<paddle::platform::CPUDeviceContext, double>,
    ops::Atan2GradKernel<paddle::platform::CPUDeviceContext,
                         paddle::platform::float16>);
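Atan2OpVarTypeInference above promotes INT32/INT64 inputs to FP64, in line with the Atan2Out<int32_t> and Atan2Out<int64_t> specializations in atan2_op.h. A minimal sketch of the observable effect, assuming a Paddle build with this commit applied:

import paddle

# Integer inputs are accepted by the dtype check in paddle.atan2, but the
# var-type inference promotes the output to float64 (FP64).
y = paddle.to_tensor([1, -1], dtype='int32')
x = paddle.to_tensor([1, 1], dtype='int32')
out = paddle.atan2(y, x)
print(out.dtype)  # expected: paddle.float64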
paddle/fluid/operators/atan2_op.cu
(new file, mode 100644)
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/operators/atan2_op.h"
namespace ops = paddle::operators;

REGISTER_OP_CUDA_KERNEL(
    atan2, ops::Atan2Kernel<paddle::platform::CUDADeviceContext, int32_t>,
    ops::Atan2Kernel<paddle::platform::CUDADeviceContext, int64_t>,
    ops::Atan2Kernel<paddle::platform::CUDADeviceContext, float>,
    ops::Atan2Kernel<paddle::platform::CUDADeviceContext, double>,
    ops::Atan2Kernel<paddle::platform::CUDADeviceContext,
                     paddle::platform::float16>);

REGISTER_OP_CUDA_KERNEL(
    atan2_grad,
    ops::Atan2GradKernel<paddle::platform::CUDADeviceContext, float>,
    ops::Atan2GradKernel<paddle::platform::CUDADeviceContext, double>,
    ops::Atan2GradKernel<paddle::platform::CUDADeviceContext,
                         paddle::platform::float16>);
paddle/fluid/operators/atan2_op.h
(new file, mode 100644)
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/for_range.h"
namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using framework::To32BitIndex;

template <typename T>
struct Atan2Out {
  using type = T;
};

template <>
struct Atan2Out<int32_t> {
  using type = double;
};

template <>
struct Atan2Out<int64_t> {
  using type = double;
};

template <typename T>
struct Atan2Functor {
  Atan2Functor(const T* x1, const T* x2, typename Atan2Out<T>::type* out,
               int64_t numel)
      : x1_(x1), x2_(x2), out_(out), numel_(numel) {}

  HOSTDEVICE void operator()(int64_t idx) const {
    out_[idx] = static_cast<typename Atan2Out<T>::type>(
        ::atan2f(static_cast<float>(x1_[idx]), static_cast<float>(x2_[idx])));
  }

  const T* x1_;
  const T* x2_;
  typename Atan2Out<T>::type* out_;
  int64_t numel_;
};

template <>
struct Atan2Functor<double> {
  Atan2Functor(const double* x1, const double* x2, double* out, int64_t numel)
      : x1_(x1), x2_(x2), out_(out), numel_(numel) {}

  HOSTDEVICE void operator()(int64_t idx) const {
    out_[idx] = ::atan2(x1_[idx], x2_[idx]);
  }

  const double* x1_;
  const double* x2_;
  double* out_;
  int64_t numel_;
};

// dx1 = dout * x2 / ((x1)^2 + (x2)^2)
// dx2 = - dout * x1 / ((x1)^2 + (x2)^2)
template <typename T>
struct Atan2GradFunctor {
  Atan2GradFunctor(const T* x1, const T* x2, const T* dout, T* dx1, T* dx2,
                   int64_t numel)
      : x1_(x1), x2_(x2), dout_(dout), dx1_(dx1), dx2_(dx2), numel_(numel) {}

  HOSTDEVICE void operator()(int64_t idx) const {
    float x1 = static_cast<float>(x1_[idx]);
    float x2 = static_cast<float>(x2_[idx]);
    float x = x1 * x1 + x2 * x2;
    dx1_[idx] = static_cast<T>(static_cast<float>(dout_[idx]) * x2 / x);
    dx2_[idx] = static_cast<T>(-static_cast<float>(dout_[idx]) * x1 / x);
  }

  const T* x1_;
  const T* x2_;
  const T* dout_;
  T* dx1_;
  T* dx2_;
  int64_t numel_;
};

template <>
struct Atan2GradFunctor<double> {
  Atan2GradFunctor(const double* x1, const double* x2, const double* dout,
                   double* dx1, double* dx2, int64_t numel)
      : x1_(x1), x2_(x2), dout_(dout), dx1_(dx1), dx2_(dx2), numel_(numel) {}

  HOSTDEVICE void operator()(int64_t idx) const {
    auto x = x1_[idx] * x1_[idx] + x2_[idx] * x2_[idx];
    dx1_[idx] = dout_[idx] * x2_[idx] / x;
    dx2_[idx] = -dout_[idx] * x1_[idx] / x;
  }

  const double* x1_;
  const double* x2_;
  const double* dout_;
  double* dx1_;
  double* dx2_;
  int64_t numel_;
};

template <typename DeviceContext, typename T>
class Atan2Kernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    const Tensor* X1 = context.Input<Tensor>("X1");
    const Tensor* X2 = context.Input<Tensor>("X2");
    Tensor* Out = context.Output<Tensor>("Out");

    auto numel = X1->numel();
    auto x1 = X1->data<T>();
    auto x2 = X2->data<T>();
    auto out = Out->mutable_data<typename Atan2Out<T>::type>(
        context.GetPlace(),
        size_t(numel * sizeof(typename Atan2Out<T>::type)));
    auto& dev_ctx = context.template device_context<DeviceContext>();

    platform::ForRange<DeviceContext> for_range(dev_ctx, numel);
    Atan2Functor<T> functor(x1, x2, out, numel);
    for_range(functor);
  }
};

template <typename DeviceContext, typename T>
class Atan2GradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const {
    const Tensor* X1 = context.Input<Tensor>("X1");
    const Tensor* X2 = context.Input<Tensor>("X2");
    const Tensor* dOut = context.Input<Tensor>(framework::GradVarName("Out"));
    Tensor* dX1 = context.Output<Tensor>(framework::GradVarName("X1"));
    Tensor* dX2 = context.Output<Tensor>(framework::GradVarName("X2"));

    auto numel = X1->numel();
    auto x1 = X1->data<T>();
    auto x2 = X2->data<T>();
    auto dout = dOut->data<T>();
    auto dx1 =
        dX1->mutable_data<T>(context.GetPlace(), size_t(numel * sizeof(T)));
    auto dx2 =
        dX2->mutable_data<T>(context.GetPlace(), size_t(numel * sizeof(T)));
    auto& dev_ctx = context.template device_context<DeviceContext>();

    platform::ForRange<DeviceContext> for_range(dev_ctx, numel);
    Atan2GradFunctor<T> functor(x1, x2, dout, dx1, dx2, numel);
    for_range(functor);
  }
};

}  // namespace operators
}  // namespace paddle
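The comment above Atan2GradFunctor records the analytic gradients, dx1 = dout * x2 / (x1^2 + x2^2) and dx2 = -dout * x1 / (x1^2 + x2^2); these follow from d/dy atan2(y, x) = x / (x^2 + y^2) and d/dx atan2(y, x) = -y / (x^2 + y^2). A small NumPy finite-difference check of those formulas, independent of Paddle:

import numpy as np

# Verify the analytic gradients of atan2(x1, x2) against central differences.
rng = np.random.default_rng(0)
x1 = rng.uniform(0.1, 1.0, 8)
x2 = rng.uniform(0.1, 1.0, 8)
dout = np.ones_like(x1)
eps = 1e-6

analytic_dx1 = dout * x2 / (x1 * x1 + x2 * x2)
numeric_dx1 = (np.arctan2(x1 + eps, x2) - np.arctan2(x1 - eps, x2)) / (2 * eps)
assert np.allclose(analytic_dx1, numeric_dx1, atol=1e-5)

analytic_dx2 = -dout * x1 / (x1 * x1 + x2 * x2)
numeric_dx2 = (np.arctan2(x1, x2 + eps) - np.arctan2(x1, x2 - eps)) / (2 * eps)
assert np.allclose(analytic_dx2, numeric_dx2, atol=1e-5)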
python/paddle/__init__.py
@@ -152,6 +152,7 @@ from .tensor.math import abs  # noqa: F401
 from .tensor.math import acos  # noqa: F401
 from .tensor.math import asin  # noqa: F401
 from .tensor.math import atan  # noqa: F401
+from .tensor.math import atan2  # noqa: F401
 from .tensor.math import ceil  # noqa: F401
 from .tensor.math import cos  # noqa: F401
 from .tensor.math import tan  # noqa: F401
@@ -434,6 +435,7 @@ __all__ = [  # noqa
     'divide',
     'ceil',
     'atan',
+    'atan2',
     'expand',
     'broadcast_to',
     'ones_like',
python/paddle/fluid/tests/unittests/test_atan2_op.py
(new file, mode 100644)
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
from op_test import OpTest
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import compiler, Program, program_guard

paddle.enable_static()
np.random.seed(0)


def atan2_grad(x1, x2, dout):
    dx1 = dout * x2 / (x1 * x1 + x2 * x2)
    dx2 = -dout * x1 / (x1 * x1 + x2 * x2)
    return dx1, dx2


class TestAtan2(OpTest):
    def setUp(self):
        self.op_type = "atan2"
        self.init_dtype()

        x1 = np.random.uniform(-1, -0.1, [15, 17]).astype(self.dtype)
        x2 = np.random.uniform(0.1, 1, [15, 17]).astype(self.dtype)
        out = np.arctan2(x1, x2)

        self.inputs = {'X1': x1, 'X2': x2}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        self.check_grad(['X1', 'X2'], 'Out')

    def test_check_output(self):
        self.check_output()

    def init_dtype(self):
        self.dtype = np.float64


class TestAtan2_float(TestAtan2):
    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype not in [np.int32, np.int64]:
            self.check_grad(
                ['X1', 'X2'],
                'Out',
                user_defined_grads=atan2_grad(self.inputs['X1'],
                                              self.inputs['X2'],
                                              1 / self.inputs['X1'].size))


class TestAtan2_float16(TestAtan2_float):
    def init_dtype(self):
        self.dtype = np.float16


class TestAtan2_int32(TestAtan2_float):
    def init_dtype(self):
        self.dtype = np.int32


class TestAtan2_int64(TestAtan2_float):
    def init_dtype(self):
        self.dtype = np.int64


class TestAtan2API(unittest.TestCase):
    def init_dtype(self):
        self.dtype = 'float64'
        self.shape = [11, 17]

    def setUp(self):
        self.init_dtype()
        self.x1 = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        self.x2 = np.random.uniform(-1, -0.1, self.shape).astype(self.dtype)
        self.place = [paddle.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.place.append(paddle.CUDAPlace(0))

    def test_static_api(self):
        paddle.enable_static()

        def run(place):
            with paddle.static.program_guard(paddle.static.Program()):
                X1 = paddle.fluid.data('X1', self.shape, dtype=self.dtype)
                X2 = paddle.fluid.data('X2', self.shape, dtype=self.dtype)
                out = paddle.atan2(X1, X2)
                exe = paddle.static.Executor(place)
                res = exe.run(feed={'X1': self.x1, 'X2': self.x2},
                              fetch_list=[out])
            out_ref = np.arctan2(self.x1, self.x2)
            for r in res:
                self.assertEqual(np.allclose(out_ref, r), True)

        for place in self.place:
            run(place)

    def test_dygraph_api(self):
        def run(place):
            paddle.disable_static(place)
            X1 = paddle.to_tensor(self.x1)
            X2 = paddle.to_tensor(self.x2)
            out = paddle.atan2(X1, X2)
            out_ref = np.arctan2(self.x1, self.x2)
            self.assertEqual(np.allclose(out_ref, out.numpy()), True)
            paddle.enable_static()

        for place in self.place:
            run(place)


if __name__ == '__main__':
    unittest.main()
python/paddle/tensor/__init__.py
@@ -147,6 +147,7 @@ from .math import add  # noqa: F401
 from .math import add_  # noqa: F401
 from .math import subtract  # noqa: F401
 from .math import subtract_  # noqa: F401
+from .math import atan2  # noqa: F401
 from .math import logsumexp  # noqa: F401
 from .math import inverse  # noqa: F401
 from .math import log2  # noqa: F401
python/paddle/tensor/math.py
@@ -2386,3 +2386,59 @@ def neg(x, name=None):
     """
     return layers.scale(x, scale=-1.0, bias=0.0, bias_after_scale=True, act=None, name=name)
+
+
+def atan2(y, x, name=None):
+    r"""
+    Element-wise arctangent of y/x with consideration of the quadrant.
+
+    Equation:
+        .. math::
+
+            atan2(y, x) = \left\{\begin{matrix}
+            & tan^{-1}(\frac{y}{x}) & x > 0 \\
+            & tan^{-1}(\frac{y}{x}) + \pi & y >= 0, x < 0 \\
+            & tan^{-1}(\frac{y}{x}) - \pi & y < 0, x < 0 \\
+            & +\frac{\pi}{2} & y > 0, x = 0 \\
+            & -\frac{\pi}{2} & y < 0, x = 0 \\
+            & \text{undefined} & y = 0, x = 0
+            \end{matrix}\right.
+
+    Args:
+        y (Tensor): An N-D Tensor, the data type is int32, int64, float16, float32, float64.
+        x (Tensor): An N-D Tensor, must have the same data type as `y`.
+        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
+
+    Returns:
+        out (Tensor): An N-D Tensor whose shape and data type are the same as the input (the output data type is float64 when the input data type is int).
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+
+            y = paddle.to_tensor([-1, +1, +1, -1]).astype('float32')
+            # Tensor(shape=[4], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
+            #        [-1,  1,  1, -1])
+
+            x = paddle.to_tensor([-1, -1, +1, +1]).astype('float32')
+            # Tensor(shape=[4], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
+            #        [-1, -1,  1,  1])
+
+            out = paddle.atan2(y, x)
+            # Tensor(shape=[4], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
+            #        [-2.35619450,  2.35619450,  0.78539819, -0.78539819])
+
+    """
+    if in_dygraph_mode():
+        return core.ops.atan2(y, x)
+    else:
+        check_variable_and_dtype(
+            y, 'y', ['int32', 'int64', 'float16', 'float32', 'float64'],
+            'atan2')
+        check_variable_and_dtype(
+            x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'],
+            'atan2')
+
+        helper = LayerHelper('atan2', **locals())
+        inputs = {'X1': y, 'X2': x}
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='atan2', inputs=inputs, outputs={'Out': out})
+        return out
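The expected values in the docstring example can be cross-checked against NumPy's arctan2, which uses the same quadrant convention:

import numpy as np

y = np.array([-1.0, 1.0, 1.0, -1.0], dtype=np.float32)
x = np.array([-1.0, -1.0, 1.0, 1.0], dtype=np.float32)

# Third quadrant: atan2(-1, -1) = -3*pi/4; second quadrant: atan2(1, -1) = 3*pi/4.
print(np.arctan2(y, x))  # [-2.3561945  2.3561945  0.7853982 -0.7853982]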