Unverified commit c284d42a, authored by Chen Weihang, committed by GitHub

Add logspace yaml (#49194)

* add logspace yaml

* update by comments

* resolve test framework conflict
Parent: aded3338
@@ -1031,6 +1031,18 @@
   kernel :
     func : logical_xor

+- op : logspace
+  args : (Tensor start, Tensor stop, Tensor num, Tensor base, DataType dtype, Place place={})
+  output : Tensor(out)
+  infer_meta :
+    func : LogspaceInferMeta
+    param : [start, stop, num, base, dtype]
+  kernel :
+    func : logspace
+    param : [start, stop, num, base, dtype]
+    data_type : dtype
+    backend : place
+
 - op : logsumexp
   args : (Tensor x, int64_t[] axis, bool keepdim, bool reduce_all)
   output : Tensor(out)
......
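Note: this yaml entry feeds Paddle's op code generator, which emits the C++ `logspace` API and the `_C_ops.logspace` binding used further down; `data_type : dtype` ties the kernel's dtype to the attribute and `backend : place` selects the device. A minimal sketch (not part of the diff) of the resulting behavior, mirroring the common test case below:

    import numpy as np
    import paddle

    # logspace(start, stop, num, base) evaluates base ** linspace(start, stop, num);
    # with base=2 and start..stop = 0..10 this gives 2**0 .. 2**10, as in
    # TestLogspaceOpCommonCase below.
    out = paddle.logspace(0, 10, 11, base=2.0, dtype='float32')
    np.testing.assert_allclose(out.numpy(), np.power(2.0, np.arange(0, 11)), rtol=1e-5)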
@@ -1972,6 +1972,7 @@ void LogspaceInferMeta(const MetaTensor& start,
                        const MetaTensor& stop,
                        const MetaTensor& number,
                        const MetaTensor& base,
+                       DataType dtype,
                        MetaTensor* out) {
   auto s_dims = start.dims();
   PADDLE_ENFORCE_EQ(
@@ -2002,7 +2003,7 @@ void LogspaceInferMeta(const MetaTensor& start,
           "but received input shape is [%s].",
           b_dims));
   out->set_dims(phi::make_ddim({-1}));
-  out->set_dtype(start.dtype());
+  out->set_dtype(dtype);
 }

 void MergedAdamInferMeta(
......
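Note: adding the `DataType dtype` parameter means LogspaceInferMeta now takes the output dtype from the op's `dtype` attribute instead of inheriting `start.dtype()`. A quick behavioral sketch (dygraph mode assumed):

    import paddle

    # The output dtype now follows the dtype argument, regardless of the
    # dtypes of the start/stop/num/base tensors built internally.
    out = paddle.logspace(0, 4, 5, base=10.0, dtype='float64')
    assert out.dtype == paddle.float64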
@@ -341,6 +341,7 @@ void LogspaceInferMeta(const MetaTensor& start,
                        const MetaTensor& stop,
                        const MetaTensor& number,
                        const MetaTensor& base,
+                       DataType dtype,
                        MetaTensor* out);

 void MergedAdamInferMeta(
......
@@ -24,6 +24,9 @@ class TestLogspaceOpCommonCase(OpTest):
     def setUp(self):
         self.op_type = "logspace"
         self.python_api = paddle.logspace
+        self.init_data()
+
+    def init_data(self):
         dtype = 'float32'
         self.inputs = {
             'Start': np.array([0]).astype(dtype),
@@ -32,17 +35,14 @@ class TestLogspaceOpCommonCase(OpTest):
             'Base': np.array([2]).astype(dtype),
         }
         self.attrs = {'dtype': int(paddle.float32)}
-
         self.outputs = {'Out': np.power(2, np.arange(0, 11)).astype(dtype)}

     def test_check_output(self):
         self.check_output()


-class TestLogspaceOpReverseCase(OpTest):
-    def setUp(self):
-        self.op_type = "logspace"
-        self.python_api = paddle.logspace
+class TestLogspaceOpReverseCase(TestLogspaceOpCommonCase):
+    def init_data(self):
         dtype = 'float32'
         self.inputs = {
             'Start': np.array([10]).astype(dtype),
@@ -51,17 +51,11 @@ class TestLogspaceOpReverseCase(OpTest):
             'Base': np.array([2]).astype(dtype),
         }
         self.attrs = {'dtype': int(paddle.float32)}
-
         self.outputs = {'Out': np.power(2, np.arange(10, -1, -1)).astype(dtype)}
-
-    def test_check_output(self):
-        self.check_output()


-class TestLogspaceOpNumOneCase(OpTest):
-    def setUp(self):
-        self.op_type = "logspace"
-        self.python_api = paddle.logspace
+class TestLogspaceOpNumOneCase(TestLogspaceOpCommonCase):
+    def init_data(self):
         dtype = 'float32'
         self.inputs = {
             'Start': np.array([10]).astype(dtype),
@@ -70,17 +64,11 @@ class TestLogspaceOpNumOneCase(OpTest):
             'Base': np.array([2]).astype(dtype),
         }
         self.attrs = {'dtype': int(paddle.float32)}
-
         self.outputs = {'Out': np.power(2, np.array(10)).astype(dtype)}
-
-    def test_check_output(self):
-        self.check_output()


-class TestLogspaceOpMinusBaseCase(OpTest):
-    def setUp(self):
-        self.op_type = "logspace"
-        self.python_api = paddle.logspace
+class TestLogspaceOpMinusBaseCase(TestLogspaceOpCommonCase):
+    def init_data(self):
         dtype = 'float32'
         self.inputs = {
             'Start': np.array([0]).astype(dtype),
@@ -89,17 +77,11 @@ class TestLogspaceOpMinusBaseCase(OpTest):
             'Base': np.array([-2]).astype(dtype),
         }
         self.attrs = {'dtype': int(paddle.float32)}
-
         self.outputs = {'Out': np.power(-2, np.arange(0, 11)).astype(dtype)}
-
-    def test_check_output(self):
-        self.check_output()


-class TestLogspaceOpZeroBaseCase(OpTest):
-    def setUp(self):
-        self.op_type = "logspace"
-        self.python_api = paddle.logspace
+class TestLogspaceOpZeroBaseCase(TestLogspaceOpCommonCase):
+    def init_data(self):
         dtype = 'float32'
         self.inputs = {
             'Start': np.array([0]).astype(dtype),
@@ -108,12 +90,8 @@ class TestLogspaceOpZeroBaseCase(OpTest):
             'Base': np.array([0]).astype(dtype),
         }
         self.attrs = {'dtype': int(paddle.float32)}
-
         self.outputs = {'Out': np.power(0, np.arange(0, 11)).astype(dtype)}
-
-    def test_check_output(self):
-        self.check_output()


 class TestLogspaceAPI(unittest.TestCase):
     def test_variable_input1(self):
......
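Note: the test refactor hoists the shared `setUp` (op_type, python_api, output check) into TestLogspaceOpCommonCase and leaves each subclass overriding only `init_data`. A sketch of what a new case would look like under this pattern; the class name is hypothetical, and the 'Stop'/'Num' input keys are assumed from the op's inputs (they fall inside the collapsed parts of the hunks above):

    import numpy as np
    import paddle

    # Hypothetical extra case in test_logspace_op.py, reusing the base
    # class's setUp/test_check_output and overriding only the data.
    class TestLogspaceOpFloat64Case(TestLogspaceOpCommonCase):
        def init_data(self):
            dtype = 'float64'
            self.inputs = {
                'Start': np.array([0]).astype(dtype),
                'Stop': np.array([10]).astype(dtype),    # assumed key
                'Num': np.array([11]).astype('int32'),   # assumed key
                'Base': np.array([2]).astype(dtype),
            }
            self.attrs = {'dtype': int(paddle.float64)}
            self.outputs = {'Out': np.power(2, np.arange(0, 11)).astype(dtype)}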
@@ -21,7 +21,7 @@ import warnings

 import numpy as np

 import paddle
-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops
 from paddle.common_ops_import import fill_constant
 from ..fluid.data_feeder import (
@@ -447,8 +447,13 @@ def logspace(start, stop, num, base=10.0, dtype=None, name=None):
         with device_guard("cpu"):
             tensor_base = fill_constant([1], dtype, base)
     if in_dygraph_mode():
-        return _legacy_C_ops.logspace(
-            tensor_start, tensor_stop, tensor_num, tensor_base, 'dtype', dtype
+        return _C_ops.logspace(
+            tensor_start,
+            tensor_stop,
+            tensor_num,
+            tensor_base,
+            dtype,
+            _current_expected_place(),
         )
     else:
         helper = LayerHelper("logspace", **locals())
......
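Note: the dygraph branch now dispatches to the generated `_C_ops.logspace`, passing `dtype` and the current expected place positionally rather than the legacy `'dtype'` attribute string. A quick sketch checking the new path against the numpy reference used by the reverse test case:

    import numpy as np
    import paddle

    # Descending start > stop yields the reversed sequence, as in
    # TestLogspaceOpReverseCase; in dygraph mode this exercises the new
    # _C_ops.logspace call.
    out = paddle.logspace(10, 0, 11, base=2.0, dtype='float32')
    np.testing.assert_allclose(
        out.numpy(), np.power(2.0, np.arange(10, -1, -1)), rtol=1e-5
    )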