Unverified commit e4914734, authored by From00, committed by GitHub

Add some yaml config (#41053)

* Add yaml config

* Add yaml for flatten_contiguous_range_op

* Remove h_sigmoid yaml

* Fix CI errors

* Fix code format

* Fix flatten OP errors

* Fix conflicts

* Fix CI errors

* Remove flatten_contiguous_range OP

* Remove redundant code

* Fix typos
Parent 2ae10efd
......@@ -31,11 +31,11 @@ void HierarchicalSigmoidGradKernelImpl(
const DenseTensor& x,
const DenseTensor& w,
const DenseTensor& label,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
paddle::optional<const DenseTensor&> path,
paddle::optional<const DenseTensor&> code,
paddle::optional<const DenseTensor&> bias,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
int num_classes,
bool remote_prefetch,
int trainer_id,
......
......@@ -25,11 +25,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& w,
const DenseTensor& label,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
paddle::optional<const DenseTensor&> path,
paddle::optional<const DenseTensor&> code,
paddle::optional<const DenseTensor&> bias,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
int num_classes,
bool remote_prefetch,
int trainer_id,
......@@ -44,11 +44,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
x,
w,
label,
pre_out,
out_grad,
path,
code,
bias,
pre_out,
out_grad,
num_classes,
remote_prefetch,
trainer_id,
......
......@@ -23,11 +23,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& w,
const DenseTensor& label,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
paddle::optional<const DenseTensor&> path,
paddle::optional<const DenseTensor&> code,
paddle::optional<const DenseTensor&> bias,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
int num_classes,
bool remote_prefetch,
int trainer_id,
......
......@@ -40,11 +40,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& w,
const DenseTensor& label,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
paddle::optional<const DenseTensor&> path,
paddle::optional<const DenseTensor&> code,
paddle::optional<const DenseTensor&> bias,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
int num_classes,
bool remote_prefetch,
int trainer_id,
......@@ -70,11 +70,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
x,
w,
label,
pre_out,
out_grad,
path,
code,
bias,
pre_out,
out_grad,
num_classes,
remote_prefetch,
trainer_id,
......
......@@ -25,11 +25,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& w,
const DenseTensor& label,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
paddle::optional<const DenseTensor&> path,
paddle::optional<const DenseTensor&> code,
paddle::optional<const DenseTensor&> bias,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
int num_classes,
bool remote_prefetch,
int trainer_id,
......
......@@ -38,11 +38,11 @@ KernelSignature HierarchicalSigmoidGradOpArgumentMapping(
{"X",
"W",
"Label",
"PreOut",
GradVarName("Out"),
"PathTable",
"PathCode",
"Bias"},
"Bias",
"PreOut",
GradVarName("Out")},
{"num_classes",
"remote_prefetch",
"trainer_id",
......@@ -57,11 +57,11 @@ KernelSignature HierarchicalSigmoidGradOpArgumentMapping(
{"X",
"W",
"Label",
"PreOut",
GradVarName("Out"),
"PathTable",
"PathCode",
"Bias"},
"Bias",
"PreOut",
GradVarName("Out")},
{"num_classes",
"remote_prefetch",
"trainer_id",
......
......@@ -13,12 +13,13 @@
# limitations under the License.
import paddle
import paddle.nn.functional as F
from paddle import fluid
import unittest
import numpy as np
import paddle.fluid.dygraph as dg
import paddle.fluid.initializer as I
import numpy as np
import unittest
import paddle.nn.functional as F
from paddle import fluid
from paddle.fluid.framework import _test_eager_guard
from unittest import TestCase
......@@ -159,12 +160,22 @@ class TestFunctionalConv2D(TestCase):
self.place = fluid.CPUPlace()
self._test_identity()
def test_identity_cpu_check_eager(self):
with _test_eager_guard():
self.test_identity_cpu()
@unittest.skipIf(not fluid.core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
def test_identity_gpu(self):
self.place = fluid.CUDAPlace(0)
self._test_identity()
@unittest.skipIf(not fluid.core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
def test_identity_gpu_check_eager(self):
with _test_eager_guard():
self.test_identity_gpu()
class TestFunctionalConv2DError(TestCase):
batch_size = 4
......@@ -520,6 +531,10 @@ class TestFunctionalConv2DErrorCase10(TestCase):
with self.assertRaises(ValueError):
self.dygraph_case()
def test_dygraph_exception_check_eager(self):
with _test_eager_guard():
self.test_dygraph_exception()
def test_static_exception(self):
with self.assertRaises(ValueError):
self.static_graph_case()
......
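The test additions above all follow one pattern: each existing dygraph test gains a `*_check_eager` twin that simply re-runs it inside `_test_eager_guard()`, so the same assertions are exercised under the new eager mode. A minimal sketch of the pattern, with a hypothetical test class (only `_test_eager_guard`, imported exactly as in the diff, is real):

```python
import unittest
from paddle.fluid.framework import _test_eager_guard


class ExampleEagerModeTest(unittest.TestCase):  # hypothetical, mirrors TestFunctionalConv2D
    def test_identity_cpu(self):
        pass  # the original dygraph test body goes here

    def test_identity_cpu_check_eager(self):
        # re-run the legacy test with eager mode switched on
        with _test_eager_guard():
            self.test_identity_cpu()


if __name__ == '__main__':
    unittest.main()
```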
......@@ -13,12 +13,13 @@
# limitations under the License.
import paddle
import paddle.nn.functional as F
from paddle import fluid
import numpy as np
import paddle.fluid.dygraph as dg
import paddle.fluid.initializer as I
import numpy as np
import paddle.nn.functional as F
import unittest
from paddle import fluid
from paddle.fluid.framework import _test_eager_guard
from unittest import TestCase
......@@ -165,12 +166,22 @@ class TestFunctionalConv3DTranspose(TestCase):
self.place = fluid.CPUPlace()
self._test_identity()
def test_identity_cpu_check_eager(self):
with _test_eager_guard():
self.test_identity_cpu()
@unittest.skipIf(not fluid.core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
def test_identity_gpu(self):
self.place = fluid.CUDAPlace(0)
self._test_identity()
@unittest.skipIf(not fluid.core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
def test_identity_gpu_check_eager(self):
with _test_eager_guard():
self.test_identity_gpu()
class TestFunctionalConv3DTransposeError(TestCase):
batch_size = 4
......@@ -540,6 +551,10 @@ class TestFunctionalConv3DTransposeErrorCase10(TestCase):
with self.assertRaises(ValueError):
self.dygraph_case()
def test_dygraph_exception_check_eager(self):
with _test_eager_guard():
self.test_dygraph_exception()
def test_static_exception(self):
with self.assertRaises(ValueError):
self.static_graph_case()
......
......@@ -25,6 +25,7 @@ from paddle.fluid import Program, program_guard
class TestIndexSelectOp(OpTest):
def setUp(self):
self.python_api = paddle.index_select
self.op_type = "index_select"
self.init_dtype_type()
index_np = np.random.randint(
......@@ -54,10 +55,10 @@ class TestIndexSelectOp(OpTest):
self.index_size = 100
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
class TestIndexSelectOpCase2(TestIndexSelectOp):
......
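The OpTest changes above pair two edits: `self.python_api` points the test at the public eager API for the op, and `check_output`/`check_grad` pass `check_eager=True` so the same checks also run through that API. A hedged sketch of the pattern, modelled on `TestIndexSelectOp` (shapes and the `'X'`/`'Index'`/`'dim'` names are illustrative assumptions):

```python
import numpy as np
import paddle
from op_test import OpTest


class ExampleIndexSelectOp(OpTest):  # hypothetical example class
    def setUp(self):
        self.python_api = paddle.index_select  # eager entry point used when check_eager=True
        self.op_type = "index_select"
        x = np.random.random((10, 4)).astype("float64")
        index = np.array([0, 2, 5], dtype="int32")
        self.inputs = {'X': x, 'Index': index}
        self.attrs = {'dim': 0}
        self.outputs = {'Out': x[index]}  # select rows 0, 2, 5 along dim 0

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad_normal(self):
        self.check_grad(['X'], 'Out', check_eager=True)
```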
......@@ -86,8 +86,13 @@ def frobenius_norm(x, axis=None, keepdims=False):
return r
def final_state_frobenius_norm(x, dim, keep_dim, reduce_all):
return paddle.linalg.norm(x, p='fro', axis=dim, keepdim=keep_dim)
class TestFrobeniusNormOp(OpTest):
def setUp(self):
self.python_api = final_state_frobenius_norm
self.op_type = "frobenius_norm"
self.init_test_case()
x = (np.random.random(self.shape) + 1.0).astype(self.dtype)
......@@ -102,10 +107,10 @@ class TestFrobeniusNormOp(OpTest):
self.outputs = {'Out': norm}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
def init_test_case(self):
self.shape = [2, 3, 4, 5]
......@@ -122,7 +127,7 @@ class TestFrobeniusNormOp2(TestFrobeniusNormOp):
self.dtype = "float32"
def test_check_grad(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
class TestPnormOp(OpTest):
......
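The `final_state_frobenius_norm` helper above exists because the op's attribute list (`axis`, `keep_dim`, `reduce_all`) does not line up one-to-one with any single public function, so a small adapter is registered as `python_api`. A usage sketch of the same adapter (input values illustrative):

```python
import numpy as np
import paddle


def final_state_frobenius_norm(x, dim, keep_dim, reduce_all):
    # adapter registered as python_api: maps the op's attribute order
    # onto the public paddle.linalg.norm signature
    return paddle.linalg.norm(x, p='fro', axis=dim, keepdim=keep_dim)


x = paddle.to_tensor(np.random.rand(2, 3, 4).astype("float32"))
print(final_state_frobenius_norm(x, [1, 2], False, False).shape)  # -> [2]
```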
......@@ -12,16 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import unittest
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.nn.functional as F
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
import paddle
import paddle.nn.functional as F
import paddle.fluid as fluid
from paddle.fluid.framework import _test_eager_guard
def adaptive_start_index(index, input_size, output_size):
......@@ -244,6 +243,10 @@ class TestPool1D_API(unittest.TestCase):
self.check_avg_dygraph_padding_same(place)
self.check_max_dygraph_return_index_results(place)
def test_dygraph_final_state_api(self):
with _test_eager_guard():
self.test_pool1d()
class TestPool2DError_API(unittest.TestCase):
def test_error_api(self):
......@@ -370,6 +373,10 @@ class TestPool2DError_API(unittest.TestCase):
self.assertRaises(ValueError, run_stride_out_of_range)
def test_dygraph_final_state_api(self):
with _test_eager_guard():
self.test_error_api()
if __name__ == '__main__':
unittest.main()
......@@ -12,14 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from test_pool2d_op import adaptive_start_index, adaptive_end_index, pool2D_forward_naive, avg_pool2D_forward_naive, max_pool2D_forward_naive
import unittest
from op_test import OpTest
import paddle
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core
from op_test import OpTest
from paddle.fluid.framework import _test_eager_guard
from paddle.nn.functional import avg_pool2d, max_pool2d
import paddle.fluid as fluid
import paddle
from test_pool2d_op import adaptive_start_index, adaptive_end_index, pool2D_forward_naive, avg_pool2D_forward_naive, max_pool2D_forward_naive
class TestPool2D_API(unittest.TestCase):
......@@ -324,6 +325,10 @@ class TestPool2D_API(unittest.TestCase):
self.check_max_dygraph_ceilmode_results(place)
self.check_max_dygraph_nhwc_results(place)
def test_dygraph_final_state_api(self):
with _test_eager_guard():
self.test_pool2d()
class TestPool2DError_API(unittest.TestCase):
def test_error_api(self):
......@@ -524,6 +529,10 @@ class TestPool2DError_API(unittest.TestCase):
self.assertRaises(ValueError, run_stride_out_of_range)
def test_dygraph_final_state_api(self):
with _test_eager_guard():
self.test_error_api()
if __name__ == '__main__':
unittest.main()
......@@ -15,13 +15,15 @@
from __future__ import print_function
from __future__ import division
import paddle
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from op_test import OpTest
import paddle.fluid as fluid
from paddle.fluid.framework import _test_eager_guard
from paddle.nn.functional import avg_pool3d, max_pool3d
from paddle.fluid.framework import _test_eager_guard
from test_pool3d_op import adaptive_start_index, adaptive_end_index, pool3D_forward_naive, avg_pool3D_forward_naive, max_pool3D_forward_naive
......@@ -326,6 +328,10 @@ class TestPool3D_API(unittest.TestCase):
self.check_max_dygraph_ndhwc_results(place)
self.check_max_dygraph_ceilmode_results(place)
def test_dygraph_final_state_api(self):
with _test_eager_guard():
self.test_pool3d()
class TestPool3DError_API(unittest.TestCase):
def test_error_api(self):
......@@ -499,6 +505,10 @@ class TestPool3DError_API(unittest.TestCase):
self.assertRaises(ValueError, run_size_out_of_range)
def test_dygraph_final_state_api(self):
with _test_eager_guard():
self.test_error_api()
if __name__ == '__main__':
unittest.main()
......@@ -25,6 +25,7 @@ from paddle.fluid import Program, program_guard
class TestRollOp(OpTest):
def setUp(self):
self.python_api = paddle.roll
self.op_type = "roll"
self.init_dtype_type()
self.inputs = {'X': np.random.random(self.x_shape).astype(self.dtype)}
......@@ -41,10 +42,10 @@ class TestRollOp(OpTest):
self.axis = [0, -2]
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
class TestRollOpCase2(TestRollOp):
......
......@@ -25,7 +25,7 @@ from op_test import OpTest
class TestSearchSorted(OpTest):
def setUp(self):
self.python_api = paddle.searchsorted
self.op_type = "searchsorted"
self.init_test_case()
......@@ -41,7 +41,7 @@ class TestSearchSorted(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def init_test_case(self):
self.sorted_sequence = np.array([1, 3, 5, 7, 9]).astype("float32")
......
......@@ -28,6 +28,7 @@ class TrilTriuOpDefaultTest(OpTest):
def setUp(self):
self.initTestCase()
self.python_api = paddle.tril if self.real_op_type == 'tril' else paddle.triu
self.real_np_op = getattr(np, self.real_op_type)
self.op_type = "tril_triu"
......@@ -42,10 +43,10 @@ class TrilTriuOpDefaultTest(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out')
self.check_grad(['X'], 'Out', check_eager=True)
def initTestCase(self):
self.real_op_type = np.random.choice(['triu', 'tril'])
......
......@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from paddle.fluid.framework import _global_flags
import numpy as np
from ...device import get_cudnn_version
......@@ -22,15 +21,18 @@ from ...fluid.layers.utils import convert_to_list, _is_symmetric_padding
from ...fluid.data_feeder import check_variable_and_dtype
from ...framework import ParamAttr
from ...fluid.layer_helper import LayerHelper
from paddle import _C_ops
from ...tensor.manipulation import unsqueeze, squeeze
from ...tensor.math import add
from ...fluid.layers import nn
from paddle import _C_ops
from paddle import get_flags
from paddle import in_dynamic_mode
from paddle.device import is_compiled_with_cuda
from paddle.device import is_compiled_with_rocm
from paddle.device import is_compiled_with_npu
from paddle import in_dynamic_mode
from paddle import get_flags
from paddle.device import is_compiled_with_rocm
from paddle.fluid.framework import _global_flags
from paddle.fluid.framework import _in_legacy_dygraph
from paddle.fluid.framework import in_dygraph_mode
__all__ = []
......@@ -1061,7 +1063,17 @@ def conv2d_transpose(x,
op_type = 'depthwise_conv2d_transpose'
use_cudnn = False
if in_dynamic_mode():
if in_dygraph_mode():
final_state_op = _C_ops.final_state_conv2d_transpose if op_type == 'conv2d_transpose' else _C_ops.final_state_depthwise_conv2d_transpose
pre_bias = final_state_op(x, weight, stride, padding, output_padding,
output_size, padding_algorithm, groups,
dilation, data_format)
if bias is not None:
return nn.elementwise_add(pre_bias, bias, axis=channel_dim)
else:
return pre_bias
if _in_legacy_dygraph():
attrs = ('output_padding', output_padding, 'output_size', output_size,
'strides', stride, 'paddings', padding, 'padding_algorithm',
padding_algorithm, 'dilations', dilation, 'groups', groups,
......@@ -1468,7 +1480,16 @@ def conv3d_transpose(x,
op_type = 'conv3d_transpose'
data_format_ = "NHWC" if channel_last else "NCHW"
if in_dynamic_mode():
if in_dygraph_mode():
pre_bias = _C_ops.final_state_conv3d_transpose(
x, weight, stride, padding, output_padding, output_size,
padding_algorithm, groups, dilation, data_format_)
if bias is not None:
return nn.elementwise_add(pre_bias, bias, axis=channel_dim)
else:
return pre_bias
if _in_legacy_dygraph():
attrs = ('output_padding', output_padding, 'output_size', output_size,
'paddings', padding, "padding_algorithm", padding_algorithm,
'strides', stride, 'dilations', dilation, 'groups', groups,
......
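The functional-API edits above (and the pooling ones below) share one dispatch shape: try the new eager branch first, then the legacy dygraph branch, then fall through to the static-graph path. A hedged sketch of that control flow for a hypothetical op `foo` (the `final_state_*` names are generated from the yaml entries added in this commit; `foo` itself is made up):

```python
from paddle import _C_ops
from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode


def foo(x, attr):  # hypothetical public API
    if in_dygraph_mode():
        # new eager mode: generated final-state op, positional args in yaml order
        return _C_ops.final_state_foo(x, attr)
    if _in_legacy_dygraph():
        # legacy dygraph: attributes passed as name/value pairs
        return _C_ops.foo(x, 'attr', attr)
    # static graph: build the op via LayerHelper/append_op as before
    raise NotImplementedError("static-graph path elided in this sketch")
```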
......@@ -18,6 +18,8 @@ from ...tensor.manipulation import unsqueeze, squeeze
from ...fluid.data_feeder import check_type, check_variable_and_dtype
from paddle import _C_ops
from paddle import in_dynamic_mode
from paddle.fluid.framework import _in_legacy_dygraph
from paddle.fluid.framework import in_dygraph_mode
__all__ = []
......@@ -344,13 +346,18 @@ def avg_pool2d(x,
padding, padding_algorithm = _update_padding_nd(
padding, 2, channel_last, ceil_mode=ceil_mode)
if in_dynamic_mode():
output = _C_ops.pool2d(x, 'pooling_type', 'avg', 'ksize', kernel_size,
'global_pooling', False, 'padding_algorithm',
padding_algorithm, 'strides', stride, 'paddings',
padding, 'use_cudnn', True, 'ceil_mode',
ceil_mode, 'use_mkldnn', False, 'exclusive',
exclusive, 'data_format', data_format)
if in_dygraph_mode() or _in_legacy_dygraph():
if in_dygraph_mode():
output = _C_ops.final_state_pool2d(
x, kernel_size, stride, padding, ceil_mode, exclusive,
data_format, 'avg', False, False, padding_algorithm)
else:
output = _C_ops.pool2d(
x, 'pooling_type', 'avg', 'ksize', kernel_size,
'global_pooling', False, 'padding_algorithm', padding_algorithm,
'strides', stride, 'paddings', padding, 'use_cudnn', True,
'ceil_mode', ceil_mode, 'use_mkldnn', False, 'exclusive',
exclusive, 'data_format', data_format)
if divisor_override is None:
return output
else:
......@@ -466,13 +473,18 @@ def avg_pool3d(x,
_check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3)
_check_value_limitation(stride, "stride", min_limit=1e-3)
if in_dynamic_mode():
output = _C_ops.pool3d(
x, 'pooling_type', 'avg', 'ksize', kernel_size, 'strides', stride,
'paddings', padding, 'global_pooling', False, 'padding_algorithm',
padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode,
'use_mkldnn', False, 'exclusive', exclusive, 'data_format',
data_format)
if in_dygraph_mode() or _in_legacy_dygraph():
if in_dygraph_mode():
output = _C_ops.final_state_pool3d(
x, kernel_size, stride, padding, ceil_mode, exclusive,
data_format, 'avg', False, False, padding_algorithm)
if _in_legacy_dygraph():
output = _C_ops.pool3d(
x, 'pooling_type', 'avg', 'ksize', kernel_size, 'strides',
stride, 'paddings', padding, 'global_pooling', False,
'padding_algorithm', padding_algorithm, 'use_cudnn', True,
'ceil_mode', ceil_mode, 'use_mkldnn', False, 'exclusive',
exclusive, 'data_format', data_format)
if divisor_override is None:
return output
else:
......@@ -585,7 +597,20 @@ def max_pool1d(x,
# use 2d to implement 1d; should expand padding in advance.
padding = _expand_low_nd_padding(padding)
if in_dynamic_mode():
if in_dygraph_mode():
if return_mask:
pool_out = _C_ops.final_state_max_pool2d_with_index(
x, kernel_size, stride, padding, False, False)
return (squeeze(pool_out[0], [2]),
squeeze(pool_out[1],
[2])) if return_mask else squeeze(pool_out[0], [2])
else:
pool_out = _C_ops.final_state_pool2d(
x, kernel_size, stride, padding, ceil_mode, True, data_format,
'max', False, False, padding_algorithm)
return squeeze(pool_out, [2])
if _in_legacy_dygraph():
if return_mask:
pool_out = _C_ops.max_pool2d_with_index(
x, 'ksize', kernel_size, 'global_pooling', False, 'strides',
......@@ -1027,7 +1052,17 @@ def max_pool2d(x,
"When setting return_mask to true, data_format must be set to NCHW in API:max_pool2d"
)
if in_dynamic_mode():
if in_dygraph_mode():
if return_mask:
output = _C_ops.final_state_max_pool2d_with_index(
x, kernel_size, stride, padding, False, False)
return output if return_mask else output[0]
else:
return _C_ops.final_state_pool2d(
x, kernel_size, stride, padding, ceil_mode, True, data_format,
'max', False, False, padding_algorithm)
if _in_legacy_dygraph():
if return_mask:
output = _C_ops.max_pool2d_with_index(
x, 'ksize', kernel_size, 'global_pooling', False, 'strides',
......@@ -1158,7 +1193,17 @@ def max_pool3d(x,
"When setting return_mask to true, data_format must be set to NCDHW in API:max_pool3d"
)
if in_dynamic_mode():
if in_dygraph_mode():
if return_mask:
output = _C_ops.final_state_max_pool3d_with_index(
x, kernel_size, stride, padding, False, False)
return output if return_mask else output[0]
else:
return _C_ops.final_state_pool3d(
x, kernel_size, stride, padding, ceil_mode, True, data_format,
'max', False, False, padding_algorithm)
if _in_legacy_dygraph():
if return_mask:
output = _C_ops.max_pool3d_with_index(
x, 'pooling_type', 'max', 'ksize', kernel_size, 'strides',
......@@ -1355,11 +1400,15 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
if output_size[1] == None:
output_size[1] = in_w
if in_dynamic_mode():
output = _C_ops.pool2d(x, 'pooling_type', 'avg', 'ksize', output_size,
'global_pooling', False, 'adaptive', True,
'data_format', data_format)
return output
if in_dygraph_mode():
return _C_ops.final_state_pool2d(x, output_size, [1, 1], [0, 0], False,
True, data_format, 'avg', False, True,
"EXPLICIT")
if _in_legacy_dygraph():
return _C_ops.pool2d(x, 'pooling_type', 'avg', 'ksize', output_size,
'global_pooling', False, 'adaptive', True,
'data_format', data_format)
l_type = 'pool2d'
......@@ -1462,10 +1511,9 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
output_size[2] = in_w
if in_dynamic_mode():
output = _C_ops.pool3d(x, 'pooling_type', 'avg', 'ksize', output_size,
'global_pooling', False, 'adaptive', True,
'data_format', data_format)
return output
return _C_ops.pool3d(x, 'pooling_type', 'avg', 'ksize', output_size,
'global_pooling', False, 'adaptive', True,
'data_format', data_format)
l_type = 'pool3d'
......
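The `final_state_pool2d`/`final_state_pool3d` calls above take their arguments positionally in the order declared by the `pool2d`/`pool3d` yaml entries further down (`x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm`). A sketch of the two call styles for an average pool, guarded the same way as the code above (op names as in the diff, values illustrative):

```python
import paddle
from paddle import _C_ops
from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode

x = paddle.rand([1, 3, 8, 8])

if in_dygraph_mode():
    # eager call: positional args in the order of the pool2d yaml entry
    out = _C_ops.final_state_pool2d(x, [2, 2], [2, 2], [0, 0], False, True,
                                    'NCHW', 'avg', False, False, 'EXPLICIT')
elif _in_legacy_dygraph():
    # legacy call: attributes as name/value pairs
    out = _C_ops.pool2d(x, 'pooling_type', 'avg', 'ksize', [2, 2],
                        'global_pooling', False, 'padding_algorithm', 'EXPLICIT',
                        'strides', [2, 2], 'paddings', [0, 0],
                        'use_cudnn', False, 'ceil_mode', False,
                        'use_mkldnn', False, 'exclusive', True,
                        'data_format', 'NCHW')

print(out.shape)  # [1, 3, 4, 4]
```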
......@@ -661,7 +661,10 @@ def tril(x, diagonal=0, name=None):
# [ 9, 10, 0, 0]])
"""
if paddle.in_dynamic_mode():
if in_dygraph_mode():
return _C_ops.final_state_tril_triu(x, diagonal, True)
if _in_legacy_dygraph():
op = getattr(_C_ops, 'tril_triu')
return op(x, 'diagonal', diagonal, "lower", True)
......@@ -728,7 +731,10 @@ def triu(x, diagonal=0, name=None):
# [ 0, 10, 11, 12]])
"""
if paddle.in_dynamic_mode():
if in_dygraph_mode():
return _C_ops.final_state_tril_triu(x, diagonal, False)
if _in_legacy_dygraph():
op = getattr(_C_ops, 'tril_triu')
return op(x, 'diagonal', diagonal, "lower", False)
......
......@@ -254,7 +254,12 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
raise ValueError(
"The dim of frobenius norm op should be None or two elements list!"
)
if paddle.in_dynamic_mode():
if in_dygraph_mode():
if dim is None:
return _C_ops.final_state_frobenius_norm(input, keepdim, True)
return _C_ops.final_state_frobenius_norm(input, dim, keepdim, False)
if _in_legacy_dygraph():
if dim is None:
return _C_ops.frobenius_norm(input, 'keep_dim', keepdim,
'reduce_all', True)
......
......@@ -796,7 +796,10 @@ def roll(x, shifts, axis=None, name=None):
else:
axis = []
if paddle.in_dynamic_mode():
if in_dygraph_mode():
return _C_ops.final_state_roll(x, shifts, axis)
if _in_legacy_dygraph():
return _C_ops.roll(x, 'axis', axis, 'shifts', shifts)
helper = LayerHelper("roll", **locals())
......
......@@ -319,7 +319,10 @@ def index_select(x, index, axis=0, name=None):
# [ 9. 10. 10.]]
"""
if paddle.in_dynamic_mode():
if in_dygraph_mode():
return _C_ops.final_state_index_select(x, index, axis)
if _in_legacy_dygraph():
return _C_ops.index_select(x, index, 'dim', axis)
helper = LayerHelper("index_select", **locals())
......@@ -946,8 +949,11 @@ def searchsorted(sorted_sequence,
# [1, 3, 4, 5]])
"""
if in_dygraph_mode():
return _C_ops.final_state_searchsorted(sorted_sequence, values,
out_int32, right)
if paddle.in_dynamic_mode():
if _in_legacy_dygraph():
return _C_ops.searchsorted(sorted_sequence, values, "out_int32",
out_int32, "right", right)
......
......@@ -306,6 +306,24 @@
kernel :
func : conj
- api : conv2d_transpose
args : (Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output : Tensor(out)
infer_meta :
func : ConvTransposeInferMeta
kernel :
func : conv2d_transpose
backward : conv2d_transpose_grad
- api : conv3d_transpose
args : (Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output : Tensor(out)
infer_meta :
func : ConvTransposeInferMeta
kernel :
func : conv3d_transpose
backward : conv3d_transpose_grad
- api : copy_to
args : (Tensor x, Place place, bool blocking)
output : Tensor
......@@ -359,6 +377,15 @@
kernel :
func : cumsum
- api : depthwise_conv2d_transpose
args : (Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output : Tensor(out)
infer_meta :
func : ConvTransposeInferMeta
kernel :
func : depthwise_conv2d_transpose
backward : depthwise_conv2d_transpose_grad
- api : diag
args : (Tensor x, int offset, float padding_value)
output : Tensor
......@@ -558,6 +585,15 @@
func : fmin
backward : fmin_grad
- api : frobenius_norm
args : (Tensor x, int64_t[] axis, bool keep_dim, bool reduce_all)
output : Tensor(out)
infer_meta :
func : ReduceInferMetaBase
kernel :
func : frobenius_norm
backward : frobenius_norm_grad
- api : full
args : (IntArray shape, Scalar value, DataType dtype=DataType::FLOAT32, Place place=CPUPlace())
output: Tensor
......@@ -695,6 +731,16 @@
backward : index_sample_grad
# no_need_buffer : x
- api : index_select
args : (Tensor x, Tensor index, int dim)
output : Tensor(out)
infer_meta :
func : IndexSelectInferMeta
kernel :
func : index_select
data_type : x
backward : index_select_grad
# is_empty
- api : is_empty
args : (Tensor x)
......@@ -954,6 +1000,24 @@
func : max
backward : max_grad
- api : max_pool2d_with_index
args : (Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive)
output : Tensor(out), Tensor(mask)
infer_meta :
func : MaxPoolWithIndexInferMeta
kernel :
func : max_pool2d_with_index
backward : max_pool2d_with_index_grad
- api : max_pool3d_with_index
args : (Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive)
output : Tensor(out), Tensor(mask)
infer_meta :
func : MaxPoolWithIndexInferMeta
kernel :
func : max_pool3d_with_index
backward : max_pool3d_with_index_grad
- api : maximum
args : (Tensor x, Tensor y)
output : Tensor(out)
......@@ -1129,8 +1193,18 @@
output : Tensor(out)
infer_meta :
func : PoolInferMeta
kernel:
kernel :
func : pool2d
backward : pool2d_grad
- api : pool3d
args : (Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
output : Tensor(out)
infer_meta :
func : PoolInferMeta
kernel :
func : pool3d
backward : pool3d_grad
- api : prelu
args : (Tensor x, Tensor alpha, str data_format, str mode)
......@@ -1194,6 +1268,15 @@
intermediate : xshape
backward: reshape_grad
- api : roll
args : (Tensor x, IntArray shifts, int64_t[] axis)
output : Tensor(out)
infer_meta :
func : RollInferMeta
kernel :
func : roll
backward : roll_grad
- api : round
args : (Tensor x)
output : Tensor(out)
......@@ -1235,6 +1318,14 @@
backward : scatter_nd_add_grad
# no_need_buffer : updates
- api : searchsorted
args : (Tensor sorted_sequence, Tensor value, bool out_int32, bool right)
output : Tensor(out)
infer_meta :
func : SearchsortedInferMeta
kernel :
func : searchsorted
# segment_pool
- api : segment_pool
args : (Tensor x, Tensor segment_ids, str pooltype)
......@@ -1522,6 +1613,15 @@
func : triangular_solve
# backward : triangular_solve_grad
- api : tril_triu
args : (Tensor x, int diagonal, bool lower)
output : Tensor(out)
infer_meta :
func : TrilTriuInferMeta
kernel :
func : tril_triu
backward : tril_triu_grad
- api : trunc
args : (Tensor x)
output : Tensor
......
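Each yaml entry above declares the signature of a generated eager op: `args` fixes the positional order, `infer_meta` and `kernel` name the Phi functions to call, and `backward` links to an entry in the backward yaml below. As a usage sketch, the `tril_triu` entry corresponds to the `_C_ops.final_state_tril_triu(x, diagonal, lower)` call used in the `tril`/`triu` changes above (values illustrative; the exact `_C_ops` surface depends on the build):

```python
import paddle
from paddle import _C_ops
from paddle.fluid.framework import in_dygraph_mode

x = paddle.arange(1, 13, dtype='int64').reshape([3, 4])
if in_dygraph_mode():
    # args follow the yaml order: (Tensor x, int diagonal, bool lower)
    lower = _C_ops.final_state_tril_triu(x, 0, True)  # same result as paddle.tril(x)
    print((lower == paddle.tril(x)).all())            # True
```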
......@@ -710,9 +710,9 @@ PADDLE_API {self.gene_return_type_code()} {self.get_api_func_name() + '_'}({self
self.outputs['types'], 'SetKernelOutput', code_indent, inplace_flag)
api_func_name = self.get_api_func_name() + ('_' if inplace_flag else '')
return f"""
{code_indent} VLOG(6) << "{self.api} API kernel key: [" << kernel_backend << ", " << kernel_layout << ", "<< kernel_data_type << "]";
{code_indent} const auto& kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
{code_indent} "{self.kernel['func'][0]}", {{kernel_backend, kernel_layout, kernel_data_type}});
{code_indent} VLOG(6) << "{self.api} API kernel key: [" << kernel_backend << ", " << kernel_layout << ", "<< kernel_data_type << "]";
{code_indent} VLOG(6) << "{self.api} API kernel: " << kernel;
{code_indent} auto* dev_ctx = GetDeviceContextByBackend(kernel_backend);
......
......@@ -172,6 +172,24 @@
kernel :
func : cholesky_solve_grad
- backward_api : conv2d_transpose_grad
forward : conv2d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out)
args : (Tensor x, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output : Tensor(x_grad), Tensor(filter_grad)
infer_meta :
func : ConvTransposeGradInferMeta
kernel :
func : conv2d_transpose_grad
- backward_api : conv3d_transpose_grad
forward : conv3d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out)
args : (Tensor x, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output : Tensor(x_grad), Tensor(filter_grad)
infer_meta :
func : ConvTransposeGradInferMeta
kernel :
func : conv3d_transpose_grad
- backward_api : cos_grad
forward : cos (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
......@@ -221,6 +239,15 @@
# kernel :
# func : gumbel_softmax_grad
- backward_api : depthwise_conv2d_transpose_grad
forward : depthwise_conv2d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out)
args : (Tensor x, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output : Tensor(x_grad), Tensor(filter_grad)
infer_meta :
func : ConvTransposeGradInferMeta
kernel :
func : depthwise_conv2d_transpose_grad
- backward_api : diagonal_grad
forward : diagonal (Tensor x, int offset, int axis1, int axis2) -> Tensor(out)
args : (Tensor x, Tensor out_grad, int offset = 0, int axis1 = 0, int axis2 = 1)
......@@ -352,6 +379,16 @@
kernel :
func : fmin_grad
- backward_api : frobenius_norm_grad
forward : frobenius_norm(Tensor x, int64_t[] axis, bool keep_dim, bool reduce_all) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] axis, bool keep_dim, bool reduce_all)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : frobenius_norm_grad
- backward_api : gather_nd_grad
forward : gather_nd (Tensor x, Tensor index) -> Tensor(out)
args : (Tensor x, Tensor index, Tensor out_grad)
......@@ -403,6 +440,17 @@
func : index_sample_grad
data_type : out_grad
- backward_api : index_select_grad
forward : index_select(Tensor x, Tensor index, int dim) -> Tensor(out)
args : (Tensor x, Tensor index, Tensor out_grad, int dim)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : index_select_grad
data_type : x
- backward_api : kldiv_loss_grad
forward : kldiv_loss(Tensor x, Tensor label, str reduction) -> Tensor(out)
args : (Tensor x, Tensor label, Tensor out_grad, str reduction)
......@@ -597,6 +645,24 @@
kernel :
func : max_grad
- backward_api : max_pool2d_with_index_grad
forward : max_pool2d_with_index(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive) -> Tensor(out), Tensor(mask)
args : (Tensor x, Tensor mask, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive)
output : Tensor(x_grad)
infer_meta :
func : MaxPoolWithIndexGradInferMeta
kernel :
func : max_pool2d_with_index_grad
- backward_api : max_pool3d_with_index_grad
forward : max_pool3d_with_index(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive) -> Tensor(out), Tensor(mask)
args : (Tensor x, Tensor mask, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive)
output : Tensor(x_grad)
infer_meta :
func : MaxPoolWithIndexGradInferMeta
kernel :
func : max_pool3d_with_index_grad
- backward_api : maximum_grad
forward : maximum(Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis=-1)
......@@ -719,6 +785,24 @@
kernel :
func : pad3d_grad
- backward_api : pool2d_grad
forward : pool2d(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
output : Tensor(x_grad)
infer_meta :
func : PoolGradInferMeta
kernel :
func : pool2d_grad
- backward_api : pool3d_grad
forward : pool3d(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
output : Tensor(x_grad)
infer_meta :
func : PoolGradInferMeta
kernel :
func : pool3d_grad
- backward_api : prelu_grad
forward : prelu(Tensor x, Tensor alpha, str data_format, str mode) -> Tensor(out)
args : (Tensor x, Tensor alpha, Tensor out_grad, str data_format, str mode)
......@@ -806,6 +890,17 @@
backend: out_grad
layout: out_grad
- backward_api : roll_grad
forward : roll(Tensor x, IntArray shifts, int64_t[] axis) -> Tensor(out)
args : (Tensor x, Tensor out_grad, IntArray shifts, int64_t[] axis)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : roll_grad
data_type : x
- backward_api : round_grad
forward : round(Tensor x) -> Tensor(out)
args : (Tensor out_grad)
......@@ -1079,6 +1174,16 @@
kernel :
func : transpose_grad
- backward_api : tril_triu_grad
forward : tril_triu(Tensor x, int diagonal, bool lower) -> Tensor(out)
args : (Tensor out_grad, int diagonal, bool lower)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [out_grad]
kernel :
func : tril_triu_grad
- backward_api : trunc_grad
forward : trunc (Tensor x) -> Tensor(out)
args : (Tensor out_grad)
......
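The backward entries mirror their forward declarations: the grad `args` list names which forward tensors (for example `x` and `out` in `pool2d_grad`) plus the incoming `out_grad` the grad kernel consumes, and `infer_meta`/`kernel` point at the corresponding Phi grad functions. One quick way to exercise these paths from Python is a dygraph backward pass through an updated API (sketch only; whether the eager or legacy kernel runs depends on the active mode):

```python
import paddle
import paddle.nn.functional as F

x = paddle.rand([1, 3, 8, 8])
x.stop_gradient = False
y = F.max_pool2d(x, kernel_size=2, stride=2)
y.sum().backward()    # routes through pool2d_grad (or max_pool2d_with_index_grad if return_mask=True)
print(x.grad.shape)   # [1, 3, 8, 8]
```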