Unverified commit e4914734, authored by From00, committed by GitHub

Add some yaml config (#41053)

* Add yaml config

* Add yaml for flatten_contiguous_range_op

* Remove h_sigmoid yaml

* Fix CI errors

* Fix code format

* Fix flatten OP errors

* Fix conflicts

* Fix CI errors

* Remove flatten_contiguous_range OP

* Remove redundant code

* Fix typos
Parent 2ae10efd
...@@ -31,11 +31,11 @@ void HierarchicalSigmoidGradKernelImpl( ...@@ -31,11 +31,11 @@ void HierarchicalSigmoidGradKernelImpl(
const DenseTensor& x, const DenseTensor& x,
const DenseTensor& w, const DenseTensor& w,
const DenseTensor& label, const DenseTensor& label,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
paddle::optional<const DenseTensor&> path, paddle::optional<const DenseTensor&> path,
paddle::optional<const DenseTensor&> code, paddle::optional<const DenseTensor&> code,
paddle::optional<const DenseTensor&> bias, paddle::optional<const DenseTensor&> bias,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
int num_classes, int num_classes,
bool remote_prefetch, bool remote_prefetch,
int trainer_id, int trainer_id,
......
...@@ -25,11 +25,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx, ...@@ -25,11 +25,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
const DenseTensor& x, const DenseTensor& x,
const DenseTensor& w, const DenseTensor& w,
const DenseTensor& label, const DenseTensor& label,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
paddle::optional<const DenseTensor&> path, paddle::optional<const DenseTensor&> path,
paddle::optional<const DenseTensor&> code, paddle::optional<const DenseTensor&> code,
paddle::optional<const DenseTensor&> bias, paddle::optional<const DenseTensor&> bias,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
int num_classes, int num_classes,
bool remote_prefetch, bool remote_prefetch,
int trainer_id, int trainer_id,
...@@ -44,11 +44,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx, ...@@ -44,11 +44,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
x, x,
w, w,
label, label,
pre_out,
out_grad,
path, path,
code, code,
bias, bias,
pre_out,
out_grad,
num_classes, num_classes,
remote_prefetch, remote_prefetch,
trainer_id, trainer_id,
......
...@@ -23,11 +23,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx, ...@@ -23,11 +23,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
const DenseTensor& x, const DenseTensor& x,
const DenseTensor& w, const DenseTensor& w,
const DenseTensor& label, const DenseTensor& label,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
paddle::optional<const DenseTensor&> path, paddle::optional<const DenseTensor&> path,
paddle::optional<const DenseTensor&> code, paddle::optional<const DenseTensor&> code,
paddle::optional<const DenseTensor&> bias, paddle::optional<const DenseTensor&> bias,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
int num_classes, int num_classes,
bool remote_prefetch, bool remote_prefetch,
int trainer_id, int trainer_id,
......
...@@ -40,11 +40,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx, ...@@ -40,11 +40,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
const DenseTensor& x, const DenseTensor& x,
const DenseTensor& w, const DenseTensor& w,
const DenseTensor& label, const DenseTensor& label,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
paddle::optional<const DenseTensor&> path, paddle::optional<const DenseTensor&> path,
paddle::optional<const DenseTensor&> code, paddle::optional<const DenseTensor&> code,
paddle::optional<const DenseTensor&> bias, paddle::optional<const DenseTensor&> bias,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
int num_classes, int num_classes,
bool remote_prefetch, bool remote_prefetch,
int trainer_id, int trainer_id,
...@@ -70,11 +70,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx, ...@@ -70,11 +70,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
x, x,
w, w,
label, label,
pre_out,
out_grad,
path, path,
code, code,
bias, bias,
pre_out,
out_grad,
num_classes, num_classes,
remote_prefetch, remote_prefetch,
trainer_id, trainer_id,
......
...@@ -25,11 +25,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx, ...@@ -25,11 +25,11 @@ void HierarchicalSigmoidGradKernel(const Context& ctx,
const DenseTensor& x, const DenseTensor& x,
const DenseTensor& w, const DenseTensor& w,
const DenseTensor& label, const DenseTensor& label,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
paddle::optional<const DenseTensor&> path, paddle::optional<const DenseTensor&> path,
paddle::optional<const DenseTensor&> code, paddle::optional<const DenseTensor&> code,
paddle::optional<const DenseTensor&> bias, paddle::optional<const DenseTensor&> bias,
const DenseTensor& pre_out,
const DenseTensor& out_grad,
int num_classes, int num_classes,
bool remote_prefetch, bool remote_prefetch,
int trainer_id, int trainer_id,
......
...@@ -38,11 +38,11 @@ KernelSignature HierarchicalSigmoidGradOpArgumentMapping( ...@@ -38,11 +38,11 @@ KernelSignature HierarchicalSigmoidGradOpArgumentMapping(
{"X", {"X",
"W", "W",
"Label", "Label",
"PreOut",
GradVarName("Out"),
"PathTable", "PathTable",
"PathCode", "PathCode",
"Bias"}, "Bias",
"PreOut",
GradVarName("Out")},
{"num_classes", {"num_classes",
"remote_prefetch", "remote_prefetch",
"trainer_id", "trainer_id",
...@@ -57,11 +57,11 @@ KernelSignature HierarchicalSigmoidGradOpArgumentMapping( ...@@ -57,11 +57,11 @@ KernelSignature HierarchicalSigmoidGradOpArgumentMapping(
{"X", {"X",
"W", "W",
"Label", "Label",
"PreOut",
GradVarName("Out"),
"PathTable", "PathTable",
"PathCode", "PathCode",
"Bias"}, "Bias",
"PreOut",
GradVarName("Out")},
{"num_classes", {"num_classes",
"remote_prefetch", "remote_prefetch",
"trainer_id", "trainer_id",
......
...@@ -13,12 +13,13 @@ ...@@ -13,12 +13,13 @@
# limitations under the License. # limitations under the License.
import paddle import paddle
import paddle.nn.functional as F import unittest
from paddle import fluid import numpy as np
import paddle.fluid.dygraph as dg import paddle.fluid.dygraph as dg
import paddle.fluid.initializer as I import paddle.fluid.initializer as I
import numpy as np import paddle.nn.functional as F
import unittest from paddle import fluid
from paddle.fluid.framework import _test_eager_guard
from unittest import TestCase from unittest import TestCase
...@@ -159,12 +160,22 @@ class TestFunctionalConv2D(TestCase): ...@@ -159,12 +160,22 @@ class TestFunctionalConv2D(TestCase):
self.place = fluid.CPUPlace() self.place = fluid.CPUPlace()
self._test_identity() self._test_identity()
def test_identity_cpu_check_eager(self):
with _test_eager_guard():
self.test_identity_cpu()
@unittest.skipIf(not fluid.core.is_compiled_with_cuda(), @unittest.skipIf(not fluid.core.is_compiled_with_cuda(),
"core is not compiled with CUDA") "core is not compiled with CUDA")
def test_identity_gpu(self): def test_identity_gpu(self):
self.place = fluid.CUDAPlace(0) self.place = fluid.CUDAPlace(0)
self._test_identity() self._test_identity()
@unittest.skipIf(not fluid.core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
def test_identity_gpu_check_eager(self):
with _test_eager_guard():
self.test_identity_gpu()
class TestFunctionalConv2DError(TestCase): class TestFunctionalConv2DError(TestCase):
batch_size = 4 batch_size = 4
...@@ -520,6 +531,10 @@ class TestFunctionalConv2DErrorCase10(TestCase): ...@@ -520,6 +531,10 @@ class TestFunctionalConv2DErrorCase10(TestCase):
with self.assertRaises(ValueError): with self.assertRaises(ValueError):
self.dygraph_case() self.dygraph_case()
def test_dygraph_exception_check_eager(self):
with _test_eager_guard():
self.test_dygraph_exception()
def test_static_exception(self): def test_static_exception(self):
with self.assertRaises(ValueError): with self.assertRaises(ValueError):
self.static_graph_case() self.static_graph_case()
......
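The same three-line addition recurs across these test files: each existing dygraph test gains a `*_check_eager` twin that re-runs it under `_test_eager_guard()`. A minimal sketch of that pattern (the test body is a placeholder; only the wrapping is the point):

```python
# Minimal sketch of the eager-guard pattern added throughout these tests.
import unittest
from paddle.fluid.framework import _test_eager_guard


class EagerGuardPatternSketch(unittest.TestCase):
    def test_identity_cpu(self):
        pass  # stands in for the original dygraph test body

    def test_identity_cpu_check_eager(self):
        # Re-run the exact same test with eager (final-state) mode enabled.
        with _test_eager_guard():
            self.test_identity_cpu()


if __name__ == '__main__':
    unittest.main()
```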
...@@ -13,12 +13,13 @@ ...@@ -13,12 +13,13 @@
# limitations under the License. # limitations under the License.
import paddle import paddle
import paddle.nn.functional as F import numpy as np
from paddle import fluid
import paddle.fluid.dygraph as dg import paddle.fluid.dygraph as dg
import paddle.fluid.initializer as I import paddle.fluid.initializer as I
import numpy as np import paddle.nn.functional as F
import unittest import unittest
from paddle import fluid
from paddle.fluid.framework import _test_eager_guard
from unittest import TestCase from unittest import TestCase
...@@ -165,12 +166,22 @@ class TestFunctionalConv3DTranspose(TestCase): ...@@ -165,12 +166,22 @@ class TestFunctionalConv3DTranspose(TestCase):
self.place = fluid.CPUPlace() self.place = fluid.CPUPlace()
self._test_identity() self._test_identity()
def test_identity_cpu_check_eager(self):
with _test_eager_guard():
self.test_identity_cpu()
@unittest.skipIf(not fluid.core.is_compiled_with_cuda(), @unittest.skipIf(not fluid.core.is_compiled_with_cuda(),
"core is not compiled with CUDA") "core is not compiled with CUDA")
def test_identity_gpu(self): def test_identity_gpu(self):
self.place = fluid.CUDAPlace(0) self.place = fluid.CUDAPlace(0)
self._test_identity() self._test_identity()
@unittest.skipIf(not fluid.core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
def test_identity_gpu_check_eager(self):
with _test_eager_guard():
self.test_identity_gpu()
class TestFunctionalConv3DTransposeError(TestCase): class TestFunctionalConv3DTransposeError(TestCase):
batch_size = 4 batch_size = 4
...@@ -540,6 +551,10 @@ class TestFunctionalConv3DTransposeErrorCase10(TestCase): ...@@ -540,6 +551,10 @@ class TestFunctionalConv3DTransposeErrorCase10(TestCase):
with self.assertRaises(ValueError): with self.assertRaises(ValueError):
self.dygraph_case() self.dygraph_case()
def test_dygraph_exception_check_eager(self):
with _test_eager_guard():
self.test_dygraph_exception()
def test_static_exception(self): def test_static_exception(self):
with self.assertRaises(ValueError): with self.assertRaises(ValueError):
self.static_graph_case() self.static_graph_case()
......
...@@ -25,6 +25,7 @@ from paddle.fluid import Program, program_guard ...@@ -25,6 +25,7 @@ from paddle.fluid import Program, program_guard
class TestIndexSelectOp(OpTest): class TestIndexSelectOp(OpTest):
def setUp(self): def setUp(self):
self.python_api = paddle.index_select
self.op_type = "index_select" self.op_type = "index_select"
self.init_dtype_type() self.init_dtype_type()
index_np = np.random.randint( index_np = np.random.randint(
...@@ -54,10 +55,10 @@ class TestIndexSelectOp(OpTest): ...@@ -54,10 +55,10 @@ class TestIndexSelectOp(OpTest):
self.index_size = 100 self.index_size = 100
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X'], 'Out') self.check_grad(['X'], 'Out', check_eager=True)
class TestIndexSelectOpCase2(TestIndexSelectOp): class TestIndexSelectOpCase2(TestIndexSelectOp):
......
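Hooking an OpTest up to the eager path takes two steps, both visible above: set `self.python_api` to the public API the final-state op should match, and pass `check_eager=True` to the output and gradient checks. A condensed sketch of the index_select case; the shapes and values here are illustrative, not taken from the real test:

```python
# Condensed sketch of wiring index_select's OpTest to the eager path.
import numpy as np
import paddle
from op_test import OpTest  # Paddle's unit-test base class for operators


class IndexSelectEagerSketch(OpTest):
    def setUp(self):
        self.python_api = paddle.index_select  # used when check_eager=True
        self.op_type = "index_select"
        x = np.random.random((10, 4)).astype("float64")
        index = np.array([0, 2, 5], dtype="int32")
        self.inputs = {'X': x, 'Index': index}
        self.attrs = {'dim': 0}
        self.outputs = {'Out': x[index]}  # select rows along axis 0

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad_normal(self):
        self.check_grad(['X'], 'Out', check_eager=True)
```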
...@@ -86,8 +86,13 @@ def frobenius_norm(x, axis=None, keepdims=False): ...@@ -86,8 +86,13 @@ def frobenius_norm(x, axis=None, keepdims=False):
return r return r
def final_state_frobenius_norm(x, dim, keep_dim, reduce_all):
return paddle.linalg.norm(x, p='fro', axis=dim, keepdim=keep_dim)
class TestFrobeniusNormOp(OpTest): class TestFrobeniusNormOp(OpTest):
def setUp(self): def setUp(self):
self.python_api = final_state_frobenius_norm
self.op_type = "frobenius_norm" self.op_type = "frobenius_norm"
self.init_test_case() self.init_test_case()
x = (np.random.random(self.shape) + 1.0).astype(self.dtype) x = (np.random.random(self.shape) + 1.0).astype(self.dtype)
...@@ -102,10 +107,10 @@ class TestFrobeniusNormOp(OpTest): ...@@ -102,10 +107,10 @@ class TestFrobeniusNormOp(OpTest):
self.outputs = {'Out': norm} self.outputs = {'Out': norm}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out') self.check_grad(['X'], 'Out', check_eager=True)
def init_test_case(self): def init_test_case(self):
self.shape = [2, 3, 4, 5] self.shape = [2, 3, 4, 5]
...@@ -122,7 +127,7 @@ class TestFrobeniusNormOp2(TestFrobeniusNormOp): ...@@ -122,7 +127,7 @@ class TestFrobeniusNormOp2(TestFrobeniusNormOp):
self.dtype = "float32" self.dtype = "float32"
def test_check_grad(self): def test_check_grad(self):
self.check_grad(['X'], 'Out') self.check_grad(['X'], 'Out', check_eager=True)
class TestPnormOp(OpTest): class TestPnormOp(OpTest):
......
...@@ -12,16 +12,15 @@ ...@@ -12,16 +12,15 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import numpy as np import paddle
import unittest import unittest
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.nn.functional as F
import numpy as np import numpy as np
from op_test import OpTest from op_test import OpTest
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard from paddle.fluid import compiler, Program, program_guard
import paddle from paddle.fluid.framework import _test_eager_guard
import paddle.nn.functional as F
import paddle.fluid as fluid
def adaptive_start_index(index, input_size, output_size): def adaptive_start_index(index, input_size, output_size):
...@@ -244,6 +243,10 @@ class TestPool1D_API(unittest.TestCase): ...@@ -244,6 +243,10 @@ class TestPool1D_API(unittest.TestCase):
self.check_avg_dygraph_padding_same(place) self.check_avg_dygraph_padding_same(place)
self.check_max_dygraph_return_index_results(place) self.check_max_dygraph_return_index_results(place)
def test_dygraph_final_state_api(self):
with _test_eager_guard():
self.test_pool1d()
class TestPool2DError_API(unittest.TestCase): class TestPool2DError_API(unittest.TestCase):
def test_error_api(self): def test_error_api(self):
...@@ -370,6 +373,10 @@ class TestPool2DError_API(unittest.TestCase): ...@@ -370,6 +373,10 @@ class TestPool2DError_API(unittest.TestCase):
self.assertRaises(ValueError, run_stride_out_of_range) self.assertRaises(ValueError, run_stride_out_of_range)
def test_dygraph_final_state_api(self):
with _test_eager_guard():
self.test_error_api()
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
...@@ -12,14 +12,15 @@ ...@@ -12,14 +12,15 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from test_pool2d_op import adaptive_start_index, adaptive_end_index, pool2D_forward_naive, avg_pool2D_forward_naive, max_pool2D_forward_naive
import unittest import unittest
from op_test import OpTest import paddle
import numpy as np import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core import paddle.fluid.core as core
from op_test import OpTest
from paddle.fluid.framework import _test_eager_guard
from paddle.nn.functional import avg_pool2d, max_pool2d from paddle.nn.functional import avg_pool2d, max_pool2d
import paddle.fluid as fluid from test_pool2d_op import adaptive_start_index, adaptive_end_index, pool2D_forward_naive, avg_pool2D_forward_naive, max_pool2D_forward_naive
import paddle
class TestPool2D_API(unittest.TestCase): class TestPool2D_API(unittest.TestCase):
...@@ -324,6 +325,10 @@ class TestPool2D_API(unittest.TestCase): ...@@ -324,6 +325,10 @@ class TestPool2D_API(unittest.TestCase):
self.check_max_dygraph_ceilmode_results(place) self.check_max_dygraph_ceilmode_results(place)
self.check_max_dygraph_nhwc_results(place) self.check_max_dygraph_nhwc_results(place)
def test_dygraph_final_state_api(self):
with _test_eager_guard():
self.test_pool2d()
class TestPool2DError_API(unittest.TestCase): class TestPool2DError_API(unittest.TestCase):
def test_error_api(self): def test_error_api(self):
...@@ -524,6 +529,10 @@ class TestPool2DError_API(unittest.TestCase): ...@@ -524,6 +529,10 @@ class TestPool2DError_API(unittest.TestCase):
self.assertRaises(ValueError, run_stride_out_of_range) self.assertRaises(ValueError, run_stride_out_of_range)
def test_dygraph_final_state_api(self):
with _test_eager_guard():
self.test_error_api()
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
...@@ -15,13 +15,15 @@ ...@@ -15,13 +15,15 @@
from __future__ import print_function from __future__ import print_function
from __future__ import division from __future__ import division
import paddle
import unittest import unittest
import numpy as np import numpy as np
import paddle import paddle.fluid as fluid
import paddle.fluid.core as core import paddle.fluid.core as core
from op_test import OpTest from op_test import OpTest
import paddle.fluid as fluid from paddle.fluid.framework import _test_eager_guard
from paddle.nn.functional import avg_pool3d, max_pool3d from paddle.nn.functional import avg_pool3d, max_pool3d
from paddle.fluid.framework import _test_eager_guard
from test_pool3d_op import adaptive_start_index, adaptive_end_index, pool3D_forward_naive, avg_pool3D_forward_naive, max_pool3D_forward_naive from test_pool3d_op import adaptive_start_index, adaptive_end_index, pool3D_forward_naive, avg_pool3D_forward_naive, max_pool3D_forward_naive
...@@ -326,6 +328,10 @@ class TestPool3D_API(unittest.TestCase): ...@@ -326,6 +328,10 @@ class TestPool3D_API(unittest.TestCase):
self.check_max_dygraph_ndhwc_results(place) self.check_max_dygraph_ndhwc_results(place)
self.check_max_dygraph_ceilmode_results(place) self.check_max_dygraph_ceilmode_results(place)
def test_dygraph_final_state_api(self):
with _test_eager_guard():
self.test_pool3d()
class TestPool3DError_API(unittest.TestCase): class TestPool3DError_API(unittest.TestCase):
def test_error_api(self): def test_error_api(self):
...@@ -499,6 +505,10 @@ class TestPool3DError_API(unittest.TestCase): ...@@ -499,6 +505,10 @@ class TestPool3DError_API(unittest.TestCase):
self.assertRaises(ValueError, run_size_out_of_range) self.assertRaises(ValueError, run_size_out_of_range)
def test_dygraph_final_state_api(self):
with _test_eager_guard():
self.test_error_api()
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
...@@ -25,6 +25,7 @@ from paddle.fluid import Program, program_guard ...@@ -25,6 +25,7 @@ from paddle.fluid import Program, program_guard
class TestRollOp(OpTest): class TestRollOp(OpTest):
def setUp(self): def setUp(self):
self.python_api = paddle.roll
self.op_type = "roll" self.op_type = "roll"
self.init_dtype_type() self.init_dtype_type()
self.inputs = {'X': np.random.random(self.x_shape).astype(self.dtype)} self.inputs = {'X': np.random.random(self.x_shape).astype(self.dtype)}
...@@ -41,10 +42,10 @@ class TestRollOp(OpTest): ...@@ -41,10 +42,10 @@ class TestRollOp(OpTest):
self.axis = [0, -2] self.axis = [0, -2]
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X'], 'Out') self.check_grad(['X'], 'Out', check_eager=True)
class TestRollOpCase2(TestRollOp): class TestRollOpCase2(TestRollOp):
......
...@@ -25,7 +25,7 @@ from op_test import OpTest ...@@ -25,7 +25,7 @@ from op_test import OpTest
class TestSearchSorted(OpTest): class TestSearchSorted(OpTest):
def setUp(self): def setUp(self):
self.python_api = paddle.searchsorted
self.op_type = "searchsorted" self.op_type = "searchsorted"
self.init_test_case() self.init_test_case()
...@@ -41,7 +41,7 @@ class TestSearchSorted(OpTest): ...@@ -41,7 +41,7 @@ class TestSearchSorted(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def init_test_case(self): def init_test_case(self):
self.sorted_sequence = np.array([1, 3, 5, 7, 9]).astype("float32") self.sorted_sequence = np.array([1, 3, 5, 7, 9]).astype("float32")
......
...@@ -28,6 +28,7 @@ class TrilTriuOpDefaultTest(OpTest): ...@@ -28,6 +28,7 @@ class TrilTriuOpDefaultTest(OpTest):
def setUp(self): def setUp(self):
self.initTestCase() self.initTestCase()
self.python_api = paddle.tril if self.real_op_type == 'tril' else paddle.triu
self.real_np_op = getattr(np, self.real_op_type) self.real_np_op = getattr(np, self.real_op_type)
self.op_type = "tril_triu" self.op_type = "tril_triu"
...@@ -42,10 +43,10 @@ class TrilTriuOpDefaultTest(OpTest): ...@@ -42,10 +43,10 @@ class TrilTriuOpDefaultTest(OpTest):
} }
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X'], 'Out') self.check_grad(['X'], 'Out', check_eager=True)
def initTestCase(self): def initTestCase(self):
self.real_op_type = np.random.choice(['triu', 'tril']) self.real_op_type = np.random.choice(['triu', 'tril'])
......
...@@ -12,7 +12,6 @@ ...@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from __future__ import print_function from __future__ import print_function
from paddle.fluid.framework import _global_flags
import numpy as np import numpy as np
from ...device import get_cudnn_version from ...device import get_cudnn_version
...@@ -22,15 +21,18 @@ from ...fluid.layers.utils import convert_to_list, _is_symmetric_padding ...@@ -22,15 +21,18 @@ from ...fluid.layers.utils import convert_to_list, _is_symmetric_padding
from ...fluid.data_feeder import check_variable_and_dtype from ...fluid.data_feeder import check_variable_and_dtype
from ...framework import ParamAttr from ...framework import ParamAttr
from ...fluid.layer_helper import LayerHelper from ...fluid.layer_helper import LayerHelper
from paddle import _C_ops
from ...tensor.manipulation import unsqueeze, squeeze from ...tensor.manipulation import unsqueeze, squeeze
from ...tensor.math import add from ...tensor.math import add
from ...fluid.layers import nn from ...fluid.layers import nn
from paddle import _C_ops
from paddle import get_flags
from paddle import in_dynamic_mode
from paddle.device import is_compiled_with_cuda from paddle.device import is_compiled_with_cuda
from paddle.device import is_compiled_with_rocm
from paddle.device import is_compiled_with_npu from paddle.device import is_compiled_with_npu
from paddle import in_dynamic_mode from paddle.device import is_compiled_with_rocm
from paddle import get_flags from paddle.fluid.framework import _global_flags
from paddle.fluid.framework import _in_legacy_dygraph
from paddle.fluid.framework import in_dygraph_mode
__all__ = [] __all__ = []
...@@ -1061,7 +1063,17 @@ def conv2d_transpose(x, ...@@ -1061,7 +1063,17 @@ def conv2d_transpose(x,
op_type = 'depthwise_conv2d_transpose' op_type = 'depthwise_conv2d_transpose'
use_cudnn = False use_cudnn = False
if in_dynamic_mode(): if in_dygraph_mode():
final_state_op = _C_ops.final_state_conv2d_transpose if op_type == 'conv2d_transpose' else _C_ops.final_state_depthwise_conv2d_transpose
pre_bias = final_state_op(x, weight, stride, padding, output_padding,
output_size, padding_algorithm, groups,
dilation, data_format)
if bias is not None:
return nn.elementwise_add(pre_bias, bias, axis=channel_dim)
else:
return pre_bias
if _in_legacy_dygraph():
attrs = ('output_padding', output_padding, 'output_size', output_size, attrs = ('output_padding', output_padding, 'output_size', output_size,
'strides', stride, 'paddings', padding, 'padding_algorithm', 'strides', stride, 'paddings', padding, 'padding_algorithm',
padding_algorithm, 'dilations', dilation, 'groups', groups, padding_algorithm, 'dilations', dilation, 'groups', groups,
...@@ -1468,7 +1480,16 @@ def conv3d_transpose(x, ...@@ -1468,7 +1480,16 @@ def conv3d_transpose(x,
op_type = 'conv3d_transpose' op_type = 'conv3d_transpose'
data_format_ = "NHWC" if channel_last else "NCHW" data_format_ = "NHWC" if channel_last else "NCHW"
if in_dynamic_mode(): if in_dygraph_mode():
pre_bias = _C_ops.final_state_conv3d_transpose(
x, weight, stride, padding, output_padding, output_size,
padding_algorithm, groups, dilation, data_format_)
if bias is not None:
return nn.elementwise_add(pre_bias, bias, axis=channel_dim)
else:
return pre_bias
if _in_legacy_dygraph():
attrs = ('output_padding', output_padding, 'output_size', output_size, attrs = ('output_padding', output_padding, 'output_size', output_size,
'paddings', padding, "padding_algorithm", padding_algorithm, 'paddings', padding, "padding_algorithm", padding_algorithm,
'strides', stride, 'dilations', dilation, 'groups', groups, 'strides', stride, 'dilations', dilation, 'groups', groups,
......
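The new eager branch calls the generated final-state op positionally. A sketch of how that call lines up with the `conv2d_transpose` entry added to the YAML config later in this diff; the wrapper name is illustrative only:

```python
# Sketch: the positional arguments of the final-state call map one-to-one onto
# the YAML signature added later in this diff:
#   conv2d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings,
#                    int[] output_padding, int[] output_size,
#                    str padding_algorithm, int groups, int[] dilations,
#                    str data_format)
from paddle import _C_ops


def conv2d_transpose_eager_sketch(x, weight, stride, padding, output_padding,
                                  output_size, padding_algorithm, groups,
                                  dilation, data_format):
    # Same order as the YAML args list; bias, if any, is added afterwards with
    # nn.elementwise_add, as in the functional code above.
    return _C_ops.final_state_conv2d_transpose(
        x, weight, stride, padding, output_padding, output_size,
        padding_algorithm, groups, dilation, data_format)
```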
...@@ -18,6 +18,8 @@ from ...tensor.manipulation import unsqueeze, squeeze ...@@ -18,6 +18,8 @@ from ...tensor.manipulation import unsqueeze, squeeze
from ...fluid.data_feeder import check_type, check_variable_and_dtype from ...fluid.data_feeder import check_type, check_variable_and_dtype
from paddle import _C_ops from paddle import _C_ops
from paddle import in_dynamic_mode from paddle import in_dynamic_mode
from paddle.fluid.framework import _in_legacy_dygraph
from paddle.fluid.framework import in_dygraph_mode
__all__ = [] __all__ = []
...@@ -344,13 +346,18 @@ def avg_pool2d(x, ...@@ -344,13 +346,18 @@ def avg_pool2d(x,
padding, padding_algorithm = _update_padding_nd( padding, padding_algorithm = _update_padding_nd(
padding, 2, channel_last, ceil_mode=ceil_mode) padding, 2, channel_last, ceil_mode=ceil_mode)
if in_dynamic_mode(): if in_dygraph_mode() or _in_legacy_dygraph():
output = _C_ops.pool2d(x, 'pooling_type', 'avg', 'ksize', kernel_size, if in_dygraph_mode():
'global_pooling', False, 'padding_algorithm', output = _C_ops.final_state_pool2d(
padding_algorithm, 'strides', stride, 'paddings', x, kernel_size, stride, padding, ceil_mode, exclusive,
padding, 'use_cudnn', True, 'ceil_mode', data_format, 'avg', False, False, padding_algorithm)
ceil_mode, 'use_mkldnn', False, 'exclusive', else:
exclusive, 'data_format', data_format) output = _C_ops.pool2d(
x, 'pooling_type', 'avg', 'ksize', kernel_size,
'global_pooling', False, 'padding_algorithm', padding_algorithm,
'strides', stride, 'paddings', padding, 'use_cudnn', True,
'ceil_mode', ceil_mode, 'use_mkldnn', False, 'exclusive',
exclusive, 'data_format', data_format)
if divisor_override is None: if divisor_override is None:
return output return output
else: else:
...@@ -466,13 +473,18 @@ def avg_pool3d(x, ...@@ -466,13 +473,18 @@ def avg_pool3d(x,
_check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3) _check_value_limitation(kernel_size, "kernel_size", min_limit=1e-3)
_check_value_limitation(stride, "stride", min_limit=1e-3) _check_value_limitation(stride, "stride", min_limit=1e-3)
if in_dynamic_mode(): if in_dygraph_mode() or _in_legacy_dygraph():
output = _C_ops.pool3d( if in_dygraph_mode():
x, 'pooling_type', 'avg', 'ksize', kernel_size, 'strides', stride, output = _C_ops.final_state_pool3d(
'paddings', padding, 'global_pooling', False, 'padding_algorithm', x, kernel_size, stride, padding, ceil_mode, exclusive,
padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode, data_format, 'avg', False, False, padding_algorithm)
'use_mkldnn', False, 'exclusive', exclusive, 'data_format', if _in_legacy_dygraph():
data_format) output = _C_ops.pool3d(
x, 'pooling_type', 'avg', 'ksize', kernel_size, 'strides',
stride, 'paddings', padding, 'global_pooling', False,
'padding_algorithm', padding_algorithm, 'use_cudnn', True,
'ceil_mode', ceil_mode, 'use_mkldnn', False, 'exclusive',
exclusive, 'data_format', data_format)
if divisor_override is None: if divisor_override is None:
return output return output
else: else:
...@@ -585,7 +597,20 @@ def max_pool1d(x, ...@@ -585,7 +597,20 @@ def max_pool1d(x,
# use 2d to implenment 1d should expand padding in advance. # use 2d to implenment 1d should expand padding in advance.
padding = _expand_low_nd_padding(padding) padding = _expand_low_nd_padding(padding)
if in_dynamic_mode(): if in_dygraph_mode():
if return_mask:
pool_out = _C_ops.final_state_max_pool2d_with_index(
x, kernel_size, stride, padding, False, False)
return (squeeze(pool_out[0], [2]),
squeeze(pool_out[1],
[2])) if return_mask else squeeze(pool_out[0], [2])
else:
pool_out = _C_ops.final_state_pool2d(
x, kernel_size, stride, padding, ceil_mode, True, data_format,
'max', False, False, padding_algorithm)
return squeeze(pool_out, [2])
if _in_legacy_dygraph():
if return_mask: if return_mask:
pool_out = _C_ops.max_pool2d_with_index( pool_out = _C_ops.max_pool2d_with_index(
x, 'ksize', kernel_size, 'global_pooling', False, 'strides', x, 'ksize', kernel_size, 'global_pooling', False, 'strides',
...@@ -1027,7 +1052,17 @@ def max_pool2d(x, ...@@ -1027,7 +1052,17 @@ def max_pool2d(x,
"When setting return_mask to true, data_format must be set to NCHW in API:max_pool2d" "When setting return_mask to true, data_format must be set to NCHW in API:max_pool2d"
) )
if in_dynamic_mode(): if in_dygraph_mode():
if return_mask:
output = _C_ops.final_state_max_pool2d_with_index(
x, kernel_size, stride, padding, False, False)
return output if return_mask else output[0]
else:
return _C_ops.final_state_pool2d(
x, kernel_size, stride, padding, ceil_mode, True, data_format,
'max', False, False, padding_algorithm)
if _in_legacy_dygraph():
if return_mask: if return_mask:
output = _C_ops.max_pool2d_with_index( output = _C_ops.max_pool2d_with_index(
x, 'ksize', kernel_size, 'global_pooling', False, 'strides', x, 'ksize', kernel_size, 'global_pooling', False, 'strides',
...@@ -1158,7 +1193,17 @@ def max_pool3d(x, ...@@ -1158,7 +1193,17 @@ def max_pool3d(x,
"When setting return_mask to true, data_format must be set to NCDHW in API:max_pool3d" "When setting return_mask to true, data_format must be set to NCDHW in API:max_pool3d"
) )
if in_dynamic_mode(): if in_dygraph_mode():
if return_mask:
output = _C_ops.final_state_max_pool3d_with_index(
x, kernel_size, stride, padding, False, False)
return output if return_mask else output[0]
else:
return _C_ops.final_state_pool3d(
x, kernel_size, stride, padding, ceil_mode, True, data_format,
'max', False, False, padding_algorithm)
if _in_legacy_dygraph():
if return_mask: if return_mask:
output = _C_ops.max_pool3d_with_index( output = _C_ops.max_pool3d_with_index(
x, 'pooling_type', 'max', 'ksize', kernel_size, 'strides', x, 'pooling_type', 'max', 'ksize', kernel_size, 'strides',
...@@ -1355,11 +1400,15 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None): ...@@ -1355,11 +1400,15 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
if output_size[1] == None: if output_size[1] == None:
output_size[1] = in_w output_size[1] = in_w
if in_dynamic_mode(): if in_dygraph_mode():
output = _C_ops.pool2d(x, 'pooling_type', 'avg', 'ksize', output_size, return _C_ops.final_state_pool2d(x, output_size, [1, 1], [0, 0], False,
'global_pooling', False, 'adaptive', True, True, data_format, 'avg', False, True,
'data_format', data_format) "EXPLICIT")
return output
if _in_legacy_dygraph():
return _C_ops.pool2d(x, 'pooling_type', 'avg', 'ksize', output_size,
'global_pooling', False, 'adaptive', True,
'data_format', data_format)
l_type = 'pool2d' l_type = 'pool2d'
...@@ -1462,10 +1511,9 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None): ...@@ -1462,10 +1511,9 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
output_size[2] = in_w output_size[2] = in_w
if in_dynamic_mode(): if in_dynamic_mode():
output = _C_ops.pool3d(x, 'pooling_type', 'avg', 'ksize', output_size, return _C_ops.pool3d(x, 'pooling_type', 'avg', 'ksize', output_size,
'global_pooling', False, 'adaptive', True, 'global_pooling', False, 'adaptive', True,
'data_format', data_format) 'data_format', data_format)
return output
l_type = 'pool3d' l_type = 'pool3d'
......
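For the pooling functions the dispatch is nested rather than early-returning, because the eager and legacy outputs share the `divisor_override` post-processing that follows. A trimmed sketch of the `avg_pool2d` branch above (the static-graph path and the divisor arithmetic are omitted):

```python
# Trimmed sketch of the avg_pool2d dispatch shown above.
from paddle import _C_ops
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph


def avg_pool2d_dispatch_sketch(x, kernel_size, stride, padding, ceil_mode,
                               exclusive, data_format, padding_algorithm,
                               divisor_override=None):
    if in_dygraph_mode() or _in_legacy_dygraph():
        if in_dygraph_mode():
            # Final-state op: positional args follow the new pool2d YAML entry.
            output = _C_ops.final_state_pool2d(
                x, kernel_size, stride, padding, ceil_mode, exclusive,
                data_format, 'avg', False, False, padding_algorithm)
        else:
            # Legacy dygraph op: attribute name/value pairs.
            output = _C_ops.pool2d(
                x, 'pooling_type', 'avg', 'ksize', kernel_size,
                'global_pooling', False, 'padding_algorithm', padding_algorithm,
                'strides', stride, 'paddings', padding, 'use_cudnn', True,
                'ceil_mode', ceil_mode, 'use_mkldnn', False, 'exclusive',
                exclusive, 'data_format', data_format)
        if divisor_override is None:
            return output
        # ...otherwise rescale by divisor_override (omitted in this sketch).
```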
...@@ -661,7 +661,10 @@ def tril(x, diagonal=0, name=None): ...@@ -661,7 +661,10 @@ def tril(x, diagonal=0, name=None):
# [ 9, 10, 0, 0]]) # [ 9, 10, 0, 0]])
""" """
if paddle.in_dynamic_mode(): if in_dygraph_mode():
return _C_ops.final_state_tril_triu(x, diagonal, True)
if _in_legacy_dygraph():
op = getattr(_C_ops, 'tril_triu') op = getattr(_C_ops, 'tril_triu')
return op(x, 'diagonal', diagonal, "lower", True) return op(x, 'diagonal', diagonal, "lower", True)
...@@ -728,7 +731,10 @@ def triu(x, diagonal=0, name=None): ...@@ -728,7 +731,10 @@ def triu(x, diagonal=0, name=None):
# [ 0, 10, 11, 12]]) # [ 0, 10, 11, 12]])
""" """
if paddle.in_dynamic_mode(): if in_dygraph_mode():
return _C_ops.final_state_tril_triu(x, diagonal, False)
if _in_legacy_dygraph():
op = getattr(_C_ops, 'tril_triu') op = getattr(_C_ops, 'tril_triu')
return op(x, 'diagonal', diagonal, "lower", False) return op(x, 'diagonal', diagonal, "lower", False)
......
...@@ -254,7 +254,12 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None): ...@@ -254,7 +254,12 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
raise ValueError( raise ValueError(
"The dim of frobenius norm op should be None or two elements list!" "The dim of frobenius norm op should be None or two elements list!"
) )
if paddle.in_dynamic_mode():
if in_dygraph_mode():
if dim is None:
return _C_ops.final_state_frobenius_norm(input, keepdim, True)
return _C_ops.final_state_frobenius_norm(input, dim, keepdim, False)
if _in_legacy_dygraph():
if dim is None: if dim is None:
return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, return _C_ops.frobenius_norm(input, 'keep_dim', keepdim,
'reduce_all', True) 'reduce_all', True)
......
...@@ -796,7 +796,10 @@ def roll(x, shifts, axis=None, name=None): ...@@ -796,7 +796,10 @@ def roll(x, shifts, axis=None, name=None):
else: else:
axis = [] axis = []
if paddle.in_dynamic_mode(): if in_dygraph_mode():
return _C_ops.final_state_roll(x, shifts, axis)
if _in_legacy_dygraph():
return _C_ops.roll(x, 'axis', axis, 'shifts', shifts) return _C_ops.roll(x, 'axis', axis, 'shifts', shifts)
helper = LayerHelper("roll", **locals()) helper = LayerHelper("roll", **locals())
......
...@@ -319,7 +319,10 @@ def index_select(x, index, axis=0, name=None): ...@@ -319,7 +319,10 @@ def index_select(x, index, axis=0, name=None):
# [ 9. 10. 10.]] # [ 9. 10. 10.]]
""" """
if paddle.in_dynamic_mode(): if in_dygraph_mode():
return _C_ops.final_state_index_select(x, index, axis)
if _in_legacy_dygraph():
return _C_ops.index_select(x, index, 'dim', axis) return _C_ops.index_select(x, index, 'dim', axis)
helper = LayerHelper("index_select", **locals()) helper = LayerHelper("index_select", **locals())
...@@ -946,8 +949,11 @@ def searchsorted(sorted_sequence, ...@@ -946,8 +949,11 @@ def searchsorted(sorted_sequence,
# [1, 3, 4, 5]]) # [1, 3, 4, 5]])
""" """
if in_dygraph_mode():
return _C_ops.final_state_searchsorted(sorted_sequence, values,
out_int32, right)
if paddle.in_dynamic_mode(): if _in_legacy_dygraph():
return _C_ops.searchsorted(sorted_sequence, values, "out_int32", return _C_ops.searchsorted(sorted_sequence, values, "out_int32",
out_int32, "right", right) out_int32, "right", right)
......
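The tensor-level ops (tril/triu, frobenius_norm, roll, index_select, searchsorted) all replace the single `paddle.in_dynamic_mode()` check with an eager branch plus a legacy branch. A sketch of the pattern, using roll as the example:

```python
# Sketch of the two-branch dispatch that replaces paddle.in_dynamic_mode()
# in the ops above; the static-graph LayerHelper path is left out.
from paddle import _C_ops
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph


def roll_dispatch_sketch(x, shifts, axis):
    if in_dygraph_mode():
        # Eager mode: generated final-state op, positional args per the YAML entry.
        return _C_ops.final_state_roll(x, shifts, axis)
    if _in_legacy_dygraph():
        # Legacy dygraph: old op with attribute name/value pairs.
        return _C_ops.roll(x, 'axis', axis, 'shifts', shifts)
    # Static graph: falls through to the LayerHelper("roll", ...) path.
```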
...@@ -306,6 +306,24 @@ ...@@ -306,6 +306,24 @@
kernel : kernel :
func : conj func : conj
- api : conv2d_transpose
args : (Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output : Tensor(out)
infer_meta :
func : ConvTransposeInferMeta
kernel :
func : conv2d_transpose
backward : conv2d_transpose_grad
- api : conv3d_transpose
args : (Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output : Tensor(out)
infer_meta :
func : ConvTransposeInferMeta
kernel :
func : conv3d_transpose
backward : conv3d_transpose_grad
- api : copy_to - api : copy_to
args : (Tensor x, Place place, bool blocking) args : (Tensor x, Place place, bool blocking)
output : Tensor output : Tensor
...@@ -359,6 +377,15 @@ ...@@ -359,6 +377,15 @@
kernel : kernel :
func : cumsum func : cumsum
- api : depthwise_conv2d_transpose
args : (Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output : Tensor(out)
infer_meta :
func : ConvTransposeInferMeta
kernel :
func : depthwise_conv2d_transpose
backward : depthwise_conv2d_transpose_grad
- api : diag - api : diag
args : (Tensor x, int offset, float padding_value) args : (Tensor x, int offset, float padding_value)
output : Tensor output : Tensor
...@@ -558,6 +585,15 @@ ...@@ -558,6 +585,15 @@
func : fmin func : fmin
backward : fmin_grad backward : fmin_grad
- api : frobenius_norm
args : (Tensor x, int64_t[] axis, bool keep_dim, bool reduce_all)
output : Tensor(out)
infer_meta :
func : ReduceInferMetaBase
kernel :
func : frobenius_norm
backward : frobenius_norm_grad
- api : full - api : full
args : (IntArray shape, Scalar value, DataType dtype=DataType::FLOAT32, Place place=CPUPlace()) args : (IntArray shape, Scalar value, DataType dtype=DataType::FLOAT32, Place place=CPUPlace())
output: Tensor output: Tensor
...@@ -695,6 +731,16 @@ ...@@ -695,6 +731,16 @@
backward : index_sample_grad backward : index_sample_grad
# no_need_buffer : x # no_need_buffer : x
- api : index_select
args : (Tensor x, Tensor index, int dim)
output : Tensor(out)
infer_meta :
func : IndexSelectInferMeta
kernel :
func : index_select
data_type : x
backward : index_select_grad
# is_empty # is_empty
- api : is_empty - api : is_empty
args : (Tensor x) args : (Tensor x)
...@@ -954,6 +1000,24 @@ ...@@ -954,6 +1000,24 @@
func : max func : max
backward : max_grad backward : max_grad
- api : max_pool2d_with_index
args : (Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive)
output : Tensor(out), Tensor(mask)
infer_meta :
func : MaxPoolWithIndexInferMeta
kernel :
func : max_pool2d_with_index
backward : max_pool2d_with_index_grad
- api : max_pool3d_with_index
args : (Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive)
output : Tensor(out), Tensor(mask)
infer_meta :
func : MaxPoolWithIndexInferMeta
kernel :
func : max_pool3d_with_index
backward : max_pool3d_with_index_grad
- api : maximum - api : maximum
args : (Tensor x, Tensor y) args : (Tensor x, Tensor y)
output : Tensor(out) output : Tensor(out)
...@@ -1129,8 +1193,18 @@ ...@@ -1129,8 +1193,18 @@
output : Tensor(out) output : Tensor(out)
infer_meta : infer_meta :
func : PoolInferMeta func : PoolInferMeta
kernel: kernel :
func : pool2d func : pool2d
backward : pool2d_grad
- api : pool3d
args : (Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
output : Tensor(out)
infer_meta :
func : PoolInferMeta
kernel :
func : pool3d
backward : pool3d_grad
- api : prelu - api : prelu
args : (Tensor x, Tensor alpha, str data_format, str mode) args : (Tensor x, Tensor alpha, str data_format, str mode)
...@@ -1194,6 +1268,15 @@ ...@@ -1194,6 +1268,15 @@
intermediate : xshape intermediate : xshape
backward: reshape_grad backward: reshape_grad
- api : roll
args : (Tensor x, IntArray shifts, int64_t[] axis)
output : Tensor(out)
infer_meta :
func : RollInferMeta
kernel :
func : roll
backward : roll_grad
- api : round - api : round
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
...@@ -1235,6 +1318,14 @@ ...@@ -1235,6 +1318,14 @@
backward : scatter_nd_add_grad backward : scatter_nd_add_grad
# no_need_buffer : updates # no_need_buffer : updates
- api : searchsorted
args : (Tensor sorted_sequence, Tensor value, bool out_int32, bool right)
output : Tensor(out)
infer_meta :
func : SearchsortedInferMeta
kernel :
func : searchsorted
# segment_pool # segment_pool
- api : segment_pool - api : segment_pool
args : (Tensor x, Tensor segment_ids, str pooltype) args : (Tensor x, Tensor segment_ids, str pooltype)
...@@ -1522,6 +1613,15 @@ ...@@ -1522,6 +1613,15 @@
func : triangular_solve func : triangular_solve
# backward : triangular_solve_grad # backward : triangular_solve_grad
- api : tril_triu
args : (Tensor x, int diagonal, bool lower)
output : Tensor(out)
infer_meta :
func : TrilTriuInferMeta
kernel :
func : tril_triu
backward : tril_triu_grad
- api : trunc - api : trunc
args : (Tensor x) args : (Tensor x)
output : Tensor output : Tensor
......
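Each new `api.yaml` entry above generates a `final_state_*` entry point whose Python signature follows the `args` list in declaration order. A minimal smoke check for the `tril_triu` entry, written as a sketch under the assumption of an eager-enabled build where the generated op is reachable inside the eager guard:

```python
# Smoke-check sketch (assumes an eager-enabled build): the generated
# final-state op for the tril_triu entry should match numpy's tril.
import numpy as np
import paddle
from paddle import _C_ops
from paddle.fluid.framework import _test_eager_guard

with _test_eager_guard():
    x = paddle.to_tensor(np.arange(12, dtype="float32").reshape(3, 4))
    out = _C_ops.final_state_tril_triu(x, 0, True)  # (x, diagonal, lower) per the YAML args
    np.testing.assert_allclose(out.numpy(), np.tril(x.numpy()))
```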
...@@ -710,9 +710,9 @@ PADDLE_API {self.gene_return_type_code()} {self.get_api_func_name() + '_'}({self ...@@ -710,9 +710,9 @@ PADDLE_API {self.gene_return_type_code()} {self.get_api_func_name() + '_'}({self
self.outputs['types'], 'SetKernelOutput', code_indent, inplace_flag) self.outputs['types'], 'SetKernelOutput', code_indent, inplace_flag)
api_func_name = self.get_api_func_name() + ('_' if inplace_flag else '') api_func_name = self.get_api_func_name() + ('_' if inplace_flag else '')
return f""" return f"""
{code_indent} VLOG(6) << "{self.api} API kernel key: [" << kernel_backend << ", " << kernel_layout << ", "<< kernel_data_type << "]";
{code_indent} const auto& kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError( {code_indent} const auto& kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
{code_indent} "{self.kernel['func'][0]}", {{kernel_backend, kernel_layout, kernel_data_type}}); {code_indent} "{self.kernel['func'][0]}", {{kernel_backend, kernel_layout, kernel_data_type}});
{code_indent} VLOG(6) << "{self.api} API kernel key: [" << kernel_backend << ", " << kernel_layout << ", "<< kernel_data_type << "]";
{code_indent} VLOG(6) << "{self.api} API kernel: " << kernel; {code_indent} VLOG(6) << "{self.api} API kernel: " << kernel;
{code_indent} auto* dev_ctx = GetDeviceContextByBackend(kernel_backend); {code_indent} auto* dev_ctx = GetDeviceContextByBackend(kernel_backend);
......
...@@ -172,6 +172,24 @@ ...@@ -172,6 +172,24 @@
kernel : kernel :
func : cholesky_solve_grad func : cholesky_solve_grad
- backward_api : conv2d_transpose_grad
forward : conv2d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out)
args : (Tensor x, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output : Tensor(x_grad), Tensor(filter_grad)
infer_meta :
func : ConvTransposeGradInferMeta
kernel :
func : conv2d_transpose_grad
- backward_api : conv3d_transpose_grad
forward : conv3d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out)
args : (Tensor x, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output : Tensor(x_grad), Tensor(filter_grad)
infer_meta :
func : ConvTransposeGradInferMeta
kernel :
func : conv3d_transpose_grad
- backward_api : cos_grad - backward_api : cos_grad
forward : cos (Tensor x) -> Tensor(out) forward : cos (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad) args : (Tensor x, Tensor out_grad)
...@@ -221,6 +239,15 @@ ...@@ -221,6 +239,15 @@
# kernel : # kernel :
# func : gumbel_softmax_grad # func : gumbel_softmax_grad
- backward_api : depthwise_conv2d_transpose_grad
forward : depthwise_conv2d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out)
args : (Tensor x, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output : Tensor(x_grad), Tensor(filter_grad)
infer_meta :
func : ConvTransposeGradInferMeta
kernel :
func : depthwise_conv2d_transpose_grad
- backward_api : diagonal_grad - backward_api : diagonal_grad
forward : diagonal (Tensor x, int offset, int axis1, int axis2) -> Tensor(out) forward : diagonal (Tensor x, int offset, int axis1, int axis2) -> Tensor(out)
args : (Tensor x, Tensor out_grad, int offset = 0, int axis1 = 0, int axis2 = 1) args : (Tensor x, Tensor out_grad, int offset = 0, int axis1 = 0, int axis2 = 1)
...@@ -352,6 +379,16 @@ ...@@ -352,6 +379,16 @@
kernel : kernel :
func : fmin_grad func : fmin_grad
- backward_api : frobenius_norm_grad
forward : frobenius_norm(Tensor x, int64_t[] axis, bool keep_dim, bool reduce_all) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] axis, bool keep_dim, bool reduce_all)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : frobenius_norm_grad
- backward_api : gather_nd_grad - backward_api : gather_nd_grad
forward : gather_nd (Tensor x, Tensor index) -> Tensor(out) forward : gather_nd (Tensor x, Tensor index) -> Tensor(out)
args : (Tensor x, Tensor index, Tensor out_grad) args : (Tensor x, Tensor index, Tensor out_grad)
...@@ -403,6 +440,17 @@ ...@@ -403,6 +440,17 @@
func : index_sample_grad func : index_sample_grad
data_type : out_grad data_type : out_grad
- backward_api : index_select_grad
forward : index_select(Tensor x, Tensor index, int dim) -> Tensor(out)
args : (Tensor x, Tensor index, Tensor out_grad, int dim)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : index_select_grad
data_type : x
- backward_api : kldiv_loss_grad - backward_api : kldiv_loss_grad
forward : kldiv_loss(Tensor x, Tensor label, str reduction) -> Tensor(out) forward : kldiv_loss(Tensor x, Tensor label, str reduction) -> Tensor(out)
args : (Tensor x, Tensor label, Tensor out_grad, str reduction) args : (Tensor x, Tensor label, Tensor out_grad, str reduction)
...@@ -597,6 +645,24 @@ ...@@ -597,6 +645,24 @@
kernel : kernel :
func : max_grad func : max_grad
- backward_api : max_pool2d_with_index_grad
forward : max_pool2d_with_index(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive) -> Tensor(out), Tensor(mask)
args : (Tensor x, Tensor mask, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive)
output : Tensor(x_grad)
infer_meta :
func : MaxPoolWithIndexGradInferMeta
kernel :
func : max_pool2d_with_index_grad
- backward_api : max_pool3d_with_index_grad
forward : max_pool3d_with_index(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive) -> Tensor(out), Tensor(mask)
args : (Tensor x, Tensor mask, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive)
output : Tensor(x_grad)
infer_meta :
func : MaxPoolWithIndexGradInferMeta
kernel :
func : max_pool3d_with_index_grad
- backward_api : maximum_grad - backward_api : maximum_grad
forward : maximum(Tensor x, Tensor y) -> Tensor(out) forward : maximum(Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis=-1) args : (Tensor x, Tensor y, Tensor out_grad, int axis=-1)
...@@ -719,6 +785,24 @@ ...@@ -719,6 +785,24 @@
kernel : kernel :
func : pad3d_grad func : pad3d_grad
- backward_api : pool2d_grad
forward : pool2d(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
output : Tensor(x_grad)
infer_meta :
func : PoolGradInferMeta
kernel :
func : pool2d_grad
- backward_api : pool3d_grad
forward : pool3d(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
output : Tensor(x_grad)
infer_meta :
func : PoolGradInferMeta
kernel :
func : pool3d_grad
- backward_api : prelu_grad - backward_api : prelu_grad
forward : prelu(Tensor x, Tensor alpha, str data_format, str mode) -> Tensor(out) forward : prelu(Tensor x, Tensor alpha, str data_format, str mode) -> Tensor(out)
args : (Tensor x, Tensor alpha, Tensor out_grad, str data_format, str mode) args : (Tensor x, Tensor alpha, Tensor out_grad, str data_format, str mode)
...@@ -806,6 +890,17 @@ ...@@ -806,6 +890,17 @@
backend: out_grad backend: out_grad
layout: out_grad layout: out_grad
- backward_api : roll_grad
forward : roll(Tensor x, IntArray shifts, int64_t[] axis) -> Tensor(out)
args : (Tensor x, Tensor out_grad, IntArray shifts, int64_t[] axis)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : roll_grad
data_type : x
- backward_api : round_grad - backward_api : round_grad
forward : round(Tensor x) -> Tensor(out) forward : round(Tensor x) -> Tensor(out)
args : (Tensor out_grad) args : (Tensor out_grad)
...@@ -1079,6 +1174,16 @@ ...@@ -1079,6 +1174,16 @@
kernel : kernel :
func : transpose_grad func : transpose_grad
- backward_api : tril_triu_grad
forward : tril_triu(Tensor x, int diagonal, bool lower) -> Tensor(out)
args : (Tensor out_grad, int diagonal, bool lower)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [out_grad]
kernel :
func : tril_triu_grad
- backward_api : trunc_grad - backward_api : trunc_grad
forward : trunc (Tensor x) -> Tensor(out) forward : trunc (Tensor x) -> Tensor(out)
args : (Tensor out_grad) args : (Tensor out_grad)
......