Unverified commit 36d76840, authored by YuanRisheng, committed by GitHub

[Phi]Add multi_dot/maxout/multiplex op yaml (#41550)

* add multi_dot,maxout,multiplex yaml

* add code coverage
Parent commit: 89bfa964
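For context on what the new YAML entries enable: with this change the Python APIs for the three operators gain final-state (eager) dispatch branches (`_C_ops.final_state_multi_dot`, `_C_ops.final_state_maxout`, `_C_ops.final_state_multiplex`), as shown in the function diffs further down. Below is a minimal usage sketch of the three public APIs touched by this commit; the shapes and values are illustrative only and are not taken from the changed tests.

import numpy as np
import paddle
import paddle.nn.functional as F

paddle.disable_static()  # run in dynamic (eager) mode

# multi_dot: chained matrix product; in eager mode this routes to final_state_multi_dot
a = paddle.to_tensor(np.random.rand(3, 4).astype('float32'))
b = paddle.to_tensor(np.random.rand(4, 5).astype('float32'))
c = paddle.to_tensor(np.random.rand(5, 2).astype('float32'))
out = paddle.linalg.multi_dot([a, b, c])   # shape [3, 2]

# maxout: max over `groups` slices of the channel axis
x = paddle.to_tensor(np.random.rand(2, 6, 4, 4).astype('float32'))
y = F.maxout(x, groups=2, axis=1)          # shape [2, 3, 4, 4]

# multiplex: row-wise selection among candidate tensors driven by an index tensor
img1 = paddle.to_tensor([[1., 2.], [3., 4.]])
img2 = paddle.to_tensor([[5., 6.], [7., 8.]])
index = paddle.to_tensor([[1], [0]], dtype='int32')
res = paddle.multiplex([img1, img2], index)  # [[5., 6.], [3., 4.]]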
@@ -1014,5 +1014,135 @@ std::vector<Tensor> meshgrid_grad_impl(
return api_output;
}
std::vector<Tensor> multi_dot_grad_impl(const std::vector<Tensor>& x,
const Tensor& out_grad) {
Backend kernel_backend = Backend::UNDEFINED;
DataLayout kernel_layout = DataLayout::UNDEFINED;
DataType kernel_data_type = DataType::UNDEFINED;
if (kernel_backend == Backend::UNDEFINED ||
kernel_layout == DataLayout::UNDEFINED ||
kernel_data_type == DataType::UNDEFINED) {
auto kernel_key_set = ParseKernelKeyByInputArgs(x, out_grad);
auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
if (kernel_backend == Backend::UNDEFINED) {
kernel_backend = kernel_key.backend();
}
if (kernel_layout == DataLayout::UNDEFINED) {
kernel_layout = kernel_key.layout();
}
if (kernel_data_type == DataType::UNDEFINED) {
kernel_data_type = kernel_key.dtype();
}
}
VLOG(6) << "multi_dot_grad API kernel key: [" << kernel_backend << ", "
<< kernel_layout << ", " << kernel_data_type << "]";
const auto& kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
"multi_dot_grad", {kernel_backend, kernel_layout, kernel_data_type});
VLOG(6) << "multi_dot_grad API kernel: " << kernel;
auto* dev_ctx = GetDeviceContextByBackend(kernel_backend);
auto input_x_vec = PrepareData(x, kernel.InputAt(0), {});
std::vector<const phi::DenseTensor*> input_x(input_x_vec->size());
for (size_t i = 0; i < input_x.size(); ++i) {
input_x[i] = &input_x_vec->at(i);
}
auto input_out_grad = PrepareData(out_grad, kernel.InputAt(1), {});
size_t out_number = input_x.size();
std::vector<Tensor> api_output;
auto kernel_out = SetKernelOutput(out_number, kernel_backend, &api_output);
auto x_meta_vec = MakeMetaTensor(input_x);
std::vector<phi::MetaTensor*> x_metas(x_meta_vec.size());
for (size_t i = 0; i < x_meta_vec.size(); ++i) {
x_metas[i] = &x_meta_vec[i];
}
std::vector<phi::MetaTensor> meta_outs;
meta_outs.reserve(out_number);
std::vector<phi::MetaTensor*> meta_out_ptrs;
meta_out_ptrs.reserve(out_number);
for (size_t i = 0; i < out_number; ++i) {
meta_outs.push_back(kernel_out[i]);
meta_out_ptrs.push_back(&meta_outs.back());
}
phi::MultiDotGradInferMeta(
x_metas, MakeMetaTensor(*input_out_grad), meta_out_ptrs);
using kernel_signature = void (*)(const platform::DeviceContext&,
const std::vector<const phi::DenseTensor*>&,
const phi::DenseTensor&,
std::vector<phi::DenseTensor*>&);
auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
(*kernel_fn)(*dev_ctx, input_x, *input_out_grad, kernel_out);
return api_output;
}
std::vector<Tensor> multiplex_grad_impl(const std::vector<Tensor>& inputs,
const Tensor& ids,
const Tensor& out_grad) {
Backend kernel_backend = Backend::UNDEFINED;
DataLayout kernel_layout = DataLayout::UNDEFINED;
DataType kernel_data_type = DataType::UNDEFINED;
if (kernel_backend == Backend::UNDEFINED ||
kernel_layout == DataLayout::UNDEFINED ||
kernel_data_type == DataType::UNDEFINED) {
auto kernel_key_set = ParseKernelKeyByInputArgs(out_grad);
auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
if (kernel_backend == Backend::UNDEFINED) {
kernel_backend = kernel_key.backend();
}
if (kernel_layout == DataLayout::UNDEFINED) {
kernel_layout = kernel_key.layout();
}
if (kernel_data_type == DataType::UNDEFINED) {
kernel_data_type = kernel_key.dtype();
}
}
VLOG(6) << "multiplex_grad API kernel key: [" << kernel_backend << ", "
<< kernel_layout << ", " << kernel_data_type << "]";
const auto& kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
"multiplex_grad", {kernel_backend, kernel_layout, kernel_data_type});
VLOG(6) << "multiplex_grad API kernel: " << kernel;
auto* dev_ctx = GetDeviceContextByBackend(kernel_backend);
auto input_ids = PrepareData(ids, kernel.InputAt(0), {});
auto input_out_grad = PrepareData(out_grad, kernel.InputAt(1), {});
auto out_number = inputs.size();
std::vector<Tensor> api_output;
auto kernel_out = SetKernelOutput(out_number, kernel_backend, &api_output);
std::vector<phi::MetaTensor> meta_outs;
meta_outs.reserve(out_number);
std::vector<phi::MetaTensor*> meta_out_ptrs;
meta_out_ptrs.reserve(out_number);
for (size_t i = 0; i < out_number; ++i) {
meta_outs.push_back(kernel_out[i]);
meta_out_ptrs.push_back(&meta_outs.back());
}
phi::MultiplexGradInferMeta(MakeMetaTensor(*input_ids),
MakeMetaTensor(*input_out_grad),
meta_out_ptrs);
using kernel_signature = void (*)(const platform::DeviceContext&,
const phi::DenseTensor&,
const phi::DenseTensor&,
std::vector<phi::DenseTensor*>&);
auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
(*kernel_fn)(*dev_ctx, *input_ids, *input_out_grad, kernel_out);
return api_output;
}
} // namespace experimental
} // namespace paddle
@@ -62,6 +62,8 @@ std::vector<Tensor> split_impl(const Tensor& x,
const IntArray& num_or_sections,
const Scalar& axis);
+std::vector<Tensor> meshgrid_impl(const std::vector<Tensor>& inputs);
std::tuple<Tensor, Tensor, Tensor> momentum_impl(
const Tensor& param,
const Tensor& grad,
@@ -109,9 +111,15 @@ Tensor real_grad_impl(const Tensor& x);
std::vector<Tensor> stack_grad_impl(const std::vector<Tensor>& x,
const Tensor& out_grad,
int axis);
-std::vector<Tensor> meshgrid_impl(const std::vector<Tensor>& inputs);
std::vector<Tensor> meshgrid_grad_impl(const std::vector<Tensor>& inputs,
const std::vector<Tensor>& outputs_grad);
+std::vector<Tensor> multi_dot_grad_impl(const std::vector<Tensor>& x,
+const Tensor& out_grad);
+std::vector<Tensor> multiplex_grad_impl(const std::vector<Tensor>& inputs,
+const Tensor& ids,
+const Tensor& out_grad);
} // namespace experimental
} // namespace paddle
@@ -308,6 +308,38 @@ void MeshgridGradInferMeta(const std::vector<MetaTensor*>& inputs,
}
}
void MultiDotGradInferMeta(const std::vector<MetaTensor*>& x,
const MetaTensor& out_grad,
std::vector<MetaTensor*> x_grad) {
PADDLE_ENFORCE_EQ(
x.size(),
x_grad.size(),
errors::InvalidArgument(
"Number of Inputs(X) should be equal with Outputs(X@Grad)."
"But received Inputs(X)' size = %d , Outputs(X@Grad)' size = %d.",
x.size(),
x_grad.size()));
for (size_t i = 0; i < x.size(); i++) {
if (x_grad[i] != nullptr) {
x_grad[i]->set_dims(x[i]->dims());
x_grad[i]->share_lod(*x[i]);
}
}
}
void MultiplexGradInferMeta(const MetaTensor& ids,
const MetaTensor& out_grad,
std::vector<MetaTensor*> ins_grad) {
PADDLE_ENFORCE_NE(
ins_grad.empty(),
true,
errors::InvalidArgument("Output(X@Grad) should not be null."));
auto dout_dim = out_grad.dims();
for (auto in_grad : ins_grad) {
in_grad->set_dims(dout_dim);
}
}
void NllLossGradInferMeta(const MetaTensor& x,
const MetaTensor& label,
paddle::optional<const MetaTensor&> weight,
......
@@ -139,6 +139,14 @@ void MeshgridGradInferMeta(const std::vector<MetaTensor*>& inputs,
const std::vector<MetaTensor*>& outputs_grad,
std::vector<MetaTensor*> inputs_grad);
void MultiDotGradInferMeta(const std::vector<MetaTensor*>& x,
const MetaTensor& out_grad,
std::vector<MetaTensor*> x_grad);
void MultiplexGradInferMeta(const MetaTensor& ids,
const MetaTensor& out_grad,
std::vector<MetaTensor*> ins_grad);
void NllLossGradInferMeta(const MetaTensor& input,
const MetaTensor& label,
paddle::optional<const MetaTensor&> weight,
......
@@ -339,8 +339,8 @@ void MultiDotGradMatChainOrder(const Context& ctx,
template <typename T, typename Context>
void MultiDotGradKernel(const Context& ctx,
-const DenseTensor& out_grad,
const std::vector<const DenseTensor*>& x,
+const DenseTensor& out_grad,
std::vector<DenseTensor*> x_grad) {
auto ins = x;
auto dout = out_grad;
......
@@ -20,8 +20,8 @@ namespace phi {
template <typename T, typename Context>
void MultiDotGradKernel(const Context& ctx,
-const DenseTensor& out_grad,
const std::vector<const DenseTensor*>& x,
+const DenseTensor& out_grad,
std::vector<DenseTensor*> x_grad);
} // namespace phi
@@ -19,7 +19,7 @@ namespace phi {
KernelSignature MultiDotGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature(
-"multi_dot_grad", {GradVarName("Out"), "X"}, {}, {GradVarName("X")});
+"multi_dot_grad", {"X", GradVarName("Out")}, {}, {GradVarName("X")});
}
} // namespace phi
......
@@ -5970,8 +5970,11 @@ def multiplex(inputs, index, name=None):
print(res) # [array([[5., 6.], [3., 4.]], dtype=float32)]
"""
-if _non_static_mode():
+if _in_legacy_dygraph():
return _C_ops.multiplex(index, inputs)
+if in_dygraph_mode():
+return _C_ops.final_state_multiplex(inputs, index)
helper = LayerHelper('multiplex', **locals())
check_type(inputs, 'inputs', (list), 'multiplex')
......
@@ -21,6 +21,7 @@ import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.nn.functional as F
from op_test import OpTest
+from paddle.fluid.framework import _test_eager_guard
paddle.enable_static()
np.random.seed(1)
@@ -38,6 +39,7 @@ def maxout_forward_naive(x, groups, channel_axis):
class TestMaxOutOp(OpTest):
def setUp(self):
self.op_type = "maxout"
+self.python_api = paddle.nn.functional.maxout
self.dtype = 'float64'
self.shape = [3, 6, 2, 4]
self.groups = 2
@@ -55,10 +57,10 @@ class TestMaxOutOp(OpTest):
pass
def test_check_output(self):
-self.check_output()
+self.check_output(check_eager=True)
def test_check_grad(self):
-self.check_grad(['X'], 'Out')
+self.check_grad(['X'], 'Out', check_eager=True)
class TestMaxOutOpAxis0(TestMaxOutOp):
@@ -144,6 +146,10 @@ class TestMaxoutAPI(unittest.TestCase):
x_float32 = paddle.fluid.data(name='x_float32', shape=[2, 4, 6, 8])
self.assertRaises(ValueError, F.maxout, x_float32, 2, 2)
+def test_dygraph_final_state_api(self):
+with _test_eager_guard():
+self.test_dygraph_api()
if __name__ == '__main__':
unittest.main()
@@ -18,6 +18,7 @@ from op_test import OpTest, skip_check_grad_ci
from numpy.linalg import multi_dot
from op_test import OpTest
import paddle
+from paddle.fluid.framework import _test_eager_guard
paddle.enable_static()
@@ -27,6 +28,7 @@ paddle.enable_static()
class TestMultiDotOp(OpTest):
def setUp(self):
self.op_type = "multi_dot"
+self.python_api = paddle.linalg.multi_dot
self.dtype = self.get_dtype()
self.get_inputs_and_outputs()
@@ -40,11 +42,11 @@ class TestMultiDotOp(OpTest):
self.outputs = {'Out': multi_dot([self.A, self.B])}
def test_check_output(self):
-self.check_output()
+self.check_output(check_eager=True)
def test_check_grad(self):
-self.check_grad(['x0'], 'Out')
+self.check_grad(['x0'], 'Out', check_eager=True)
-self.check_grad(['x1'], 'Out')
+self.check_grad(['x1'], 'Out', check_eager=True)
#(A*B)*C
@@ -57,9 +59,9 @@ class TestMultiDotOp3Mat(TestMultiDotOp):
self.outputs = {'Out': multi_dot([self.A, self.B, self.C])}
def test_check_grad(self):
-self.check_grad(['x0'], 'Out')
+self.check_grad(['x0'], 'Out', check_eager=True)
-self.check_grad(['x1'], 'Out')
+self.check_grad(['x1'], 'Out', check_eager=True)
-self.check_grad(['x2'], 'Out')
+self.check_grad(['x2'], 'Out', check_eager=True)
#A*(B*C)
@@ -72,9 +74,9 @@ class TestMultiDotOp3Mat2(TestMultiDotOp):
self.outputs = {'Out': multi_dot([self.A, self.B, self.C])}
def test_check_grad(self):
-self.check_grad(['x0'], 'Out')
+self.check_grad(['x0'], 'Out', check_eager=True)
-self.check_grad(['x1'], 'Out')
+self.check_grad(['x1'], 'Out', check_eager=True)
-self.check_grad(['x2'], 'Out')
+self.check_grad(['x2'], 'Out', check_eager=True)
class TestMultiDotOp4Mat(TestMultiDotOp):
@@ -90,10 +92,10 @@ class TestMultiDotOp4Mat(TestMultiDotOp):
self.outputs = {'Out': multi_dot([self.A, self.B, self.C, self.D])}
def test_check_grad(self):
-self.check_grad(['x0'], 'Out')
+self.check_grad(['x0'], 'Out', check_eager=True)
-self.check_grad(['x1'], 'Out')
+self.check_grad(['x1'], 'Out', check_eager=True)
-self.check_grad(['x2'], 'Out')
+self.check_grad(['x2'], 'Out', check_eager=True)
-self.check_grad(['x3'], 'Out')
+self.check_grad(['x3'], 'Out', check_eager=True)
class TestMultiDotOpFirst1D(TestMultiDotOp):
@@ -143,9 +145,9 @@ class TestMultiDotOp3MatLast1D(TestMultiDotOp3Mat):
self.outputs = {'Out': multi_dot([self.A, self.B, self.C])}
def test_check_grad(self):
-self.check_grad(['x0'], 'Out')
+self.check_grad(['x0'], 'Out', check_eager=True)
-self.check_grad(['x1'], 'Out')
+self.check_grad(['x1'], 'Out', check_eager=True)
-self.check_grad(['x2'], 'Out')
+self.check_grad(['x2'], 'Out', check_eager=True)
class TestMultiDotOp4MatLast1D(TestMultiDotOp4Mat):
@@ -260,6 +262,10 @@ class APITestMultiDot(unittest.TestCase):
expected_result = np.linalg.multi_dot([input_array1, input_array2])
self.assertTrue(np.allclose(expected_result, out.numpy()))
+def test_dygraph_final_state_api(self):
+with _test_eager_guard():
+self.test_dygraph_without_out()
if __name__ == "__main__":
unittest.main()
@@ -19,6 +19,7 @@ import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid as fluid
+from paddle.fluid.framework import _test_eager_guard
class TestMultiplexOp(OpTest):
@@ -102,6 +103,30 @@ class TestMultiplexODygrap(unittest.TestCase):
res = paddle.multiplex(inputs, index)
paddle.enable_static()
def test_dygraph_final_state_api(self):
with fluid.dygraph.guard():
img1 = np.array([[1, 2], [3, 4]]).astype(np.float32)
img2 = np.array([[5, 6], [7, 8]]).astype(np.float32)
inputs = [paddle.to_tensor(img1), paddle.to_tensor(img2)]
index = paddle.to_tensor(np.array([[1], [0]]).astype(np.int32))
inputs[0].stop_gradient = False
inputs[1].stop_gradient = False
res = paddle.multiplex(inputs, index)
res.backward()
with _test_eager_guard():
inputs_eager = [paddle.to_tensor(img1), paddle.to_tensor(img2)]
index_eager = paddle.to_tensor(
np.array([[1], [0]]).astype(np.int32))
inputs_eager[0].stop_gradient = False
inputs_eager[1].stop_gradient = False
res_eager = paddle.multiplex(inputs_eager, index_eager)
res_eager.backward()
self.assertEqual((res.numpy() == res_eager.numpy()).all(), True)
self.assertEqual((inputs[0].grad.numpy() ==
inputs_eager[0].grad.numpy()).all(), True)
self.assertEqual((inputs[1].grad.numpy() ==
inputs_eager[1].grad.numpy()).all(), True)
if __name__ == '__main__':
unittest.main()
@@ -684,10 +684,10 @@ def maxout(x, groups, axis=1, name=None):
# [0.95313174 0.6228939 0.7129065 0.7087491 ]
# [0.7142536 0.88725346 0.61093384 0.38833922]]]]
"""
-if in_dynamic_mode():
+if _in_legacy_dygraph():
return _C_ops.maxout(x, 'groups', groups, 'axis', axis)
+if in_dygraph_mode():
+return _C_ops.final_state_maxout(x, groups, axis)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'maxout')
if axis not in [1, -1, 3]:
raise ValueError(
......
@@ -2273,8 +2273,10 @@ def multi_dot(x, name=None):
# [10, 7]
"""
-if paddle.in_dynamic_mode():
+if _in_legacy_dygraph():
return _C_ops.multi_dot(x)
+if in_dygraph_mode():
+return _C_ops.final_state_multi_dot(x)
check_type(x, 'x', (list, tuple), 'multi_dot')
for id, item in enumerate(x):
......
@@ -1261,6 +1261,15 @@
func : maximum
backward : maximum_grad
- api : maxout
args : (Tensor x, int groups, int axis)
output : Tensor(out)
infer_meta :
func : MaxOutInferMeta
kernel :
func : maxout
backward : maxout_grad
- api : mean
args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
output : Tensor(out)
@@ -1337,6 +1346,15 @@
invoke : momentum_impl(param, grad, velocity, learning_rate, master_param, mu, use_nesterov, regularization_method, regularization_coeff, multi_precision, rescale_grad)
optional : master_param
- api : multi_dot
args : (Tensor[] x)
output : Tensor
infer_meta :
func : MultiDotInferMeta
kernel :
func : multi_dot
backward : multi_dot_grad
# multinomial
- api : multinomial
args : (Tensor x, int num_samples, bool replacement)
@@ -1346,6 +1364,16 @@
kernel :
func : multinomial
- api : multiplex
args : (Tensor[] ins, Tensor ids)
output : Tensor
infer_meta :
func : MultiplexInferMeta
kernel :
func : multiplex
data_type : ins
backward : multiplex_grad
- api : multiply
args : (Tensor x, Tensor y)
output : Tensor
......
@@ -600,7 +600,7 @@ PADDLE_API {self.gene_return_type_code()} {self.get_api_func_name() + '_'}({self
if self.inputs['input_info'][param] == "const Tensor&":
kernel_args = kernel_args + "*" + PREFIX_TENSOR_NAME + param + ", "
elif self.inputs['input_info'][
-input_name] == "const std::vector<Tensor>&":
+param] == "const std::vector<Tensor>&":
kernel_args = kernel_args + PREFIX_TENSOR_NAME + param + ", "
else:
# do nothing
......
@@ -902,6 +902,16 @@
kernel :
func : maximum_grad
- backward_api : maxout_grad
forward : maxout(Tensor x, int groups, int axis) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, int groups, int axis)
output : Tensor(x_grad)
infer_meta :
func : GeneralUnaryGradInferMeta
param: [x]
kernel :
func : maxout_grad
- backward_api : mean_all_grad
forward : mean_all(Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
@@ -979,6 +989,18 @@
func : modulo_grad
no_need_buffer : x, y
- backward_api : multi_dot_grad
forward : multi_dot (Tensor[] x) -> Tensor(out)
args : (Tensor[] x, Tensor out_grad)
output : Tensor[](x_grad)
invoke : multi_dot_grad_impl(x, out_grad)
- backward_api : multiplex_grad
forward : multiplex (Tensor[] ins, Tensor ids) -> Tensor(out)
args : (Tensor[] ins, Tensor ids, Tensor out_grad)
output : Tensor[](ins_grad)
invoke : multiplex_grad_impl(ins, ids, out_grad)
- backward_api : multiply_grad
forward : multiply (Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1)
......