未验证 提交 23036031 编写于 作者: Z zyfncg 提交者: GitHub

add comment for kernel of api in api.yaml (#43799)

上级 73e3fc96
...@@ -135,8 +135,8 @@ endforeach() ...@@ -135,8 +135,8 @@ endforeach()
# validation of api yamls # validation of api yamls
message("validate api yaml: message("validate api yaml:
- ${parsed_api_dir}/new_api.parsed.yaml - ${parsed_api_dir}/api.parsed.yaml
- ${parsed_api_dir}/new_backward_api.parsed.yaml") - ${parsed_api_dir}/backward_api.parsed.yaml")
execute_process( execute_process(
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/python/paddle/utils/code_gen WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/python/paddle/utils/code_gen
COMMAND COMMAND
......
...@@ -19,6 +19,14 @@ ...@@ -19,6 +19,14 @@
namespace phi { namespace phi {
/**
 * @brief This Kernel returns a Tensor filled with random binary (0 or 1)
 * numbers from a Bernoulli distribution.
* @param ctx device context
* @param x A tensor with probabilities for generating the random binary
* number
* @param out A Tensor filled with random binary number
*/
template <typename T, typename Context> template <typename T, typename Context>
void BernoulliKernel(const Context& ctx, void BernoulliKernel(const Context& ctx,
const DenseTensor& x, const DenseTensor& x,
......
...@@ -18,6 +18,19 @@ limitations under the License. */ ...@@ -18,6 +18,19 @@ limitations under the License. */
namespace phi { namespace phi {
/**
* @brief Erf Kernel.
* The equation is:
* $$
* f(x) = \frac{2}{\sqrt{\pi}} \int_{0}^{x}e^{- \eta^{2}}d\eta
* $$
*
* The input `x` can carry the LoD (Level of Details) information,
* or not. And the output shares the LoD information with input `x`.
* @param ctx device context
* @param x The input tensor of erf kernel
* @param out The output tensor of erf kernel
*/
template <typename T, typename Context> template <typename T, typename Context>
void ErfKernel(const Context& dev_ctx, const DenseTensor& x, DenseTensor* out); void ErfKernel(const Context& dev_ctx, const DenseTensor& x, DenseTensor* out);
......
...@@ -18,6 +18,14 @@ ...@@ -18,6 +18,14 @@
namespace phi { namespace phi {
/**
 * @brief This kernel is used to perform matrix-vector multiplication
 * of the input tensors `x` and `vec`.
* @param ctx device context
* @param x The matrix input of mv
* @param vec The vector input of mv
* @param out The output of mv
*/
template <typename T, typename Context> template <typename T, typename Context>
void MvKernel(const Context& ctx, void MvKernel(const Context& ctx,
const DenseTensor& x, const DenseTensor& x,
......
...@@ -18,6 +18,13 @@ ...@@ -18,6 +18,13 @@
namespace phi { namespace phi {
/**
 * @brief This kernel generates random values that obey the Poisson distribution.
* @param ctx device context
* @param x The input tensor of poisson kernel
* @param out The output tensor of poisson kernel, it has the same shape and
* dtype with input. Each element corresponds to input tensor
*/
template <typename T, typename Context> template <typename T, typename Context>
void PoissonKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out); void PoissonKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out);
......
...@@ -18,6 +18,25 @@ ...@@ -18,6 +18,25 @@
namespace phi { namespace phi {
/**
* @brief Trace Kernel.
* Return the sum along diagonals of the input tensor.
* The behavior of this operator is similar to how `numpy.trace` works.
*
* If Input is 2-D, returns the sum of diagonal.
 * If Input has larger dimensions, then returns a tensor of diagonal
 * sums, with the diagonals taken from the 2-D planes specified by
 * axis1 and axis2.
* @param ctx device context
* @param x The input tensor, from which the diagonals are taken
 * @param offset offset of the diagonal from the main diagonal.
 *               Can be both positive and negative.
* @param axis1 the first axis of the 2-D planes from which the diagonals
* should be taken. Can be either positive or negative
* @param axis2 the second axis of the 2-D planes from which the diagonals
* should be taken. Can be either positive or negative
* @param out the sum along diagonals of the input tensor
*/
template <typename T, typename Context> template <typename T, typename Context>
void TraceKernel(const Context& ctx, void TraceKernel(const Context& ctx,
const DenseTensor& x, const DenseTensor& x,
......
...@@ -18,6 +18,12 @@ ...@@ -18,6 +18,12 @@
namespace phi { namespace phi {
/**
* @brief Returns a new tensor with the truncated integer values of input.
* @param ctx device context
* @param x The input tensor of trunc kernel
* @param out The output tensor of trunc kernel
*/
template <typename T, typename Context> template <typename T, typename Context>
void TruncKernel(const Context& dev_ctx, void TruncKernel(const Context& dev_ctx,
const DenseTensor& x, const DenseTensor& x,
......
...@@ -128,7 +128,9 @@ PD_REGISTER_ARG_MAPPING_FN({{api["name"]}}, phi::{{api["name"] | to_pascal_case} ...@@ -128,7 +128,9 @@ PD_REGISTER_ARG_MAPPING_FN({{api["name"]}}, phi::{{api["name"] | to_pascal_case}
{% macro get_input_list(inputs, kernel_args) %}{# inline #} {% macro get_input_list(inputs, kernel_args) %}{# inline #}
paddle::small_vector<const char*> inputs { paddle::small_vector<const char*> inputs {
{%- for input in inputs %} {%- for input in inputs %}
{%- if input["name"] in kernel_args %}
{{input["name"] | to_opmaker_name_cstr}}{{", " if not loop.last}} {{input["name"] | to_opmaker_name_cstr}}{{", " if not loop.last}}
{%- endif %}
{%- endfor %} {%- endfor %}
} }
{%- endmacro %} {%- endmacro %}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册
新手
引导
客服 返回
顶部