Commit 2988ca08 authored by Mauro Carvalho Chehab, committed by Jason Gunthorpe

IB: Fix kernel-doc markups

Some functions have different names between their prototypes and the
kernel-doc markup.

Others need to be fixed, as kernel-doc markups should use this format:
        identifier - description

Link: https://lore.kernel.org/r/78b98c41a5a0f4c0106433d305b143028a4168b0.1606823973.git.mchehab+huawei@kernel.org
Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Parent c63e1c4d
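
For reference, a minimal sketch of the markup layout that kernel-doc expects is shown below; the function name here is hypothetical, not one touched by this commit:

    /**
     * example_create - create and register an example object
     * @dev: device the object is bound to
     * @flags: creation flags
     *
     * The identifier on the first line must match the name of the function
     * (or struct) that follows the comment; scripts/kernel-doc warns when
     * they differ, which is what most hunks below correct.
     *
     * Return: 0 on success, a negative errno on failure.
     */
    int example_create(struct device *dev, unsigned int flags);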
@@ -1251,7 +1251,8 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
 EXPORT_SYMBOL(ib_cm_listen);
 
 /**
- * Create a new listening ib_cm_id and listen on the given service ID.
+ * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on
+ * the given service ID.
  *
  * If there's an existing ID listening on that same device and service ID,
  * return it.
@@ -1764,7 +1765,7 @@ static u16 cm_get_bth_pkey(struct cm_work *work)
 }
 
 /**
- * Convert OPA SGID to IB SGID
+ * cm_opa_to_ib_sgid - Convert OPA SGID to IB SGID
  * ULPs (such as IPoIB) do not understand OPA GIDs and will
  * reject them as the local_gid will not match the sgid. Therefore,
  * change the pathrec's SGID to an IB SGID.
@@ -123,7 +123,7 @@ static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
 }
 
 /**
- * ib_process_direct_cq - process a CQ in caller context
+ * ib_process_cq_direct - process a CQ in caller context
  * @cq: CQ to process
  * @budget: number of CQEs to poll for
  *
@@ -197,7 +197,7 @@ static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
 }
 
 /**
- * __ib_alloc_cq allocate a completion queue
+ * __ib_alloc_cq - allocate a completion queue
  * @dev: device to allocate the CQ for
  * @private: driver private data, accessible from cq->cq_context
  * @nr_cqe: number of CQEs to allocate
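
As a usage aside, __ib_alloc_cq is normally reached through the ib_alloc_cq() wrapper, which appends KBUILD_MODNAME as the caller string. A minimal sketch, with a hypothetical device pointer dev:

    /* Sketch: allocate a CQ with 128 entries whose completions are
     * processed from workqueue context, then release it. Failure is
     * signalled by an error pointer, not NULL. */
    struct ib_cq *cq = ib_alloc_cq(dev, NULL, 128, 0, IB_POLL_WORKQUEUE);

    if (IS_ERR(cq))
        return PTR_ERR(cq);
    /* ... attach the CQ to QPs and post work ... */
    ib_free_cq(cq);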
@@ -141,7 +141,7 @@ int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request);
 int iwpm_get_nlmsg_seq(void);
 
 /**
- * iwpm_add_reminfo - Add remote address info of the connecting peer
+ * iwpm_add_remote_info - Add remote address info of the connecting peer
  * to the remote info hash table
  * @reminfo: The remote info to be added
  */
@@ -1435,7 +1435,8 @@ enum opa_pr_supported {
 };
 
 /**
- * Check if current PR query can be an OPA query.
+ * opa_pr_query_possible - Check if current PR query can be an OPA query.
+ *
  * Retuns PR_NOT_SUPPORTED if a path record query is not
  * possible, PR_OPA_SUPPORTED if an OPA path record query
  * is possible and PR_IB_SUPPORTED if an IB path record
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(rdma_port_get_link_layer);
 /* Protection domains */
 
 /**
- * ib_alloc_pd - Allocates an unused protection domain.
+ * __ib_alloc_pd - Allocates an unused protection domain.
  * @device: The device on which to allocate the protection domain.
  * @flags: protection domain flags
  * @caller: caller's build-time module name
@@ -1666,7 +1666,7 @@ static bool is_qp_type_connected(const struct ib_qp *qp)
 		qp->qp_type == IB_QPT_XRC_TGT);
 }
 
-/**
+/*
  * IB core internal function to perform QP attributes modification.
  */
 static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
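
The change from /** to /* above is deliberate rather than cosmetic: kernel-doc only parses comments that open with /**, so an internal helper such as _ib_modify_qp should carry a plain comment. A short sketch of the two styles, with hypothetical names:

    /* Ordinary comment: ignored by scripts/kernel-doc; right for internals. */
    static int example_internal_helper(void);

    /**
     * example_exported_op - kernel-doc comment, extracted into the docs
     * @val: value to operate on
     */
    int example_exported_op(int val);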
@@ -126,7 +126,7 @@ int rvt_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
 }
 
 /**
- * rvt_destory_ah - Destory an address handle
+ * rvt_destroy_ah - Destroy an address handle
  * @ibah: address handle
  * @destroy_flags: destroy address handle flags (see enum rdma_destroy_ah_flags)
  * Return: 0 on success
@@ -54,7 +54,7 @@
 #include "mcast.h"
 
 /**
- * rvt_driver_mcast - init resources for multicast
+ * rvt_driver_mcast_init - init resources for multicast
  * @rdi: rvt dev struct
  *
  * This is per device that registers with rdmavt
@@ -69,7 +69,7 @@ void rvt_driver_mcast_init(struct rvt_dev_info *rdi)
 }
 
 /**
- * mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
+ * rvt_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
  * @qp: the QP to link
  */
 static struct rvt_mcast_qp *rvt_mcast_qp_alloc(struct rvt_qp *qp)
@@ -98,7 +98,7 @@ static void rvt_mcast_qp_free(struct rvt_mcast_qp *mqp)
 }
 
 /**
- * mcast_alloc - allocate the multicast GID structure
+ * rvt_mcast_alloc - allocate the multicast GID structure
  * @mgid: the multicast GID
  * @lid: the muilticast LID (host order)
  *
@@ -181,7 +181,7 @@ struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid,
 EXPORT_SYMBOL(rvt_mcast_find);
 
 /**
- * mcast_add - insert mcast GID into table and attach QP struct
+ * rvt_mcast_add - insert mcast GID into table and attach QP struct
  * @mcast: the mcast GID table
  * @mqp: the QP to attach
  *
@@ -426,8 +426,8 @@ int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 }
 
 /**
- *rvt_mast_tree_empty - determine if any qps are attached to any mcast group
- *@rdi: rvt dev struct
+ * rvt_mcast_tree_empty - determine if any qps are attached to any mcast group
+ * @rdi: rvt dev struct
  *
  * Return: in use count
  */
@@ -1827,7 +1827,7 @@ int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 }
 
 /**
- * rvt_post_receive - post a receive on a QP
+ * rvt_post_recv - post a receive on a QP
  * @ibqp: the QP to post the receive on
  * @wr: the WR to post
  * @bad_wr: the first bad WR is put here
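
rvt_post_recv backs the generic ib_post_recv() verb for rdmavt devices. A hedged sketch of what posting a receive looks like from the ULP side (dma_addr, buf_len, pd, and qp are placeholders assumed to be set up already):

    struct ib_sge sge = {
        .addr   = dma_addr,            /* DMA-mapped receive buffer */
        .length = buf_len,
        .lkey   = pd->local_dma_lkey,  /* lkey valid for local access */
    };
    struct ib_recv_wr wr = {
        .wr_id   = 1,                  /* echoed back in the completion */
        .sg_list = &sge,
        .num_sge = 1,
    };
    const struct ib_recv_wr *bad_wr;
    int ret = ib_post_recv(qp, &wr, &bad_wr);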
@@ -2249,7 +2249,7 @@ int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 }
 
 /**
- * rvt_post_srq_receive - post a receive on a shared receive queue
+ * rvt_post_srq_recv - post a receive on a shared receive queue
  * @ibsrq: the SRQ to post the receive on
  * @wr: the list of work requests to post
  * @bad_wr: A pointer to the first WR to cause a problem is put here
@@ -2501,7 +2501,7 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
 EXPORT_SYMBOL(rvt_get_rwqe);
 
 /**
- * qp_comm_est - handle trap with QP established
+ * rvt_comm_est - handle trap with QP established
  * @qp: the QP
  */
 void rvt_comm_est(struct rvt_qp *qp)
@@ -2947,7 +2947,7 @@ static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp,
 }
 
 /**
- * ruc_loopback - handle UC and RC loopback requests
+ * rvt_ruc_loopback - handle UC and RC loopback requests
  * @sqp: the sending QP
  *
  * This is called from rvt_do_send() to forward a WQE addressed to the same HFI
@@ -739,7 +739,7 @@ iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
 }
 
 /**
- * iscsi_iser_set_param() - set class connection parameter
+ * iscsi_iser_conn_get_stats() - get iscsi connection statistics
  * @cls_conn: iscsi class connection
  * @stats: iscsi stats to output
  *
@@ -437,7 +437,7 @@ struct opa_veswport_trap {
 } __packed;
 
 /**
- * struct opa_vnic_iface_macs_entry - single entry in the mac list
+ * struct opa_vnic_iface_mac_entry - single entry in the mac list
  * @mac_addr: MAC address
  */
 struct opa_vnic_iface_mac_entry {
@@ -74,7 +74,7 @@ void opa_vnic_vema_report_event(struct opa_vnic_adapter *adapter, u8 event)
 }
 
 /**
- * opa_vnic_get_error_counters - get summary counters
+ * opa_vnic_get_summary_counters - get summary counters
  * @adapter: vnic port adapter
  * @cntrs: pointer to destination summary counters structure
  *
@@ -347,7 +347,7 @@ struct srpt_nexus {
 };
 
 /**
- * struct srpt_port_attib - attributes for SRPT port
+ * struct srpt_port_attrib - attributes for SRPT port
  * @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections.
  * @srp_max_rsp_size: Maximum size of SRP response messages in bytes.
  * @srp_sq_size: Shared receive queue (SRQ) size.
@@ -3394,6 +3394,17 @@ enum ib_pd_flags {
 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
 		const char *caller);
 
+/**
+ * ib_alloc_pd - Allocates an unused protection domain.
+ * @device: The device on which to allocate the protection domain.
+ * @flags: protection domain flags
+ *
+ * A protection domain object provides an association between QPs, shared
+ * receive queues, address handles, memory regions, and memory windows.
+ *
+ * Every PD has a local_dma_lkey which can be used as the lkey value for local
+ * memory operations.
+ */
 #define ib_alloc_pd(device, flags) \
 	__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
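
To illustrate the macro just documented: because ib_alloc_pd() expands to __ib_alloc_pd(device, flags, KBUILD_MODNAME), the PD records which module allocated it. A minimal sketch, with a hypothetical function name:

    static int example_setup_pd(struct ib_device *dev, struct ib_pd **out)
    {
        /* Expands to __ib_alloc_pd(dev, 0, KBUILD_MODNAME). */
        struct ib_pd *pd = ib_alloc_pd(dev, 0);

        if (IS_ERR(pd))
            return PTR_ERR(pd);
        *out = pd;
        return 0;
    }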