Commit 82d07a4e authored by Weihang Li, committed by Jason Gunthorpe

RDMA/hns: Change all page_shift to unsigned

page_shift is used to calculate the page size; it is always non-negative
and should therefore be of an unsigned type.

Link: https://lore.kernel.org/r/1589982799-28728-7-git-send-email-liweihang@huawei.com
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Parent e9f2cd28
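For context, page_shift only ever feeds shift expressions of the form 1 << page_shift when a page size is derived. A minimal stand-alone sketch (not the driver's code; the helper name is illustrative) of why an unsigned type models this value naturally:

```c
#include <stdio.h>

/*
 * Illustrative helper, not part of the hns driver: a page shift is the
 * log2 of the page size, so the size is recovered with a left shift.
 * A shift count can never be negative, so an unsigned type describes
 * the value exactly and avoids signed/unsigned mixing at call sites.
 */
static unsigned long page_size_from_shift(unsigned int page_shift)
{
	return 1UL << page_shift;
}

int main(void)
{
	unsigned int page_shift = 12; /* e.g. a 4 KiB page */

	printf("page size: %lu bytes\n", page_size_from_shift(page_shift));
	return 0;
}
```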
@@ -254,7 +254,7 @@ int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
int buf_cnt, int start, struct ib_umem *umem,
-int page_shift)
+unsigned int page_shift)
{
struct ib_block_iter biter;
int total = 0;
@@ -342,7 +342,7 @@ struct hns_roce_buf_attr {
int hopnum; /* multi-hop addressing hop num */
} region[HNS_ROCE_MAX_BT_REGION];
int region_count; /* valid region count */
-int page_shift; /* buffer page shift */
+unsigned int page_shift; /* buffer page shift */
bool fixed_page; /* decide page shift is fixed-size or maximum size */
int user_access; /* umem access flag */
bool mtt_only; /* only alloc buffer-required MTT memory */
@@ -351,14 +351,14 @@ struct hns_roce_buf_attr {
/* memory translate region */
struct hns_roce_mtr {
struct hns_roce_hem_list hem_list; /* multi-hop addressing resource */
-struct ib_umem *umem; /* user space buffer */
-struct hns_roce_buf *kmem; /* kernel space buffer */
+struct ib_umem *umem; /* user space buffer */
+struct hns_roce_buf *kmem; /* kernel space buffer */
struct {
-dma_addr_t root_ba; /* root BA table's address */
-bool is_direct; /* addressing without BA table */
-int ba_pg_shift; /* BA table page shift */
-int buf_pg_shift; /* buffer page shift */
-int buf_pg_count; /* buffer page count */
+dma_addr_t root_ba; /* root BA table's address */
+bool is_direct; /* addressing without BA table */
+unsigned int ba_pg_shift; /* BA table page shift */
+unsigned int buf_pg_shift; /* buffer page shift */
+int buf_pg_count; /* buffer page count */
} hem_cfg; /* config for hardware addressing */
};
@@ -423,7 +423,7 @@ struct hns_roce_buf {
struct hns_roce_buf_list *page_list;
u32 npages;
u32 size;
-int page_shift;
+unsigned int page_shift;
};
struct hns_roce_db_pgdir {
@@ -1139,8 +1139,9 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
-struct hns_roce_buf_attr *buf_attr, int page_shift,
-struct ib_udata *udata, unsigned long user_addr);
+struct hns_roce_buf_attr *buf_attr,
+unsigned int page_shift, struct ib_udata *udata,
+unsigned long user_addr);
void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
struct hns_roce_mtr *mtr);
int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
@@ -1210,7 +1211,7 @@ int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
int buf_cnt, int start, struct hns_roce_buf *buf);
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
int buf_cnt, int start, struct ib_umem *umem,
-int page_shift);
+unsigned int page_shift);
int hns_roce_create_srq(struct ib_srq *srq,
struct ib_srq_init_attr *srq_init_attr,
@@ -1400,7 +1400,7 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
const struct hns_roce_buf_region *regions,
-int region_cnt, int bt_pg_shift)
+int region_cnt, unsigned int bt_pg_shift)
{
const struct hns_roce_buf_region *r;
int ofs, end;
@@ -133,7 +133,7 @@ int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
const struct hns_roce_buf_region *regions,
-int region_cnt, int bt_pg_shift);
+int region_cnt, unsigned int bt_pg_shift);
void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list);
void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
@@ -706,7 +706,8 @@ static inline size_t mtr_bufs_size(struct hns_roce_buf_attr *attr)
return size;
}
-static inline int mtr_umem_page_count(struct ib_umem *umem, int page_shift)
+static inline int mtr_umem_page_count(struct ib_umem *umem,
+unsigned int page_shift)
{
int count = ib_umem_page_count(umem);
@@ -719,7 +720,7 @@ static inline int mtr_umem_page_count(struct ib_umem *umem, int page_shift)
}
static inline size_t mtr_kmem_direct_size(bool is_direct, size_t alloc_size,
-int page_shift)
+unsigned int page_shift)
{
if (is_direct)
return ALIGN(alloc_size, 1 << page_shift);
@@ -732,7 +733,7 @@ static inline size_t mtr_kmem_direct_size(bool is_direct, size_t alloc_size,
* Returns 0 on success, or the error page num.
*/
static inline int mtr_check_direct_pages(dma_addr_t *pages, int page_count,
-int page_shift)
+unsigned int page_shift)
{
size_t page_size = 1 << page_shift;
int i;
@@ -765,8 +766,8 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
struct ib_udata *udata, unsigned long user_addr)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
-int max_pg_shift = buf_attr->page_shift;
-int best_pg_shift = 0;
+unsigned int max_pg_shift = buf_attr->page_shift;
+unsigned int best_pg_shift = 0;
int all_pg_count = 0;
size_t direct_size;
size_t total_size;
@@ -836,7 +837,7 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
}
static int mtr_get_pages(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
-dma_addr_t *pages, int count, int page_shift)
+dma_addr_t *pages, int count, unsigned int page_shift)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
int npage;
@@ -946,7 +947,7 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
/* convert buffer size to page index and page count */
static int mtr_init_region(struct hns_roce_buf_attr *attr, int page_cnt,
struct hns_roce_buf_region *regions, int region_cnt,
-int page_shift)
+unsigned int page_shift)
{
unsigned int page_size = 1 << page_shift;
int max_region = attr->region_count;
@@ -977,8 +978,9 @@ static int mtr_init_region(struct hns_roce_buf_attr *attr, int page_cnt,
* @buf_alloced: mtr has private buffer, true means need to alloc
*/
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
-struct hns_roce_buf_attr *buf_attr, int page_shift,
-struct ib_udata *udata, unsigned long user_addr)
+struct hns_roce_buf_attr *buf_attr,
+unsigned int page_shift, struct ib_udata *udata,
+unsigned long user_addr)
{
struct hns_roce_buf_region regions[HNS_ROCE_MAX_BT_REGION] = {};
struct ib_device *ibdev = &hr_dev->ib_dev;