// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <linux/dma-mapping.h>
#include <linux/hisi_acc_qm.h>
#include <linux/module.h>
#include <linux/slab.h>

#define HISI_ACC_SGL_SGE_NR_MIN		1
#define HISI_ACC_SGL_NR_MAX		256
#define HISI_ACC_SGL_ALIGN_SIZE		64
#define HISI_ACC_MEM_BLOCK_NR		5
/*
 * One hardware scatter-gather entry as consumed by the accelerator.
 * Filled by sg_map_to_hw_sg() from a DMA-mapped scatterlist entry.
 */
struct acc_hw_sge {
	dma_addr_t buf;		/* DMA address of the data buffer (sg_dma_address) */
	void *page_ctrl;	/* CPU virtual address of the buffer (sg_virt);
				 * presumably for driver-side bookkeeping — the
				 * hardware use of this field is not visible here
				 */
	__le32 len;		/* DMA length of the buffer (sg_dma_len) */
	__le32 pad;		/* reserved/padding, left zero */
	__le32 pad0;
	__le32 pad1;
};

/* use default sgl head size 64B */
struct hisi_acc_hw_sgl {
	dma_addr_t next_dma;		/* DMA address of the next sgl in a chain */
	__le16 entry_sum_in_chain;	/* total sge count over the whole chain */
	__le16 entry_sum_in_sgl;	/* sges actually filled in this sgl */
	__le16 entry_length_in_sgl;	/* sge capacity of this sgl (pool->sge_nr) */
	__le16 pad0;
	__le64 pad1[5];
	struct hisi_acc_hw_sgl *next;	/* CPU pointer counterpart of next_dma */
	struct acc_hw_sge sge_entries[];	/* flexible array of hardware sges */
} __aligned(1);

34
struct hisi_acc_sgl_pool {
35 36 37 38 39 40 41
	struct mem_block {
		struct hisi_acc_hw_sgl *sgl;
		dma_addr_t sgl_dma;
		size_t size;
	} mem_block[HISI_ACC_MEM_BLOCK_NR];
	u32 sgl_num_per_block;
	u32 block_num;
42 43 44 45 46
	u32 count;
	u32 sge_nr;
	size_t sgl_size;
};

47 48 49 50
/**
 * hisi_acc_create_sgl_pool() - Create a hw sgl pool.
 * @dev: The device which hw sgl pool belongs to.
 * @count: Count of hisi_acc_hw_sgl in pool.
51
 * @sge_nr: The count of sge in hw_sgl
52 53 54 55
 *
 * This function creates a hw sgl pool, after this user can get hw sgl memory
 * from it.
 */
56 57
struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
						   u32 count, u32 sge_nr)
58
{
59
	u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl;
60
	struct hisi_acc_sgl_pool *pool;
61 62
	struct mem_block *block;
	u32 i, j;
63

64 65
	if (!dev || !count || !sge_nr || sge_nr > HISI_ACC_SGL_SGE_NR_MAX)
		return ERR_PTR(-EINVAL);
66

67
	sgl_size = sizeof(struct acc_hw_sge) * sge_nr +
68
		   sizeof(struct hisi_acc_hw_sgl);
69 70 71 72 73

	/*
	 * the pool may allocate a block of memory of size PAGE_SIZE * 2^(MAX_ORDER - 1),
	 * block size may exceed 2^31 on ia64, so the max of block size is 2^31
	 */
74 75
	block_size = 1 << (PAGE_SHIFT + MAX_ORDER <= 32 ?
			   PAGE_SHIFT + MAX_ORDER - 1 : 31);
76 77 78 79 80 81 82
	sgl_num_per_block = block_size / sgl_size;
	block_num = count / sgl_num_per_block;
	remain_sgl = count % sgl_num_per_block;

	if ((!remain_sgl && block_num > HISI_ACC_MEM_BLOCK_NR) ||
	    (remain_sgl > 0 && block_num > HISI_ACC_MEM_BLOCK_NR - 1))
		return ERR_PTR(-EINVAL);
83

84 85 86
	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);
87
	block = pool->mem_block;
88

89 90 91 92
	for (i = 0; i < block_num; i++) {
		block[i].sgl = dma_alloc_coherent(dev, block_size,
						  &block[i].sgl_dma,
						  GFP_KERNEL);
93 94
		if (!block[i].sgl) {
			dev_err(dev, "Fail to allocate hw SG buffer!\n");
95
			goto err_free_mem;
96
		}
97 98

		block[i].size = block_size;
99
	}
100

101 102 103 104
	if (remain_sgl > 0) {
		block[i].sgl = dma_alloc_coherent(dev, remain_sgl * sgl_size,
						  &block[i].sgl_dma,
						  GFP_KERNEL);
105 106
		if (!block[i].sgl) {
			dev_err(dev, "Fail to allocate remained hw SG buffer!\n");
107
			goto err_free_mem;
108
		}
109 110 111 112 113 114

		block[i].size = remain_sgl * sgl_size;
	}

	pool->sgl_num_per_block = sgl_num_per_block;
	pool->block_num = remain_sgl ? block_num + 1 : block_num;
115 116
	pool->count = count;
	pool->sgl_size = sgl_size;
117
	pool->sge_nr = sge_nr;
118

119
	return pool;
120 121 122 123 124 125 126 127 128

err_free_mem:
	for (j = 0; j < i; j++) {
		dma_free_coherent(dev, block_size, block[j].sgl,
				  block[j].sgl_dma);
		memset(block + j, 0, sizeof(*block));
	}
	kfree(pool);
	return ERR_PTR(-ENOMEM);
129 130 131 132 133 134 135 136 137 138 139 140
}
EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool);

/**
 * hisi_acc_free_sgl_pool() - Free a hw sgl pool.
 * @dev: The device which hw sgl pool belongs to.
 * @pool: Pointer of pool.
 *
 * This function frees memory of a hw sgl pool.
 */
void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool)
{
141 142 143
	struct mem_block *block;
	int i;

144 145 146
	if (!dev || !pool)
		return;

147 148 149 150 151 152
	block = pool->mem_block;

	for (i = 0; i < pool->block_num; i++)
		dma_free_coherent(dev, block[i].size, block[i].sgl,
				  block[i].sgl_dma);

153
	kfree(pool);
154 155 156
}
EXPORT_SYMBOL_GPL(hisi_acc_free_sgl_pool);

Z
Zhou Wang 已提交
157 158
static struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool,
					   u32 index, dma_addr_t *hw_sgl_dma)
159
{
160 161 162 163
	struct mem_block *block;
	u32 block_index, offset;

	if (!pool || !hw_sgl_dma || index >= pool->count)
164 165
		return ERR_PTR(-EINVAL);

166 167 168 169 170 171
	block = pool->mem_block;
	block_index = index / pool->sgl_num_per_block;
	offset = index % pool->sgl_num_per_block;

	*hw_sgl_dma = block[block_index].sgl_dma + pool->sgl_size * offset;
	return (void *)block[block_index].sgl + pool->sgl_size * offset;
172 173 174 175 176
}

/* Fill one hardware sge from an already DMA-mapped scatterlist entry. */
static void sg_map_to_hw_sg(struct scatterlist *sgl,
			    struct acc_hw_sge *hw_sge)
{
	hw_sge->buf = sg_dma_address(sgl);
	hw_sge->len = cpu_to_le32(sg_dma_len(sgl));
	hw_sge->page_ctrl = sg_virt(sgl);
}

static void inc_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
{
Z
Zhou Wang 已提交
184 185 186 187
	u16 var = le16_to_cpu(hw_sgl->entry_sum_in_sgl);

	var++;
	hw_sgl->entry_sum_in_sgl = cpu_to_le16(var);
188 189 190 191
}

/* Record the total sge count of the whole sgl chain in @hw_sgl. */
static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum)
{
	hw_sgl->entry_sum_in_chain = cpu_to_le16(sum);
}

195 196 197 198 199 200 201 202 203 204 205 206
static void clear_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
{
	struct acc_hw_sge *hw_sge = hw_sgl->sge_entries;
	int i;

	for (i = 0; i < le16_to_cpu(hw_sgl->entry_sum_in_sgl); i++) {
		hw_sge[i].page_ctrl = NULL;
		hw_sge[i].buf = 0;
		hw_sge[i].len = 0;
	}
}

207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224
/**
 * hisi_acc_sg_buf_map_to_hw_sgl - Map a scatterlist to a hw sgl.
 * @dev: The device which hw sgl belongs to.
 * @sgl: Scatterlist which will be mapped to hw sgl.
 * @pool: Pool which hw sgl memory will be allocated in.
 * @index: Index of hisi_acc_hw_sgl in pool.
 * @hw_sgl_dma: The dma address of allocated hw sgl.
 *
 * This function builds hw sgl according input sgl, user can use hw_sgl_dma
 * as src/dst in its BD. Only support single hw sgl currently.
 */
struct hisi_acc_hw_sgl *
hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
			      struct scatterlist *sgl,
			      struct hisi_acc_sgl_pool *pool,
			      u32 index, dma_addr_t *hw_sgl_dma)
{
	struct hisi_acc_hw_sgl *curr_hw_sgl;
225
	dma_addr_t curr_sgl_dma = 0;
226 227
	struct acc_hw_sge *curr_hw_sge;
	struct scatterlist *sg;
228
	int i, sg_n, sg_n_mapped;
229

Z
Zhou Wang 已提交
230 231 232 233
	if (!dev || !sgl || !pool || !hw_sgl_dma)
		return ERR_PTR(-EINVAL);

	sg_n = sg_nents(sgl);
234 235

	sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
236 237
	if (!sg_n_mapped) {
		dev_err(dev, "DMA mapping for SG error!\n");
238
		return ERR_PTR(-EINVAL);
239
	}
240

241
	if (sg_n_mapped > pool->sge_nr) {
242
		dev_err(dev, "the number of entries in input scatterlist is bigger than SGL pool setting.\n");
243
		return ERR_PTR(-EINVAL);
244
	}
245 246

	curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
Z
Zhou Wang 已提交
247
	if (IS_ERR(curr_hw_sgl)) {
248
		dev_err(dev, "Get SGL error!\n");
Z
Zhou Wang 已提交
249 250 251
		dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
		return ERR_PTR(-ENOMEM);

252
	}
Z
Zhou Wang 已提交
253
	curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr);
254 255
	curr_hw_sge = curr_hw_sgl->sge_entries;

256
	for_each_sg(sgl, sg, sg_n_mapped, i) {
257 258 259 260 261
		sg_map_to_hw_sg(sg, curr_hw_sge);
		inc_hw_sgl_sge(curr_hw_sgl);
		curr_hw_sge++;
	}

262
	update_hw_sgl_sum_sge(curr_hw_sgl, pool->sge_nr);
263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279
	*hw_sgl_dma = curr_sgl_dma;

	return curr_hw_sgl;
}
EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl);

/**
 * hisi_acc_sg_buf_unmap() - Unmap allocated hw sgl.
 * @dev: The device which hw sgl belongs to.
 * @sgl: Related scatterlist.
 * @hw_sgl: Virtual address of hw sgl.
 *
 * This function unmaps allocated hw sgl.
 */
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
			   struct hisi_acc_hw_sgl *hw_sgl)
{
Z
Zhou Wang 已提交
280 281 282
	if (!dev || !sgl || !hw_sgl)
		return;

283
	dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL);
284
	clear_hw_sgl_sge(hw_sgl);
285 286 287 288 289
	hw_sgl->entry_sum_in_chain = 0;
	hw_sgl->entry_sum_in_sgl = 0;
	hw_sgl->entry_length_in_sgl = 0;
}
EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_unmap);