/*
 *  cx18 buffer queues
 *
 *  Derived from ivtv-queue.c
 *
 *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
 *  Copyright (C) 2008  Andy Walls <awalls@md.metrocast.net>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
 *  02111-1307  USA
 */

#include "cx18-driver.h"
#include "cx18-queue.h"
#include "cx18-streams.h"
#include "cx18-scb.h"
#include "cx18-io.h"

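/* Byte swap the used portion of a buffer, 32 bits at a time */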
void cx18_buf_swap(struct cx18_buffer *buf)
{
	int i;

	for (i = 0; i < buf->bytesused; i += 4)
		swab32s((u32 *)(buf->buf + i));
}

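/* Byte swap every buffer in an MDL that holds data; stop at the first empty buffer */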
void _cx18_mdl_swap(struct cx18_mdl *mdl)
{
	struct cx18_buffer *buf;

	list_for_each_entry(buf, &mdl->buf_list, list) {
		if (buf->bytesused == 0)
			break;
		cx18_buf_swap(buf);
	}
}

void cx18_queue_init(struct cx18_queue *q)
{
	INIT_LIST_HEAD(&q->list);
	atomic_set(&q->depth, 0);
	q->bytesused = 0;
}

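/*
 * Add an MDL to a queue.  MDL state is cleared unless the destination is
 * q_full, and an enqueue to q_busy falls back to q_free once the firmware's
 * MDL limit is reached.  Returns the queue the MDL actually landed on.
 */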
struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_mdl *mdl,
				 struct cx18_queue *q, int to_front)
{
	/* clear the mdl if it is not to be enqueued to the full queue */
	if (q != &s->q_full) {
		mdl->bytesused = 0;
		mdl->readpos = 0;
		mdl->m_flags = 0;
		mdl->skipped = 0;
		mdl->curr_buf = NULL;
	}

	/* q_busy is restricted to a max buffer count imposed by firmware */
	if (q == &s->q_busy &&
	    atomic_read(&q->depth) >= CX18_MAX_FW_MDLS_PER_STREAM)
		q = &s->q_free;

	spin_lock(&q->lock);

	if (to_front)
		list_add(&mdl->list, &q->list); /* LIFO */
	else
		list_add_tail(&mdl->list, &q->list); /* FIFO */
	q->bytesused += mdl->bytesused - mdl->readpos;
	atomic_inc(&q->depth);

	spin_unlock(&q->lock);
	return q;
}

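/* Take the MDL at the front of a queue; returns NULL if the queue is empty */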
struct cx18_mdl *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
{
	struct cx18_mdl *mdl = NULL;

	spin_lock(&q->lock);
	if (!list_empty(&q->list)) {
		mdl = list_first_entry(&q->list, struct cx18_mdl, list);
		list_del_init(&mdl->list);
		q->bytesused -= mdl->bytesused - mdl->readpos;
		mdl->skipped = 0;
		atomic_dec(&q->depth);
	}
	spin_unlock(&q->lock);
	return mdl;
}

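/*
 * Distribute an MDL's bytesused count over its constituent buffers and sync
 * each buffer so the CPU can see the data the device wrote.
 */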
static void _cx18_mdl_update_bufs_for_cpu(struct cx18_stream *s,
					  struct cx18_mdl *mdl)
{
	struct cx18_buffer *buf;
	u32 buf_size = s->buf_size;
	u32 bytesused = mdl->bytesused;

	list_for_each_entry(buf, &mdl->buf_list, list) {
		buf->readpos = 0;
		if (bytesused >= buf_size) {
			buf->bytesused = buf_size;
			bytesused -= buf_size;
		} else {
			buf->bytesused = bytesused;
			bytesused = 0;
		}
		cx18_buf_sync_for_cpu(s, buf);
	}
}

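/* Fast path for the common case of an MDL with a single buffer */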
static inline void cx18_mdl_update_bufs_for_cpu(struct cx18_stream *s,
						struct cx18_mdl *mdl)
{
	struct cx18_buffer *buf;

	if (list_is_singular(&mdl->buf_list)) {
		buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
				       list);
		buf->bytesused = mdl->bytesused;
		buf->readpos = 0;
		cx18_buf_sync_for_cpu(s, buf);
	} else {
		_cx18_mdl_update_bufs_for_cpu(s, mdl);
	}
}

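/*
 * Find the MDL the firmware reported as completed on q_busy, pull it off the
 * queue, and record how many bytes it holds.  MDLs that have been skipped
 * too often are swept back onto q_free for reuse.  Returns NULL if no MDL
 * with the given id was found.
 */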
struct cx18_mdl *cx18_queue_get_mdl(struct cx18_stream *s, u32 id,
	u32 bytesused)
{
	struct cx18 *cx = s->cx;
	struct cx18_mdl *mdl;
	struct cx18_mdl *tmp;
	struct cx18_mdl *ret = NULL;
	LIST_HEAD(sweep_up);

	/*
	 * We don't have to acquire multiple q locks here, because we are
	 * serialized by the single threaded work handler.
	 * MDLs from the firmware will thus remain in order as
	 * they are moved from q_busy to q_full or to the dvb ring buffer.
	 */
	spin_lock(&s->q_busy.lock);
	list_for_each_entry_safe(mdl, tmp, &s->q_busy.list, list) {
		/*
		 * We should find what the firmware told us is done,
		 * right at the front of the queue.  If we don't, we likely
		 * have missed an mdl done message from the firmware.
		 * Once we skip an mdl repeatedly, relative to the size of
		 * q_busy, we have high confidence we've missed it.
		 */
		if (mdl->id != id) {
			mdl->skipped++;
			if (mdl->skipped >= atomic_read(&s->q_busy.depth)-1) {
				/* mdl must have fallen out of rotation */
				CX18_WARN("Skipped %s, MDL %d, %d "
					  "times - it must have dropped out of "
					  "rotation\n", s->name, mdl->id,
					  mdl->skipped);
				/* Sweep it up to put it back into rotation */
				list_move_tail(&mdl->list, &sweep_up);
				atomic_dec(&s->q_busy.depth);
			}
			continue;
		}
		/*
		 * We pull the desired mdl off of the queue here.  Something
		 * will have to put it back on a queue later.
		 */
		list_del_init(&mdl->list);
		atomic_dec(&s->q_busy.depth);
		ret = mdl;
		break;
	}
	spin_unlock(&s->q_busy.lock);

	/*
	 * We found the mdl for which we were looking.  Get it ready for
	 * the caller to put on q_full or in the dvb ring buffer.
	 */
	if (ret != NULL) {
		ret->bytesused = bytesused;
		ret->skipped = 0;
		/* 0'ed readpos, m_flags & curr_buf when mdl went on q_busy */
		cx18_mdl_update_bufs_for_cpu(s, ret);
		if (s->type != CX18_ENC_STREAM_TYPE_TS)
			set_bit(CX18_F_M_NEED_SWAP, &ret->m_flags);
	}

	/* Put any mdls the firmware is ignoring back into normal rotation */
	list_for_each_entry_safe(mdl, tmp, &sweep_up, list) {
		list_del_init(&mdl->list);
		cx18_enqueue(s, mdl, &s->q_free);
	}
	return ret;
}

/* Move all MDLs from q_src to q_dst, clearing each MDL's state as it moves */
static void cx18_queue_flush(struct cx18_stream *s,
			     struct cx18_queue *q_src, struct cx18_queue *q_dst)
{
	struct cx18_mdl *mdl;

	/* It only makes sense to flush to q_free or q_idle */
	if (q_src == q_dst || q_dst == &s->q_full || q_dst == &s->q_busy)
		return;

	spin_lock(&q_src->lock);
	spin_lock(&q_dst->lock);
	while (!list_empty(&q_src->list)) {
		mdl = list_first_entry(&q_src->list, struct cx18_mdl, list);
		list_move_tail(&mdl->list, &q_dst->list);
		mdl->bytesused = 0;
		mdl->readpos = 0;
		mdl->m_flags = 0;
		mdl->skipped = 0;
		mdl->curr_buf = NULL;
		atomic_inc(&q_dst->depth);
	}
	cx18_queue_init(q_src);
	spin_unlock(&q_src->lock);
	spin_unlock(&q_dst->lock);
}

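/* Return all MDLs in flight with the firmware or holding data to q_free */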
void cx18_flush_queues(struct cx18_stream *s)
{
	cx18_queue_flush(s, &s->q_busy, &s->q_free);
	cx18_queue_flush(s, &s->q_full, &s->q_free);
}

/*
 * Note, s->buf_pool is not protected by a lock,
 * the stream better not have *anything* going on when calling this
 */
void cx18_unload_queues(struct cx18_stream *s)
{
	struct cx18_queue *q_idle = &s->q_idle;
	struct cx18_mdl *mdl;
	struct cx18_buffer *buf;

	/* Move all MDLs to q_idle */
	cx18_queue_flush(s, &s->q_busy, q_idle);
	cx18_queue_flush(s, &s->q_full, q_idle);
	cx18_queue_flush(s, &s->q_free, q_idle);

	/* Reset MDL ids and move all buffers back to the stream's buf_pool */
	spin_lock(&q_idle->lock);
	list_for_each_entry(mdl, &q_idle->list, list) {
		while (!list_empty(&mdl->buf_list)) {
			buf = list_first_entry(&mdl->buf_list,
					       struct cx18_buffer, list);
			list_move_tail(&buf->list, &s->buf_pool);
			buf->bytesused = 0;
			buf->readpos = 0;
		}
		mdl->id = s->mdl_base_idx; /* reset id to a "safe" value */
		/* all other mdl fields were cleared by cx18_queue_flush() */
	}
	spin_unlock(&q_idle->lock);
}

/*
 * Note, s->buf_pool is not protected by a lock,
 * the stream better not have *anything* going on when calling this
 */
void cx18_load_queues(struct cx18_stream *s)
{
	struct cx18 *cx = s->cx;
	struct cx18_mdl *mdl;
	struct cx18_buffer *buf;
	int mdl_id;
	int i;
	u32 partial_buf_size;

	/*
	 * Attach buffers to MDLs, give the MDLs ids, and add MDLs to q_free
	 * Excess MDLs are left on q_idle
	 * Excess buffers are left in buf_pool and/or on an MDL in q_idle
	 */
	mdl_id = s->mdl_base_idx;
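	/*
	 * The loop below exits when q_idle is exhausted or when the previous
	 * MDL could not be given a full set of buffers (i < s->bufs_per_mdl).
	 */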
	for (mdl = cx18_dequeue(s, &s->q_idle), i = s->bufs_per_mdl;
	     mdl != NULL && i == s->bufs_per_mdl;
	     mdl = cx18_dequeue(s, &s->q_idle)) {

		mdl->id = mdl_id;

		for (i = 0; i < s->bufs_per_mdl; i++) {
			if (list_empty(&s->buf_pool))
				break;

			buf = list_first_entry(&s->buf_pool, struct cx18_buffer,
					       list);
			list_move_tail(&buf->list, &mdl->buf_list);

			/* update the firmware's MDL array with this buffer */
			cx18_writel(cx, buf->dma_handle,
				    &cx->scb->cpu_mdl[mdl_id + i].paddr);
			cx18_writel(cx, s->buf_size,
				    &cx->scb->cpu_mdl[mdl_id + i].length);
		}

		if (i == s->bufs_per_mdl) {
			/*
			 * The encoder doesn't honor s->mdl_size.  So in the
			 * case of a non-integral number of buffers to meet
			 * mdl_size, we lie about the size of the last buffer
			 * in the MDL to get the encoder to really only send
			 * us mdl_size bytes per MDL transfer.
			 */
			partial_buf_size = s->mdl_size % s->buf_size;
			if (partial_buf_size) {
				cx18_writel(cx, partial_buf_size,
				      &cx->scb->cpu_mdl[mdl_id + i - 1].length);
			}
			cx18_enqueue(s, mdl, &s->q_free);
		} else {
			/* Not enough buffers for this MDL; we won't use it */
			cx18_push(s, mdl, &s->q_idle);
		}
		mdl_id += i;
	}
}

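/* Sync every buffer in an MDL for access by the device */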
void _cx18_mdl_sync_for_device(struct cx18_stream *s, struct cx18_mdl *mdl)
{
	int dma = s->dma;
	u32 buf_size = s->buf_size;
	struct pci_dev *pci_dev = s->cx->pci_dev;
	struct cx18_buffer *buf;

	list_for_each_entry(buf, &mdl->buf_list, list)
		pci_dma_sync_single_for_device(pci_dev, buf->dma_handle,
					       buf_size, dma);
}

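/*
 * Allocate s->buffers buffers and one MDL per buffer for a stream, map each
 * buffer for DMA, and park the MDLs on q_idle and the buffers in buf_pool.
 * Fails if the stream's MDL entries will not fit in the SCB's reserved area.
 */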
int cx18_stream_alloc(struct cx18_stream *s)
{
	struct cx18 *cx = s->cx;
	int i;

	if (s->buffers == 0)
		return 0;

	CX18_DEBUG_INFO("Allocate %s stream: %d x %d buffers "
			"(%d.%02d kB total)\n",
		s->name, s->buffers, s->buf_size,
		s->buffers * s->buf_size / 1024,
		(s->buffers * s->buf_size * 100 / 1024) % 100);

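	/* Check that this stream's MDL entries fit in the SCB's reserved area */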
	if (((char __iomem *)&cx->scb->cpu_mdl[cx->free_mdl_idx + s->buffers] -
				(char __iomem *)cx->scb) > SCB_RESERVED_SIZE) {
		unsigned bufsz = (((char __iomem *)cx->scb) + SCB_RESERVED_SIZE -
					((char __iomem *)cx->scb->cpu_mdl));

		CX18_ERR("Too many buffers, cannot fit in SCB area\n");
		CX18_ERR("Max buffers = %zu\n",
			bufsz / sizeof(struct cx18_mdl_ent));
		return -ENOMEM;
	}

	s->mdl_base_idx = cx->free_mdl_idx;

	/* allocate stream buffers and MDLs */
	for (i = 0; i < s->buffers; i++) {
		struct cx18_mdl *mdl;
		struct cx18_buffer *buf;

		/* 1 MDL per buffer to handle the worst & also default case */
		mdl = kzalloc(sizeof(struct cx18_mdl), GFP_KERNEL|__GFP_NOWARN);
		if (mdl == NULL)
			break;

		buf = kzalloc(sizeof(struct cx18_buffer),
				GFP_KERNEL|__GFP_NOWARN);
		if (buf == NULL) {
			kfree(mdl);
			break;
		}

		buf->buf = kmalloc(s->buf_size, GFP_KERNEL|__GFP_NOWARN);
		if (buf->buf == NULL) {
			kfree(mdl);
			kfree(buf);
			break;
		}

		INIT_LIST_HEAD(&mdl->list);
		INIT_LIST_HEAD(&mdl->buf_list);
		mdl->id = s->mdl_base_idx; /* a somewhat safe value */
		cx18_enqueue(s, mdl, &s->q_idle);

		INIT_LIST_HEAD(&buf->list);
		buf->dma_handle = pci_map_single(s->cx->pci_dev,
				buf->buf, s->buf_size, s->dma);
		cx18_buf_sync_for_cpu(s, buf);
		list_add_tail(&buf->list, &s->buf_pool);
	}
	if (i == s->buffers) {
		cx->free_mdl_idx += s->buffers;
		return 0;
	}
	CX18_ERR("Couldn't allocate buffers for %s stream\n", s->name);
	cx18_stream_free(s);
	return -ENOMEM;
}

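/* Undo cx18_stream_alloc(): unmap and free all buffers and MDLs of a stream */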
void cx18_stream_free(struct cx18_stream *s)
{
	struct cx18_mdl *mdl;
	struct cx18_buffer *buf;
	struct cx18 *cx = s->cx;

	CX18_DEBUG_INFO("Deallocating buffers for %s stream\n", s->name);

	/* move all buffers to buf_pool and all MDLs to q_idle */
	cx18_unload_queues(s);

	/* empty q_idle */
	while ((mdl = cx18_dequeue(s, &s->q_idle)))
		kfree(mdl);

	/* empty buf_pool */
	while (!list_empty(&s->buf_pool)) {
		buf = list_first_entry(&s->buf_pool, struct cx18_buffer, list);
		list_del_init(&buf->list);

		pci_unmap_single(s->cx->pci_dev, buf->dma_handle,
				s->buf_size, s->dma);
		kfree(buf->buf);
		kfree(buf);
	}
}