/*
 *  cx18 buffer queues
 *
 *  Derived from ivtv-queue.c
 *
 *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
 *  02111-1307  USA
 */

#include "cx18-driver.h"
#include "cx18-streams.h"
#include "cx18-queue.h"
#include "cx18-scb.h"

/* Byte-swap a buffer's payload in place, one 32-bit word at a time,
   converting between firmware and host endianness. */
void cx18_buf_swap(struct cx18_buffer *buf)
{
	int i;

	for (i = 0; i < buf->bytesused; i += 4)
		swab32s((u32 *)&buf->buf[i]);
}

/* Reset a queue to the empty state: no buffers, no bytes, empty list. */
void cx18_queue_init(struct cx18_queue *q)
{
	q->bytesused = 0;
	atomic_set(&q->buffers, 0);
	INIT_LIST_HEAD(&q->list);
}

void cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
		struct cx18_queue *q)
{
	/* clear the buffer if it is going to be enqueued to the free queue */
	if (q == &s->q_free) {
		buf->bytesused = 0;
		buf->readpos = 0;
		buf->b_flags = 0;
52
		buf->skipped = 0;
53
	}
54
	mutex_lock(&s->qlock);
55
	list_add_tail(&buf->list, &q->list);
56
	atomic_inc(&q->buffers);
57
	q->bytesused += buf->bytesused - buf->readpos;
58
	mutex_unlock(&s->qlock);
59 60 61 62 63 64
}

struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
{
	struct cx18_buffer *buf = NULL;

65
	mutex_lock(&s->qlock);
66 67 68
	if (!list_empty(&q->list)) {
		buf = list_entry(q->list.next, struct cx18_buffer, list);
		list_del_init(q->list.next);
69
		atomic_dec(&q->buffers);
70
		q->bytesused -= buf->bytesused - buf->readpos;
71
		buf->skipped = 0;
72
	}
73
	mutex_unlock(&s->qlock);
74 75 76
	return buf;
}

/*
 * Find the free-queue buffer whose id matches the one the firmware just
 * reported, record how many bytes the firmware wrote into it, and hand
 * it over for processing.
 *
 * For TS streams the buffer is removed from all queues (the caller
 * requeues it right before giving the MDL back to the firmware); for
 * other stream types it is moved to q_full.
 *
 * A buffer that keeps being passed over (its skipped count reaches the
 * free-queue depth minus one) is assumed to have fallen out of the
 * firmware's transfer rotation; such buffers are collected on a private
 * list and re-armed with CX18_CPU_DE_SET_MDL after the lock is dropped.
 *
 * Returns the matching buffer, or NULL if no buffer with @id was found
 * on q_free.
 */
struct cx18_buffer *cx18_queue_get_buf(struct cx18_stream *s, u32 id,
	u32 bytesused)
{
	struct cx18 *cx = s->cx;
	struct cx18_buffer *buf;
	struct cx18_buffer *ret = NULL;
	struct list_head *p, *t;
	LIST_HEAD(r);	/* lost buffers collected for re-arming below */

	mutex_lock(&s->qlock);
	list_for_each_safe(p, t, &s->q_free.list) {
		buf = list_entry(p, struct cx18_buffer, list);

		if (buf->id != id) {
			buf->skipped++;
			if (buf->skipped >= atomic_read(&s->q_free.buffers)-1) {
				/* buffer must have fallen out of rotation */
				atomic_dec(&s->q_free.buffers);
				list_move_tail(&buf->list, &r);
				CX18_WARN("Skipped %s, buffer %d, %d "
					  "times - it must have dropped out of "
					  "rotation\n", s->name, buf->id,
					  buf->skipped);
			}
			continue;
		}

		/* Found it: record the firmware's transfer size. */
		buf->bytesused = bytesused;
		atomic_dec(&s->q_free.buffers);
		if (s->type == CX18_ENC_STREAM_TYPE_TS) {
			/*
			 * TS doesn't use q_full, but for sweeping up lost
			 * buffers, we want the TS to requeue the buffer just
			 * before sending the MDL back to the firmware, so we
			 * pull it off the list here.
			 */
			list_del_init(&buf->list);
		} else {
			atomic_inc(&s->q_full.buffers);
			s->q_full.bytesused += buf->bytesused;
			list_move_tail(&buf->list, &s->q_full.list);
		}

		ret = buf;
		break;
	}
	mutex_unlock(&s->qlock);

	/* Put lost buffers back into firmware transfer rotation */
	while (!list_empty(&r)) {
		buf = list_entry(r.next, struct cx18_buffer, list);
		list_del_init(r.next);
		/* Back on q_free, then tell the firmware about the MDL again;
		   cx18_enqueue takes qlock itself, so it must happen here,
		   after the unlock above. */
		cx18_enqueue(s, buf, &s->q_free);
		cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, s->handle,
		       (void __iomem *)&cx->scb->cpu_mdl[buf->id] - cx->enc_mem,
		       1, buf->id, s->buf_size);
		CX18_INFO("Returning %s, buffer %d back to transfer rotation\n",
			  s->name, buf->id);
		/* and there was much rejoicing... */
	}
	return ret;
}

/* Move all buffers of a queue to q_free, while flushing the buffers */
static void cx18_queue_flush(struct cx18_stream *s, struct cx18_queue *q)
142
{
143
	struct cx18_buffer *buf;
144

145 146
	if (q == &s->q_free)
		return;
147

148
	mutex_lock(&s->qlock);
149 150 151
	while (!list_empty(&q->list)) {
		buf = list_entry(q->list.next, struct cx18_buffer, list);
		list_move_tail(q->list.next, &s->q_free.list);
152
		buf->bytesused = buf->readpos = buf->b_flags = buf->skipped = 0;
153
		atomic_inc(&s->q_free.buffers);
154
	}
155
	cx18_queue_init(q);
156
	mutex_unlock(&s->qlock);
157 158 159 160
}

/* Return all in-flight buffers of a stream (I/O and full queues)
   back to q_free. */
void cx18_flush_queues(struct cx18_stream *s)
{
	cx18_queue_flush(s, &s->q_io);
	cx18_queue_flush(s, &s->q_full);
}

int cx18_stream_alloc(struct cx18_stream *s)
{
	struct cx18 *cx = s->cx;
	int i;

	if (s->buffers == 0)
		return 0;

	CX18_DEBUG_INFO("Allocate %s stream: %d x %d buffers (%dkB total)\n",
		s->name, s->buffers, s->buf_size,
		s->buffers * s->buf_size / 1024);

177 178 179 180
	if (((char __iomem *)&cx->scb->cpu_mdl[cx->mdl_offset + s->buffers] -
				(char __iomem *)cx->scb) > SCB_RESERVED_SIZE) {
		unsigned bufsz = (((char __iomem *)cx->scb) + SCB_RESERVED_SIZE -
					((char __iomem *)cx->scb->cpu_mdl));
181 182 183 184 185 186 187 188 189 190 191

		CX18_ERR("Too many buffers, cannot fit in SCB area\n");
		CX18_ERR("Max buffers = %zd\n",
			bufsz / sizeof(struct cx18_mdl));
		return -ENOMEM;
	}

	s->mdl_offset = cx->mdl_offset;

	/* allocate stream buffers. Initially all buffers are in q_free. */
	for (i = 0; i < s->buffers; i++) {
192 193
		struct cx18_buffer *buf = kzalloc(sizeof(struct cx18_buffer),
						GFP_KERNEL|__GFP_NOWARN);
194 195 196

		if (buf == NULL)
			break;
197
		buf->buf = kmalloc(s->buf_size, GFP_KERNEL|__GFP_NOWARN);
198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232
		if (buf->buf == NULL) {
			kfree(buf);
			break;
		}
		buf->id = cx->buffer_id++;
		INIT_LIST_HEAD(&buf->list);
		buf->dma_handle = pci_map_single(s->cx->dev,
				buf->buf, s->buf_size, s->dma);
		cx18_buf_sync_for_cpu(s, buf);
		cx18_enqueue(s, buf, &s->q_free);
	}
	if (i == s->buffers) {
		cx->mdl_offset += s->buffers;
		return 0;
	}
	CX18_ERR("Couldn't allocate buffers for %s stream\n", s->name);
	cx18_stream_free(s);
	return -ENOMEM;
}

void cx18_stream_free(struct cx18_stream *s)
{
	struct cx18_buffer *buf;

	/* move all buffers to q_free */
	cx18_flush_queues(s);

	/* empty q_free */
	while ((buf = cx18_dequeue(s, &s->q_free))) {
		pci_unmap_single(s->cx->dev, buf->dma_handle,
				s->buf_size, s->dma);
		kfree(buf->buf);
		kfree(buf);
	}
}