/*
 *  cx18 buffer queues
 *
 *  Derived from ivtv-queue.c
 *
 *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
 *  Copyright (C) 2008  Andy Walls <awalls@radix.net>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
 *  02111-1307  USA
 */

#include "cx18-driver.h"
#include "cx18-streams.h"
#include "cx18-queue.h"
#include "cx18-scb.h"

/* Byte-swap a buffer's payload in place, one 32-bit word at a time. */
void cx18_buf_swap(struct cx18_buffer *buf)
{
	int i;

	for (i = 0; i < buf->bytesused; i += 4)
		swab32s((u32 *)&buf->buf[i]);
}

/* Reset a queue to the empty state: no buffers, no accounted bytes. */
void cx18_queue_init(struct cx18_queue *q)
{
	q->bytesused = 0;
	atomic_set(&q->buffers, 0);
	INIT_LIST_HEAD(&q->list);
}

45 46
struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
				 struct cx18_queue *q, int to_front)
47
{
48 49
	/* clear the buffer if it is not to be enqueued to the full queue */
	if (q != &s->q_full) {
50 51 52
		buf->bytesused = 0;
		buf->readpos = 0;
		buf->b_flags = 0;
53
		buf->skipped = 0;
54
	}
55

56
	mutex_lock(&s->qlock);
57 58 59 60 61

	/* q_busy is restricted to 63 buffers to stay within firmware limits */
	if (q == &s->q_busy && atomic_read(&q->buffers) >= 63)
		q = &s->q_free;

62 63 64 65
	if (to_front)
		list_add(&buf->list, &q->list); /* LIFO */
	else
		list_add_tail(&buf->list, &q->list); /* FIFO */
66
	q->bytesused += buf->bytesused - buf->readpos;
67 68
	atomic_inc(&q->buffers);

69
	mutex_unlock(&s->qlock);
70
	return q;
71 72 73 74 75 76
}

struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
{
	struct cx18_buffer *buf = NULL;

77
	mutex_lock(&s->qlock);
78 79 80 81
	if (!list_empty(&q->list)) {
		buf = list_entry(q->list.next, struct cx18_buffer, list);
		list_del_init(q->list.next);
		q->bytesused -= buf->bytesused - buf->readpos;
82
		buf->skipped = 0;
83
		atomic_dec(&q->buffers);
84
	}
85
	mutex_unlock(&s->qlock);
86 87 88
	return buf;
}

89
struct cx18_buffer *cx18_queue_get_buf(struct cx18_stream *s, u32 id,
90 91 92
	u32 bytesused)
{
	struct cx18 *cx = s->cx;
93 94 95
	struct cx18_buffer *buf;
	struct cx18_buffer *ret = NULL;
	struct list_head *p, *t;
96

97
	mutex_lock(&s->qlock);
98
	list_for_each_safe(p, t, &s->q_busy.list) {
99
		buf = list_entry(p, struct cx18_buffer, list);
100

101
		if (buf->id != id) {
102
			buf->skipped++;
103
			if (buf->skipped >= atomic_read(&s->q_busy.buffers)-1) {
104 105 106 107 108
				/* buffer must have fallen out of rotation */
				CX18_WARN("Skipped %s, buffer %d, %d "
					  "times - it must have dropped out of "
					  "rotation\n", s->name, buf->id,
					  buf->skipped);
109 110 111 112 113 114
				/* move it to q_free */
				list_move_tail(&buf->list, &s->q_free.list);
				buf->bytesused = buf->readpos = buf->b_flags =
					buf->skipped = 0;
				atomic_dec(&s->q_busy.buffers);
				atomic_inc(&s->q_free.buffers);
115
			}
116
			continue;
117
		}
118

119
		buf->bytesused = bytesused;
120 121
		/* Sync the buffer before we release the qlock */
		cx18_buf_sync_for_cpu(s, buf);
122 123
		if (s->type == CX18_ENC_STREAM_TYPE_TS) {
			/*
124 125
			 * TS doesn't use q_full.  As we pull the buffer off of
			 * the queue here, the caller will have to put it back.
126 127 128
			 */
			list_del_init(&buf->list);
		} else {
129
			/* Move buffer from q_busy to q_full */
130
			list_move_tail(&buf->list, &s->q_full.list);
131
			set_bit(CX18_F_B_NEED_BUF_SWAP, &buf->b_flags);
132 133
			s->q_full.bytesused += buf->bytesused;
			atomic_inc(&s->q_full.buffers);
134
		}
135
		atomic_dec(&s->q_busy.buffers);
136

137 138
		ret = buf;
		break;
139
	}
140
	mutex_unlock(&s->qlock);
141
	return ret;
142 143
}

144 145
/* Move all buffers of a queue to q_free, while flushing the buffers */
static void cx18_queue_flush(struct cx18_stream *s, struct cx18_queue *q)
146
{
147
	struct cx18_buffer *buf;
148

149 150
	if (q == &s->q_free)
		return;
151

152
	mutex_lock(&s->qlock);
153 154 155
	while (!list_empty(&q->list)) {
		buf = list_entry(q->list.next, struct cx18_buffer, list);
		list_move_tail(q->list.next, &s->q_free.list);
156
		buf->bytesused = buf->readpos = buf->b_flags = buf->skipped = 0;
157
		atomic_inc(&s->q_free.buffers);
158
	}
159
	cx18_queue_init(q);
160
	mutex_unlock(&s->qlock);
161 162 163 164
}

void cx18_flush_queues(struct cx18_stream *s)
{
165
	cx18_queue_flush(s, &s->q_busy);
166
	cx18_queue_flush(s, &s->q_full);
167 168 169 170 171 172 173 174 175 176 177 178 179 180
}

int cx18_stream_alloc(struct cx18_stream *s)
{
	struct cx18 *cx = s->cx;
	int i;

	if (s->buffers == 0)
		return 0;

	CX18_DEBUG_INFO("Allocate %s stream: %d x %d buffers (%dkB total)\n",
		s->name, s->buffers, s->buf_size,
		s->buffers * s->buf_size / 1024);

181 182 183 184
	if (((char __iomem *)&cx->scb->cpu_mdl[cx->mdl_offset + s->buffers] -
				(char __iomem *)cx->scb) > SCB_RESERVED_SIZE) {
		unsigned bufsz = (((char __iomem *)cx->scb) + SCB_RESERVED_SIZE -
					((char __iomem *)cx->scb->cpu_mdl));
185 186 187 188 189 190 191 192 193 194 195

		CX18_ERR("Too many buffers, cannot fit in SCB area\n");
		CX18_ERR("Max buffers = %zd\n",
			bufsz / sizeof(struct cx18_mdl));
		return -ENOMEM;
	}

	s->mdl_offset = cx->mdl_offset;

	/* allocate stream buffers. Initially all buffers are in q_free. */
	for (i = 0; i < s->buffers; i++) {
196 197
		struct cx18_buffer *buf = kzalloc(sizeof(struct cx18_buffer),
						GFP_KERNEL|__GFP_NOWARN);
198 199 200

		if (buf == NULL)
			break;
201
		buf->buf = kmalloc(s->buf_size, GFP_KERNEL|__GFP_NOWARN);
202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236
		if (buf->buf == NULL) {
			kfree(buf);
			break;
		}
		buf->id = cx->buffer_id++;
		INIT_LIST_HEAD(&buf->list);
		buf->dma_handle = pci_map_single(s->cx->dev,
				buf->buf, s->buf_size, s->dma);
		cx18_buf_sync_for_cpu(s, buf);
		cx18_enqueue(s, buf, &s->q_free);
	}
	if (i == s->buffers) {
		cx->mdl_offset += s->buffers;
		return 0;
	}
	CX18_ERR("Couldn't allocate buffers for %s stream\n", s->name);
	cx18_stream_free(s);
	return -ENOMEM;
}

void cx18_stream_free(struct cx18_stream *s)
{
	struct cx18_buffer *buf;

	/* move all buffers to q_free */
	cx18_flush_queues(s);

	/* empty q_free */
	while ((buf = cx18_dequeue(s, &s->q_free))) {
		pci_unmap_single(s->cx->dev, buf->dma_handle,
				s->buf_size, s->dma);
		kfree(buf->buf);
		kfree(buf);
	}
}