Commit ee2d64f5 authored by Andy Walls, committed by Mauro Carvalho Chehab

V4L/DVB (9720): cx18: Major rewrite of interrupt handling for incoming mailbox processing

A major rewrite of interrupt handling for incoming mailbox processing, to split
the timing-critical steps from the deferrable steps, as the sending XPU on
the CX23418 will time out and overwrite our incoming mailboxes rather quickly.
Set up a pool of work "order forms" for the irq handler to send jobs to the new
work handler routine, which uses the kernel default work queue to do the
deferrable work.  Started optimizing some of the cx18-io calls as they are now
the low-hanging fruit for recovering microseconds back from the timeline.
Future optimizations will get rid of mmio read retries, mmio stats logging, and
combine smaller functions in the irq path into the larger ones to save ~2 us
each.
Signed-off-by: Andy Walls <awalls@radix.net>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
Parent ba38ee8e
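The deferral scheme the commit message describes reduces to the pattern sketched below. This is a minimal, hypothetical illustration in kernel-style C, not the driver's actual code: every demo_* name is invented for the sketch, and the device-specific parts (snapshotting the CX23418 mailbox over MMIO and acking it) are reduced to comments.

/*
 * Sketch of the "work order form" pool: the hard IRQ handler claims a
 * preallocated order, snapshots the short-lived incoming data, and defers
 * the slow processing to the kernel default work queue.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>

#define DEMO_MAX_ORDERS 70	/* sized for a worst-case burst of commands */

struct demo_order {
	struct work_struct work;
	atomic_t pending;	/* 0 = free, 1 = claimed/queued */
	u32 snapshot[6];	/* copy of the incoming mailbox arguments */
};

static struct demo_order demo_orders[DEMO_MAX_ORDERS];

/* Deferrable half: runs later in process context on the default work queue. */
static void demo_work_handler(struct work_struct *work)
{
	struct demo_order *order =
		container_of(work, struct demo_order, work);

	/* ... slow, sleepable processing of order->snapshot goes here ... */

	atomic_set(&order->pending, 0);	/* hand the form back to the pool */
}

/*
 * Timing-critical half: called from the hard IRQ handler.  No lock is needed
 * to claim a form because "pending" is only ever set here, the IRQ handler is
 * serialized per device, and the work handlers only ever clear it.
 */
static struct demo_order *demo_alloc_order_irq(void)
{
	int i;

	for (i = 0; i < DEMO_MAX_ORDERS; i++) {
		if (atomic_read(&demo_orders[i].pending) == 0) {
			atomic_set(&demo_orders[i].pending, 1);
			return &demo_orders[i];
		}
	}
	return NULL;	/* pool exhausted; the notification is dropped */
}

static void demo_irq_dispatch(const u32 *mailbox_args)
{
	struct demo_order *order = demo_alloc_order_irq();

	if (order == NULL)
		return;

	/* Copy the mailbox before the sender times it out and overwrites it,
	 * ack it (device-specific, omitted here), then defer everything else. */
	memcpy(order->snapshot, mailbox_args, sizeof(order->snapshot));
	schedule_work(&order->work);
}

static void demo_init_orders(void)
{
	int i;

	for (i = 0; i < DEMO_MAX_ORDERS; i++)
		INIT_WORK(&demo_orders[i].work, demo_work_handler);
}

The pool can be claimed with plain atomic_read()/atomic_set() rather than an atomic test-and-set precisely because claiming happens only in the serialized IRQ path; the actual driver sizes the pool at 70 forms because CPU_DE_RELEASE_MDL can burst 63 commands.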
......@@ -440,6 +440,8 @@ static void cx18_process_options(struct cx18 *cx)
*/
static int __devinit cx18_init_struct1(struct cx18 *cx)
{
int i;
cx->base_addr = pci_resource_start(cx->dev, 0);
mutex_init(&cx->serialize_lock);
......@@ -451,7 +453,11 @@ static int __devinit cx18_init_struct1(struct cx18 *cx)
spin_lock_init(&cx->lock);
INIT_WORK(&cx->work, cx18_work_handler);
for (i = 0; i < CX18_MAX_EPU_WORK_ORDERS; i++) {
cx->epu_work_order[i].cx = cx;
cx->epu_work_order[i].str = cx->epu_debug_str;
INIT_WORK(&cx->epu_work_order[i].work, cx18_epu_work_handler);
}
/* start counting open_id at 1 */
cx->open_id = 1;
......
......@@ -203,8 +203,6 @@ struct cx18_options {
#define CX18_F_I_EOS 4 /* End of encoder stream */
#define CX18_F_I_RADIO_USER 5 /* radio tuner is selected */
#define CX18_F_I_ENC_PAUSED 13 /* the encoder is paused */
#define CX18_F_I_HAVE_WORK 15 /* there is work to be done */
#define CX18_F_I_WORK_HANDLER_DVB 18 /* work to be done for DVB */
#define CX18_F_I_INITED 21 /* set after first open */
#define CX18_F_I_FAILED 22 /* set if first open failed */
......@@ -247,6 +245,19 @@ struct cx18_dvb {
struct cx18; /* forward reference */
struct cx18_scb; /* forward reference */
#define CX18_MAX_MDL_ACKS 2
#define CX18_MAX_EPU_WORK_ORDERS 70 /* CPU_DE_RELEASE_MDL bursts 63 commands */
struct cx18_epu_work_order {
struct work_struct work;
atomic_t pending;
struct cx18 *cx;
int rpu;
struct cx18_mailbox mb;
struct cx18_mdl_ack mdl_ack[CX18_MAX_MDL_ACKS];
char *str;
};
#define CX18_INVALID_TASK_HANDLE 0xffffffff
struct cx18_stream {
......@@ -388,7 +399,6 @@ struct cx18 {
struct mutex epu2apu_mb_lock; /* protect driver to chip mailbox in SCB*/
struct mutex epu2cpu_mb_lock; /* protect driver to chip mailbox in SCB*/
struct cx18_av_state av_state;
/* codec settings */
......@@ -441,7 +451,8 @@ struct cx18 {
/* when the current DMA is finished this queue is woken up */
wait_queue_head_t dma_waitq;
struct work_struct work;
struct cx18_epu_work_order epu_work_order[CX18_MAX_EPU_WORK_ORDERS];
char epu_debug_str[256]; /* CX18_EPU_DEBUG is rare: use shared space */
/* i2c */
struct i2c_adapter i2c_adap[2];
......
......@@ -23,8 +23,6 @@
#include "cx18-dvb.h"
#include "cx18-io.h"
#include "cx18-streams.h"
#include "cx18-queue.h"
#include "cx18-scb.h"
#include "cx18-cards.h"
#include "s5h1409.h"
#include "mxl5005s.h"
......@@ -305,26 +303,3 @@ static int dvb_register(struct cx18_stream *stream)
return ret;
}
void cx18_dvb_work_handler(struct cx18 *cx)
{
struct cx18_buffer *buf;
struct cx18_stream *s = &cx->streams[CX18_ENC_STREAM_TYPE_TS];
while ((buf = cx18_dequeue(s, &s->q_full)) != NULL) {
if (s->dvb.enabled)
dvb_dmx_swfilter(&s->dvb.demux, buf->buf,
buf->bytesused);
cx18_buf_sync_for_device(s, buf);
cx18_enqueue(s, buf, &s->q_free);
if (s->handle == CX18_INVALID_TASK_HANDLE ||
!test_bit(CX18_F_S_STREAMING, &s->s_flags))
continue;
cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, s->handle,
(void __iomem *)&cx->scb->cpu_mdl[buf->id] - cx->enc_mem,
1, buf->id, s->buf_size);
}
}
......@@ -23,4 +23,3 @@
int cx18_dvb_register(struct cx18_stream *stream);
void cx18_dvb_unregister(struct cx18_stream *stream);
void cx18_dvb_work_handler(struct cx18 *cx);
......@@ -121,6 +121,7 @@ static int load_cpu_fw_direct(const char *fn, u8 __iomem *mem, struct cx18 *cx)
if (cx18_raw_readl(cx, dst) != *src) {
CX18_ERR("Mismatch at offset %x\n", i);
release_firmware(fw);
cx18_setup_page(cx, 0);
return -EIO;
}
dst++;
......@@ -131,6 +132,7 @@ static int load_cpu_fw_direct(const char *fn, u8 __iomem *mem, struct cx18 *cx)
CX18_INFO("loaded %s firmware (%zd bytes)\n", fn, fw->size);
size = fw->size;
release_firmware(fw);
cx18_setup_page(cx, SCB_OFFSET);
return size;
}
......@@ -150,6 +152,7 @@ static int load_apu_fw_direct(const char *fn, u8 __iomem *dst, struct cx18 *cx,
if (request_firmware(&fw, fn, &cx->dev->dev)) {
CX18_ERR("unable to open firmware %s\n", fn);
CX18_ERR("did you put the firmware in the hotplug firmware directory?\n");
cx18_setup_page(cx, 0);
return -ENOMEM;
}
......@@ -185,6 +188,7 @@ static int load_apu_fw_direct(const char *fn, u8 __iomem *dst, struct cx18 *cx,
CX18_ERR("Mismatch at offset %x\n",
offset + j);
release_firmware(fw);
cx18_setup_page(cx, 0);
return -EIO;
}
}
......@@ -196,6 +200,7 @@ static int load_apu_fw_direct(const char *fn, u8 __iomem *dst, struct cx18 *cx,
fn, apu_version, fw->size);
size = fw->size;
release_firmware(fw);
cx18_setup_page(cx, 0);
return size;
}
......
......@@ -166,41 +166,6 @@ u8 cx18_readb_retry(struct cx18 *cx, const void __iomem *addr)
return val;
}
void cx18_memcpy_fromio(struct cx18 *cx, void *to,
const void __iomem *from, unsigned int len)
{
const u8 __iomem *src = from;
u8 *dst = to;
/* Align reads on the CX23418's addresses */
if ((len > 0) && ((unsigned long) src & 1)) {
*dst = cx18_readb(cx, src);
len--;
dst++;
src++;
}
if ((len > 1) && ((unsigned long) src & 2)) {
*((u16 *)dst) = cx18_raw_readw(cx, src);
len -= 2;
dst += 2;
src += 2;
}
while (len > 3) {
*((u32 *)dst) = cx18_raw_readl(cx, src);
len -= 4;
dst += 4;
src += 4;
}
if (len > 1) {
*((u16 *)dst) = cx18_raw_readw(cx, src);
len -= 2;
dst += 2;
src += 2;
}
if (len > 0)
*dst = cx18_readb(cx, src);
}
void cx18_memset_io(struct cx18 *cx, void __iomem *addr, int val, size_t count)
{
u8 __iomem *dst = addr;
......
......@@ -249,8 +249,13 @@ static inline u32 cx18_write_sync(struct cx18 *cx, u32 val, void __iomem *addr)
}
static inline
void cx18_memcpy_fromio(struct cx18 *cx, void *to,
const void __iomem *from, unsigned int len);
const void __iomem *from, unsigned int len)
{
memcpy_fromio(to, from, len);
}
void cx18_memset_io(struct cx18 *cx, void __iomem *addr, int val, size_t count);
......
......@@ -21,121 +21,9 @@
#include "cx18-driver.h"
#include "cx18-io.h"
#include "cx18-firmware.h"
#include "cx18-fileops.h"
#include "cx18-queue.h"
#include "cx18-irq.h"
#include "cx18-ioctl.h"
#include "cx18-mailbox.h"
#include "cx18-vbi.h"
#include "cx18-scb.h"
#include "cx18-dvb.h"
void cx18_work_handler(struct work_struct *work)
{
struct cx18 *cx = container_of(work, struct cx18, work);
if (test_and_clear_bit(CX18_F_I_WORK_HANDLER_DVB, &cx->i_flags))
cx18_dvb_work_handler(cx);
}
static void epu_dma_done(struct cx18 *cx, struct cx18_mailbox *mb, int rpu)
{
u32 handle = mb->args[0];
struct cx18_stream *s = NULL;
struct cx18_buffer *buf;
u32 off;
int i;
int id;
for (i = 0; i < CX18_MAX_STREAMS; i++) {
s = &cx->streams[i];
if ((handle == s->handle) && (s->dvb.enabled))
break;
if (s->v4l2dev && handle == s->handle)
break;
}
if (i == CX18_MAX_STREAMS) {
CX18_WARN("Got DMA done notification for unknown/inactive"
" handle %d\n", handle);
mb->error = CXERR_NOT_OPEN;
mb->cmd = 0;
cx18_mb_ack(cx, mb, rpu);
return;
}
off = mb->args[1];
if (mb->args[2] != 1)
CX18_WARN("Ack struct = %d for %s\n",
mb->args[2], s->name);
id = cx18_read_enc(cx, off);
buf = cx18_queue_get_buf_irq(s, id, cx18_read_enc(cx, off + 4));
CX18_DEBUG_HI_DMA("DMA DONE for %s (buffer %d)\n", s->name, id);
if (buf) {
cx18_buf_sync_for_cpu(s, buf);
if (s->type == CX18_ENC_STREAM_TYPE_TS && s->dvb.enabled) {
CX18_DEBUG_HI_DMA("TS recv bytesused = %d\n",
buf->bytesused);
set_bit(CX18_F_I_WORK_HANDLER_DVB, &cx->i_flags);
set_bit(CX18_F_I_HAVE_WORK, &cx->i_flags);
} else
set_bit(CX18_F_B_NEED_BUF_SWAP, &buf->b_flags);
} else {
CX18_WARN("Could not find buf %d for stream %s\n",
cx18_read_enc(cx, off), s->name);
}
mb->error = 0;
mb->cmd = 0;
cx18_mb_ack(cx, mb, rpu);
wake_up(&cx->dma_waitq);
if (s->id != -1)
wake_up(&s->waitq);
}
static void epu_debug(struct cx18 *cx, struct cx18_mailbox *mb, int rpu)
{
char str[256] = { 0 };
char *p;
if (mb->args[1]) {
cx18_setup_page(cx, mb->args[1]);
cx18_memcpy_fromio(cx, str, cx->enc_mem + mb->args[1], 252);
str[252] = 0;
}
cx18_mb_ack(cx, mb, rpu);
CX18_DEBUG_INFO("%x %s\n", mb->args[0], str);
p = strchr(str, '.');
if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags) && p && p > str)
CX18_INFO("FW version: %s\n", p - 1);
}
static void epu_cmd(struct cx18 *cx, u32 sw1)
{
struct cx18_mailbox mb;
if (sw1 & IRQ_CPU_TO_EPU) {
cx18_memcpy_fromio(cx, &mb, &cx->scb->cpu2epu_mb, sizeof(mb));
mb.error = 0;
switch (mb.cmd) {
case CX18_EPU_DMA_DONE:
epu_dma_done(cx, &mb, CPU);
break;
case CX18_EPU_DEBUG:
epu_debug(cx, &mb, CPU);
break;
default:
CX18_WARN("Unknown CPU_TO_EPU mailbox command %#08x\n",
mb.cmd);
break;
}
}
if (sw1 & IRQ_APU_TO_EPU) {
cx18_memcpy_fromio(cx, &mb, &cx->scb->apu2epu_mb, sizeof(mb));
CX18_WARN("Unknown APU_TO_EPU mailbox command %#08x\n", mb.cmd);
}
}
static void xpu_ack(struct cx18 *cx, u32 sw2)
{
......@@ -145,6 +33,14 @@ static void xpu_ack(struct cx18 *cx, u32 sw2)
wake_up(&cx->mb_apu_waitq);
}
static void epu_cmd(struct cx18 *cx, u32 sw1)
{
if (sw1 & IRQ_CPU_TO_EPU)
cx18_api_epu_cmd_irq(cx, CPU);
if (sw1 & IRQ_APU_TO_EPU)
cx18_api_epu_cmd_irq(cx, APU);
}
irqreturn_t cx18_irq_handler(int irq, void *dev_id)
{
struct cx18 *cx = (struct cx18 *)dev_id;
......@@ -170,6 +66,13 @@ irqreturn_t cx18_irq_handler(int irq, void *dev_id)
CX18_DEBUG_HI_IRQ("received interrupts "
"SW1: %x SW2: %x HW2: %x\n", sw1, sw2, hw2);
/*
* SW1 responses have to happen first. The sending XPU times out the
* incoming mailboxes on us rather rapidly.
*/
if (sw1)
epu_cmd(cx, sw1);
/* To do: interrupt-based I2C handling
if (hw2 & (HW2_I2C1_INT|HW2_I2C2_INT)) {
}
......@@ -178,11 +81,5 @@ irqreturn_t cx18_irq_handler(int irq, void *dev_id)
if (sw2)
xpu_ack(cx, sw2);
if (sw1)
epu_cmd(cx, sw1);
if (test_and_clear_bit(CX18_F_I_HAVE_WORK, &cx->i_flags))
schedule_work(&cx->work);
return (sw1 || sw2 || hw2) ? IRQ_HANDLED : IRQ_NONE;
}
......@@ -32,5 +32,3 @@
#define SW2_INT_ENABLE_PCI 0xc7315c
irqreturn_t cx18_irq_handler(int irq, void *dev_id);
void cx18_work_handler(struct work_struct *work);
......@@ -26,6 +26,10 @@
#include "cx18-scb.h"
#include "cx18-irq.h"
#include "cx18-mailbox.h"
#include "cx18-queue.h"
#include "cx18-streams.h"
static const char *rpu_str[] = { "APU", "CPU", "EPU", "HPU" };
#define API_FAST (1 << 2) /* Short timeout */
#define API_SLOW (1 << 3) /* Additional 300ms timeout */
......@@ -92,12 +96,149 @@ static const struct cx18_api_info *find_api_info(u32 cmd)
return NULL;
}
long cx18_mb_ack(struct cx18 *cx, const struct cx18_mailbox *mb, int rpu)
static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name)
{
char argstr[MAX_MB_ARGUMENTS*11+1];
char *p;
int i;
if (!(cx18_debug & CX18_DBGFLG_API))
return;
for (i = 0, p = argstr; i < MAX_MB_ARGUMENTS; i++, p += 11) {
/* kernel snprintf() appends '\0' always */
snprintf(p, 12, " %#010x", mb->args[i]);
}
CX18_DEBUG_API("%s: req %#010x ack %#010x cmd %#010x err %#010x args%s"
"\n", name, mb->request, mb->ack, mb->cmd, mb->error, argstr);
}
/*
* Functions that run in a work_queue work handling context
*/
static void epu_dma_done(struct cx18 *cx, struct cx18_epu_work_order *order)
{
u32 handle, mdl_ack_count;
struct cx18_mailbox *mb;
struct cx18_mdl_ack *mdl_ack;
struct cx18_stream *s;
struct cx18_buffer *buf;
int i;
mb = &order->mb;
handle = mb->args[0];
s = cx18_handle_to_stream(cx, handle);
if (s == NULL) {
CX18_WARN("Got DMA done notification for unknown/inactive"
" handle %d\n", handle);
return;
}
mdl_ack_count = mb->args[2];
mdl_ack = order->mdl_ack;
for (i = 0; i < mdl_ack_count; i++, mdl_ack++) {
buf = cx18_queue_get_buf(s, mdl_ack->id, mdl_ack->data_used);
CX18_DEBUG_HI_DMA("DMA DONE for %s (buffer %d)\n", s->name,
mdl_ack->id);
if (buf == NULL) {
CX18_WARN("Could not find buf %d for stream %s\n",
mdl_ack->id, s->name);
continue;
}
cx18_buf_sync_for_cpu(s, buf);
if (s->type == CX18_ENC_STREAM_TYPE_TS && s->dvb.enabled) {
CX18_DEBUG_HI_DMA("TS recv bytesused = %d\n",
buf->bytesused);
dvb_dmx_swfilter(&s->dvb.demux, buf->buf,
buf->bytesused);
cx18_buf_sync_for_device(s, buf);
if (s->handle != CX18_INVALID_TASK_HANDLE &&
test_bit(CX18_F_S_STREAMING, &s->s_flags))
cx18_vapi(cx,
CX18_CPU_DE_SET_MDL, 5, s->handle,
(void __iomem *)
&cx->scb->cpu_mdl[buf->id] - cx->enc_mem,
1, buf->id, s->buf_size);
} else
set_bit(CX18_F_B_NEED_BUF_SWAP, &buf->b_flags);
}
wake_up(&cx->dma_waitq);
if (s->id != -1)
wake_up(&s->waitq);
}
static void epu_debug(struct cx18 *cx, struct cx18_epu_work_order *order)
{
char *p;
char *str = order->str;
CX18_DEBUG_INFO("%x %s\n", order->mb.args[0], str);
p = strchr(str, '.');
if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags) && p && p > str)
CX18_INFO("FW version: %s\n", p - 1);
}
static void epu_cmd(struct cx18 *cx, struct cx18_epu_work_order *order)
{
switch (order->rpu) {
case CPU:
{
switch (order->mb.cmd) {
case CX18_EPU_DMA_DONE:
epu_dma_done(cx, order);
break;
case CX18_EPU_DEBUG:
epu_debug(cx, order);
break;
default:
CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
order->mb.cmd);
break;
}
break;
}
case APU:
CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
order->mb.cmd);
break;
default:
break;
}
}
static
void free_epu_work_order(struct cx18 *cx, struct cx18_epu_work_order *order)
{
atomic_set(&order->pending, 0);
}
void cx18_epu_work_handler(struct work_struct *work)
{
struct cx18_epu_work_order *order =
container_of(work, struct cx18_epu_work_order, work);
struct cx18 *cx = order->cx;
epu_cmd(cx, order);
free_epu_work_order(cx, order);
}
/*
* Functions that run in an interrupt handling context
*/
static void mb_ack_irq(struct cx18 *cx, const struct cx18_epu_work_order *order)
{
struct cx18_mailbox __iomem *ack_mb;
u32 ack_irq;
u32 ack_irq, req;
switch (rpu) {
switch (order->rpu) {
case APU:
ack_irq = IRQ_EPU_TO_APU_ACK;
ack_mb = &cx->scb->apu2epu_mb;
......@@ -108,16 +249,175 @@ long cx18_mb_ack(struct cx18 *cx, const struct cx18_mailbox *mb, int rpu)
break;
default:
CX18_WARN("Unhandled RPU (%d) for command %x ack\n",
rpu, mb->cmd);
return -EINVAL;
order->rpu, order->mb.cmd);
return;
}
cx18_setup_page(cx, SCB_OFFSET);
cx18_write_sync(cx, mb->request, &ack_mb->ack);
req = order->mb.request;
/* Don't ack if the RPU has gotten impatient and timed us out */
if (req != cx18_readl(cx, &ack_mb->request) ||
req == cx18_readl(cx, &ack_mb->ack))
return;
cx18_writel(cx, req, &ack_mb->ack);
cx18_write_reg_expect(cx, ack_irq, SW2_INT_SET, ack_irq, ack_irq);
return 0;
return;
}
static int epu_dma_done_irq(struct cx18 *cx, struct cx18_epu_work_order *order,
int stale)
{
u32 handle, mdl_ack_offset, mdl_ack_count;
struct cx18_mailbox *mb;
mb = &order->mb;
handle = mb->args[0];
mdl_ack_offset = mb->args[1];
mdl_ack_count = mb->args[2];
if (handle == CX18_INVALID_TASK_HANDLE ||
mdl_ack_count == 0 || mdl_ack_count > CX18_MAX_MDL_ACKS) {
if (!stale)
mb_ack_irq(cx, order);
return -1;
}
cx18_memcpy_fromio(cx, order->mdl_ack, cx->enc_mem + mdl_ack_offset,
sizeof(struct cx18_mdl_ack) * mdl_ack_count);
if (!stale)
mb_ack_irq(cx, order);
return 1;
}
static
int epu_debug_irq(struct cx18 *cx, struct cx18_epu_work_order *order, int stale)
{
u32 str_offset;
char *str = order->str;
str[0] = '\0';
str_offset = order->mb.args[1];
if (str_offset) {
cx18_setup_page(cx, str_offset);
cx18_memcpy_fromio(cx, str, cx->enc_mem + str_offset, 252);
str[252] = '\0';
cx18_setup_page(cx, SCB_OFFSET);
}
if (!stale)
mb_ack_irq(cx, order);
return str_offset ? 1 : 0;
}
static inline
int epu_cmd_irq(struct cx18 *cx, struct cx18_epu_work_order *order, int stale)
{
int ret = -1;
switch (order->rpu) {
case CPU:
{
switch (order->mb.cmd) {
case CX18_EPU_DMA_DONE:
ret = epu_dma_done_irq(cx, order, stale);
break;
case CX18_EPU_DEBUG:
ret = epu_debug_irq(cx, order, stale);
break;
default:
CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
order->mb.cmd);
break;
}
break;
}
case APU:
CX18_WARN("Unknown APU to EPU mailbox command %#0x\n",
order->mb.cmd);
break;
default:
break;
}
return ret;
}
static inline
struct cx18_epu_work_order *alloc_epu_work_order_irq(struct cx18 *cx)
{
int i;
struct cx18_epu_work_order *order = NULL;
for (i = 0; i < CX18_MAX_EPU_WORK_ORDERS; i++) {
/*
* We only need "pending" atomic to inspect its contents,
* and need not do a check and set because:
* 1. Any work handler thread only clears "pending" and only
* on one, particular work order at a time, per handler thread.
* 2. "pending" is only set here, and we're serialized because
* we're called in an IRQ handler context.
*/
if (atomic_read(&cx->epu_work_order[i].pending) == 0) {
order = &cx->epu_work_order[i];
atomic_set(&order->pending, 1);
break;
}
}
return order;
}
void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
{
struct cx18_mailbox __iomem *mb;
struct cx18_mailbox *order_mb;
struct cx18_epu_work_order *order;
int stale = 0;
int submit;
switch (rpu) {
case CPU:
mb = &cx->scb->cpu2epu_mb;
break;
case APU:
mb = &cx->scb->apu2epu_mb;
break;
default:
return;
}
order = alloc_epu_work_order_irq(cx);
if (order == NULL) {
CX18_WARN("Unable to find blank work order form to schedule "
"incoming mailbox command processing\n");
return;
}
order->rpu = rpu;
order_mb = &order->mb;
cx18_memcpy_fromio(cx, order_mb, mb, sizeof(struct cx18_mailbox));
if (order_mb->request == order_mb->ack) {
CX18_WARN("Possibly falling behind: %s self-ack'ed our incoming"
" %s to EPU mailbox (sequence no. %u)\n",
rpu_str[rpu], rpu_str[rpu], order_mb->request);
dump_mb(cx, order_mb, "incoming");
stale = 1;
}
/*
* Individual EPU command processing is responsible for ack-ing
* a non-stale mailbox as soon as possible
*/
submit = epu_cmd_irq(cx, order, stale);
if (submit > 0) {
schedule_work(&order->work);
}
}
/*
* Functions called from a non-interrupt, non work_queue context
*/
static void cx18_api_log_ack_delay(struct cx18 *cx, int msecs)
{
if (msecs > CX18_MAX_MB_ACK_DELAY)
......@@ -167,8 +467,6 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
}
mutex_lock(mb_lock);
cx18_setup_page(cx, SCB_OFFSET);
/*
* Wait for an in-use mailbox to complete
*
......
......@@ -37,6 +37,17 @@
struct cx18;
/*
* This structure is used by the CPU to provide completed buffer information.
* Its structure is dictated by the layout of the SCB, required by the
* firmware, but its definition needs to be here, instead of in cx18-scb.h,
* for mailbox work order scheduling
*/
struct cx18_mdl_ack {
u32 id; /* ID of a completed MDL */
u32 data_used; /* Total data filled in the MDL for buffer 'id' */
};
/* The cx18_mailbox struct is the mailbox structure which is used for passing
messages between processors */
struct cx18_mailbox {
......@@ -73,6 +84,9 @@ int cx18_vapi_result(struct cx18 *cx, u32 data[MAX_MB_ARGUMENTS], u32 cmd,
int cx18_vapi(struct cx18 *cx, u32 cmd, int args, ...);
int cx18_api_func(void *priv, u32 cmd, int in, int out,
u32 data[CX2341X_MBOX_MAX_DATA]);
long cx18_mb_ack(struct cx18 *cx, const struct cx18_mailbox *mb, int rpu);
void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu);
void cx18_epu_work_handler(struct work_struct *work);
#endif
......@@ -75,30 +75,37 @@ struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
return buf;
}
struct cx18_buffer *cx18_queue_get_buf_irq(struct cx18_stream *s, u32 id,
struct cx18_buffer *cx18_queue_get_buf(struct cx18_stream *s, u32 id,
u32 bytesused)
{
struct cx18 *cx = s->cx;
struct list_head *p;
unsigned long flags = 0;
spin_lock(&s->qlock);
spin_lock_irqsave(&s->qlock, flags);
list_for_each(p, &s->q_free.list) {
struct cx18_buffer *buf =
list_entry(p, struct cx18_buffer, list);
if (buf->id != id)
if (buf->id != id) {
CX18_DEBUG_HI_DMA("Skipping buffer %d searching for %d "
"in stream %s q_free\n", buf->id, id,
s->name);
continue;
}
buf->bytesused = bytesused;
atomic_dec(&s->q_free.buffers);
atomic_inc(&s->q_full.buffers);
s->q_full.bytesused += buf->bytesused;
list_move_tail(&buf->list, &s->q_full.list);
if (s->type != CX18_ENC_STREAM_TYPE_TS) {
atomic_dec(&s->q_free.buffers);
atomic_inc(&s->q_full.buffers);
s->q_full.bytesused += buf->bytesused;
list_move_tail(&buf->list, &s->q_full.list);
}
spin_unlock(&s->qlock);
spin_unlock_irqrestore(&s->qlock, flags);
return buf;
}
spin_unlock(&s->qlock);
spin_unlock_irqrestore(&s->qlock, flags);
CX18_ERR("Cannot find buffer %d for stream %s\n", id, s->name);
return NULL;
}
......
......@@ -46,7 +46,7 @@ void cx18_queue_init(struct cx18_queue *q);
void cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
struct cx18_queue *q);
struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q);
struct cx18_buffer *cx18_queue_get_buf_irq(struct cx18_stream *s, u32 id,
struct cx18_buffer *cx18_queue_get_buf(struct cx18_stream *s, u32 id,
u32 bytesused);
void cx18_flush_queues(struct cx18_stream *s);
......
......@@ -85,12 +85,6 @@ struct cx18_mdl {
u32 length; /* Length of the buffer segment */
};
/* This structure is used by CPU to provide completed buffers information */
struct cx18_mdl_ack {
u32 id; /* ID of a completed MDL */
u32 data_used; /* Total data filled in the MDL for buffer 'id' */
};
struct cx18_scb {
/* These fields form the System Control Block which is used at boot time
for localizing the IPC data as well as the code positions for all
......@@ -276,7 +270,7 @@ struct cx18_scb {
struct cx18_mailbox hpu2epu_mb;
struct cx18_mailbox ppu2epu_mb;
struct cx18_mdl_ack cpu_mdl_ack[CX18_MAX_STREAMS][2];
struct cx18_mdl_ack cpu_mdl_ack[CX18_MAX_STREAMS][CX18_MAX_MDL_ACKS];
struct cx18_mdl cpu_mdl[1];
};
......
......@@ -595,3 +595,21 @@ u32 cx18_find_handle(struct cx18 *cx)
}
return CX18_INVALID_TASK_HANDLE;
}
struct cx18_stream *cx18_handle_to_stream(struct cx18 *cx, u32 handle)
{
int i;
struct cx18_stream *s;
if (handle == CX18_INVALID_TASK_HANDLE)
return NULL;
for (i = 0; i < CX18_MAX_STREAMS; i++) {
s = &cx->streams[i];
if (s->handle != handle)
continue;
if (s->v4l2dev || s->dvb.enabled)
return s;
}
return NULL;
}
......@@ -22,6 +22,7 @@
*/
u32 cx18_find_handle(struct cx18 *cx);
struct cx18_stream *cx18_handle_to_stream(struct cx18 *cx, u32 handle);
int cx18_streams_setup(struct cx18 *cx);
int cx18_streams_register(struct cx18 *cx);
void cx18_streams_cleanup(struct cx18 *cx, int unregister);
......