Commit 82a96f57 authored by Krzysztof Halasa, committed by Russell King

[ARM] 4713/3: Adds drivers for IXP4xx QMgr and NPE features

This patch adds drivers for IXP4xx hardware Queue Manager and for
Network Processor Engines. Requires patch #4712 (reading/writing
CPU feature (aka fuse) bits).

Posted to linux-arm-kernel on 2 Dec 2007 and revised.
Signed-off-by: Krzysztof Halasa <khc@pm.waw.pl>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Parent c18f6581
@@ -189,6 +189,20 @@ config IXP4XX_INDIRECT_PCI
need to use the indirect method instead. If you don't know
what you need, leave this option unselected.

config IXP4XX_QMGR
	tristate "IXP4xx Queue Manager support"
	help
	  This driver supports IXP4xx built-in hardware queue manager
	  and is automatically selected by Ethernet and HSS drivers.

config IXP4XX_NPE
	tristate "IXP4xx Network Processor Engine support"
	select HOTPLUG
	select FW_LOADER
	help
	  This driver supports IXP4xx built-in network coprocessors
	  and is automatically selected by Ethernet and HSS drivers.

endmenu

endif
@@ -30,3 +30,5 @@ obj-$(CONFIG_MACH_GATEWAY7001) += gateway7001-setup.o
obj-$(CONFIG_MACH_WG302V2) += wg302v2-setup.o
obj-$(CONFIG_PCI) += $(obj-pci-$(CONFIG_PCI)) common-pci.o
obj-$(CONFIG_IXP4XX_QMGR) += ixp4xx_qmgr.o
obj-$(CONFIG_IXP4XX_NPE) += ixp4xx_npe.o
(This diff is collapsed.)
/*
* Intel IXP4xx Queue Manager driver for Linux
*
* Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*/
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/arch/qmgr.h>
#define DEBUG 0
struct qmgr_regs __iomem *qmgr_regs;
static struct resource *mem_res;
static spinlock_t qmgr_lock;
static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
static void (*irq_handlers[HALF_QUEUES])(void *pdev);
static void *irq_pdevs[HALF_QUEUES];
void qmgr_set_irq(unsigned int queue, int src,
void (*handler)(void *pdev), void *pdev)
{
u32 __iomem *reg = &qmgr_regs->irqsrc[queue / 8]; /* 8 queues / u32 */
int bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
unsigned long flags;
src &= 7;
spin_lock_irqsave(&qmgr_lock, flags);
__raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit), reg);
irq_handlers[queue] = handler;
irq_pdevs[queue] = pdev;
spin_unlock_irqrestore(&qmgr_lock, flags);
}
static irqreturn_t qmgr_irq1(int irq, void *pdev)
{
int i;
u32 val = __raw_readl(&qmgr_regs->irqstat[0]);
__raw_writel(val, &qmgr_regs->irqstat[0]); /* ACK */
for (i = 0; i < HALF_QUEUES; i++)
if (val & (1 << i))
irq_handlers[i](irq_pdevs[i]);
return val ? IRQ_HANDLED : 0;
}
void qmgr_enable_irq(unsigned int queue)
{
unsigned long flags;
spin_lock_irqsave(&qmgr_lock, flags);
__raw_writel(__raw_readl(&qmgr_regs->irqen[0]) | (1 << queue),
&qmgr_regs->irqen[0]);
spin_unlock_irqrestore(&qmgr_lock, flags);
}
void qmgr_disable_irq(unsigned int queue)
{
unsigned long flags;
spin_lock_irqsave(&qmgr_lock, flags);
__raw_writel(__raw_readl(&qmgr_regs->irqen[0]) & ~(1 << queue),
&qmgr_regs->irqen[0]);
spin_unlock_irqrestore(&qmgr_lock, flags);
}
static inline void shift_mask(u32 *mask)
{
mask[3] = mask[3] << 1 | mask[2] >> 31;
mask[2] = mask[2] << 1 | mask[1] >> 31;
mask[1] = mask[1] << 1 | mask[0] >> 31;
mask[0] <<= 1;
}
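/* qmgr_request_queue() below treats mask[0..3] as one 128-bit mask (one bit
 * per 16-dword SRAM page, matching used_sram_bitmap[]) and slides it left
 * with shift_mask() until it finds a free gap large enough for the queue. */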
int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
unsigned int nearly_empty_watermark,
unsigned int nearly_full_watermark)
{
u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
int err;
if (queue >= HALF_QUEUES)
return -ERANGE;
if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
return -EINVAL;
switch (len) {
case 16:
cfg = 0 << 24;
mask[0] = 0x1;
break;
case 32:
cfg = 1 << 24;
mask[0] = 0x3;
break;
case 64:
cfg = 2 << 24;
mask[0] = 0xF;
break;
case 128:
cfg = 3 << 24;
mask[0] = 0xFF;
break;
default:
return -EINVAL;
}
cfg |= nearly_empty_watermark << 26;
cfg |= nearly_full_watermark << 29;
len /= 16; /* in 16-dwords: 1, 2, 4 or 8 */
mask[1] = mask[2] = mask[3] = 0;
if (!try_module_get(THIS_MODULE))
return -ENODEV;
spin_lock_irq(&qmgr_lock);
if (__raw_readl(&qmgr_regs->sram[queue])) {
err = -EBUSY;
goto err;
}
while (1) {
if (!(used_sram_bitmap[0] & mask[0]) &&
!(used_sram_bitmap[1] & mask[1]) &&
!(used_sram_bitmap[2] & mask[2]) &&
!(used_sram_bitmap[3] & mask[3]))
break; /* found free space */
addr++;
shift_mask(mask);
if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
printk(KERN_ERR "qmgr: no free SRAM space for"
" queue %i\n", queue);
err = -ENOMEM;
goto err;
}
}
used_sram_bitmap[0] |= mask[0];
used_sram_bitmap[1] |= mask[1];
used_sram_bitmap[2] |= mask[2];
used_sram_bitmap[3] |= mask[3];
__raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
spin_unlock_irq(&qmgr_lock);
#if DEBUG
printk(KERN_DEBUG "qmgr: requested queue %i, addr = 0x%02X\n",
queue, addr);
#endif
return 0;
err:
spin_unlock_irq(&qmgr_lock);
module_put(THIS_MODULE);
return err;
}
void qmgr_release_queue(unsigned int queue)
{
u32 cfg, addr, mask[4];
BUG_ON(queue >= HALF_QUEUES); /* not in valid range */
spin_lock_irq(&qmgr_lock);
cfg = __raw_readl(&qmgr_regs->sram[queue]);
addr = (cfg >> 14) & 0xFF;
BUG_ON(!addr); /* not requested */
switch ((cfg >> 24) & 3) {
case 0: mask[0] = 0x1; break;
case 1: mask[0] = 0x3; break;
case 2: mask[0] = 0xF; break;
case 3: mask[0] = 0xFF; break;
}
mask[1] = mask[2] = mask[3] = 0;
while (addr--)
shift_mask(mask);
__raw_writel(0, &qmgr_regs->sram[queue]);
used_sram_bitmap[0] &= ~mask[0];
used_sram_bitmap[1] &= ~mask[1];
used_sram_bitmap[2] &= ~mask[2];
used_sram_bitmap[3] &= ~mask[3];
irq_handlers[queue] = NULL; /* catch IRQ bugs */
spin_unlock_irq(&qmgr_lock);
module_put(THIS_MODULE);
#if DEBUG
printk(KERN_DEBUG "qmgr: released queue %i\n", queue);
#endif
}
static int qmgr_init(void)
{
int i, err;
mem_res = request_mem_region(IXP4XX_QMGR_BASE_PHYS,
IXP4XX_QMGR_REGION_SIZE,
"IXP4xx Queue Manager");
if (mem_res == NULL)
return -EBUSY;
qmgr_regs = ioremap(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
if (qmgr_regs == NULL) {
err = -ENOMEM;
goto error_map;
}
/* reset qmgr registers */
for (i = 0; i < 4; i++) {
__raw_writel(0x33333333, &qmgr_regs->stat1[i]);
__raw_writel(0, &qmgr_regs->irqsrc[i]);
}
for (i = 0; i < 2; i++) {
__raw_writel(0, &qmgr_regs->stat2[i]);
__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
__raw_writel(0, &qmgr_regs->irqen[i]);
}
for (i = 0; i < QUEUES; i++)
__raw_writel(0, &qmgr_regs->sram[i]);
err = request_irq(IRQ_IXP4XX_QM1, qmgr_irq1, 0,
"IXP4xx Queue Manager", NULL);
if (err) {
printk(KERN_ERR "qmgr: failed to request IRQ%i\n",
IRQ_IXP4XX_QM1);
goto error_irq;
}
used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */
spin_lock_init(&qmgr_lock);
printk(KERN_INFO "IXP4xx Queue Manager initialized.\n");
return 0;
error_irq:
iounmap(qmgr_regs);
error_map:
release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
return err;
}
static void qmgr_remove(void)
{
free_irq(IRQ_IXP4XX_QM1, NULL);
synchronize_irq(IRQ_IXP4XX_QM1);
iounmap(qmgr_regs);
release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
}
module_init(qmgr_init);
module_exit(qmgr_remove);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Krzysztof Halasa");
EXPORT_SYMBOL(qmgr_regs);
EXPORT_SYMBOL(qmgr_set_irq);
EXPORT_SYMBOL(qmgr_enable_irq);
EXPORT_SYMBOL(qmgr_disable_irq);
EXPORT_SYMBOL(qmgr_request_queue);
EXPORT_SYMBOL(qmgr_release_queue);
#ifndef __IXP4XX_NPE_H
#define __IXP4XX_NPE_H
#include <linux/kernel.h>
extern const char *npe_names[];
struct npe_regs {
u32 exec_addr, exec_data, exec_status_cmd, exec_count;
u32 action_points[4];
u32 watchpoint_fifo, watch_count;
u32 profile_count;
u32 messaging_status, messaging_control;
u32 mailbox_status, /*messaging_*/ in_out_fifo;
};
struct npe {
struct resource *mem_res;
struct npe_regs __iomem *regs;
u32 regs_phys;
int id;
int valid;
};
static inline const char *npe_name(struct npe *npe)
{
return npe_names[npe->id];
}
int npe_running(struct npe *npe);
int npe_send_message(struct npe *npe, const void *msg, const char *what);
int npe_recv_message(struct npe *npe, void *msg, const char *what);
int npe_send_recv_message(struct npe *npe, void *msg, const char *what);
int npe_load_firmware(struct npe *npe, const char *name, struct device *dev);
struct npe *npe_request(int id);
void npe_release(struct npe *npe);
#endif /* __IXP4XX_NPE_H */
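
The interface above is what the Ethernet and HSS drivers mentioned in the Kconfig help are expected to use. A minimal usage sketch follows; my_npe_setup(), the chosen NPE id, the firmware image name and the message contents are hypothetical and only illustrate the call sequence, they are not part of this patch.

#include <linux/device.h>
#include <asm/arch/npe.h>

static struct npe *my_npe;

/* Hypothetical client: request an NPE, load its microcode if it is not
 * already running, then exchange one driver-specific message with it. */
static int my_npe_setup(struct device *dev)
{
	u32 msg[2] = { 0, 0 };	/* contents are driver-specific */
	int err;

	/* request NPE id 1; see npe_names[] for the id-to-name mapping */
	if (!(my_npe = npe_request(1)))
		return -ENODEV;

	if (!npe_running(my_npe)) {
		/* the image name is up to the client; npe_name() is one
		 * obvious choice */
		err = npe_load_firmware(my_npe, npe_name(my_npe), dev);
		if (err) {
			npe_release(my_npe);
			return err;
		}
	}

	/* send a request and wait for the NPE's reply in the same buffer */
	err = npe_send_recv_message(my_npe, msg, "hypothetical init");
	if (err) {
		npe_release(my_npe);
		return err;
	}
	return 0;
}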
/*
* Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*/
#ifndef IXP4XX_QMGR_H
#define IXP4XX_QMGR_H
#include <linux/io.h>
#include <linux/kernel.h>
#define HALF_QUEUES 32
#define QUEUES 64 /* only 32 lower queues currently supported */
#define MAX_QUEUE_LENGTH 4 /* in dwords */
#define QUEUE_STAT1_EMPTY 1 /* queue status bits */
#define QUEUE_STAT1_NEARLY_EMPTY 2
#define QUEUE_STAT1_NEARLY_FULL 4
#define QUEUE_STAT1_FULL 8
#define QUEUE_STAT2_UNDERFLOW 1
#define QUEUE_STAT2_OVERFLOW 2
#define QUEUE_WATERMARK_0_ENTRIES 0
#define QUEUE_WATERMARK_1_ENTRY 1
#define QUEUE_WATERMARK_2_ENTRIES 2
#define QUEUE_WATERMARK_4_ENTRIES 3
#define QUEUE_WATERMARK_8_ENTRIES 4
#define QUEUE_WATERMARK_16_ENTRIES 5
#define QUEUE_WATERMARK_32_ENTRIES 6
#define QUEUE_WATERMARK_64_ENTRIES 7
/* queue interrupt request conditions */
#define QUEUE_IRQ_SRC_EMPTY 0
#define QUEUE_IRQ_SRC_NEARLY_EMPTY 1
#define QUEUE_IRQ_SRC_NEARLY_FULL 2
#define QUEUE_IRQ_SRC_FULL 3
#define QUEUE_IRQ_SRC_NOT_EMPTY 4
#define QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY 5
#define QUEUE_IRQ_SRC_NOT_NEARLY_FULL 6
#define QUEUE_IRQ_SRC_NOT_FULL 7
struct qmgr_regs {
u32 acc[QUEUES][MAX_QUEUE_LENGTH]; /* 0x000 - 0x3FF */
u32 stat1[4]; /* 0x400 - 0x40F */
u32 stat2[2]; /* 0x410 - 0x417 */
u32 statne_h; /* 0x418 - queue nearly empty */
u32 statf_h; /* 0x41C - queue full */
u32 irqsrc[4]; /* 0x420 - 0x42F IRQ source select */
u32 irqen[2]; /* 0x430 - 0x437 IRQ enabled */
u32 irqstat[2]; /* 0x438 - 0x43F - IRQ access only */
u32 reserved[1776];
u32 sram[2048]; /* 0x2000 - 0x3FFF - config and buffer */
};
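/* Offset check for the layout above: the fields before reserved[] end at
 * 0x440, reserved[1776] spans 0x440 - 0x1FFF (1776 * 4 = 0x1BC0 bytes) and
 * sram[2048] spans 0x2000 - 0x3FFF, so the whole block is 0x4000 bytes. */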
void qmgr_set_irq(unsigned int queue, int src,
void (*handler)(void *pdev), void *pdev);
void qmgr_enable_irq(unsigned int queue);
void qmgr_disable_irq(unsigned int queue);
/* request_ and release_queue() must be called from non-IRQ context */
int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
unsigned int nearly_empty_watermark,
unsigned int nearly_full_watermark);
void qmgr_release_queue(unsigned int queue);
static inline void qmgr_put_entry(unsigned int queue, u32 val)
{
extern struct qmgr_regs __iomem *qmgr_regs;
__raw_writel(val, &qmgr_regs->acc[queue][0]);
}
static inline u32 qmgr_get_entry(unsigned int queue)
{
extern struct qmgr_regs __iomem *qmgr_regs;
return __raw_readl(&qmgr_regs->acc[queue][0]);
}
static inline int qmgr_get_stat1(unsigned int queue)
{
extern struct qmgr_regs __iomem *qmgr_regs;
return (__raw_readl(&qmgr_regs->stat1[queue >> 3])
>> ((queue & 7) << 2)) & 0xF;
}
static inline int qmgr_get_stat2(unsigned int queue)
{
extern struct qmgr_regs __iomem *qmgr_regs;
return (__raw_readl(&qmgr_regs->stat2[queue >> 4])
>> ((queue & 0xF) << 1)) & 0x3;
}
static inline int qmgr_stat_empty(unsigned int queue)
{
return !!(qmgr_get_stat1(queue) & QUEUE_STAT1_EMPTY);
}
static inline int qmgr_stat_nearly_empty(unsigned int queue)
{
return !!(qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_EMPTY);
}
static inline int qmgr_stat_nearly_full(unsigned int queue)
{
return !!(qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_FULL);
}
static inline int qmgr_stat_full(unsigned int queue)
{
return !!(qmgr_get_stat1(queue) & QUEUE_STAT1_FULL);
}
static inline int qmgr_stat_underflow(unsigned int queue)
{
return !!(qmgr_get_stat2(queue) & QUEUE_STAT2_UNDERFLOW);
}
static inline int qmgr_stat_overflow(unsigned int queue)
{
return !!(qmgr_get_stat2(queue) & QUEUE_STAT2_OVERFLOW);
}
#endif
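
As a worked example of the queue manager API above, here is a minimal sketch of a hypothetical client. The queue number, queue length, watermarks and my_handle_desc() are invented for illustration and do not appear in this patch.

#include <asm/arch/qmgr.h>

#define MY_RX_QUEUE	3	/* hypothetical queue number, must be < HALF_QUEUES */

extern void my_handle_desc(u32 phys);	/* hypothetical descriptor handler */

/* Runs from the QM1 interrupt via the handler table in the driver above. */
static void my_rx_irq(void *pdev)
{
	u32 phys;

	/* assumption: reading an empty queue returns zero */
	while ((phys = qmgr_get_entry(MY_RX_QUEUE)) != 0)
		my_handle_desc(phys);
}

static int my_open(void *pdev)
{
	/* 128-dword queue, no nearly-empty/nearly-full watermarks */
	int err = qmgr_request_queue(MY_RX_QUEUE, 128,
				     QUEUE_WATERMARK_0_ENTRIES,
				     QUEUE_WATERMARK_0_ENTRIES);
	if (err)
		return err;

	qmgr_set_irq(MY_RX_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY, my_rx_irq, pdev);
	qmgr_enable_irq(MY_RX_QUEUE);
	return 0;
}

static void my_close(void *pdev)
{
	qmgr_disable_irq(MY_RX_QUEUE);
	qmgr_release_queue(MY_RX_QUEUE);	/* non-IRQ context only */
}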