提交 cc9e6c49 编写于 作者: M Michael Grzeschik 提交者: Greg Kroah-Hartman

usb: chipidea: udc: manage dynamic amount of tds with a linked list

Instead of having a fixed, limited number of usable TDs in the udc, use a
linked list so a dynamic number of TDs can be allocated as needed for all
special gadget types. This improves throughput.
Signed-off-by: Michael Grzeschik <m.grzeschik@pengutronix.de>
Reviewed-by: Felipe Balbi <balbi@ti.com>
Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
上级 20a677fd
...@@ -162,6 +162,7 @@ static int ci_requests_show(struct seq_file *s, void *data) ...@@ -162,6 +162,7 @@ static int ci_requests_show(struct seq_file *s, void *data)
unsigned long flags; unsigned long flags;
struct list_head *ptr = NULL; struct list_head *ptr = NULL;
struct ci13xxx_req *req = NULL; struct ci13xxx_req *req = NULL;
struct td_node *node, *tmpnode;
unsigned i, j, qsize = sizeof(struct ci13xxx_td)/sizeof(u32); unsigned i, j, qsize = sizeof(struct ci13xxx_td)/sizeof(u32);
if (ci->role != CI_ROLE_GADGET) { if (ci->role != CI_ROLE_GADGET) {
...@@ -174,13 +175,17 @@ static int ci_requests_show(struct seq_file *s, void *data) ...@@ -174,13 +175,17 @@ static int ci_requests_show(struct seq_file *s, void *data)
list_for_each(ptr, &ci->ci13xxx_ep[i].qh.queue) { list_for_each(ptr, &ci->ci13xxx_ep[i].qh.queue) {
req = list_entry(ptr, struct ci13xxx_req, queue); req = list_entry(ptr, struct ci13xxx_req, queue);
seq_printf(s, "EP=%02i: TD=%08X %s\n", list_for_each_entry_safe(node, tmpnode, &req->tds, td) {
i % (ci->hw_ep_max / 2), (u32)req->dma, seq_printf(s, "EP=%02i: TD=%08X %s\n",
((i < ci->hw_ep_max/2) ? "RX" : "TX")); i % (ci->hw_ep_max / 2),
(u32)node->dma,
for (j = 0; j < qsize; j++) ((i < ci->hw_ep_max/2) ?
seq_printf(s, " %04X: %08X\n", j, "RX" : "TX"));
*((u32 *)req->ptr + j));
for (j = 0; j < qsize; j++)
seq_printf(s, " %04X: %08X\n", j,
*((u32 *)node->ptr + j));
}
} }
spin_unlock_irqrestore(&ci->lock, flags); spin_unlock_irqrestore(&ci->lock, flags);
......
...@@ -368,6 +368,46 @@ static int hw_usb_reset(struct ci13xxx *ci) ...@@ -368,6 +368,46 @@ static int hw_usb_reset(struct ci13xxx *ci)
/****************************************************************************** /******************************************************************************
* UTIL block * UTIL block
*****************************************************************************/ *****************************************************************************/
static void setup_td_bits(struct td_node *tdnode, unsigned length)
{
memset(tdnode->ptr, 0, sizeof(*tdnode->ptr));
tdnode->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
tdnode->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
tdnode->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
}
/*
 * add_td_to_list: allocate a new TD and append it to a request's TD list
 * @mEp: endpoint whose dma_pool backs the TD
 * @mReq: request the TD belongs to
 * @length: transfer length programmed into the TD token
 *
 * Allocates the bookkeeping node with GFP_ATOMIC (callers may hold the
 * controller spinlock) and the descriptor itself from the endpoint's DMA
 * pool. If the request already has TDs, the previous last descriptor is
 * hardware-linked to the new one via its next pointer.
 *
 * Returns 0 on success or -ENOMEM on allocation failure (no partial
 * allocation is left behind).
 */
static int add_td_to_list(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq,
			  unsigned length)
{
	struct td_node *lastnode, *node = kzalloc(sizeof(*node), GFP_ATOMIC);

	if (node == NULL)
		return -ENOMEM;

	node->ptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC, &node->dma);
	if (node->ptr == NULL) {
		kfree(node);
		return -ENOMEM;
	}

	setup_td_bits(node, length);

	if (!list_empty(&mReq->tds)) {
		/* chain the previous last TD to the new one in hardware */
		lastnode = list_entry(mReq->tds.prev, struct td_node, td);
		lastnode->ptr->next = cpu_to_le32(node->dma);
	}

	/* list_add_tail() fully initializes node->td; no INIT_LIST_HEAD needed */
	list_add_tail(&node->td, &mReq->tds);

	return 0;
}
/** /**
* _usb_addr: calculates endpoint address from direction & number * _usb_addr: calculates endpoint address from direction & number
* @ep: endpoint * @ep: endpoint
...@@ -390,6 +430,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) ...@@ -390,6 +430,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
unsigned i; unsigned i;
int ret = 0; int ret = 0;
unsigned length = mReq->req.length; unsigned length = mReq->req.length;
struct td_node *firstnode, *lastnode;
/* don't queue twice */ /* don't queue twice */
if (mReq->req.status == -EALREADY) if (mReq->req.status == -EALREADY)
...@@ -397,58 +438,46 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) ...@@ -397,58 +438,46 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
mReq->req.status = -EALREADY; mReq->req.status = -EALREADY;
if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
&mReq->zdma);
if (mReq->zptr == NULL)
return -ENOMEM;
memset(mReq->zptr, 0, sizeof(*mReq->zptr));
mReq->zptr->next = cpu_to_le32(TD_TERMINATE);
mReq->zptr->token = cpu_to_le32(TD_STATUS_ACTIVE);
if (!mReq->req.no_interrupt)
mReq->zptr->token |= cpu_to_le32(TD_IOC);
}
ret = usb_gadget_map_request(&ci->gadget, &mReq->req, mEp->dir); ret = usb_gadget_map_request(&ci->gadget, &mReq->req, mEp->dir);
if (ret) if (ret)
return ret; return ret;
/* firstnode = list_first_entry(&mReq->tds,
* TD configuration struct td_node, td);
* TODO - handle requests which spawns into several TDs
*/ setup_td_bits(firstnode, length);
memset(mReq->ptr, 0, sizeof(*mReq->ptr));
mReq->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES)); firstnode->ptr->page[0] = cpu_to_le32(mReq->req.dma);
mReq->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
mReq->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
if (mReq->zptr) {
mReq->ptr->next = cpu_to_le32(mReq->zdma);
} else {
mReq->ptr->next = cpu_to_le32(TD_TERMINATE);
if (!mReq->req.no_interrupt)
mReq->ptr->token |= cpu_to_le32(TD_IOC);
}
mReq->ptr->page[0] = cpu_to_le32(mReq->req.dma);
for (i = 1; i < TD_PAGE_COUNT; i++) { for (i = 1; i < TD_PAGE_COUNT; i++) {
u32 page = mReq->req.dma + i * CI13XXX_PAGE_SIZE; u32 page = mReq->req.dma + i * CI13XXX_PAGE_SIZE;
page &= ~TD_RESERVED_MASK; page &= ~TD_RESERVED_MASK;
mReq->ptr->page[i] = cpu_to_le32(page); firstnode->ptr->page[i] = cpu_to_le32(page);
} }
if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0))
add_td_to_list(mEp, mReq, 0);
lastnode = list_entry(mReq->tds.prev,
struct td_node, td);
lastnode->ptr->next = cpu_to_le32(TD_TERMINATE);
if (!mReq->req.no_interrupt)
lastnode->ptr->token |= cpu_to_le32(TD_IOC);
wmb(); wmb();
if (!list_empty(&mEp->qh.queue)) { if (!list_empty(&mEp->qh.queue)) {
struct ci13xxx_req *mReqPrev; struct ci13xxx_req *mReqPrev;
int n = hw_ep_bit(mEp->num, mEp->dir); int n = hw_ep_bit(mEp->num, mEp->dir);
int tmp_stat; int tmp_stat;
u32 next = mReq->dma & TD_ADDR_MASK; struct td_node *prevlastnode;
u32 next = firstnode->dma & TD_ADDR_MASK;
mReqPrev = list_entry(mEp->qh.queue.prev, mReqPrev = list_entry(mEp->qh.queue.prev,
struct ci13xxx_req, queue); struct ci13xxx_req, queue);
if (mReqPrev->zptr) prevlastnode = list_entry(mReqPrev->tds.prev,
mReqPrev->zptr->next = cpu_to_le32(next); struct td_node, td);
else
mReqPrev->ptr->next = cpu_to_le32(next); prevlastnode->ptr->next = cpu_to_le32(next);
wmb(); wmb();
if (hw_read(ci, OP_ENDPTPRIME, BIT(n))) if (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
goto done; goto done;
...@@ -462,7 +491,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) ...@@ -462,7 +491,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
} }
/* QH configuration */ /* QH configuration */
mEp->qh.ptr->td.next = cpu_to_le32(mReq->dma); /* TERMINATE = 0 */ mEp->qh.ptr->td.next = cpu_to_le32(firstnode->dma);
mEp->qh.ptr->td.token &= mEp->qh.ptr->td.token &=
cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE)); cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE));
...@@ -491,19 +520,25 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) ...@@ -491,19 +520,25 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
*/ */
static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq) static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
{ {
u32 tmptoken = le32_to_cpu(mReq->ptr->token); u32 tmptoken;
struct td_node *node, *tmpnode, *firstnode;
if (mReq->req.status != -EALREADY) if (mReq->req.status != -EALREADY)
return -EINVAL; return -EINVAL;
if ((TD_STATUS_ACTIVE & tmptoken) != 0) firstnode = list_first_entry(&mReq->tds,
return -EBUSY; struct td_node, td);
if (mReq->zptr) { list_for_each_entry_safe(node, tmpnode, &mReq->tds, td) {
if ((cpu_to_le32(TD_STATUS_ACTIVE) & mReq->zptr->token) != 0) tmptoken = le32_to_cpu(node->ptr->token);
if ((TD_STATUS_ACTIVE & tmptoken) != 0)
return -EBUSY; return -EBUSY;
dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma); if (node != firstnode) {
mReq->zptr = NULL; dma_pool_free(mEp->td_pool, node->ptr, node->dma);
list_del_init(&node->td);
node->ptr = NULL;
kfree(node);
}
} }
mReq->req.status = 0; mReq->req.status = 0;
...@@ -537,6 +572,7 @@ static int _ep_nuke(struct ci13xxx_ep *mEp) ...@@ -537,6 +572,7 @@ static int _ep_nuke(struct ci13xxx_ep *mEp)
__releases(mEp->lock) __releases(mEp->lock)
__acquires(mEp->lock) __acquires(mEp->lock)
{ {
struct td_node *node, *tmpnode, *firstnode;
if (mEp == NULL) if (mEp == NULL)
return -EINVAL; return -EINVAL;
...@@ -549,9 +585,17 @@ __acquires(mEp->lock) ...@@ -549,9 +585,17 @@ __acquires(mEp->lock)
list_entry(mEp->qh.queue.next, list_entry(mEp->qh.queue.next,
struct ci13xxx_req, queue); struct ci13xxx_req, queue);
if (mReq->zptr) { firstnode = list_first_entry(&mReq->tds,
dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma); struct td_node, td);
mReq->zptr = NULL;
list_for_each_entry_safe(node, tmpnode, &mReq->tds, td) {
if (node != firstnode) {
dma_pool_free(mEp->td_pool, node->ptr,
node->dma);
list_del_init(&node->td);
node->ptr = NULL;
kfree(node);
}
} }
list_del_init(&mReq->queue); list_del_init(&mReq->queue);
...@@ -838,9 +882,13 @@ __acquires(mEp->lock) ...@@ -838,9 +882,13 @@ __acquires(mEp->lock)
struct ci13xxx_req *mReq, *mReqTemp; struct ci13xxx_req *mReq, *mReqTemp;
struct ci13xxx_ep *mEpTemp = mEp; struct ci13xxx_ep *mEpTemp = mEp;
int retval = 0; int retval = 0;
struct td_node *firstnode;
list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue, list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
queue) { queue) {
firstnode = list_first_entry(&mReq->tds,
struct td_node, td);
retval = _hardware_dequeue(mEp, mReq); retval = _hardware_dequeue(mEp, mReq);
if (retval < 0) if (retval < 0)
break; break;
...@@ -1143,19 +1191,26 @@ static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) ...@@ -1143,19 +1191,26 @@ static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{ {
struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep); struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
struct ci13xxx_req *mReq = NULL; struct ci13xxx_req *mReq = NULL;
struct td_node *node;
if (ep == NULL) if (ep == NULL)
return NULL; return NULL;
mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags); mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
if (mReq != NULL) { node = kzalloc(sizeof(struct td_node), gfp_flags);
if (mReq != NULL && node != NULL) {
INIT_LIST_HEAD(&mReq->queue); INIT_LIST_HEAD(&mReq->queue);
INIT_LIST_HEAD(&mReq->tds);
INIT_LIST_HEAD(&node->td);
mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags, node->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
&mReq->dma); &node->dma);
if (mReq->ptr == NULL) { if (node->ptr == NULL) {
kfree(node);
kfree(mReq); kfree(mReq);
mReq = NULL; mReq = NULL;
} else {
list_add_tail(&node->td, &mReq->tds);
} }
} }
...@@ -1171,6 +1226,7 @@ static void ep_free_request(struct usb_ep *ep, struct usb_request *req) ...@@ -1171,6 +1226,7 @@ static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
{ {
struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep); struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req); struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
struct td_node *firstnode;
unsigned long flags; unsigned long flags;
if (ep == NULL || req == NULL) { if (ep == NULL || req == NULL) {
...@@ -1182,8 +1238,11 @@ static void ep_free_request(struct usb_ep *ep, struct usb_request *req) ...@@ -1182,8 +1238,11 @@ static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
spin_lock_irqsave(mEp->lock, flags); spin_lock_irqsave(mEp->lock, flags);
if (mReq->ptr) firstnode = list_first_entry(&mReq->tds,
dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma); struct td_node, td);
if (firstnode->ptr)
dma_pool_free(mEp->td_pool, firstnode->ptr, firstnode->dma);
kfree(mReq); kfree(mReq);
spin_unlock_irqrestore(mEp->lock, flags); spin_unlock_irqrestore(mEp->lock, flags);
......
...@@ -60,6 +60,12 @@ struct ci13xxx_qh { ...@@ -60,6 +60,12 @@ struct ci13xxx_qh {
struct usb_ctrlrequest setup; struct usb_ctrlrequest setup;
} __attribute__ ((packed, aligned(4))); } __attribute__ ((packed, aligned(4)));
/**
 * struct td_node - one transfer descriptor of a request's dynamic TD chain
 * @td: entry in the owning request's list of TDs (struct ci13xxx_req.tds)
 * @dma: DMA (bus) address of the descriptor, written into the hardware
 *       next-TD pointers and the queue head
 * @ptr: CPU virtual pointer to the descriptor, allocated from the
 *       endpoint's td_pool
 */
struct td_node {
	struct list_head td;
	dma_addr_t dma;
	struct ci13xxx_td *ptr;
};
/** /**
* struct ci13xxx_req - usb request representation * struct ci13xxx_req - usb request representation
* @req: request structure for gadget drivers * @req: request structure for gadget drivers
...@@ -72,10 +78,7 @@ struct ci13xxx_qh { ...@@ -72,10 +78,7 @@ struct ci13xxx_qh {
struct ci13xxx_req { struct ci13xxx_req {
struct usb_request req; struct usb_request req;
struct list_head queue; struct list_head queue;
struct ci13xxx_td *ptr; struct list_head tds;
dma_addr_t dma;
struct ci13xxx_td *zptr;
dma_addr_t zdma;
}; };
#ifdef CONFIG_USB_CHIPIDEA_UDC #ifdef CONFIG_USB_CHIPIDEA_UDC
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册