commit d0cc3d41 authored by Martin Fuzzey, committed by Greg Kroah-Hartman

USB: imx21-hcd: accept arbitrary transfer buffer alignment.

The hardware can only do DMA to 4-byte-aligned addresses.
When this requirement is not met, use PIO or a bounce buffer.

PIO is used when the buffer is small enough to fit directly
into the hardware data memory (2 * maxpacket).

A bounce buffer is used for larger transfers.
Signed-off-by: Martin Fuzzey <mfuzzey@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 1dae423d
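To make the strategy concrete, here is a minimal sketch of the routing decision the patch implements in activate_etd(). The enum and helper name below are hypothetical illustrations, not driver code; the real driver compares etd->len against etd->dmem_size, which holds 2 * maxpacket:

    #include <linux/types.h>

    enum xfer_method { XFER_DMA, XFER_PIO, XFER_BOUNCE };

    /*
     * Hypothetical helper mirroring the patch's logic: DMA requires a
     * 4-byte-aligned address; an unaligned transfer that fits in the
     * ETD data memory goes by PIO, anything larger goes through a
     * bounce buffer.
     */
    static enum xfer_method pick_xfer_method(dma_addr_t addr, size_t len,
                                             size_t dmem_size)
    {
            if ((addr & 3) == 0)
                    return XFER_DMA;
            if (len <= dmem_size)
                    return XFER_PIO;
            return XFER_BOUNCE;
    }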
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -57,6 +57,7 @@
 #include <linux/slab.h>
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
+#include <linux/dma-mapping.h>
 
 #include "imx21-hcd.h"
@@ -136,9 +137,18 @@ static int imx21_hc_get_frame(struct usb_hcd *hcd)
 	return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
 }
 
+static inline bool unsuitable_for_dma(dma_addr_t addr)
+{
+	return (addr & 3) != 0;
+}
+
 #include "imx21-dbg.c"
 
+static void nonisoc_urb_completed_for_etd(
+	struct imx21 *imx21, struct etd_priv *etd, int status);
+static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
+static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);
+
 /* =========================================== */
 /* ETD management                              */
 /* =========================================== */
@@ -185,7 +195,8 @@ static void reset_etd(struct imx21 *imx21, int num)
 		etd_writel(imx21, num, i, 0);
 	etd->urb = NULL;
 	etd->ep = NULL;
-	etd->td = NULL;;
+	etd->td = NULL;
+	etd->bounce_buffer = NULL;
 }
 
 static void free_etd(struct imx21 *imx21, int num)
@@ -221,26 +232,94 @@ static void setup_etd_dword0(struct imx21 *imx21,
 		((u32) maxpacket << DW0_MAXPKTSIZ));
 }
 
-static void activate_etd(struct imx21 *imx21,
-	int etd_num, dma_addr_t dma, u8 dir)
+/**
+ * Copy buffer to data controller data memory.
+ * We cannot use memcpy_toio() because the hardware requires 32bit writes
+ */
+static void copy_to_dmem(
+	struct imx21 *imx21, int dmem_offset, void *src, int count)
+{
+	void __iomem *dmem = imx21->regs + USBOTG_DMEM + dmem_offset;
+	u32 word = 0;
+	u8 *p = src;
+	int byte = 0;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		byte = i % 4;
+		word += (*p++ << (byte * 8));
+		if (byte == 3) {
+			writel(word, dmem);
+			dmem += 4;
+			word = 0;
+		}
+	}
+
+	if (count && byte != 3)
+		writel(word, dmem);
+}
+
+static void activate_etd(struct imx21 *imx21, int etd_num, u8 dir)
 {
 	u32 etd_mask = 1 << etd_num;
 	struct etd_priv *etd = &imx21->etd[etd_num];
 
+	if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) {
+		/* For non aligned isoc the condition below is always true */
+		if (etd->len <= etd->dmem_size) {
+			/* Fits into data memory, use PIO */
+			if (dir != TD_DIR_IN) {
+				copy_to_dmem(imx21,
+						etd->dmem_offset,
+						etd->cpu_buffer, etd->len);
+			}
+			etd->dma_handle = 0;
+
+		} else {
+			/* Too big for data memory, use bounce buffer */
+			enum dma_data_direction dmadir;
+
+			if (dir == TD_DIR_IN) {
+				dmadir = DMA_FROM_DEVICE;
+				etd->bounce_buffer = kmalloc(etd->len,
+								GFP_ATOMIC);
+			} else {
+				dmadir = DMA_TO_DEVICE;
+				etd->bounce_buffer = kmemdup(etd->cpu_buffer,
+								etd->len,
+								GFP_ATOMIC);
+			}
+			if (!etd->bounce_buffer) {
+				dev_err(imx21->dev, "failed bounce alloc\n");
+				goto err_bounce_alloc;
+			}
+
+			etd->dma_handle =
+				dma_map_single(imx21->dev,
+						etd->bounce_buffer,
+						etd->len,
+						dmadir);
+			if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
+				dev_err(imx21->dev, "failed bounce map\n");
+				goto err_bounce_map;
+			}
+		}
+	}
+
 	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
 	set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
 	clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
 	clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
 
-	if (dma) {
+	if (etd->dma_handle) {
 		set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
 		clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
 		clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
-		writel(dma, imx21->regs + USB_ETDSMSA(etd_num));
+		writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num));
 		set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
 	} else {
 		if (dir != TD_DIR_IN) {
-			/* need to set for ZLP */
+			/* need to set for ZLP and PIO */
 			set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
 			set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
 		}
@@ -263,6 +342,14 @@ static void activate_etd(struct imx21 *imx21,
 
 	etd->active_count = 1;
 	writel(etd_mask, imx21->regs + USBH_ETDENSET);
+	return;
+
+err_bounce_map:
+	kfree(etd->bounce_buffer);
+
+err_bounce_alloc:
+	free_dmem(imx21, etd);
+	nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
 }
 
 /* =========================================== */
@@ -325,7 +412,7 @@ static void activate_queued_etd(struct imx21 *imx21,
 	etd->dmem_offset = dmem_offset;
 	urb_priv->active = 1;
-	activate_etd(imx21, etd_num, etd->dma_handle, dir);
+	activate_etd(imx21, etd_num, dir);
 }
 
 static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
@@ -385,7 +472,6 @@ static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
 /* =========================================== */
 /* End handling                                */
 /* =========================================== */
-static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
 
 /* Endpoint now idle - release it's ETD(s) or asssign to queued request */
 static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
@@ -448,6 +534,24 @@ __acquires(imx21->lock)
 	ep_idle(imx21, ep_priv);
 }
 
+static void nonisoc_urb_completed_for_etd(
+	struct imx21 *imx21, struct etd_priv *etd, int status)
+{
+	struct usb_host_endpoint *ep = etd->ep;
+
+	urb_done(imx21->hcd, etd->urb, status);
+	etd->urb = NULL;
+
+	if (!list_empty(&ep->urb_list)) {
+		struct urb *urb = list_first_entry(
+					&ep->urb_list, struct urb, urb_list);
+
+		dev_vdbg(imx21->dev, "next URB %p\n", urb);
+		schedule_nonisoc_etd(imx21, urb);
+	}
+}
+
 /* =========================================== */
 /* ISOC Handling ...                           */
 /* =========================================== */
@@ -500,6 +604,8 @@ static void schedule_isoc_etds(struct usb_hcd *hcd,
 		etd->ep = td->ep;
 		etd->urb = td->urb;
 		etd->len = td->len;
+		etd->dma_handle = td->dma_handle;
+		etd->cpu_buffer = td->cpu_buffer;
 
 		debug_isoc_submitted(imx21, cur_frame, td);
@@ -513,16 +619,17 @@ static void schedule_isoc_etds(struct usb_hcd *hcd,
 			(TD_NOTACCESSED << DW3_COMPCODE0) |
 			(td->len << DW3_PKTLEN0));
 
-		activate_etd(imx21, etd_num, td->data, dir);
+		activate_etd(imx21, etd_num, dir);
 	}
 }
 
-static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
+static void isoc_etd_done(struct usb_hcd *hcd, int etd_num)
 {
 	struct imx21 *imx21 = hcd_to_imx21(hcd);
 	int etd_mask = 1 << etd_num;
-	struct urb_priv *urb_priv = urb->hcpriv;
 	struct etd_priv *etd = imx21->etd + etd_num;
+	struct urb *urb = etd->urb;
+	struct urb_priv *urb_priv = urb->hcpriv;
 	struct td *td = etd->td;
 	struct usb_host_endpoint *ep = etd->ep;
 	int isoc_index = td->isoc_index;
@@ -556,8 +663,13 @@ static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
 			bytes_xfrd, td->len, urb, etd_num, isoc_index);
 	}
 
-	if (dir_in)
+	if (dir_in) {
 		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+		if (!etd->dma_handle)
+			memcpy_fromio(etd->cpu_buffer,
+				imx21->regs + USBOTG_DMEM + etd->dmem_offset,
+				bytes_xfrd);
+	}
 
 	urb->actual_length += bytes_xfrd;
 	urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
@@ -716,12 +828,14 @@ static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
 	/* set up transfers */
 	td = urb_priv->isoc_td;
 	for (i = 0; i < urb->number_of_packets; i++, td++) {
+		unsigned int offset = urb->iso_frame_desc[i].offset;
 		td->ep = ep;
 		td->urb = urb;
 		td->len = urb->iso_frame_desc[i].length;
 		td->isoc_index = i;
 		td->frame = wrap_frame(urb->start_frame + urb->interval * i);
-		td->data = urb->transfer_dma + urb->iso_frame_desc[i].offset;
+		td->dma_handle = urb->transfer_dma + offset;
+		td->cpu_buffer = urb->transfer_buffer + offset;
 		list_add_tail(&td->list, &ep_priv->td_list);
 	}
@@ -812,13 +926,15 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
 	if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
 		if (state == US_CTRL_SETUP) {
 			dir = TD_DIR_SETUP;
+			if (unsuitable_for_dma(urb->setup_dma))
+				unmap_urb_setup_for_dma(imx21->hcd, urb);
 			etd->dma_handle = urb->setup_dma;
+			etd->cpu_buffer = urb->setup_packet;
 			bufround = 0;
 			count = 8;
 			datatoggle = TD_TOGGLE_DATA0;
 		} else {	/* US_CTRL_ACK */
 			dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
-			etd->dma_handle = urb->transfer_dma;
 			bufround = 0;
 			count = 0;
 			datatoggle = TD_TOGGLE_DATA1;
@@ -826,7 +942,11 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
 	} else {
 		dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
 		bufround = (dir == TD_DIR_IN) ? 1 : 0;
+		if (unsuitable_for_dma(urb->transfer_dma))
+			unmap_urb_for_dma(imx21->hcd, urb);
+
 		etd->dma_handle = urb->transfer_dma;
+		etd->cpu_buffer = urb->transfer_buffer;
 		if (usb_pipebulk(pipe) && (state == US_BULK0))
 			count = 0;
 		else
@@ -901,14 +1021,15 @@ static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
 	/* enable the ETD to kick off transfer */
 	dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
 		etd_num, count, dir != TD_DIR_IN ? "out" : "in");
-	activate_etd(imx21, etd_num, etd->dma_handle, dir);
+	activate_etd(imx21, etd_num, dir);
 }
 
-static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
+static void nonisoc_etd_done(struct usb_hcd *hcd, int etd_num)
 {
 	struct imx21 *imx21 = hcd_to_imx21(hcd);
 	struct etd_priv *etd = &imx21->etd[etd_num];
+	struct urb *urb = etd->urb;
 	u32 etd_mask = 1 << etd_num;
 	struct urb_priv *urb_priv = urb->hcpriv;
 	int dir;
@@ -930,7 +1051,20 @@ static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
 	if (dir == TD_DIR_IN) {
 		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
 		clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
+
+		if (etd->bounce_buffer) {
+			memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd);
+			dma_unmap_single(imx21->dev,
+				etd->dma_handle, etd->len, DMA_FROM_DEVICE);
+		} else if (!etd->dma_handle && bytes_xfrd) { /* PIO */
+			memcpy_fromio(etd->cpu_buffer,
+				imx21->regs + USBOTG_DMEM + etd->dmem_offset,
+				bytes_xfrd);
+		}
 	}
+
+	kfree(etd->bounce_buffer);
+	etd->bounce_buffer = NULL;
 	free_dmem(imx21, etd);
 	urb->error_count = 0;
@@ -988,24 +1122,15 @@ static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
 		break;
 	}
 
-	if (!etd_done) {
+	if (etd_done)
+		nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]);
+	else {
 		dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
 		schedule_nonisoc_etd(imx21, urb);
-	} else {
-		struct usb_host_endpoint *ep = urb->ep;
-		urb_done(hcd, urb, cc_to_error[cc]);
-		etd->urb = NULL;
-
-		if (!list_empty(&ep->urb_list)) {
-			urb = list_first_entry(&ep->urb_list,
-				struct urb, urb_list);
-			dev_vdbg(imx21->dev, "next URB %p\n", urb);
-			schedule_nonisoc_etd(imx21, urb);
-		}
 	}
 }
 
 static struct ep_priv *alloc_ep(void)
 {
 	int i;
@@ -1146,9 +1271,13 @@ static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
 	} else if (urb_priv->active) {
 		int etd_num = ep_priv->etd[0];
 		if (etd_num != -1) {
+			struct etd_priv *etd = &imx21->etd[etd_num];
+
 			disactivate_etd(imx21, etd_num);
-			free_dmem(imx21, &imx21->etd[etd_num]);
-			imx21->etd[etd_num].urb = NULL;
+			free_dmem(imx21, etd);
+			etd->urb = NULL;
+			kfree(etd->bounce_buffer);
+			etd->bounce_buffer = NULL;
 		}
 	}
@@ -1248,9 +1377,9 @@ static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
 		}
 
 		if (usb_pipeisoc(etd->urb->pipe))
-			isoc_etd_done(hcd, etd->urb, etd_num);
+			isoc_etd_done(hcd, etd_num);
 		else
-			nonisoc_etd_done(hcd, etd->urb, etd_num);
+			nonisoc_etd_done(hcd, etd_num);
 	}
 
 	/* only enable SOF interrupt if it may be needed for the kludge */
@@ -1718,6 +1847,7 @@ static int imx21_probe(struct platform_device *pdev)
 	}
 
 	imx21 = hcd_to_imx21(hcd);
+	imx21->hcd = hcd;
 	imx21->dev = &pdev->dev;
 	imx21->pdata = pdev->dev.platform_data;
 	if (!imx21->pdata)
--- a/drivers/usb/host/imx21-hcd.h
+++ b/drivers/usb/host/imx21-hcd.h
@@ -250,6 +250,7 @@
 #define USBCTRL_USB_BYP			(1 << 2)
 #define USBCTRL_HOST1_TXEN_OE		(1 << 1)
 
+#define USBOTG_DMEM		0x1000
 
 /* Values in TD blocks */
 #define TD_DIR_SETUP		0
@@ -346,8 +347,8 @@ struct td {
 	struct list_head list;
 	struct urb *urb;
 	struct usb_host_endpoint *ep;
-	dma_addr_t data;
-	unsigned long buf_addr;
+	dma_addr_t dma_handle;
+	void *cpu_buffer;
 	int len;
 	int frame;
 	int isoc_index;
@@ -360,6 +361,8 @@ struct etd_priv {
 	struct td *td;
 	struct list_head queue;
 	dma_addr_t dma_handle;
+	void *cpu_buffer;
+	void *bounce_buffer;
 	int alloc;
 	int len;
 	int dmem_size;
@@ -412,6 +415,7 @@ struct debug_isoc_trace {
 struct imx21 {
 	spinlock_t lock;
 	struct device *dev;
+	struct usb_hcd *hcd;
 	struct mx21_usbh_platform_data *pdata;
 	struct list_head dmem_list;
 	struct list_head queue_for_etd;	/* eps queued due to etd shortage */
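As a footnote, the byte-to-word packing that copy_to_dmem() performs above can be checked in isolation. The following is a host-side model only (plain C; the array store is a hypothetical stand-in for the driver's writel() into the ETD data memory), showing that a trailing partial word is flushed:

    #include <stdint.h>
    #include <stdio.h>

    /* Model of copy_to_dmem(): pack bytes little-endian into 32-bit
     * words, flushing any trailing partial word. */
    static void pack_words(uint32_t *dmem, const uint8_t *src, int count)
    {
            uint32_t word = 0;
            int byte = 0, i;

            for (i = 0; i < count; i++) {
                    byte = i % 4;
                    word += (uint32_t)src[i] << (byte * 8);
                    if (byte == 3) {
                            *dmem++ = word; /* writel(word, dmem) in the driver */
                            word = 0;
                    }
            }
            if (count && byte != 3)
                    *dmem = word;   /* partial trailing word */
    }

    int main(void)
    {
            uint8_t buf[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
            uint32_t out[2] = { 0, 0 };

            pack_words(out, buf, sizeof(buf));
            printf("%08x %08x\n", out[0], out[1]); /* prints: 44332211 00006655 */
            return 0;
    }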