提交 70b5c4ee 编写于 作者: M Michael Ellerman

Merge branch 'topic/hvc' into next

This branch held some hvc related commits (Hypervisor Virtual Console)
so that they could get some wider testing in linux-next before merging.
...@@ -73,7 +73,7 @@ static LIST_HEAD(hvc_structs); ...@@ -73,7 +73,7 @@ static LIST_HEAD(hvc_structs);
* Protect the list of hvc_struct instances from inserts and removals during * Protect the list of hvc_struct instances from inserts and removals during
* list traversal. * list traversal.
*/ */
static DEFINE_SPINLOCK(hvc_structs_lock); static DEFINE_MUTEX(hvc_structs_mutex);
/* /*
* This value is used to assign a tty->index value to a hvc_struct based * This value is used to assign a tty->index value to a hvc_struct based
...@@ -83,7 +83,7 @@ static DEFINE_SPINLOCK(hvc_structs_lock); ...@@ -83,7 +83,7 @@ static DEFINE_SPINLOCK(hvc_structs_lock);
static int last_hvc = -1; static int last_hvc = -1;
/* /*
* Do not call this function with either the hvc_structs_lock or the hvc_struct * Do not call this function with either the hvc_structs_mutex or the hvc_struct
* lock held. If successful, this function increments the kref reference * lock held. If successful, this function increments the kref reference
* count against the target hvc_struct so it should be released when finished. * count against the target hvc_struct so it should be released when finished.
*/ */
...@@ -92,24 +92,46 @@ static struct hvc_struct *hvc_get_by_index(int index) ...@@ -92,24 +92,46 @@ static struct hvc_struct *hvc_get_by_index(int index)
struct hvc_struct *hp; struct hvc_struct *hp;
unsigned long flags; unsigned long flags;
spin_lock(&hvc_structs_lock); mutex_lock(&hvc_structs_mutex);
list_for_each_entry(hp, &hvc_structs, next) { list_for_each_entry(hp, &hvc_structs, next) {
spin_lock_irqsave(&hp->lock, flags); spin_lock_irqsave(&hp->lock, flags);
if (hp->index == index) { if (hp->index == index) {
tty_port_get(&hp->port); tty_port_get(&hp->port);
spin_unlock_irqrestore(&hp->lock, flags); spin_unlock_irqrestore(&hp->lock, flags);
spin_unlock(&hvc_structs_lock); mutex_unlock(&hvc_structs_mutex);
return hp; return hp;
} }
spin_unlock_irqrestore(&hp->lock, flags); spin_unlock_irqrestore(&hp->lock, flags);
} }
hp = NULL; hp = NULL;
mutex_unlock(&hvc_structs_mutex);
spin_unlock(&hvc_structs_lock);
return hp; return hp;
} }
/*
 * Common flush helper: invoke the backend's optional flush hook.
 *
 * @wait selects the sleeping variant; in that case might_sleep() is
 * called first so atomic-context callers are flagged by debug kernels.
 * Returns 0 when the backend provides no flush hook, otherwise the
 * hook's return value.
 */
static int __hvc_flush(const struct hv_ops *ops, uint32_t vtermno, bool wait)
{
	int ret = 0;

	if (wait)
		might_sleep();

	if (ops->flush)
		ret = ops->flush(vtermno, wait);

	return ret;
}
/*
 * Non-sleeping flush for the console path: passes wait=false, so
 * __hvc_flush() does not call might_sleep() before invoking the
 * backend's flush hook (if any).
 */
static int hvc_console_flush(const struct hv_ops *ops, uint32_t vtermno)
{
	return __hvc_flush(ops, vtermno, false);
}
/*
 * Wait for the console to flush before writing more to it.  This sleeps:
 * wait=true makes __hvc_flush() call might_sleep() and asks the backend's
 * flush hook to wait for the output to drain.  Do not call from atomic
 * context.
 */
static int hvc_flush(struct hvc_struct *hp)
{
	return __hvc_flush(hp->ops, hp->vtermno, true);
}
/* /*
* Initial console vtermnos for console API usage prior to full console * Initial console vtermnos for console API usage prior to full console
...@@ -156,8 +178,12 @@ static void hvc_console_print(struct console *co, const char *b, ...@@ -156,8 +178,12 @@ static void hvc_console_print(struct console *co, const char *b,
if (r <= 0) { if (r <= 0) {
/* throw away characters on error /* throw away characters on error
* but spin in case of -EAGAIN */ * but spin in case of -EAGAIN */
if (r != -EAGAIN) if (r != -EAGAIN) {
i = 0; i = 0;
} else {
hvc_console_flush(cons_ops[index],
vtermnos[index]);
}
} else if (r > 0) { } else if (r > 0) {
i -= r; i -= r;
if (i > 0) if (i > 0)
...@@ -165,6 +191,7 @@ static void hvc_console_print(struct console *co, const char *b, ...@@ -165,6 +191,7 @@ static void hvc_console_print(struct console *co, const char *b,
} }
} }
} }
hvc_console_flush(cons_ops[index], vtermnos[index]);
} }
static struct tty_driver *hvc_console_device(struct console *c, int *index) static struct tty_driver *hvc_console_device(struct console *c, int *index)
...@@ -224,13 +251,13 @@ static void hvc_port_destruct(struct tty_port *port) ...@@ -224,13 +251,13 @@ static void hvc_port_destruct(struct tty_port *port)
struct hvc_struct *hp = container_of(port, struct hvc_struct, port); struct hvc_struct *hp = container_of(port, struct hvc_struct, port);
unsigned long flags; unsigned long flags;
spin_lock(&hvc_structs_lock); mutex_lock(&hvc_structs_mutex);
spin_lock_irqsave(&hp->lock, flags); spin_lock_irqsave(&hp->lock, flags);
list_del(&(hp->next)); list_del(&(hp->next));
spin_unlock_irqrestore(&hp->lock, flags); spin_unlock_irqrestore(&hp->lock, flags);
spin_unlock(&hvc_structs_lock); mutex_unlock(&hvc_structs_mutex);
kfree(hp); kfree(hp);
} }
...@@ -494,13 +521,12 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count ...@@ -494,13 +521,12 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
if (hp->port.count <= 0) if (hp->port.count <= 0)
return -EIO; return -EIO;
while (count > 0) {
spin_lock_irqsave(&hp->lock, flags); spin_lock_irqsave(&hp->lock, flags);
/* Push pending writes */ rsize = hp->outbuf_size - hp->n_outbuf;
if (hp->n_outbuf > 0)
hvc_push(hp);
while (count > 0 && (rsize = hp->outbuf_size - hp->n_outbuf) > 0) { if (rsize) {
if (rsize > count) if (rsize > count)
rsize = count; rsize = count;
memcpy(hp->outbuf + hp->n_outbuf, buf, rsize); memcpy(hp->outbuf + hp->n_outbuf, buf, rsize);
...@@ -508,10 +534,20 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count ...@@ -508,10 +534,20 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
buf += rsize; buf += rsize;
hp->n_outbuf += rsize; hp->n_outbuf += rsize;
written += rsize; written += rsize;
hvc_push(hp);
} }
if (hp->n_outbuf > 0)
hvc_push(hp);
spin_unlock_irqrestore(&hp->lock, flags); spin_unlock_irqrestore(&hp->lock, flags);
if (count) {
if (hp->n_outbuf > 0)
hvc_flush(hp);
cond_resched();
}
}
/* /*
* Racy, but harmless, kick thread if there is still pending data. * Racy, but harmless, kick thread if there is still pending data.
*/ */
...@@ -590,10 +626,10 @@ static u32 timeout = MIN_TIMEOUT; ...@@ -590,10 +626,10 @@ static u32 timeout = MIN_TIMEOUT;
#define HVC_POLL_READ 0x00000001 #define HVC_POLL_READ 0x00000001
#define HVC_POLL_WRITE 0x00000002 #define HVC_POLL_WRITE 0x00000002
int hvc_poll(struct hvc_struct *hp) static int __hvc_poll(struct hvc_struct *hp, bool may_sleep)
{ {
struct tty_struct *tty; struct tty_struct *tty;
int i, n, poll_mask = 0; int i, n, count, poll_mask = 0;
char buf[N_INBUF] __ALIGNED__; char buf[N_INBUF] __ALIGNED__;
unsigned long flags; unsigned long flags;
int read_total = 0; int read_total = 0;
...@@ -612,6 +648,12 @@ int hvc_poll(struct hvc_struct *hp) ...@@ -612,6 +648,12 @@ int hvc_poll(struct hvc_struct *hp)
timeout = (written_total) ? 0 : MIN_TIMEOUT; timeout = (written_total) ? 0 : MIN_TIMEOUT;
} }
if (may_sleep) {
spin_unlock_irqrestore(&hp->lock, flags);
cond_resched();
spin_lock_irqsave(&hp->lock, flags);
}
/* No tty attached, just skip */ /* No tty attached, just skip */
tty = tty_port_tty_get(&hp->port); tty = tty_port_tty_get(&hp->port);
if (tty == NULL) if (tty == NULL)
...@@ -619,7 +661,7 @@ int hvc_poll(struct hvc_struct *hp) ...@@ -619,7 +661,7 @@ int hvc_poll(struct hvc_struct *hp)
/* Now check if we can get data (are we throttled ?) */ /* Now check if we can get data (are we throttled ?) */
if (tty_throttled(tty)) if (tty_throttled(tty))
goto throttled; goto out;
/* If we aren't notifier driven and aren't throttled, we always /* If we aren't notifier driven and aren't throttled, we always
* request a reschedule * request a reschedule
...@@ -628,13 +670,13 @@ int hvc_poll(struct hvc_struct *hp) ...@@ -628,13 +670,13 @@ int hvc_poll(struct hvc_struct *hp)
poll_mask |= HVC_POLL_READ; poll_mask |= HVC_POLL_READ;
/* Read data if any */ /* Read data if any */
for (;;) {
int count = tty_buffer_request_room(&hp->port, N_INBUF); count = tty_buffer_request_room(&hp->port, N_INBUF);
/* If flip is full, just reschedule a later read */ /* If flip is full, just reschedule a later read */
if (count == 0) { if (count == 0) {
poll_mask |= HVC_POLL_READ; poll_mask |= HVC_POLL_READ;
break; goto out;
} }
n = hp->ops->get_chars(hp->vtermno, buf, count); n = hp->ops->get_chars(hp->vtermno, buf, count);
...@@ -652,8 +694,9 @@ int hvc_poll(struct hvc_struct *hp) ...@@ -652,8 +694,9 @@ int hvc_poll(struct hvc_struct *hp)
*/ */
poll_mask |= HVC_POLL_READ; poll_mask |= HVC_POLL_READ;
} }
break; goto out;
} }
for (i = 0; i < n; ++i) { for (i = 0; i < n; ++i) {
#ifdef CONFIG_MAGIC_SYSRQ #ifdef CONFIG_MAGIC_SYSRQ
if (hp->index == hvc_console.index) { if (hp->index == hvc_console.index) {
...@@ -674,10 +717,11 @@ int hvc_poll(struct hvc_struct *hp) ...@@ -674,10 +717,11 @@ int hvc_poll(struct hvc_struct *hp)
#endif /* CONFIG_MAGIC_SYSRQ */ #endif /* CONFIG_MAGIC_SYSRQ */
tty_insert_flip_char(&hp->port, buf[i], 0); tty_insert_flip_char(&hp->port, buf[i], 0);
} }
if (n == count)
poll_mask |= HVC_POLL_READ;
read_total = n;
read_total += n; out:
}
throttled:
/* Wakeup write queue if necessary */ /* Wakeup write queue if necessary */
if (hp->do_wakeup) { if (hp->do_wakeup) {
hp->do_wakeup = 0; hp->do_wakeup = 0;
...@@ -697,6 +741,11 @@ int hvc_poll(struct hvc_struct *hp) ...@@ -697,6 +741,11 @@ int hvc_poll(struct hvc_struct *hp)
return poll_mask; return poll_mask;
} }
/*
 * Exported polling entry point.  Runs __hvc_poll() with may_sleep=false,
 * so the unlock/cond_resched()/relock path inside __hvc_poll() is skipped
 * and this remains callable where sleeping is not allowed.
 */
int hvc_poll(struct hvc_struct *hp)
{
	return __hvc_poll(hp, false);
}
EXPORT_SYMBOL_GPL(hvc_poll); EXPORT_SYMBOL_GPL(hvc_poll);
/** /**
...@@ -733,11 +782,12 @@ static int khvcd(void *unused) ...@@ -733,11 +782,12 @@ static int khvcd(void *unused)
try_to_freeze(); try_to_freeze();
wmb(); wmb();
if (!cpus_are_in_xmon()) { if (!cpus_are_in_xmon()) {
spin_lock(&hvc_structs_lock); mutex_lock(&hvc_structs_mutex);
list_for_each_entry(hp, &hvc_structs, next) { list_for_each_entry(hp, &hvc_structs, next) {
poll_mask |= hvc_poll(hp); poll_mask |= __hvc_poll(hp, true);
cond_resched();
} }
spin_unlock(&hvc_structs_lock); mutex_unlock(&hvc_structs_mutex);
} else } else
poll_mask |= HVC_POLL_READ; poll_mask |= HVC_POLL_READ;
if (hvc_kicked) if (hvc_kicked)
...@@ -871,7 +921,7 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data, ...@@ -871,7 +921,7 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
INIT_WORK(&hp->tty_resize, hvc_set_winsz); INIT_WORK(&hp->tty_resize, hvc_set_winsz);
spin_lock_init(&hp->lock); spin_lock_init(&hp->lock);
spin_lock(&hvc_structs_lock); mutex_lock(&hvc_structs_mutex);
/* /*
* find index to use: * find index to use:
...@@ -891,7 +941,7 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data, ...@@ -891,7 +941,7 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
vtermnos[i] = vtermno; vtermnos[i] = vtermno;
list_add_tail(&(hp->next), &hvc_structs); list_add_tail(&(hp->next), &hvc_structs);
spin_unlock(&hvc_structs_lock); mutex_unlock(&hvc_structs_mutex);
/* check if we need to re-register the kernel console */ /* check if we need to re-register the kernel console */
hvc_check_console(i); hvc_check_console(i);
......
...@@ -54,6 +54,7 @@ struct hvc_struct { ...@@ -54,6 +54,7 @@ struct hvc_struct {
struct hv_ops { struct hv_ops {
int (*get_chars)(uint32_t vtermno, char *buf, int count); int (*get_chars)(uint32_t vtermno, char *buf, int count);
int (*put_chars)(uint32_t vtermno, const char *buf, int count); int (*put_chars)(uint32_t vtermno, const char *buf, int count);
int (*flush)(uint32_t vtermno, bool wait);
/* Callbacks for notification. Called in open, close and hangup */ /* Callbacks for notification. Called in open, close and hangup */
int (*notifier_add)(struct hvc_struct *hp, int irq); int (*notifier_add)(struct hvc_struct *hp, int irq);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册