Commit ec79d605 authored by Arnd Bergmann, committed by Greg Kroah-Hartman

tty: replace BKL with a new tty_lock

In preparation for replacing the big kernel lock
in the TTY layer, wrap all the callers in new
macros tty_lock, tty_lock_nested and tty_unlock.
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Parent 3f582b8c
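
The conversion in this patch is mechanical: every lock_kernel()/unlock_kernel() pair in TTY code becomes tty_lock()/tty_unlock(), or tty_lock_nested() where the call site may already hold the lock. Below is a minimal sketch of that pattern, not code from this patch: foo_get_info(), struct foo_state and struct foo_info are invented for illustration, and the new wrappers still map onto the BKL as defined in the header changes at the end of this diff.

#include <linux/tty.h>		/* tty_lock()/tty_unlock() added by this patch */
#include <linux/uaccess.h>	/* copy_to_user() */
#include <linux/string.h>	/* memset() */

/* Hypothetical state/info types, defined only so the sketch is complete. */
struct foo_info {
	int line;
	int custom_divisor;
};
struct foo_state {
	int line;
	int custom_divisor;
};

/* Before this patch the guarded region sat under lock_kernel()/
 * unlock_kernel(); afterwards the same region sits under the BTM. */
static int foo_get_info(struct foo_state *state, struct foo_info __user *retinfo)
{
	struct foo_info tmp;

	memset(&tmp, 0, sizeof(tmp));
	tty_lock();				/* was: lock_kernel(); */
	tmp.line = state->line;
	tmp.custom_divisor = state->custom_divisor;
	tty_unlock();				/* was: unlock_kernel(); */

	if (copy_to_user(retinfo, &tmp, sizeof(tmp)))
		return -EFAULT;
	return 0;
}
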
......@@ -1072,7 +1072,7 @@ static int get_serial_info(struct async_struct * info,
if (!retinfo)
return -EFAULT;
memset(&tmp, 0, sizeof(tmp));
lock_kernel();
tty_lock();
tmp.type = state->type;
tmp.line = state->line;
tmp.port = state->port;
......@@ -1083,7 +1083,7 @@ static int get_serial_info(struct async_struct * info,
tmp.close_delay = state->close_delay;
tmp.closing_wait = state->closing_wait;
tmp.custom_divisor = state->custom_divisor;
unlock_kernel();
tty_unlock();
if (copy_to_user(retinfo,&tmp,sizeof(*retinfo)))
return -EFAULT;
return 0;
......@@ -1100,14 +1100,14 @@ static int set_serial_info(struct async_struct * info,
if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
return -EFAULT;
lock_kernel();
tty_lock();
state = info->state;
old_state = *state;
change_irq = new_serial.irq != state->irq;
change_port = (new_serial.port != state->port);
if(change_irq || change_port || (new_serial.xmit_fifo_size != state->xmit_fifo_size)) {
unlock_kernel();
tty_unlock();
return -EINVAL;
}
......@@ -1127,7 +1127,7 @@ static int set_serial_info(struct async_struct * info,
}
if (new_serial.baud_base < 9600) {
unlock_kernel();
tty_unlock();
return -EINVAL;
}
......@@ -1163,7 +1163,7 @@ static int set_serial_info(struct async_struct * info,
}
} else
retval = startup(info);
unlock_kernel();
tty_unlock();
return retval;
}
......@@ -1538,7 +1538,7 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
orig_jiffies = jiffies;
lock_kernel();
tty_lock_nested(); /* tty_wait_until_sent is called from lots of places */
/*
* Set the check interval to be 1/5 of the estimated time to
* send a single character, and make it at least 1. The check
......@@ -1579,7 +1579,7 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
break;
}
__set_current_state(TASK_RUNNING);
unlock_kernel();
tty_unlock();
#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
#endif
......
......@@ -67,15 +67,15 @@ static void set_led(char state)
static int briq_panel_open(struct inode *ino, struct file *filep)
{
lock_kernel();
tty_lock();
/* enforce single access, vfd_is_open is protected by BKL */
if (vfd_is_open) {
unlock_kernel();
tty_unlock();
return -EBUSY;
}
vfd_is_open = 1;
unlock_kernel();
tty_unlock();
return 0;
}
......
......@@ -598,18 +598,18 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
return -EFAULT;
}
lock_kernel();
tty_lock();
for (;;) {
if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
unlock_kernel();
tty_unlock();
return -EIO;
}
n_hdlc = tty2n_hdlc (tty);
if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC ||
tty != n_hdlc->tty) {
unlock_kernel();
tty_unlock();
return 0;
}
......@@ -619,13 +619,13 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
/* no data */
if (file->f_flags & O_NONBLOCK) {
unlock_kernel();
tty_unlock();
return -EAGAIN;
}
interruptible_sleep_on (&tty->read_wait);
if (signal_pending(current)) {
unlock_kernel();
tty_unlock();
return -EINTR;
}
}
......@@ -648,7 +648,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
kfree(rbuf);
else
n_hdlc_buf_put(&n_hdlc->rx_free_buf_list,rbuf);
unlock_kernel();
tty_unlock();
return ret;
} /* end of n_hdlc_tty_read() */
......@@ -691,7 +691,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
count = maxframe;
}
lock_kernel();
tty_lock();
add_wait_queue(&tty->write_wait, &wait);
set_current_state(TASK_INTERRUPTIBLE);
......@@ -731,7 +731,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
n_hdlc_buf_put(&n_hdlc->tx_buf_list,tbuf);
n_hdlc_send_frames(n_hdlc,tty);
}
unlock_kernel();
tty_unlock();
return error;
} /* end of n_hdlc_tty_write() */
......
......@@ -1067,7 +1067,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
TRACE_L("read()");
lock_kernel();
tty_lock();
pClient = findClient(pInfo, task_pid(current));
if (pClient) {
......@@ -1109,7 +1109,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
}
ret = -EPERM;
unlock:
unlock_kernel();
tty_unlock();
return ret;
}
......@@ -1158,7 +1158,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
pHeader->locks = 0;
pHeader->owner = NULL;
lock_kernel();
tty_lock();
pClient = findClient(pInfo, task_pid(current));
if (pClient) {
......@@ -1177,7 +1177,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
add_tx_queue(pInfo, pHeader);
trigger_transmit(pInfo);
unlock_kernel();
tty_unlock();
return 0;
}
......
......@@ -692,9 +692,9 @@ static int ptmx_open(struct inode *inode, struct file *filp)
{
int ret;
lock_kernel();
tty_lock();
ret = __ptmx_open(inode, filp);
unlock_kernel();
tty_unlock();
return ret;
}
......
......@@ -313,7 +313,7 @@ int paste_selection(struct tty_struct *tty)
struct tty_ldisc *ld;
DECLARE_WAITQUEUE(wait, current);
lock_kernel();
tty_lock_nested(); /* always called with BTM from vt_ioctl */
acquire_console_sem();
poke_blanked_console();
......@@ -338,6 +338,6 @@ int paste_selection(struct tty_struct *tty)
__set_current_state(TASK_RUNNING);
tty_ldisc_deref(ld);
unlock_kernel();
tty_unlock();
return 0;
}
......@@ -1505,7 +1505,7 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
printk("cy_ioctl %s, cmd = %x arg = %lx\n", tty->name, cmd, arg); /* */
#endif
lock_kernel();
tty_lock();
switch (cmd) {
case CYGETMON:
......@@ -1561,7 +1561,7 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
default:
ret_val = -ENOIOCTLCMD;
}
unlock_kernel();
tty_unlock();
#ifdef SERIAL_DEBUG_OTHER
printk("cy_ioctl done\n");
......
......@@ -1699,7 +1699,7 @@ static long sx_fw_ioctl(struct file *filp, unsigned int cmd,
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
lock_kernel();
tty_lock();
sx_dprintk(SX_DEBUG_FIRMWARE, "IOCTL %x: %lx\n", cmd, arg);
......@@ -1848,7 +1848,7 @@ static long sx_fw_ioctl(struct file *filp, unsigned int cmd,
break;
}
out:
unlock_kernel();
tty_unlock();
func_exit();
return rc;
}
......@@ -1859,7 +1859,7 @@ static int sx_break(struct tty_struct *tty, int flag)
int rv;
func_enter();
lock_kernel();
tty_lock();
if (flag)
rv = sx_send_command(port, HS_START, -1, HS_IDLE_BREAK);
......@@ -1868,7 +1868,7 @@ static int sx_break(struct tty_struct *tty, int flag)
if (rv != 1)
printk(KERN_ERR "sx: couldn't send break (%x).\n",
read_sx_byte(port->board, CHAN_OFFSET(port, hi_hstat)));
unlock_kernel();
tty_unlock();
func_exit();
return 0;
}
......@@ -1909,7 +1909,7 @@ static int sx_ioctl(struct tty_struct *tty, struct file *filp,
/* func_enter2(); */
rc = 0;
lock_kernel();
tty_lock();
switch (cmd) {
case TIOCGSERIAL:
rc = gs_getserial(&port->gs, argp);
......@@ -1921,7 +1921,7 @@ static int sx_ioctl(struct tty_struct *tty, struct file *filp,
rc = -ENOIOCTLCMD;
break;
}
unlock_kernel();
tty_unlock();
/* func_exit(); */
return rc;
......
......@@ -149,6 +149,7 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
#else
#define tty_compat_ioctl NULL
#endif
static int __tty_fasync(int fd, struct file *filp, int on);
static int tty_fasync(int fd, struct file *filp, int on);
static void release_tty(struct tty_struct *tty, int idx);
static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
......@@ -483,7 +484,7 @@ EXPORT_SYMBOL_GPL(tty_wakeup);
* remains intact.
*
* Locking:
* BKL
* BTM
* redirect lock for undoing redirection
* file list lock for manipulating list of ttys
* tty_ldisc_lock from called functions
......@@ -513,8 +514,11 @@ static void do_tty_hangup(struct work_struct *work)
}
spin_unlock(&redirect_lock);
/* inuse_filps is protected by the single kernel lock */
lock_kernel();
/* inuse_filps is protected by the single tty lock,
this really needs to change if we want to flush the
workqueue with the lock held */
tty_lock_nested(); /* called with BTM held from pty_close and
others */
check_tty_count(tty, "do_tty_hangup");
file_list_lock();
......@@ -525,7 +529,7 @@ static void do_tty_hangup(struct work_struct *work)
if (filp->f_op->write != tty_write)
continue;
closecount++;
tty_fasync(-1, filp, 0); /* can't block */
__tty_fasync(-1, filp, 0); /* can't block */
filp->f_op = &hung_up_tty_fops;
}
file_list_unlock();
......@@ -594,7 +598,7 @@ static void do_tty_hangup(struct work_struct *work)
*/
set_bit(TTY_HUPPED, &tty->flags);
tty_ldisc_enable(tty);
unlock_kernel();
tty_unlock();
if (f)
fput(f);
}
......@@ -696,7 +700,8 @@ static void session_clear_tty(struct pid *session)
* exiting; it is 0 if called by the ioctl TIOCNOTTY.
*
* Locking:
* BKL is taken for hysterical raisins
* BTM is taken for hysterical raisins, and held when
* called from no_tty().
* tty_mutex is taken to protect tty
* ->siglock is taken to protect ->signal/->sighand
* tasklist_lock is taken to walk process list for sessions
......@@ -714,10 +719,10 @@ void disassociate_ctty(int on_exit)
tty = get_current_tty();
if (tty) {
tty_pgrp = get_pid(tty->pgrp);
lock_kernel();
tty_lock_nested(); /* see above */
if (on_exit && tty->driver->type != TTY_DRIVER_TYPE_PTY)
tty_vhangup(tty);
unlock_kernel();
tty_unlock();
tty_kref_put(tty);
} else if (on_exit) {
struct pid *old_pgrp;
......@@ -774,9 +779,9 @@ void disassociate_ctty(int on_exit)
void no_tty(void)
{
struct task_struct *tsk = current;
lock_kernel();
tty_lock();
disassociate_ctty(0);
unlock_kernel();
tty_unlock();
proc_clear_tty(tsk);
}
......@@ -1013,19 +1018,19 @@ static inline ssize_t do_tty_write(
* We don't put it into the syslog queue right now maybe in the future if
* really needed.
*
* We must still hold the BKL and test the CLOSING flag for the moment.
* We must still hold the BTM and test the CLOSING flag for the moment.
*/
void tty_write_message(struct tty_struct *tty, char *msg)
{
if (tty) {
mutex_lock(&tty->atomic_write_lock);
lock_kernel();
tty_lock();
if (tty->ops->write && !test_bit(TTY_CLOSING, &tty->flags)) {
unlock_kernel();
tty_unlock();
tty->ops->write(tty, msg, strlen(msg));
} else
unlock_kernel();
tty_unlock();
tty_write_unlock(tty);
}
return;
......@@ -1208,18 +1213,18 @@ static int tty_driver_install_tty(struct tty_driver *driver,
int ret;
if (driver->ops->install) {
lock_kernel();
tty_lock_nested(); /* already called with BTM held */
ret = driver->ops->install(driver, tty);
unlock_kernel();
tty_unlock();
return ret;
}
if (tty_init_termios(tty) == 0) {
lock_kernel();
tty_lock_nested();
tty_driver_kref_get(driver);
tty->count++;
driver->ttys[idx] = tty;
unlock_kernel();
tty_unlock();
return 0;
}
return -ENOMEM;
......@@ -1312,14 +1317,15 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx,
struct tty_struct *tty;
int retval;
lock_kernel();
tty_lock_nested(); /* always called with tty lock held already */
/* Check if pty master is being opened multiple times */
if (driver->subtype == PTY_TYPE_MASTER &&
(driver->flags & TTY_DRIVER_DEVPTS_MEM) && !first_ok) {
unlock_kernel();
tty_unlock();
return ERR_PTR(-EIO);
}
unlock_kernel();
tty_unlock();
/*
* First time open is complex, especially for PTY devices.
......@@ -1363,9 +1369,9 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx,
if (printk_ratelimit())
printk(KERN_INFO "tty_init_dev: ldisc open failed, "
"clearing slot %d\n", idx);
lock_kernel();
tty_lock_nested();
release_tty(tty, idx);
unlock_kernel();
tty_unlock();
return ERR_PTR(retval);
}
......@@ -1512,10 +1518,10 @@ int tty_release(struct inode *inode, struct file *filp)
if (tty_paranoia_check(tty, inode, "tty_release_dev"))
return 0;
lock_kernel();
tty_lock();
check_tty_count(tty, "tty_release_dev");
tty_fasync(-1, filp, 0);
__tty_fasync(-1, filp, 0);
idx = tty->index;
pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
......@@ -1527,18 +1533,18 @@ int tty_release(struct inode *inode, struct file *filp)
if (idx < 0 || idx >= tty->driver->num) {
printk(KERN_DEBUG "tty_release_dev: bad idx when trying to "
"free (%s)\n", tty->name);
unlock_kernel();
tty_unlock();
return 0;
}
if (!devpts) {
if (tty != tty->driver->ttys[idx]) {
unlock_kernel();
tty_unlock();
printk(KERN_DEBUG "tty_release_dev: driver.table[%d] not tty "
"for (%s)\n", idx, tty->name);
return 0;
}
if (tty->termios != tty->driver->termios[idx]) {
unlock_kernel();
tty_unlock();
printk(KERN_DEBUG "tty_release_dev: driver.termios[%d] not termios "
"for (%s)\n",
idx, tty->name);
......@@ -1556,21 +1562,21 @@ int tty_release(struct inode *inode, struct file *filp)
if (tty->driver->other &&
!(tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
if (o_tty != tty->driver->other->ttys[idx]) {
unlock_kernel();
tty_unlock();
printk(KERN_DEBUG "tty_release_dev: other->table[%d] "
"not o_tty for (%s)\n",
idx, tty->name);
return 0 ;
}
if (o_tty->termios != tty->driver->other->termios[idx]) {
unlock_kernel();
tty_unlock();
printk(KERN_DEBUG "tty_release_dev: other->termios[%d] "
"not o_termios for (%s)\n",
idx, tty->name);
return 0;
}
if (o_tty->link != tty) {
unlock_kernel();
tty_unlock();
printk(KERN_DEBUG "tty_release_dev: bad pty pointers\n");
return 0;
}
......@@ -1579,7 +1585,7 @@ int tty_release(struct inode *inode, struct file *filp)
if (tty->ops->close)
tty->ops->close(tty, filp);
unlock_kernel();
tty_unlock();
/*
* Sanity check: if tty->count is going to zero, there shouldn't be
* any waiters on tty->read_wait or tty->write_wait. We test the
......@@ -1602,7 +1608,7 @@ int tty_release(struct inode *inode, struct file *filp)
opens on /dev/tty */
mutex_lock(&tty_mutex);
lock_kernel();
tty_lock();
tty_closing = tty->count <= 1;
o_tty_closing = o_tty &&
(o_tty->count <= (pty_master ? 1 : 0));
......@@ -1633,7 +1639,7 @@ int tty_release(struct inode *inode, struct file *filp)
printk(KERN_WARNING "tty_release_dev: %s: read/write wait queue "
"active!\n", tty_name(tty, buf));
unlock_kernel();
tty_unlock();
mutex_unlock(&tty_mutex);
schedule();
}
......@@ -1698,7 +1704,7 @@ int tty_release(struct inode *inode, struct file *filp)
/* check whether both sides are closing ... */
if (!tty_closing || (o_tty && !o_tty_closing)) {
unlock_kernel();
tty_unlock();
return 0;
}
......@@ -1718,7 +1724,7 @@ int tty_release(struct inode *inode, struct file *filp)
/* Make this pty number available for reallocation */
if (devpts)
devpts_kill_index(inode, idx);
unlock_kernel();
tty_unlock();
return 0;
}
......@@ -1760,12 +1766,12 @@ static int tty_open(struct inode *inode, struct file *filp)
retval = 0;
mutex_lock(&tty_mutex);
lock_kernel();
tty_lock();
if (device == MKDEV(TTYAUX_MAJOR, 0)) {
tty = get_current_tty();
if (!tty) {
unlock_kernel();
tty_unlock();
mutex_unlock(&tty_mutex);
return -ENXIO;
}
......@@ -1797,14 +1803,14 @@ static int tty_open(struct inode *inode, struct file *filp)
goto got_driver;
}
}
unlock_kernel();
tty_unlock();
mutex_unlock(&tty_mutex);
return -ENODEV;
}
driver = get_tty_driver(device, &index);
if (!driver) {
unlock_kernel();
tty_unlock();
mutex_unlock(&tty_mutex);
return -ENODEV;
}
......@@ -1814,7 +1820,7 @@ static int tty_open(struct inode *inode, struct file *filp)
tty = tty_driver_lookup_tty(driver, inode, index);
if (IS_ERR(tty)) {
unlock_kernel();
tty_unlock();
mutex_unlock(&tty_mutex);
return PTR_ERR(tty);
}
......@@ -1830,7 +1836,7 @@ static int tty_open(struct inode *inode, struct file *filp)
mutex_unlock(&tty_mutex);
tty_driver_kref_put(driver);
if (IS_ERR(tty)) {
unlock_kernel();
tty_unlock();
return PTR_ERR(tty);
}
......@@ -1862,11 +1868,11 @@ static int tty_open(struct inode *inode, struct file *filp)
#endif
tty_release(inode, filp);
if (retval != -ERESTARTSYS) {
unlock_kernel();
tty_unlock();
return retval;
}
if (signal_pending(current)) {
unlock_kernel();
tty_unlock();
return retval;
}
schedule();
......@@ -1875,14 +1881,14 @@ static int tty_open(struct inode *inode, struct file *filp)
*/
if (filp->f_op == &hung_up_tty_fops)
filp->f_op = &tty_fops;
unlock_kernel();
tty_unlock();
goto retry_open;
}
unlock_kernel();
tty_unlock();
mutex_lock(&tty_mutex);
lock_kernel();
tty_lock();
spin_lock_irq(&current->sighand->siglock);
if (!noctty &&
current->signal->leader &&
......@@ -1890,7 +1896,7 @@ static int tty_open(struct inode *inode, struct file *filp)
tty->session == NULL)
__proc_set_tty(current, tty);
spin_unlock_irq(&current->sighand->siglock);
unlock_kernel();
tty_unlock();
mutex_unlock(&tty_mutex);
return 0;
}
......@@ -1926,13 +1932,12 @@ static unsigned int tty_poll(struct file *filp, poll_table *wait)
return ret;
}
static int tty_fasync(int fd, struct file *filp, int on)
static int __tty_fasync(int fd, struct file *filp, int on)
{
struct tty_struct *tty;
unsigned long flags;
int retval = 0;
lock_kernel();
tty = (struct tty_struct *)filp->private_data;
if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_fasync"))
goto out;
......@@ -1966,7 +1971,15 @@ static int tty_fasync(int fd, struct file *filp, int on)
}
retval = 0;
out:
unlock_kernel();
return retval;
}
static int tty_fasync(int fd, struct file *filp, int on)
{
int retval;
tty_lock();
retval = __tty_fasync(fd, filp, on);
tty_unlock();
return retval;
}
......
......@@ -440,6 +440,8 @@ static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
*
* A helper opening method. Also a convenient debugging and check
* point.
*
* Locking: always called with BTM already held.
*/
static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
......@@ -447,10 +449,10 @@ static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
WARN_ON(test_and_set_bit(TTY_LDISC_OPEN, &tty->flags));
if (ld->ops->open) {
int ret;
/* BKL here locks verus a hangup event */
lock_kernel();
/* BTM here locks versus a hangup event */
tty_lock_nested(); /* always held here already */
ret = ld->ops->open(tty);
unlock_kernel();
tty_unlock();
return ret;
}
return 0;
......@@ -553,7 +555,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
if (IS_ERR(new_ldisc))
return PTR_ERR(new_ldisc);
lock_kernel();
tty_lock();
/*
* We need to look at the tty locking here for pty/tty pairs
* when both sides try to change in parallel.
......@@ -567,12 +569,12 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
*/
if (tty->ldisc->ops->num == ldisc) {
unlock_kernel();
tty_unlock();
tty_ldisc_put(new_ldisc);
return 0;
}
unlock_kernel();
tty_unlock();
/*
* Problem: What do we do if this blocks ?
* We could deadlock here
......@@ -594,7 +596,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
mutex_lock(&tty->ldisc_mutex);
}
lock_kernel();
tty_lock();
set_bit(TTY_LDISC_CHANGING, &tty->flags);
......@@ -607,7 +609,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
o_ldisc = tty->ldisc;
unlock_kernel();
tty_unlock();
/*
* Make sure we don't change while someone holds a
* reference to the line discipline. The TTY_LDISC bit
......@@ -633,14 +635,14 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
flush_scheduled_work();
mutex_lock(&tty->ldisc_mutex);
lock_kernel();
tty_lock();
if (test_bit(TTY_HUPPED, &tty->flags)) {
/* We were raced by the hangup method. It will have stomped
the ldisc data and closed the ldisc down */
clear_bit(TTY_LDISC_CHANGING, &tty->flags);
mutex_unlock(&tty->ldisc_mutex);
tty_ldisc_put(new_ldisc);
unlock_kernel();
tty_unlock();
return -EIO;
}
......@@ -682,7 +684,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
if (o_work)
schedule_delayed_work(&o_tty->buf.work, 1);
mutex_unlock(&tty->ldisc_mutex);
unlock_kernel();
tty_unlock();
return retval;
}
......
......@@ -463,10 +463,10 @@ vcs_open(struct inode *inode, struct file *filp)
unsigned int currcons = iminor(inode) & 127;
int ret = 0;
lock_kernel();
tty_lock();
if(currcons && !vc_cons_allocated(currcons-1))
ret = -ENXIO;
unlock_kernel();
tty_unlock();
return ret;
}
......
......@@ -509,7 +509,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
console = vc->vc_num;
lock_kernel();
tty_lock();
if (!vc_cons_allocated(console)) { /* impossible? */
ret = -ENOIOCTLCMD;
......@@ -1336,7 +1336,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
ret = -ENOIOCTLCMD;
}
out:
unlock_kernel();
tty_unlock();
return ret;
eperm:
ret = -EPERM;
......@@ -1503,7 +1503,7 @@ long vt_compat_ioctl(struct tty_struct *tty, struct file * file,
console = vc->vc_num;
lock_kernel();
tty_lock();
if (!vc_cons_allocated(console)) { /* impossible? */
ret = -ENOIOCTLCMD;
......@@ -1571,11 +1571,11 @@ long vt_compat_ioctl(struct tty_struct *tty, struct file * file,
goto fallback;
}
out:
unlock_kernel();
tty_unlock();
return ret;
fallback:
unlock_kernel();
tty_unlock();
return vt_ioctl(tty, file, cmd, arg);
}
......
......@@ -1705,7 +1705,7 @@ static void rs_360_wait_until_sent(struct tty_struct *tty, int timeout)
printk("jiff=%lu...", jiffies);
#endif
lock_kernel();
tty_lock_nested(); /* always held already since we come from ->close */
/* We go through the loop at least once because we can't tell
* exactly when the last character exits the shifter. There can
* be at least two characters waiting to be sent after the buffers
......@@ -1734,7 +1734,7 @@ static void rs_360_wait_until_sent(struct tty_struct *tty, int timeout)
bdp--;
} while (bdp->status & BD_SC_READY);
current->state = TASK_RUNNING;
unlock_kernel();
tty_unlock();
#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
#endif
......
......@@ -3935,7 +3935,7 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
* Check R_DMA_CHx_STATUS bit 0-6=number of available bytes in FIFO
* R_DMA_CHx_HWSW bit 31-16=nbr of bytes left in DMA buffer (0=64k)
*/
lock_kernel();
tty_lock_nested(); /* locked already when coming from close */
orig_jiffies = jiffies;
while (info->xmit.head != info->xmit.tail || /* More in send queue */
(*info->ostatusadr & 0x007f) || /* more in FIFO */
......@@ -3952,7 +3952,7 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
curr_time_usec - info->last_tx_active_usec;
}
set_current_state(TASK_RUNNING);
unlock_kernel();
tty_unlock();
}
/*
......
......@@ -1274,7 +1274,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
struct uart_port *uport;
unsigned long flags;
BUG_ON(!kernel_locked());
BUG_ON(!tty_locked());
if (!state)
return;
......@@ -1382,7 +1382,7 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
if (port->type == PORT_UNKNOWN || port->fifosize == 0)
return;
lock_kernel();
tty_lock_nested(); /* already locked when coming from close */
/*
* Set the check interval to be 1/5 of the estimated time to
......@@ -1429,7 +1429,7 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
break;
}
set_current_state(TASK_RUNNING); /* might not be needed */
unlock_kernel();
tty_unlock();
}
/*
......@@ -1444,7 +1444,7 @@ static void uart_hangup(struct tty_struct *tty)
struct tty_port *port = &state->port;
unsigned long flags;
BUG_ON(!kernel_locked());
BUG_ON(!tty_locked());
pr_debug("uart_hangup(%d)\n", state->uart_port->line);
mutex_lock(&port->mutex);
......@@ -1578,7 +1578,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
struct tty_port *port;
int retval, line = tty->index;
BUG_ON(!kernel_locked());
BUG_ON(!tty_locked());
pr_debug("uart_open(%d) called\n", line);
/*
......
......@@ -1108,7 +1108,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
charmap += 4 * cmapsz;
#endif
unlock_kernel();
tty_unlock();
spin_lock_irq(&vga_lock);
/* First, the Sequencer */
vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x1);
......@@ -1192,7 +1192,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0);
}
spin_unlock_irq(&vga_lock);
lock_kernel();
tty_lock();
return 0;
}
......
......@@ -13,6 +13,7 @@
#include <linux/tty_driver.h>
#include <linux/tty_ldisc.h>
#include <linux/mutex.h>
#include <linux/smp_lock.h>
#include <asm/system.h>
......@@ -576,5 +577,35 @@ extern int vt_ioctl(struct tty_struct *tty, struct file *file,
extern long vt_compat_ioctl(struct tty_struct *tty, struct file * file,
unsigned int cmd, unsigned long arg);
/* functions for preparation of BKL removal */
/*
* tty_lock_nested get the tty_lock while potentially holding it
*
* The Big TTY Mutex is a recursive lock, meaning you can take it
* from a thread that is already holding it.
* This is bad for a number of reasons, so tty_lock_nested should
* really be used as rarely as possible. If a code location can
* be shown to never get called with this held already, it should
* use tty_lock() instead.
*/
static inline void __lockfunc tty_lock_nested(void) __acquires(kernel_lock)
{
lock_kernel();
}
static inline void tty_lock(void) __acquires(kernel_lock)
{
#ifdef CONFIG_LOCK_KERNEL
/* kernel_locked is 1 for !CONFIG_LOCK_KERNEL */
WARN_ON(kernel_locked());
#endif
lock_kernel();
}
static inline void tty_unlock(void) __releases(kernel_lock)
{
unlock_kernel();
}
#define tty_locked() (kernel_locked())
#endif /* __KERNEL__ */
#endif
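
As the comment above notes, the two acquire helpers target different call sites: tty_lock() warns (under CONFIG_LOCK_KERNEL) if the lock is already held and is meant for top-level entry points, while tty_lock_nested() is reserved for code that can still be reached with the BTM held, such as ->wait_until_sent() invoked from ->close(). The fragment below is a hedged sketch of that split using invented driver functions (bar_ioctl(), bar_wait_until_sent()); it mirrors the call sites converted in this patch but is not part of it.

#include <linux/fs.h>
#include <linux/tty.h>

/* Hypothetical top-level file operation: the BTM is never held on entry,
 * so the checked, non-recursive acquire is the right choice. */
static int bar_ioctl(struct tty_struct *tty, struct file *file,
		     unsigned int cmd, unsigned long arg)
{
	int ret = -ENOIOCTLCMD;

	tty_lock();
	/* ... dispatch on cmd under the BTM ... */
	tty_unlock();
	return ret;
}

/* Hypothetical ->wait_until_sent(): reached from ->close(), which already
 * runs under the BTM, so the recursive variant has to be used for now. */
static void bar_wait_until_sent(struct tty_struct *tty, int timeout)
{
	tty_lock_nested();
	/* ... poll the transmitter until it drains or the timeout expires ... */
	tty_unlock();
}
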