Commit 3b07e9ca authored by Tejun Heo

workqueue: deprecate system_nrt[_freezable]_wq

system_nrt[_freezable]_wq are now spurious.  Mark them deprecated and
convert all users to system[_freezable]_wq.

If you're cc'd and wondering what's going on: Now all workqueues are
non-reentrant, so there's no reason to use system_nrt[_freezable]_wq.
Please use system[_freezable]_wq instead.

This patch doesn't make any functional difference.
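
For anyone still carrying out-of-tree users, the conversion is mechanical:
work that used to be queued on system_nrt[_freezable]_wq can go through the
schedule_*work() helpers (which target system_wq) or be queued on
system_freezable_wq directly.  A minimal sketch of the pattern follows; the
work items and functions below are made up for illustration and are not part
of this patch:

  #include <linux/workqueue.h>
  #include <linux/jiffies.h>

  static void example_fn(struct work_struct *work)
  {
  	/* deferred processing; all workqueues are non-reentrant now */
  }

  static DECLARE_WORK(example_work, example_fn);
  static DECLARE_DELAYED_WORK(example_dwork, example_fn);

  static void example_kick(void)
  {
  	/* was: queue_work(system_nrt_wq, &example_work); */
  	schedule_work(&example_work);

  	/* was: queue_delayed_work(system_nrt_wq, &example_dwork, delay); */
  	schedule_delayed_work(&example_dwork, msecs_to_jiffies(10));
  }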
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Lai Jiangshan <laijs@cn.fujitsu.com>

Cc: Jens Axboe <axboe@kernel.dk>
Cc: David Airlie <airlied@linux.ie>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: David Howells <dhowells@redhat.com>
Parent 43829731
@@ -180,7 +180,7 @@ static inline unsigned int total_nr_queued(struct throtl_data *td)
 /*
  * Worker for allocating per cpu stat for tgs. This is scheduled on the
- * system_nrt_wq once there are some groups on the alloc_list waiting for
+ * system_wq once there are some groups on the alloc_list waiting for
  * allocation.
  */
 static void tg_stats_alloc_fn(struct work_struct *work)
@@ -194,8 +194,7 @@ static void tg_stats_alloc_fn(struct work_struct *work)
 		stats_cpu = alloc_percpu(struct tg_stats_cpu);
 		if (!stats_cpu) {
 			/* allocation failed, try again after some time */
-			queue_delayed_work(system_nrt_wq, dwork,
-					   msecs_to_jiffies(10));
+			schedule_delayed_work(dwork, msecs_to_jiffies(10));
 			return;
 		}
 	}
@@ -238,7 +237,7 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
 	 */
 	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
 	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
-	queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
+	schedule_delayed_work(&tg_stats_alloc_work, 0);
 	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 }
......
@@ -1490,9 +1490,9 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
 		intv = disk_events_poll_jiffies(disk);
 		set_timer_slack(&ev->dwork.timer, intv / 4);
 		if (check_now)
-			queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+			queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
 		else if (intv)
-			queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+			queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
 out_unlock:
 	spin_unlock_irqrestore(&ev->lock, flags);
 }
@@ -1535,7 +1535,7 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
 	spin_lock_irq(&ev->lock);
 	ev->clearing |= mask;
 	if (!ev->block)
-		mod_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+		mod_delayed_work(system_freezable_wq, &ev->dwork, 0);
 	spin_unlock_irq(&ev->lock);
 }
@@ -1571,7 +1571,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
 	/* uncondtionally schedule event check and wait for it to finish */
 	disk_block_events(disk);
-	queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+	queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
 	flush_delayed_work(&ev->dwork);
 	__disk_unblock_events(disk, false);
@@ -1608,7 +1608,7 @@ static void disk_events_workfn(struct work_struct *work)
 	intv = disk_events_poll_jiffies(disk);
 	if (!ev->block && intv)
-		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+		queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
 	spin_unlock_irq(&ev->lock);
......
@@ -968,7 +968,7 @@ static void output_poll_execute(struct work_struct *work)
 	}
 	if (repoll)
-		queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD);
+		schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
 }
 void drm_kms_helper_poll_disable(struct drm_device *dev)
@@ -993,7 +993,7 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
 	}
 	if (poll)
-		queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+		schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable);
@@ -1020,6 +1020,6 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
 	/* kill timer and schedule immediate execution, this doesn't block */
 	cancel_delayed_work(&dev->mode_config.output_poll_work);
 	if (drm_kms_helper_poll)
-		queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
+		schedule_delayed_work(&dev->mode_config.output_poll_work, 0);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
@@ -204,7 +204,7 @@ static void wiiext_worker(struct work_struct *work)
 /* schedule work only once, otherwise mark for reschedule */
 static void wiiext_schedule(struct wiimote_ext *ext)
 {
-	queue_work(system_nrt_wq, &ext->worker);
+	schedule_work(&ext->worker);
 }
 /*
......
@@ -204,8 +204,8 @@ void mmc_host_clk_release(struct mmc_host *host)
 	host->clk_requests--;
 	if (mmc_host_may_gate_card(host->card) &&
 		!host->clk_requests)
-		queue_delayed_work(system_nrt_wq, &host->clk_gate_work,
-				   msecs_to_jiffies(host->clkgate_delay));
+		schedule_delayed_work(&host->clk_gate_work,
+				      msecs_to_jiffies(host->clkgate_delay));
 	spin_unlock_irqrestore(&host->clk_lock, flags);
 }
......
@@ -521,7 +521,7 @@ static void refill_work(struct work_struct *work)
 	/* In theory, this can happen: if we don't get any buffers in
 	 * we will *never* try to fill again. */
 	if (still_empty)
-		queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
+		schedule_delayed_work(&vi->refill, HZ/2);
 }
 static int virtnet_poll(struct napi_struct *napi, int budget)
@@ -540,7 +540,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 	if (vi->num < vi->max / 2) {
 		if (!try_fill_recv(vi, GFP_ATOMIC))
-			queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+			schedule_delayed_work(&vi->refill, 0);
 	}
 	/* Out of packets? */
@@ -745,7 +745,7 @@ static int virtnet_open(struct net_device *dev)
 	/* Make sure we have some buffers: if oom use wq. */
 	if (!try_fill_recv(vi, GFP_KERNEL))
-		queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+		schedule_delayed_work(&vi->refill, 0);
 	virtnet_napi_enable(vi);
 	return 0;
@@ -1020,7 +1020,7 @@ static void virtnet_config_changed(struct virtio_device *vdev)
 {
 	struct virtnet_info *vi = vdev->priv;
-	queue_work(system_nrt_wq, &vi->config_work);
+	schedule_work(&vi->config_work);
 }
 static int init_vqs(struct virtnet_info *vi)
@@ -1152,7 +1152,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	   otherwise get link status from config. */
 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
 		netif_carrier_off(dev);
-		queue_work(system_nrt_wq, &vi->config_work);
+		schedule_work(&vi->config_work);
 	} else {
 		vi->status = VIRTIO_NET_S_LINK_UP;
 		netif_carrier_on(dev);
@@ -1264,7 +1264,7 @@ static int virtnet_restore(struct virtio_device *vdev)
 	netif_device_attach(vi->dev);
 	if (!try_fill_recv(vi, GFP_KERNEL))
-		queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+		schedule_delayed_work(&vi->refill, 0);
 	mutex_lock(&vi->config_lock);
 	vi->config_enable = true;
......
@@ -310,12 +310,12 @@ extern struct workqueue_struct *system_long_wq;
 extern struct workqueue_struct *system_unbound_wq;
 extern struct workqueue_struct *system_freezable_wq;
-static inline struct workqueue_struct *__system_nrt_wq(void)
+static inline struct workqueue_struct * __deprecated __system_nrt_wq(void)
 {
 	return system_wq;
 }
-static inline struct workqueue_struct *__system_nrt_freezable_wq(void)
+static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void)
 {
 	return system_freezable_wq;
 }
......
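
The hunk above relies on the wrapper indirection already present in the
workqueue header: the old names expand to calls to these inline wrappers, so
tagging the wrappers __deprecated makes every remaining user emit a build
warning while still compiling.  Roughly, as a sketch assuming that macro
indirection (not the verbatim header contents):

  /* sketch: old name kept as a macro over a __deprecated inline */
  static inline struct workqueue_struct * __deprecated __system_nrt_wq(void)
  {
  	return system_wq;	/* system_wq is non-reentrant now */
  }
  #define system_nrt_wq	__system_nrt_wq()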
@@ -379,7 +379,7 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
 	rcu_batch_queue(&sp->batch_queue, head);
 	if (!sp->running) {
 		sp->running = true;
-		queue_delayed_work(system_nrt_wq, &sp->work, 0);
+		schedule_delayed_work(&sp->work, 0);
 	}
 	spin_unlock_irqrestore(&sp->queue_lock, flags);
 }
@@ -631,7 +631,7 @@ static void srcu_reschedule(struct srcu_struct *sp)
 	}
 	if (pending)
-		queue_delayed_work(system_nrt_wq, &sp->work, SRCU_INTERVAL);
+		schedule_delayed_work(&sp->work, SRCU_INTERVAL);
 }
 /*
......
@@ -62,7 +62,7 @@ void key_schedule_gc(time_t gc_at)
 	if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
 		kdebug("IMMEDIATE");
-		queue_work(system_nrt_wq, &key_gc_work);
+		schedule_work(&key_gc_work);
 	} else if (gc_at < key_gc_next_run) {
 		kdebug("DEFERRED");
 		key_gc_next_run = gc_at;
@@ -77,7 +77,7 @@ void key_schedule_gc(time_t gc_at)
 void key_schedule_gc_links(void)
 {
 	set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags);
-	queue_work(system_nrt_wq, &key_gc_work);
+	schedule_work(&key_gc_work);
 }
 /*
@@ -120,7 +120,7 @@ void key_gc_keytype(struct key_type *ktype)
 	set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags);
 	kdebug("schedule");
-	queue_work(system_nrt_wq, &key_gc_work);
+	schedule_work(&key_gc_work);
 	kdebug("sleep");
 	wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE, key_gc_wait_bit,
@@ -369,7 +369,7 @@ static void key_garbage_collector(struct work_struct *work)
 	}
 	if (gc_state & KEY_GC_REAP_AGAIN)
-		queue_work(system_nrt_wq, &key_gc_work);
+		schedule_work(&key_gc_work);
 	kleave(" [end %x]", gc_state);
 	return;
......
@@ -598,7 +598,7 @@ void key_put(struct key *key)
 		key_check(key);
 		if (atomic_dec_and_test(&key->usage))
-			queue_work(system_nrt_wq, &key_gc_work);
+			schedule_work(&key_gc_work);
 	}
 }
 EXPORT_SYMBOL(key_put);
......