Commit 25530553 authored by Sebastian Ott, committed by Martin Schwidefsky

[S390] cio: introduce css_eval_scheduled

Use css_eval_scheduled to determine whether all scheduled subchannel
evaluations have finished. Wait for this value to become 0 in the
channel subsystem init function.
Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Parent 2f17644d
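The mechanism introduced by this commit is a small flag-plus-wait-queue handshake: each scheduling site sets css_eval_scheduled under slow_subchannel_lock before queueing the slow-path work, the worker clears the flag and wakes css_eval_wq once slow_subchannel_set is empty, and the init path sleeps until the flag reads 0. Below is a minimal user-space sketch of that pattern, assuming only POSIX threads; the names (eval_scheduled, pending_work, slow_path_worker) are illustrative and do not appear in the kernel sources.

/*
 * User-space analogue of the css_eval_scheduled handshake.
 * The kernel uses an atomic_t plus a wait queue; here the flag is
 * protected by a mutex and waiters block on a condition variable.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t eval_done = PTHREAD_COND_INITIALIZER;
static int eval_scheduled;	/* plays the role of css_eval_scheduled */
static int pending_work = 3;	/* plays the role of slow_subchannel_set */

/* Worker: process pending items, clear the flag when nothing is left. */
static void *slow_path_worker(void *unused)
{
	(void)unused;
	for (;;) {
		pthread_mutex_lock(&lock);
		if (pending_work == 0) {
			eval_scheduled = 0;		/* all evaluations done */
			pthread_cond_broadcast(&eval_done);
			pthread_mutex_unlock(&lock);
			return NULL;
		}
		pending_work--;
		pthread_mutex_unlock(&lock);
		usleep(1000);				/* simulate one evaluation */
	}
}

int main(void)
{
	pthread_t worker;

	/* "Schedule" evaluation: raise the flag before queueing the work. */
	pthread_mutex_lock(&lock);
	eval_scheduled = 1;
	pthread_mutex_unlock(&lock);
	pthread_create(&worker, NULL, slow_path_worker, NULL);

	/* Init path: wait until every scheduled evaluation has finished. */
	pthread_mutex_lock(&lock);
	while (eval_scheduled)
		pthread_cond_wait(&eval_done, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(worker, NULL);
	printf("all scheduled evaluations finished\n");
	return 0;
}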
@@ -409,10 +409,14 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	atomic_set(&css_eval_scheduled, 0);
	init_waitqueue_head(&css_eval_wq);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
@@ -468,9 +472,17 @@ static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)

static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
@@ -482,6 +494,7 @@ void css_schedule_eval(struct subchannel_id schid)
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
@@ -492,6 +505,7 @@ void css_schedule_eval_all(void)
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
@@ -1007,6 +1021,8 @@ static int __init channel_subsystem_init_sync(void)
{
	/* Allocate and register subchannels. */
	for_each_subchannel(setup_subchannel, NULL);
	/* Wait for the evaluation of subchannels to finish. */
	wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
	/* Wait for the initialization of ccw devices to finish. */
	wait_event(ccw_device_init_wq,
......
@@ -110,3 +110,13 @@ int idset_sch_get_first(struct idset *set, struct subchannel_id *schid)
	}
	return rc;
}

int idset_is_empty(struct idset *set)
{
	int bitnum;

	bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
	if (bitnum >= set->num_ssid * set->num_id)
		return 1;
	return 0;
}
@@ -21,5 +21,6 @@ void idset_sch_add(struct idset *set, struct subchannel_id id);
void idset_sch_del(struct idset *set, struct subchannel_id id);
int idset_sch_contains(struct idset *set, struct subchannel_id id);
int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
int idset_is_empty(struct idset *set);

#endif /* S390_IDSET_H */