/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/padata.txt for the API documentation.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

#define MAX_OBJ_NUM 1000

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd)
{
	unsigned int seq_nr;
	int cpu_index;

	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr mod. number of cpus in use.
	 */
	seq_nr = atomic_inc_return(&pd->seq_nr);
	cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}
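
/*
 * Worked example of the hash above: if cpumask.pcpu contains cpus {0, 2, 5},
 * the weight is 3 and consecutive sequence numbers map round robin onto the
 * mask: seq_nr 6 -> index 0 -> cpu 0, seq_nr 7 -> index 1 -> cpu 2,
 * seq_nr 8 -> index 2 -> cpu 5.
 */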

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;
	struct padata_instance *pinst;
	LIST_HEAD(local_list);

	local_bh_disable();
	pqueue = container_of(parallel_work,
			      struct padata_parallel_queue, work);
	pd = pqueue->pd;
	pinst = pd->pinst;

	spin_lock(&pqueue->parallel.lock);
	list_replace_init(&pqueue->parallel.list, &local_list);
	spin_unlock(&pqueue->parallel.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->parallel(padata);
	}

	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the serial cpumask of padata (i.e. cpumask.cbcpu).
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_instance *pinst,
		       struct padata_priv *padata, int cb_cpu)
{
	int target_cpu, err;
	struct padata_parallel_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(pinst->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
		goto out;

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = 0;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = cb_cpu;

	target_cpu = padata_cpu_hash(pd);
	queue = per_cpu_ptr(pd->pqueue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work_on(target_cpu, pinst->wq, &queue->work);

out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
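
/*
 * Usage sketch (illustrative only, not compiled as part of this file;
 * my_request, my_parallel, my_serial, process() and my_pinst are
 * hypothetical caller-side names). A user embeds struct padata_priv in
 * its own request, sets the callbacks and submits the object:
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *		void *data;
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *						struct my_request, padata);
 *
 *		process(req->data);	  // heavy work, runs with BHs off
 *		padata_do_serial(padata); // hand the object back for ordering
 *	}
 *
 *	req->padata.parallel = my_parallel;
 *	req->padata.serial = my_serial;
 *	err = padata_do_parallel(my_pinst, &req->padata, cb_cpu);
 */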

/*
 * padata_get_next - Get the next object that needs serialization.
 *
 * Return values are:
 *
 * A pointer to the control struct of the next object that needs
 * serialization, if present in one of the percpu reorder queues.
 *
 * NULL, if all percpu reorder queues are empty.
 *
 * -EINPROGRESS, if the next object that needs serialization will
 *  be parallel processed by another cpu and is not yet present in
 *  the cpu's reorder queue.
 *
 * -ENODATA, if this cpu has to do the parallel processing for
 *  the next object.
 */
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
	int cpu, num_cpus;
	unsigned int next_nr, next_index;
	struct padata_parallel_queue *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;

	num_cpus = cpumask_weight(pd->cpumask.pcpu);

	/*
	 * Calculate the percpu reorder queue and the sequence
	 * number of the next object.
	 */
	next_nr = pd->processed;
	next_index = next_nr % num_cpus;
	cpu = padata_index_to_cpu(pd, next_index);
	next_queue = per_cpu_ptr(pd->pqueue, cpu);

	padata = NULL;

	reorder = &next_queue->reorder;

	if (!list_empty(&reorder->list)) {
		padata = list_entry(reorder->list.next,
				    struct padata_priv, list);

		spin_lock(&reorder->lock);
		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);
		spin_unlock(&reorder->lock);

		pd->processed++;

		goto out;
	}

	if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
		padata = ERR_PTR(-ENODATA);
		goto out;
	}

	padata = ERR_PTR(-EINPROGRESS);
out:
	return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_instance *pinst = pd->pinst;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive at the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_get_next(pd);

		/*
		 * All reorder queues are empty, or the next object that needs
		 * serialization is parallel processed by another cpu and is
		 * still on its way to the cpu's reorder queue, nothing to
		 * do for now.
		 */
		if (!padata || PTR_ERR(padata) == -EINPROGRESS)
			break;

		/*
		 * This cpu has to do the parallel processing of the next
		 * object. It's waiting in the cpu's parallelization queue,
		 * so exit immediately.
		 */
		if (PTR_ERR(padata) == -ENODATA) {
			del_timer(&pd->timer);
			spin_unlock_bh(&pd->lock);
			return;
		}

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived at
	 * the reorder queues in the meantime, we will be called again
	 * from the timer function if no one else cares for it.
	 */
	if (atomic_read(&pd->reorder_objects)
			&& !(pinst->flags & PADATA_RESET))
		mod_timer(&pd->timer, jiffies + HZ);
	else
		del_timer(&pd->timer);

	return;
}

static void padata_reorder_timer(unsigned long arg)
{
	struct parallel_data *pd = (struct parallel_data *)arg;

	padata_reorder(pd);
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		atomic_dec(&pd->refcnt);
	}
	local_bh_enable();
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;

	pd = padata->pd;

	cpu = get_cpu();
	pqueue = per_cpu_ptr(pd->pqueue, cpu);

	spin_lock(&pqueue->reorder.lock);
	atomic_inc(&pd->reorder_objects);
	list_add_tail(&padata->list, &pqueue->reorder.list);
	spin_unlock(&pqueue->reorder.lock);

	put_cpu();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
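
/*
 * Sketch of the matching serialization callback (hypothetical names,
 * continuing the example above padata_get_next): objects arrive here in
 * the order they were submitted to padata_do_parallel(), with BHs off.
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *						struct my_request, padata);
 *
 *		complete_my_request(req);
 *	}
 */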

static int padata_setup_cpumasks(struct parallel_data *pd,
				 const struct cpumask *pcpumask,
				 const struct cpumask *cbcpumask)
{
	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
		/* the cbcpu mask failed to allocate; release the pcpu mask */
		free_cpumask_var(pd->cpumask.pcpu);
		return -ENOMEM;
	}

	cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
	return 0;
}

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
	int cpu_index, cpu;
	struct padata_parallel_queue *pqueue;

	cpu_index = 0;
	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		pqueue->pd = pd;
		pqueue->cpu_index = cpu_index;
		cpu_index++;

		__padata_list_init(&pqueue->reorder);
		__padata_list_init(&pqueue->parallel);
		INIT_WORK(&pqueue->work, padata_parallel_worker);
		atomic_set(&pqueue->num_obj, 0);
	}
}

/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
					     const struct cpumask *pcpumask,
					     const struct cpumask *cbcpumask)
{
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->pqueue = alloc_percpu(struct padata_parallel_queue);
	if (!pd->pqueue)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_pqueue;

	if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
		goto err_free_squeue;

	padata_init_pqueues(pd);
	padata_init_squeues(pd);
	setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
	atomic_set(&pd->seq_nr, -1);
	atomic_set(&pd->reorder_objects, 0);
	atomic_set(&pd->refcnt, 0);
	pd->pinst = pinst;
	spin_lock_init(&pd->lock);

	return pd;

err_free_squeue:
	free_percpu(pd->squeue);
err_free_pqueue:
	free_percpu(pd->pqueue);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->pqueue);
	free_percpu(pd->squeue);
	kfree(pd);
}

/* Flush all objects out of the padata queues. */
static void padata_flush_queues(struct parallel_data *pd)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		flush_work(&pqueue->work);
	}

	del_timer_sync(&pd->timer);

	if (atomic_read(&pd->reorder_objects))
		padata_reorder(pd);

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		flush_work(&squeue->work);
	}

	BUG_ON(atomic_read(&pd->refcnt) != 0);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();

	get_online_cpus();
	padata_flush_queues(pinst->pd);
	put_online_cpus();
}

/* Replace the internal control structure with a new one. */
static void padata_replace(struct padata_instance *pinst,
			   struct parallel_data *pd_new)
{
	struct parallel_data *pd_old = pinst->pd;
	int notification_mask = 0;

	pinst->flags |= PADATA_RESET;

	rcu_assign_pointer(pinst->pd, pd_new);

	synchronize_rcu();

	if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
		notification_mask |= PADATA_CPU_PARALLEL;
	if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
		notification_mask |= PADATA_CPU_SERIAL;

	padata_flush_queues(pd_old);
	padata_free_pd(pd_old);

	if (notification_mask)
		blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
					     notification_mask,
					     &pd_new->cpumask);

	pinst->flags &= ~PADATA_RESET;
}

/**
 * padata_register_cpumask_notifier - Registers a notifier that will be called
 *                             if either pcpu or cbcpu or both cpumasks change.
 *
 * @pinst: A pointer to padata instance
 * @nblock: A pointer to notifier block.
 */
int padata_register_cpumask_notifier(struct padata_instance *pinst,
				     struct notifier_block *nblock)
{
	return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
						nblock);
}
EXPORT_SYMBOL(padata_register_cpumask_notifier);

/**
 * padata_unregister_cpumask_notifier - Unregisters cpumask notifier
 *        registered earlier using padata_register_cpumask_notifier
 *
 * @pinst: A pointer to padata instance.
 * @nblock: A pointer to notifier block.
 */
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
				       struct notifier_block *nblock)
{
	return blocking_notifier_chain_unregister(
		&pinst->cpumask_change_notifier,
		nblock);
}
EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
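
/*
 * Usage sketch for the notifier API (my_cpumask_change and my_nb are
 * hypothetical; this assumes struct padata_cpumask as declared in
 * linux/padata.h). padata_replace() passes the new cpumask pair as the
 * notifier data and encodes which mask changed in the action argument:
 *
 *	static int my_cpumask_change(struct notifier_block *nb,
 *				     unsigned long mask_flags, void *data)
 *	{
 *		struct padata_cpumask *new_masks = data;
 *
 *		if (mask_flags & PADATA_CPU_PARALLEL)
 *			;	// react to new_masks->pcpu
 *		if (mask_flags & PADATA_CPU_SERIAL)
 *			;	// react to new_masks->cbcpu
 *		return 0;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_cpumask_change,
 *	};
 *
 *	padata_register_cpumask_notifier(pinst, &my_nb);
 */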


/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	struct parallel_data *pd;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		return -ENOMEM;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	padata_replace(pinst, pd);

	if (valid)
		__padata_start(pinst);

	return 0;
}

/**
 * padata_set_cpumasks - Set both parallel and serial cpumasks. The first
 *                       one is used by parallel workers and the second one
 *                       by the workers doing serialization.
 *
 * @pinst: padata instance
 * @pcpumask: the cpumask to use for parallel workers
 * @cbcpumask: the cpumask to use for serial workers
 */
int padata_set_cpumasks(struct padata_instance *pinst, cpumask_var_t pcpumask,
			cpumask_var_t cbcpumask)
{
	int err;

	mutex_lock(&pinst->lock);
	get_online_cpus();

	err = __padata_set_cpumasks(pinst, pcpumask, cbcpumask);

	put_online_cpus();
	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_set_cpumasks);

/**
 * padata_set_cpumask - Sets the cpumask specified by @cpumask_type to
 *                      the value of @cpumask.
 *
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, selecting the
 *                serial or parallel cpumask respectively.
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	mutex_lock(&pinst->lock);
	get_online_cpus();

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	put_online_cpus();
	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
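
/*
 * Example (sketch): restrict the parallel workers to cpus 0 and 1 while
 * leaving the serial cpumask untouched:
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_clear(mask);
 *	cpumask_set_cpu(0, mask);
 *	cpumask_set_cpu(1, mask);
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);
 */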

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return 0;
}

/**
 * padata_add_cpu - add a cpu to one or both (parallel and serial)
 *                  padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to add
 * @mask: bitmask of flags specifying to which cpumask @cpu should be added.
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_set_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_set_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_add_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_add_cpu);
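
/*
 * Example (sketch): make cpu 3 usable for both parallelization and
 * serialization; padata_remove_cpu() below takes the same flags to
 * undo this:
 *
 *	err = padata_add_cpu(pinst, 3,
 *			     PADATA_CPU_PARALLEL | PADATA_CPU_SERIAL);
 */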

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd = NULL;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {

		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
		cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
	}

	return 0;
}

/**
 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
 *                     padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 * @mask: bitmask specifying from which cpumask @cpu should be removed.
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_remove_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_remove_cpu);

/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
	int err = 0;

	mutex_lock(&pinst->lock);

	if (pinst->flags & PADATA_INVALID)
		err = -EINVAL;

	__padata_start(pinst);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_start);

/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
	mutex_lock(&pinst->lock);
	__padata_stop(pinst);
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);

#ifdef CONFIG_HOTPLUG_CPU

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}


static int padata_cpu_callback(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int err;
	struct padata_instance *pinst;
	int cpu = (unsigned long)hcpu;

	pinst = container_of(nfb, struct padata_instance, cpu_notifier);

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_add_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return notifier_from_errno(err);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_remove_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return notifier_from_errno(err);
		break;
	}

	return NOTIFY_OK;
}
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	unregister_hotcpu_notifier(&pinst->cpu_notifier);
#endif

	padata_stop(pinst);
	padata_free_pd(pinst->pd);
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr,  char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;


	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)		\
	static struct padata_sysfs_entry _name##_attr = \
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
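
/*
 * Example: for a user such as pcrypt, which registers its instance
 * kobjects under /sys/kernel/pcrypt, the parallel cpumask can be changed
 * from userspace with e.g.
 *
 *	echo f > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *
 * (path as documented in Documentation/padata.txt; other users may add
 * the kobject elsewhere).
 */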

static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_attrs = padata_default_attrs,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc_possible - Allocate and initialize padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 */
struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
{
	return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);

/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 */
struct padata_instance *padata_alloc(struct workqueue_struct *wq,
				     const struct cpumask *pcpumask,
				     const struct cpumask *cbcpumask)
{
	struct padata_instance *pinst;
	struct parallel_data *pd = NULL;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	get_online_cpus();
	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_inst;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_inst;
	}
	if (!padata_validate_cpumask(pinst, pcpumask) ||
	    !padata_validate_cpumask(pinst, cbcpumask))
		goto err_free_masks;

	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		goto err_free_masks;

	rcu_assign_pointer(pinst->pd, pd);

	pinst->wq = wq;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	pinst->flags = 0;

	put_online_cpus();

	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	pinst->cpu_notifier.notifier_call = padata_cpu_callback;
	pinst->cpu_notifier.priority = 0;
	register_hotcpu_notifier(&pinst->cpu_notifier);
#endif

	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_inst:
	kfree(pinst);
	put_online_cpus();
err:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc);
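
/*
 * Typical instance lifecycle (sketch; error handling elided, "my_wq" is a
 * hypothetical caller-created workqueue):
 *
 *	struct workqueue_struct *my_wq;
 *	struct padata_instance *pinst;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
 *	pinst = padata_alloc_possible(my_wq);
 *	padata_start(pinst);
 *
 *	// ... padata_do_parallel()/padata_do_serial() traffic ...
 *
 *	padata_stop(pinst);
 *	padata_free(pinst);
 */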

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);