/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/padata.txt for the API documentation.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

#define MAX_OBJ_NUM 1000

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd)
{
	int cpu_index;

	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr modulo the number of cpus in use.
	 */

	spin_lock(&pd->seq_lock);
	cpu_index = pd->seq_nr % cpumask_weight(pd->cpumask.pcpu);
	pd->seq_nr++;
	spin_unlock(&pd->seq_lock);

	return padata_index_to_cpu(pd, cpu_index);
}

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;
	struct padata_instance *pinst;
	LIST_HEAD(local_list);

	local_bh_disable();
	pqueue = container_of(parallel_work,
			      struct padata_parallel_queue, work);
	pd = pqueue->pd;
	pinst = pd->pinst;

	spin_lock(&pqueue->parallel.lock);
	list_replace_init(&pqueue->parallel.list, &local_list);
	spin_unlock(&pqueue->parallel.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->parallel(padata);
	}

	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the serial cpumask of padata (i.e. cpumask.cbcpu).
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_instance *pinst,
		       struct padata_priv *padata, int cb_cpu)
{
	int target_cpu, err;
	struct padata_parallel_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference(pinst->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
		goto out;

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = 0;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = cb_cpu;

	target_cpu = padata_cpu_hash(pd);
	queue = per_cpu_ptr(pd->pqueue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work_on(target_cpu, pinst->wq, &queue->work);

out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
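
/*
 * Usage sketch (not part of padata itself): a caller typically embeds a
 * struct padata_priv in its own request structure, sets the parallel and
 * serial callbacks and then submits the object. The names my_request,
 * my_parallel, my_serial and my_submit below are hypothetical.
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *		void *data;
 *	};
 *
 *	static int my_submit(struct padata_instance *pinst,
 *			     struct my_request *req, int cb_cpu)
 *	{
 *		req->padata.parallel = my_parallel;
 *		req->padata.serial = my_serial;
 *
 *		return padata_do_parallel(pinst, &req->padata, cb_cpu);
 *	}
 *
 * A return value of -EINVAL or -EBUSY means the object was not queued and
 * the caller remains responsible for it.
 */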

/*
 * padata_get_next - Get the next object that needs serialization.
 *
 * Return values are:
 *
 * A pointer to the control struct of the next object that needs
 * serialization, if present in one of the percpu reorder queues.
 *
 * NULL, if all percpu reorder queues are empty.
 *
 * -EINPROGRESS, if the next object that needs serialization will
 *  be parallel processed by another cpu and is not yet present in
 *  the cpu's reorder queue.
 *
 * -ENODATA, if this cpu has to do the parallel processing for
 *  the next object.
 */
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
	int cpu, num_cpus;
	unsigned int next_nr, next_index;
	struct padata_parallel_queue *queue, *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;

	num_cpus = cpumask_weight(pd->cpumask.pcpu);

	/*
	 * Calculate the percpu reorder queue and the sequence
	 * number of the next object.
	 */
	next_nr = pd->processed;
	next_index = next_nr % num_cpus;
	cpu = padata_index_to_cpu(pd, next_index);
	next_queue = per_cpu_ptr(pd->pqueue, cpu);

	padata = NULL;

	reorder = &next_queue->reorder;

	if (!list_empty(&reorder->list)) {
		padata = list_entry(reorder->list.next,
				    struct padata_priv, list);

		spin_lock(&reorder->lock);
		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);
		spin_unlock(&reorder->lock);

		pd->processed++;

		goto out;
	}

	queue = per_cpu_ptr(pd->pqueue, smp_processor_id());
	if (queue->cpu_index == next_queue->cpu_index) {
		padata = ERR_PTR(-ENODATA);
		goto out;
	}

	padata = ERR_PTR(-EINPROGRESS);
out:
	return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_instance *pinst = pd->pinst;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive at the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the hold time of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_get_next(pd);

		/*
		 * All reorder queues are empty, or the next object that needs
		 * serialization is parallel processed by another cpu and is
		 * still on its way to the cpu's reorder queue, nothing to
		 * do for now.
		 */
		if (!padata || PTR_ERR(padata) == -EINPROGRESS)
			break;

		/*
		 * This cpu has to do the parallel processing of the next
		 * object. It's waiting in the cpu's parallelization queue,
		 * so exit immediately.
		 */
		if (PTR_ERR(padata) == -ENODATA) {
			del_timer(&pd->timer);
			spin_unlock_bh(&pd->lock);
			return;
		}

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived at
	 * the reorder queues in the meantime; we will be called again
	 * from the timer function if no one else cares for it.
	 */
	if (atomic_read(&pd->reorder_objects)
			&& !(pinst->flags & PADATA_RESET))
		mod_timer(&pd->timer, jiffies + HZ);
	else
		del_timer(&pd->timer);

	return;
}

static void padata_reorder_timer(unsigned long arg)
{
	struct parallel_data *pd = (struct parallel_data *)arg;

	padata_reorder(pd);
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		atomic_dec(&pd->refcnt);
	}
	local_bh_enable();
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;

	pd = padata->pd;

	cpu = get_cpu();
	pqueue = per_cpu_ptr(pd->pqueue, cpu);

	spin_lock(&pqueue->reorder.lock);
	atomic_inc(&pd->reorder_objects);
	list_add_tail(&padata->list, &pqueue->reorder.list);
	spin_unlock(&pqueue->reorder.lock);

	put_cpu();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
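
/*
 * Usage sketch (hypothetical callbacks, continuing the my_request example
 * above): the parallel callback does the cpu-intensive work and hands the
 * object back via padata_do_serial(); the serial callback then runs on the
 * requested cb_cpu in the original submission order.
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *						      struct my_request, padata);
 *
 *		my_expensive_work(req);		// runs with BHs off
 *		padata_do_serial(padata);	// hand over for serialization
 *	}
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *						      struct my_request, padata);
 *
 *		my_complete(req);		// runs on cb_cpu, in order
 *	}
 */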

static int padata_setup_cpumasks(struct parallel_data *pd,
				 const struct cpumask *pcpumask,
				 const struct cpumask *cbcpumask)
{
	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_active_mask);
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pd->cpumask.pcpu);
		return -ENOMEM;
	}

	cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_active_mask);
	return 0;
}

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
	int cpu_index, cpu;
	struct padata_parallel_queue *pqueue;

	cpu_index = 0;
	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		pqueue->pd = pd;
		pqueue->cpu_index = cpu_index;
		cpu_index++;

		__padata_list_init(&pqueue->reorder);
		__padata_list_init(&pqueue->parallel);
		INIT_WORK(&pqueue->work, padata_parallel_worker);
		atomic_set(&pqueue->num_obj, 0);
	}
}

/* Allocate and initialize the internal cpumask-dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
					     const struct cpumask *pcpumask,
					     const struct cpumask *cbcpumask)
{
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->pqueue = alloc_percpu(struct padata_parallel_queue);
	if (!pd->pqueue)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_pqueue;
	if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
		goto err_free_squeue;

	padata_init_pqueues(pd);
	padata_init_squeues(pd);
	setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
	pd->seq_nr = 0;
	atomic_set(&pd->reorder_objects, 0);
	atomic_set(&pd->refcnt, 0);
	pd->pinst = pinst;
	spin_lock_init(&pd->lock);

	return pd;

err_free_squeue:
	free_percpu(pd->squeue);
err_free_pqueue:
	free_percpu(pd->pqueue);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->pqueue);
	free_percpu(pd->squeue);
	kfree(pd);
}

/* Flush all objects out of the padata queues. */
static void padata_flush_queues(struct parallel_data *pd)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		flush_work(&pqueue->work);
	}

	del_timer_sync(&pd->timer);

	if (atomic_read(&pd->reorder_objects))
		padata_reorder(pd);

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		flush_work(&squeue->work);
	}

	BUG_ON(atomic_read(&pd->refcnt) != 0);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();

	get_online_cpus();
	padata_flush_queues(pinst->pd);
	put_online_cpus();
}

/* Replace the internal control structure with a new one. */
static void padata_replace(struct padata_instance *pinst,
			   struct parallel_data *pd_new)
{
	struct parallel_data *pd_old = pinst->pd;
	int notification_mask = 0;

	pinst->flags |= PADATA_RESET;

	rcu_assign_pointer(pinst->pd, pd_new);

	synchronize_rcu();

	if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
		notification_mask |= PADATA_CPU_PARALLEL;
	if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
		notification_mask |= PADATA_CPU_SERIAL;

	padata_flush_queues(pd_old);
	padata_free_pd(pd_old);

	if (notification_mask)
		blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
					     notification_mask,
					     &pd_new->cpumask);

	pinst->flags &= ~PADATA_RESET;
}

/**
 * padata_register_cpumask_notifier - Registers a notifier that will be called
 *                             if either pcpu or cbcpu or both cpumasks change.
 *
 * @pinst: A pointer to the padata instance.
 * @nblock: A pointer to the notifier block.
 */
int padata_register_cpumask_notifier(struct padata_instance *pinst,
				     struct notifier_block *nblock)
{
	return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
						nblock);
}
EXPORT_SYMBOL(padata_register_cpumask_notifier);

/**
 * padata_unregister_cpumask_notifier - Unregisters cpumask notifier
 *        registered earlier using padata_register_cpumask_notifier
 *
 * @pinst: A pointer to the padata instance.
 * @nblock: A pointer to the notifier block.
 */
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
				       struct notifier_block *nblock)
{
	return blocking_notifier_chain_unregister(
		&pinst->cpumask_change_notifier,
		nblock);
}
EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
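
/*
 * Usage sketch (the callback and its helpers are hypothetical): the notifier
 * is invoked with the changed-cpumask flags as the value argument and a
 * pointer to the new struct padata_cpumask as the data argument.
 *
 *	static int my_cpumask_change(struct notifier_block *nb,
 *				     unsigned long val, void *data)
 *	{
 *		struct padata_cpumask *new_masks = data;
 *
 *		if (val & PADATA_CPU_PARALLEL)
 *			my_resize_parallel_state(new_masks->pcpu);
 *		if (val & PADATA_CPU_SERIAL)
 *			my_resize_serial_state(new_masks->cbcpu);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nblock = {
 *		.notifier_call = my_cpumask_change,
 *	};
 *
 *	padata_register_cpumask_notifier(pinst, &my_nblock);
 */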


/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_active_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	struct parallel_data *pd;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		return -ENOMEM;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	padata_replace(pinst, pd);

	if (valid)
		__padata_start(pinst);

	return 0;
}

/**
 * padata_set_cpumasks - Set both parallel and serial cpumasks. The first
 *                       one is used by parallel workers and the second one
 *                       by the workers doing serialization.
 *
 * @pinst: padata instance
 * @pcpumask: the cpumask to use for parallel workers
 * @cbcpumask: the cpumask to use for serial workers
 */
int padata_set_cpumasks(struct padata_instance *pinst, cpumask_var_t pcpumask,
			cpumask_var_t cbcpumask)
{
	int err;

	mutex_lock(&pinst->lock);
	get_online_cpus();

	err = __padata_set_cpumasks(pinst, pcpumask, cbcpumask);

	put_online_cpus();
	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_set_cpumasks);

/**
 * padata_set_cpumask - Set the cpumask specified by @cpumask_type to
 *                      the value given in @cpumask.
 *
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_PARALLEL or PADATA_CPU_SERIAL, corresponding
 *                to the parallel or the serial cpumask respectively.
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	mutex_lock(&pinst->lock);
	get_online_cpus();

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	put_online_cpus();
	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
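
/*
 * Usage sketch: restrict the parallel workers of an instance to cpus 0 and 1.
 * Error handling of padata_set_cpumask() is up to the caller.
 *
 *	cpumask_var_t mask;
 *	int err;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	cpumask_clear(mask);
 *	cpumask_set_cpu(0, mask);
 *	cpumask_set_cpu(1, mask);
 *
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);
 */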

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd;

	if (cpumask_test_cpu(cpu, cpu_active_mask)) {
		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return 0;
}

/**
 * padata_add_cpu - add a cpu to one or both (parallel and serial)
 *                  padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to add
 * @mask: bitmask of flags specifying to which cpumask @cpu should be added.
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */

int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_set_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_set_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_add_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_add_cpu);
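
/*
 * Usage sketch: make cpu 3 usable for both parallelization and
 * serialization of an already allocated instance.
 *
 *	err = padata_add_cpu(pinst, 3, PADATA_CPU_PARALLEL | PADATA_CPU_SERIAL);
 */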

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd = NULL;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {

		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);
	}

	return 0;
}

/**
 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
 *                     padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 * @mask: bitmask specifying from which cpumask @cpu should be removed
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_remove_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_remove_cpu);

/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
	int err = 0;

	mutex_lock(&pinst->lock);

	if (pinst->flags & PADATA_INVALID)
		err = -EINVAL;

	__padata_start(pinst);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_start);

/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
	mutex_lock(&pinst->lock);
	__padata_stop(pinst);
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);

#ifdef CONFIG_HOTPLUG_CPU

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}


static int padata_cpu_callback(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int err;
	struct padata_instance *pinst;
	int cpu = (unsigned long)hcpu;

	pinst = container_of(nfb, struct padata_instance, cpu_notifier);

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_add_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return notifier_from_errno(err);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_remove_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return notifier_from_errno(err);
		break;

	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		__padata_remove_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		break;

	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		if (!pinst_has_cpu(pinst, cpu))
			break;
		mutex_lock(&pinst->lock);
		__padata_add_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
	}

	return NOTIFY_OK;
}
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	unregister_hotcpu_notifier(&pinst->cpu_notifier);
#endif

	padata_stop(pinst);
	padata_free_pd(pinst->pd);
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr,  char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = bitmap_scnprintf(buf, PAGE_SIZE, cpumask_bits(cpumask),
			       nr_cpu_ids);
	if (PAGE_SIZE - len < 2)
		len = -EINVAL;
	else
		len += sprintf(buf + len, "\n");

	mutex_unlock(&pinst->lock);
	return len;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)		\
	static struct padata_sysfs_entry _name##_attr = \
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};

static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_attrs = padata_default_attrs,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc_possible - Allocate and initialize padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 */
struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
{
	return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);

/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 */
struct padata_instance *padata_alloc(struct workqueue_struct *wq,
				     const struct cpumask *pcpumask,
				     const struct cpumask *cbcpumask)
{
	struct padata_instance *pinst;
	struct parallel_data *pd = NULL;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	get_online_cpus();
	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_inst;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_inst;
	}
	if (!padata_validate_cpumask(pinst, pcpumask) ||
	    !padata_validate_cpumask(pinst, cbcpumask))
		goto err_free_masks;

	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		goto err_free_masks;

	rcu_assign_pointer(pinst->pd, pd);

	pinst->wq = wq;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	pinst->flags = 0;

#ifdef CONFIG_HOTPLUG_CPU
	pinst->cpu_notifier.notifier_call = padata_cpu_callback;
	pinst->cpu_notifier.priority = 0;
	register_hotcpu_notifier(&pinst->cpu_notifier);
#endif

	put_online_cpus();

	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_inst:
	kfree(pinst);
	put_online_cpus();
err:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc);

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);
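
/*
 * Typical lifecycle of a padata instance (a sketch; the workqueue name and
 * error labels are hypothetical):
 *
 *	struct workqueue_struct *wq;
 *	struct padata_instance *pinst;
 *
 *	wq = alloc_workqueue("my_padata_wq",
 *			     WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
 *	if (!wq)
 *		goto err;
 *
 *	pinst = padata_alloc_possible(wq);
 *	if (!pinst)
 *		goto err_destroy_wq;
 *
 *	padata_start(pinst);
 *
 *	// ... submit objects with padata_do_parallel() ...
 *
 *	padata_stop(pinst);
 *	padata_free(pinst);
 *	destroy_workqueue(wq);
 */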