/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison.h"

#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>

/*----------------------------------------------------------------*/

17
struct bucket {
18
	spinlock_t lock;
19 20 21 22
	struct hlist_head cells;
};

struct dm_bio_prison {
23 24 25 26
	mempool_t *cell_pool;

	unsigned nr_buckets;
	unsigned hash_mask;
27
	struct bucket *buckets;
28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46
};

/*----------------------------------------------------------------*/

/*
 * Pick a power-of-two bucket count for the hash table: the smallest
 * power of two >= nr_cells / 4, clamped to the range [128, 8192].
 */
static uint32_t calc_nr_buckets(unsigned nr_cells)
{
	uint32_t nr_buckets = 128;
	unsigned target = nr_cells / 4;

	if (target > 8192u)
		target = 8192u;

	while (nr_buckets < target)
		nr_buckets <<= 1;

	return nr_buckets;
}

/* Slab cache backing every prison's cell mempool; created at module init. */
static struct kmem_cache *_cell_cache;

47 48 49 50 51 52
static void init_bucket(struct bucket *b)
{
	spin_lock_init(&b->lock);
	INIT_HLIST_HEAD(&b->cells);
}

53 54 55 56 57 58 59 60 61
/*
 * @nr_cells should be the number of cells you want in use _concurrently_.
 * Don't confuse it with the number of distinct keys.
 */
struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells)
{
	unsigned i;
	uint32_t nr_buckets = calc_nr_buckets(nr_cells);
	size_t len = sizeof(struct dm_bio_prison) +
62
		(sizeof(struct bucket) * nr_buckets);
63 64 65 66 67 68 69 70 71 72 73 74 75
	struct dm_bio_prison *prison = kmalloc(len, GFP_KERNEL);

	if (!prison)
		return NULL;

	prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
	if (!prison->cell_pool) {
		kfree(prison);
		return NULL;
	}

	prison->nr_buckets = nr_buckets;
	prison->hash_mask = nr_buckets - 1;
76
	prison->buckets = (struct bucket *) (prison + 1);
77
	for (i = 0; i < nr_buckets; i++)
78
		init_bucket(prison->buckets + i);
79 80 81 82 83 84 85 86 87 88 89 90

	return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create);

/*
 * Free a prison created with dm_bio_prison_create().  The caller must
 * ensure no cells are still held.
 */
void dm_bio_prison_destroy(struct dm_bio_prison *prison)
{
	mempool_destroy(prison->cell_pool);
	kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);

91 92 93 94 95 96 97 98 99 100 101 102 103
struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
{
	return mempool_alloc(prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell);

/* Return a cell obtained from dm_bio_prison_alloc_cell() to the pool. */
void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
			     struct dm_bio_prison_cell *cell)
{
	mempool_free(cell, prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);

104 105 106 107 108 109 110 111 112 113 114 115 116 117 118
static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key)
{
	const unsigned long BIG_PRIME = 4294967291UL;
	uint64_t hash = key->block * BIG_PRIME;

	return (uint32_t) (hash & prison->hash_mask);
}

/* Keys match only when virtual, dev and block all agree. */
static int keys_equal(struct dm_cell_key *lhs, struct dm_cell_key *rhs)
{
	if (lhs->virtual != rhs->virtual)
		return 0;
	if (lhs->dev != rhs->dev)
		return 0;
	return lhs->block == rhs->block;
}

119 120 121 122 123 124 125
static struct bucket *get_bucket(struct dm_bio_prison *prison,
				 struct dm_cell_key *key)
{
	return prison->buckets + hash_key(prison, key);
}

static struct dm_bio_prison_cell *__search_bucket(struct bucket *b,
126 127 128 129
						  struct dm_cell_key *key)
{
	struct dm_bio_prison_cell *cell;

130
	hlist_for_each_entry(cell, &b->cells, list)
131 132 133 134 135 136
		if (keys_equal(&cell->key, key))
			return cell;

	return NULL;
}

137
static void __setup_new_cell(struct bucket *b,
138 139 140
			     struct dm_cell_key *key,
			     struct bio *holder,
			     struct dm_bio_prison_cell *cell)
141
{
142 143 144
	memcpy(&cell->key, key, sizeof(cell->key));
	cell->holder = holder;
	bio_list_init(&cell->bios);
145
	hlist_add_head(&cell->list, &b->cells);
146
}
147

148
static int __bio_detain(struct bucket *b,
149 150 151 152 153 154
			struct dm_cell_key *key,
			struct bio *inmate,
			struct dm_bio_prison_cell *cell_prealloc,
			struct dm_bio_prison_cell **cell_result)
{
	struct dm_bio_prison_cell *cell;
155

156
	cell = __search_bucket(b, key);
157
	if (cell) {
158 159 160 161
		if (inmate)
			bio_list_add(&cell->bios, inmate);
		*cell_result = cell;
		return 1;
162 163
	}

164
	__setup_new_cell(b, key, inmate, cell_prealloc);
165 166 167
	*cell_result = cell_prealloc;
	return 0;
}
168

169 170 171 172 173 174 175 176
static int bio_detain(struct dm_bio_prison *prison,
		      struct dm_cell_key *key,
		      struct bio *inmate,
		      struct dm_bio_prison_cell *cell_prealloc,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	unsigned long flags;
177
	struct bucket *b = get_bucket(prison, key);
178

179 180 181
	spin_lock_irqsave(&b->lock, flags);
	r = __bio_detain(b, key, inmate, cell_prealloc, cell_result);
	spin_unlock_irqrestore(&b->lock, flags);
182 183 184

	return r;
}
185 186 187 188 189 190 191 192 193

/*
 * Detain @inmate behind @key.  Returns 1 if the cell was already held
 * (@inmate is now queued inside it), 0 if @cell_prealloc became the
 * new cell with @inmate as its holder.
 */
int dm_bio_detain(struct dm_bio_prison *prison,
		  struct dm_cell_key *key,
		  struct bio *inmate,
		  struct dm_bio_prison_cell *cell_prealloc,
		  struct dm_bio_prison_cell **cell_result)
{
	return bio_detain(prison, key, inmate, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_bio_detain);

/*
 * Like dm_bio_detain() but with no bio to queue: just acquire (or find)
 * the cell for @key.  Returns 1 if the cell was already held, 0 if
 * @cell_prealloc was installed as the new cell (with a NULL holder).
 */
int dm_get_cell(struct dm_bio_prison *prison,
		struct dm_cell_key *key,
		struct dm_bio_prison_cell *cell_prealloc,
		struct dm_bio_prison_cell **cell_result)
{
	return bio_detain(prison, key, NULL, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_get_cell);

205 206 207
/*
 * @inmates must have been initialised prior to this call
 */
208 209
static void __cell_release(struct dm_bio_prison_cell *cell,
			   struct bio_list *inmates)
210 211 212 213
{
	hlist_del(&cell->list);

	if (inmates) {
214 215
		if (cell->holder)
			bio_list_add(inmates, cell->holder);
216 217 218 219
		bio_list_merge(inmates, &cell->bios);
	}
}

220 221 222
void dm_cell_release(struct dm_bio_prison *prison,
		     struct dm_bio_prison_cell *cell,
		     struct bio_list *bios)
223 224
{
	unsigned long flags;
225
	struct bucket *b = get_bucket(prison, &cell->key);
226

227
	spin_lock_irqsave(&b->lock, flags);
228
	__cell_release(cell, bios);
229
	spin_unlock_irqrestore(&b->lock, flags);
230 231 232 233 234 235
}
EXPORT_SYMBOL_GPL(dm_cell_release);

/*
 * Sometimes we don't want the holder, just the additional bios.
 */
236 237
static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
				     struct bio_list *inmates)
238 239 240 241 242
{
	hlist_del(&cell->list);
	bio_list_merge(inmates, &cell->bios);
}

243 244 245
void dm_cell_release_no_holder(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell,
			       struct bio_list *inmates)
246 247
{
	unsigned long flags;
248
	struct bucket *b = get_bucket(prison, &cell->key);
249

250
	spin_lock_irqsave(&b->lock, flags);
251
	__cell_release_no_holder(cell, inmates);
252
	spin_unlock_irqrestore(&b->lock, flags);
253 254 255
}
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);

256
void dm_cell_error(struct dm_bio_prison *prison,
257
		   struct dm_bio_prison_cell *cell, int error)
258 259 260 261 262
{
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);
263
	dm_cell_release(prison, cell, &bios);
264 265

	while ((bio = bio_list_pop(&bios)))
266
		bio_endio(bio, error);
267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408
}
EXPORT_SYMBOL_GPL(dm_cell_error);

/*----------------------------------------------------------------*/

/* Number of slots in a deferred set's ring of entries. */
#define DEFERRED_SET_SIZE 64

/* One slot in the deferred-set ring. */
struct dm_deferred_entry {
	struct dm_deferred_set *ds;	/* owning set */
	unsigned count;			/* outstanding inc()s against this slot */
	struct list_head work_items;	/* work queued behind this slot */
};

/*
 * A ring of entries.  current_entry is where new references are taken;
 * sweeper trails behind, draining slots whose count has hit zero.
 * lock protects all of the below.
 */
struct dm_deferred_set {
	spinlock_t lock;
	unsigned current_entry;
	unsigned sweeper;
	struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
};

struct dm_deferred_set *dm_deferred_set_create(void)
{
	int i;
	struct dm_deferred_set *ds;

	ds = kmalloc(sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return NULL;

	spin_lock_init(&ds->lock);
	ds->current_entry = 0;
	ds->sweeper = 0;
	for (i = 0; i < DEFERRED_SET_SIZE; i++) {
		ds->entries[i].ds = ds;
		ds->entries[i].count = 0;
		INIT_LIST_HEAD(&ds->entries[i].work_items);
	}

	return ds;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_create);

/* Free a set created with dm_deferred_set_create(). */
void dm_deferred_set_destroy(struct dm_deferred_set *ds)
{
	kfree(ds);
}
EXPORT_SYMBOL_GPL(dm_deferred_set_destroy);

/*
 * Take a reference against the current ring slot.  The returned entry
 * must later be dropped with dm_deferred_entry_dec().
 */
struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
{
	unsigned long flags;
	struct dm_deferred_entry *entry;

	spin_lock_irqsave(&ds->lock, flags);
	entry = ds->entries + ds->current_entry;
	entry->count++;
	spin_unlock_irqrestore(&ds->lock, flags);

	return entry;
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);

/* Next slot index in the ring, wrapping at DEFERRED_SET_SIZE. */
static unsigned ds_next(unsigned index)
{
	return (index + 1) % DEFERRED_SET_SIZE;
}

/*
 * Advance the sweeper over fully-drained slots, splicing their queued
 * work onto @head.  Stops at the first slot with outstanding references
 * or at current_entry; current_entry's own work is also collected if
 * its count is zero.  Caller must hold ds->lock.
 */
static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
{
	while ((ds->sweeper != ds->current_entry) &&
	       !ds->entries[ds->sweeper].count) {
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
		ds->sweeper = ds_next(ds->sweeper);
	}

	/* The loop stops when sweeper catches current_entry; drain it too. */
	if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
		list_splice_init(&ds->entries[ds->sweeper].work_items, head);
}

/*
 * Drop a reference taken with dm_deferred_entry_inc().  Any work whose
 * slot has fully drained is spliced onto @head for the caller to run.
 */
void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
{
	unsigned long flags;

	spin_lock_irqsave(&entry->ds->lock, flags);
	BUG_ON(!entry->count);
	--entry->count;
	__sweep(entry->ds, head);
	spin_unlock_irqrestore(&entry->ds->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);

/*
 * Queue @work behind the current slot's outstanding references.
 * Returns 1 if the work was deferred, or 0 if there are no pending
 * references and the caller may run the job immediately.
 */
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
	int r = 1;
	unsigned long flags;
	unsigned next_entry;

	spin_lock_irqsave(&ds->lock, flags);
	if ((ds->sweeper == ds->current_entry) &&
	    !ds->entries[ds->current_entry].count)
		r = 0;
	else {
		list_add(work, &ds->entries[ds->current_entry].work_items);
		/* Rotate to the next slot if it has drained, so new refs
		 * don't pile up behind this work forever. */
		next_entry = ds_next(ds->current_entry);
		if (!ds->entries[next_entry].count)
			ds->current_entry = next_entry;
	}
	spin_unlock_irqrestore(&ds->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_add_work);
EXPORT_SYMBOL_GPL(dm_deferred_set_add_work);

/*----------------------------------------------------------------*/

/* Module init: create the slab cache cells are allocated from. */
static int __init dm_bio_prison_init(void)
{
	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
	if (!_cell_cache)
		return -ENOMEM;

	return 0;
}

/* Module exit: tear down the cell slab cache. */
static void __exit dm_bio_prison_exit(void)
{
	kmem_cache_destroy(_cell_cache);
	_cell_cache = NULL;
}

/*
 * Module hooks and metadata.
 */
module_init(dm_bio_prison_init);
module_exit(dm_bio_prison_exit);

MODULE_DESCRIPTION(DM_NAME " bio prison");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");