// SPDX-License-Identifier: GPL-2.0
/*
 * KASAN quarantine.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */

#include <linux/gfp.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/cpuhotplug.h>

#include "../slab.h"
#include "kasan.h"

/* Data structure and operations for quarantine queues. */

/*
 * Each queue is a singly linked list, which also stores the total size of
 * the objects inside it.
 */
struct qlist_head {
	struct qlist_node *head;
	struct qlist_node *tail;
	size_t bytes;
	bool offline;
};

#define QLIST_INIT { NULL, NULL, 0 }

static bool qlist_empty(struct qlist_head *q)
{
	return !q->head;
}

static void qlist_init(struct qlist_head *q)
{
	q->head = q->tail = NULL;
	q->bytes = 0;
}

/* Append @qlink to the tail of @q, accounting @size bytes to the queue. */
static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
		size_t size)
{
	if (unlikely(qlist_empty(q)))
		q->head = qlink;
	else
		q->tail->next = qlink;
	q->tail = qlink;
	qlink->next = NULL;
	q->bytes += size;
}

/* Splice everything on @from onto the tail of @to and re-initialize @from. */
static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
{
	if (unlikely(qlist_empty(from)))
		return;

	if (qlist_empty(to)) {
		*to = *from;
		qlist_init(from);
		return;
	}

	to->tail->next = from->head;
	to->tail = from->tail;
	to->bytes += from->bytes;

	qlist_init(from);
}

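/*
 * The global quarantine is split into batches so that walks over it can
 * periodically drop quarantine_lock, and so that quarantine_reduce() can
 * free a bounded amount (one batch) at a time.
 */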
#define QUARANTINE_PERCPU_SIZE (1 << 20)
#define QUARANTINE_BATCHES \
	(1024 > 4 * CONFIG_NR_CPUS ? 1024 : 4 * CONFIG_NR_CPUS)

/*
 * The object quarantine consists of per-cpu queues and a global queue,
 * guarded by quarantine_lock.
 */
static DEFINE_PER_CPU(struct qlist_head, cpu_quarantine);

/* Round-robin FIFO array of batches. */
static struct qlist_head global_quarantine[QUARANTINE_BATCHES];
/* Index of the oldest batch, the next one to be freed. */
static int quarantine_head;
/* Index of the batch that quarantine_put() currently fills. */
static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
static DEFINE_RAW_SPINLOCK(quarantine_lock);
DEFINE_STATIC_SRCU(remove_cache_srcu);

/* Maximum size of the global queue. */
static unsigned long quarantine_max_size;

/*
 * Target size of a batch in global_quarantine.
 * Usually equal to QUARANTINE_PERCPU_SIZE unless we have too much RAM.
 */
static unsigned long quarantine_batch_size;

/*
 * The fraction of physical memory the quarantine is allowed to occupy.
 * The quarantine doesn't support the memory shrinker with the SLAB
 * allocator, so we keep the ratio low to avoid OOM.
 */
#define QUARANTINE_FRACTION 32

/* Look up the cache an object belongs to from its (head) page. */
static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
{
	return virt_to_head_page(qlink)->slab_cache;
}

/* Recover the object's address from the quarantine link in its free meta. */
static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
{
	struct kasan_free_meta *free_info =
		container_of(qlink, struct kasan_free_meta,
			     quarantine_link);

	return ((void *)free_info) - cache->kasan_info.free_meta_offset;
}

/* Actually free one quarantined object back to the allocator. */
static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
	void *object = qlink_to_object(qlink, cache);
	unsigned long flags;

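	/* SLAB's ___cache_free() must be called with interrupts disabled. */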
	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_save(flags);

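	/*
	 * As the object now gets freed from the quarantine, assume that its
	 * free track is no longer valid.
	 */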
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_KMALLOC_FREE;
	___cache_free(cache, object, _THIS_IP_);

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_restore(flags);
}

/*
 * Free every object on @q. If @cache is NULL, the cache of each object is
 * looked up from its page instead.
 */
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
{
	struct qlist_node *qlink;

	if (unlikely(qlist_empty(q)))
		return;

	qlink = q->head;
	while (qlink) {
		struct kmem_cache *obj_cache =
			cache ? cache :	qlink_to_cache(qlink);
		struct qlist_node *next = qlink->next;

		qlink_free(qlink, obj_cache);
		qlink = next;
	}
	qlist_init(q);
}

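/*
 * Put a freed object on this CPU's quarantine queue instead of returning it
 * to the allocator. Once the queue grows past QUARANTINE_PERCPU_SIZE, its
 * contents are spliced into the global quarantine.
 */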
void quarantine_put(struct kmem_cache *cache, void *object)
{
	unsigned long flags;
	struct qlist_head *q;
	struct qlist_head temp = QLIST_INIT;
	struct kasan_free_meta *meta = kasan_get_free_meta(cache, object);

	/*
	 * Note: irq must be disabled until after we move the batch to the
	 * global quarantine. Otherwise quarantine_remove_cache() can miss
	 * some objects belonging to the cache if they are in our local temp
	 * list. quarantine_remove_cache() executes on_each_cpu() at the
	 * beginning which ensures that it either sees the objects in per-cpu
	 * lists or in the global quarantine.
	 */
	local_irq_save(flags);

	q = this_cpu_ptr(&cpu_quarantine);
	if (q->offline) {
		local_irq_restore(flags);
		return;
	}
	qlist_put(q, &meta->quarantine_link, cache->size);
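	/* The per-cpu queue overflowed: move it to the global quarantine. */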
	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
		qlist_move_all(q, &temp);

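		/* irqs are already disabled, so _irqsave isn't needed here. */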
		raw_spin_lock(&quarantine_lock);
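		/*
		 * quarantine_size is written under quarantine_lock and read
		 * locklessly (with READ_ONCE()) in quarantine_reduce().
		 */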
		WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
		qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
		if (global_quarantine[quarantine_tail].bytes >=
				READ_ONCE(quarantine_batch_size)) {
			int new_tail;

			new_tail = quarantine_tail + 1;
			if (new_tail == QUARANTINE_BATCHES)
				new_tail = 0;
			if (new_tail != quarantine_head)
				quarantine_tail = new_tail;
		}
		raw_spin_unlock(&quarantine_lock);
	}

	local_irq_restore(flags);
}

void quarantine_reduce(void)
{
	size_t total_size, new_quarantine_size, percpu_quarantines;
	unsigned long flags;
	int srcu_idx;
	struct qlist_head to_free = QLIST_INIT;

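	/* Fast path: the quarantine is within its limit. */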
	if (likely(READ_ONCE(quarantine_size) <=
		   READ_ONCE(quarantine_max_size)))
		return;

	/*
	 * srcu critical section ensures that quarantine_remove_cache()
	 * will not miss objects belonging to the cache while they are in our
	 * local to_free list. srcu is chosen because (1) it gives us a private
	 * grace period domain that does not interfere with anything else,
	 * and (2) it allows synchronize_srcu() to return without waiting
	 * if there are no pending read critical sections (which is the
	 * expected case).
	 */
	srcu_idx = srcu_read_lock(&remove_cache_srcu);
	raw_spin_lock_irqsave(&quarantine_lock, flags);

	/*
	 * Update the quarantine limits in case of memory hotplug: allow the
	 * quarantine a fraction of installed memory, minus the per-cpu queue
	 * limits.
	 */
	total_size = (totalram_pages() << PAGE_SHIFT) /
		QUARANTINE_FRACTION;
	percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
	new_quarantine_size = (total_size < percpu_quarantines) ?
		0 : total_size - percpu_quarantines;
	WRITE_ONCE(quarantine_max_size, new_quarantine_size);
	/* Aim to fill at most 1/2 of the batch slots in global_quarantine. */
	WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE,
		2 * total_size / QUARANTINE_BATCHES));

	/* Over the limit: free the oldest batch and advance the FIFO head. */
	if (likely(quarantine_size > quarantine_max_size)) {
		qlist_move_all(&global_quarantine[quarantine_head], &to_free);
		WRITE_ONCE(quarantine_size, quarantine_size - to_free.bytes);
		quarantine_head++;
		if (quarantine_head == QUARANTINE_BATCHES)
			quarantine_head = 0;
	}

	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	/* The freed batch can span many caches: look up each object's cache. */
	qlist_free_all(&to_free, NULL);
	srcu_read_unlock(&remove_cache_srcu, srcu_idx);
}

/*
 * Move all objects belonging to @cache from @from to @to; all other objects
 * stay on @from.
 */
static void qlist_move_cache(struct qlist_head *from,
				   struct qlist_head *to,
				   struct kmem_cache *cache)
{
	struct qlist_node *curr;

	if (unlikely(qlist_empty(from)))
		return;

	curr = from->head;
	qlist_init(from);
	while (curr) {
		struct qlist_node *next = curr->next;
		struct kmem_cache *obj_cache = qlink_to_cache(curr);

		if (obj_cache == cache)
			qlist_put(to, curr, obj_cache->size);
		else
			qlist_put(from, curr, obj_cache->size);

		curr = next;
	}
}

/* Free this CPU's quarantined objects that belong to @arg (a kmem_cache). */
static void per_cpu_remove_cache(void *arg)
{
	struct kmem_cache *cache = arg;
	struct qlist_head to_free = QLIST_INIT;
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	qlist_move_cache(q, &to_free, cache);
	qlist_free_all(&to_free, cache);
}

/*
 * Free all quarantined objects belonging to @cache; called when the cache
 * is shrunk or destroyed.
 */
void quarantine_remove_cache(struct kmem_cache *cache)
{
	unsigned long flags, i;
	struct qlist_head to_free = QLIST_INIT;

	/*
	 * Must be careful to not miss any objects that are being moved from
	 * per-cpu list to the global quarantine in quarantine_put(),
	 * nor objects being freed in quarantine_reduce(). on_each_cpu()
	 * achieves the first goal, while synchronize_srcu() achieves the
	 * second.
	 */
	on_each_cpu(per_cpu_remove_cache, cache, 1);

	raw_spin_lock_irqsave(&quarantine_lock, flags);
	for (i = 0; i < QUARANTINE_BATCHES; i++) {
		if (qlist_empty(&global_quarantine[i]))
			continue;
		qlist_move_cache(&global_quarantine[i], &to_free, cache);
		/* Scanning the whole quarantine can take a while. */
		raw_spin_unlock_irqrestore(&quarantine_lock, flags);
		cond_resched();
		raw_spin_lock_irqsave(&quarantine_lock, flags);
	}
	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, cache);

	synchronize_srcu(&remove_cache_srcu);
}

/* Mark this CPU's quarantine usable again once the CPU comes back online. */
static int kasan_cpu_online(unsigned int cpu)
{
	this_cpu_ptr(&cpu_quarantine)->offline = false;
	return 0;
}

static int kasan_cpu_offline(unsigned int cpu)
{
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	/*
	 * Ensure the ordering between the writing to q->offline and
	 * qlist_free_all(). Otherwise, cpu_quarantine may be corrupted
	 * by an interrupt.
	 */
	WRITE_ONCE(q->offline, true);
	barrier();
	qlist_free_all(q, NULL);
	return 0;
}

static int __init kasan_cpu_quarantine_init(void)
{
	int ret = 0;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mm/kasan:online",
				kasan_cpu_online, kasan_cpu_offline);
	if (ret < 0)
		pr_err("kasan cpu quarantine register failed [%d]\n", ret);
	return ret;
}
late_initcall(kasan_cpu_quarantine_init);