/*
 * mmu_audit.c:
 *
 * Audit code for KVM MMU
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/ratelimit.h>

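/*
 * Human-readable names for the audit points, indexed by
 * kvm->arch.audit_point.
 */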
char const *audit_point_name[] = {
	"pre page fault",
	"post page fault",
	"pre pte write",
	"post pte write",
	"pre sync",
	"post sync"
};

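/* Report an audit error, tagged with the name of the current audit point. */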
#define audit_printk(kvm, fmt, args...)		\
	printk(KERN_ERR "audit: (%s) error: "	\
		fmt, audit_point_name[kvm->arch.audit_point], ##args)

typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);

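/*
 * Recursively visit every SPTE in the shadow page 'sp', calling 'fn' on
 * each entry and descending into the child shadow page behind any present,
 * non-leaf entry.
 */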
static void __mmu_spte_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			    inspect_spte_fn fn, int level)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		u64 *ent = sp->spt;

		fn(vcpu, ent + i, level);

		if (is_shadow_present_pte(ent[i]) &&
		      !is_last_spte(ent[i], level)) {
			struct kvm_mmu_page *child;

			child = page_header(ent[i] & PT64_BASE_ADDR_MASK);
			__mmu_spte_walk(vcpu, child, fn, level - 1);
		}
	}
}

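/*
 * Walk every SPTE reachable from the vcpu's current paging structure:
 * either the single PT64_ROOT_LEVEL root or the four PAE roots, each of
 * which is walked from level 2.
 */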
static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;

	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		__mmu_spte_walk(vcpu, sp, fn, PT64_ROOT_LEVEL);
		return;
	}

	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root && VALID_PAGE(root)) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			__mmu_spte_walk(vcpu, sp, fn, 2);
		}
	}

	return;
}

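/* Invoke 'fn' on every shadow page on the VM's active_mmu_pages list. */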
typedef void (*sp_handler) (struct kvm *kvm, struct kvm_mmu_page *sp);

static void walk_all_active_sps(struct kvm *kvm, sp_handler fn)
{
	struct kvm_mmu_page *sp;

	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link)
		fn(kvm, sp);
}

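/*
 * Check that a present leaf SPTE points at the host page that currently
 * backs its gfn; an unsync shadow page is only expected at the last level.
 */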
static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	struct kvm_mmu_page *sp;
	gfn_t gfn;
	pfn_t pfn;
	hpa_t hpa;

	sp = page_header(__pa(sptep));

	if (sp->unsync) {
		if (level != PT_PAGE_TABLE_LEVEL) {
			audit_printk(vcpu->kvm, "unsync sp: %p "
				     "level = %d\n", sp, level);
			return;
		}
	}

	if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
		return;

	gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
	pfn = kvm_vcpu_gfn_to_pfn_atomic(vcpu, gfn);

	if (is_error_pfn(pfn))
		return;

	hpa = pfn << PAGE_SHIFT;
	if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
		audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx "
			     "ent %llx\n", vcpu->arch.mmu.root_level, pfn,
			     hpa, *sptep);
}

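/*
 * Check that the gfn mapped by this SPTE has a memslot and an rmap entry;
 * complaints are rate-limited.
 */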
static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
	struct kvm_rmap_head *rmap_head;
	struct kvm_mmu_page *rev_sp;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
	gfn_t gfn;

	rev_sp = page_header(__pa(sptep));
	gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);

	slots = kvm_memslots_for_spte_role(kvm, rev_sp->role);
	slot = __gfn_to_memslot(slots, gfn);
	if (!slot) {
		if (!__ratelimit(&ratelimit_state))
			return;
		audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
		audit_printk(kvm, "index %ld of sp (gfn=%llx)\n",
		       (long int)(sptep - rev_sp->spt), rev_sp->gfn);
		dump_stack();
		return;
	}

	rmap_head = __gfn_to_rmap(gfn, rev_sp->role.level, slot);
	if (!rmap_head->val) {
		if (!__ratelimit(&ratelimit_state))
			return;
		audit_printk(kvm, "no rmap for writable spte %llx\n",
			     *sptep);
		dump_stack();
	}
}

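/* Every present leaf SPTE must be reachable through its gfn's rmap. */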
static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	if (is_shadow_present_pte(*sptep) && is_last_spte(*sptep, level))
		inspect_spte_has_rmap(vcpu->kvm, sptep);
}

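/* After a root sync (AUDIT_POST_SYNC), no shadow page may still be unsync. */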
static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	struct kvm_mmu_page *sp = page_header(__pa(sptep));

	if (vcpu->kvm->arch.audit_point == AUDIT_POST_SYNC && sp->unsync)
		audit_printk(vcpu->kvm, "meet unsync sp(%p) after sync "
			     "root.\n", sp);
}

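/*
 * For a last-level shadow page, every SPTE that the rmap should track must
 * actually be reachable through an rmap entry.
 */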
static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	int i;

	if (sp->role.level != PT_PAGE_TABLE_LEVEL)
		return;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		if (!is_rmap_spte(sp->spt[i]))
			continue;

		inspect_spte_has_rmap(kvm, sp->spt + i);
	}
}

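/*
 * A synced, non-direct, valid shadow page shadows a guest page table, so
 * its gfn must be write-protected: flag any SPTE in that gfn's rmap that
 * is still writable.
 */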
static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_rmap_head *rmap_head;
	u64 *sptep;
	struct rmap_iterator iter;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;

	if (sp->role.direct || sp->unsync || sp->role.invalid)
		return;

	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, sp->gfn);
	rmap_head = __gfn_to_rmap(sp->gfn, PT_PAGE_TABLE_LEVEL, slot);

	for_each_rmap_spte(rmap_head, &iter, sptep) {
		if (is_writable_pte(*sptep))
			audit_printk(kvm, "shadow page has writable "
				     "mappings: gfn %llx role %x\n",
				     sp->gfn, sp->role.word);
	}
}

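/* Per-shadow-page checks, applied to every shadow page on the active list. */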
static void audit_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	check_mappings_rmap(kvm, sp);
	audit_write_protection(kvm, sp);
}

static void audit_all_active_sps(struct kvm *kvm)
{
	walk_all_active_sps(kvm, audit_sp);
}

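/* Per-SPTE checks, applied to every SPTE reachable from the vcpu's roots. */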
static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	audit_sptes_have_rmaps(vcpu, sptep, level);
	audit_mappings(vcpu, sptep, level);
	audit_spte_after_sync(vcpu, sptep, level);
}

static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
{
	mmu_spte_walk(vcpu, audit_spte);
}

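/*
 * Auditing is switched on and off through the "mmu_audit" parameter; the
 * static key keeps kvm_mmu_audit() a cheap no-op while auditing is
 * disabled, and __kvm_mmu_audit() itself is rate-limited.
 */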
static bool mmu_audit;
static struct static_key mmu_audit_key;

static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!__ratelimit(&ratelimit_state))
		return;

	vcpu->kvm->arch.audit_point = point;
	audit_all_active_sps(vcpu->kvm);
	audit_vcpu_spte(vcpu);
}

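/* Audit hook: does nothing unless auditing has been enabled. */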
static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
{
	if (static_key_false((&mmu_audit_key)))
		__kvm_mmu_audit(vcpu, point);
}

static void mmu_audit_enable(void)
{
	if (mmu_audit)
		return;

	static_key_slow_inc(&mmu_audit_key);
	mmu_audit = true;
}

static void mmu_audit_disable(void)
{
	if (!mmu_audit)
		return;

	static_key_slow_dec(&mmu_audit_key);
	mmu_audit = false;
}

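/*
 * Parameter setter: "1" enables auditing, "0" disables it; any other
 * value is rejected with -EINVAL.
 */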
static int mmu_audit_set(const char *val, const struct kernel_param *kp)
{
	int ret;
	unsigned long enable;

	ret = kstrtoul(val, 10, &enable);
	if (ret < 0)
		return -EINVAL;

	switch (enable) {
	case 0:
		mmu_audit_disable();
		break;
	case 1:
		mmu_audit_enable();
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct kernel_param_ops audit_param_ops = {
	.set = mmu_audit_set,
	.get = param_get_bool,
};

arch_param_cb(mmu_audit, &audit_param_ops, &mmu_audit, 0644);