/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>

#include "trace.h"

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

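/* Shadow PTEs map 4k pages, so hash keys drop the low 12 offset bits */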
#define PTE_SIZE	12

/* #define DEBUG_MMU */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while (0)
#endif

static struct kmem_cache *hpte_cache;

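/*
 * Every shadow PTE is kept on four hash lists at once: by effective
 * address, by effective address range, by virtual page and by virtual
 * page range, matching the flush paths further down.
 */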
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
{
	return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
		       HPTEG_HASH_BITS_PTE_LONG);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
	return hash_64((vpage & 0xffffff000ULL) >> 12,
		       HPTEG_HASH_BITS_VPTE_LONG);
}

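/*
 * Hook a freshly mapped shadow PTE into all four hash lists. Updates
 * are serialized by mmu_lock, while readers walk the lists under
 * rcu_read_lock().
 */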
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index;

	trace_kvm_book3s_mmu_map(pte);

	spin_lock(&vcpu->arch.mmu_lock);

	/* Add to ePTE list */
	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);

	/* Add to ePTE_long list */
	index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte_long,
			   &vcpu->arch.hpte_hash_pte_long[index]);

	/* Add to vPTE list */
	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);

	/* Add to vPTE_long list */
	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_long,
			   &vcpu->arch.hpte_hash_vpte_long[index]);

	spin_unlock(&vcpu->arch.mmu_lock);
}

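/* Deferred free: runs once all RCU readers have dropped the entry */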
static void free_pte_rcu(struct rcu_head *head)
{
	struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);
	kmem_cache_free(hpte_cache, pte);
}

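/*
 * Drop a single shadow PTE: remove the host mapping, unhash the entry
 * from all four lists, release the backing page and free the struct
 * after an RCU grace period.
 */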
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	/* pte already invalidated? */
	if (hlist_unhashed(&pte->list_pte))
		return;

	trace_kvm_book3s_mmu_invalidate(pte);

	/* Different for 32 and 64 bit */
	kvmppc_mmu_invalidate_pte(vcpu, pte);

	spin_lock(&vcpu->arch.mmu_lock);

	hlist_del_init_rcu(&pte->list_pte);
	hlist_del_init_rcu(&pte->list_pte_long);
	hlist_del_init_rcu(&pte->list_vpte);
	hlist_del_init_rcu(&pte->list_vpte_long);

	spin_unlock(&vcpu->arch.mmu_lock);

	if (pte->pte.may_write)
		kvm_release_pfn_dirty(pte->pfn);
	else
		kvm_release_pfn_clean(pte->pfn);

	vcpu->arch.hpte_cache_count--;
	call_rcu(&pte->rcu_head, free_pte_rcu);
}

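/* Invalidate every shadow PTE by walking one full hash table */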
static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
	struct hpte_cache *pte;
	struct hlist_node *node;
	int i;

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
			invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

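/* Flush shadow PTEs of a single 4k page, keyed by effective address */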
static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_pte)
		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

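/* Flush shadow PTEs whose masked effective address (0x0ffff000) matches */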
static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu->arch.hpte_hash_pte_long[
			kvmppc_mmu_hash_pte_long(guest_ea)];
	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_pte_long)
		if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

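/*
 * Flush by effective address: the mask selects one of the three
 * supported granularities (page, masked range or everything).
 */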
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
		    vcpu->arch.hpte_cache_count, guest_ea, ea_mask);

	guest_ea &= ea_mask;

	switch (ea_mask) {
	case ~0xfffUL:
		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
		break;
	case 0x0ffff000:
		kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
		break;
	case 0:
		/* Doing a complete flush -> start from scratch */
		kvmppc_mmu_pte_flush_all(vcpu);
		break;
	default:
		WARN_ON(1);
		break;
	}
}

/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;
	u64 vp_mask = 0xfffffffffULL;

	list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_vpte)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffff000ULL;

	list = &vcpu->arch.hpte_hash_vpte_long[
		kvmppc_mmu_hash_vpte_long(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
		    vcpu->arch.hpte_cache_count, guest_vp, vp_mask);
	guest_vp &= vp_mask;

	switch (vp_mask) {
	case 0xfffffffffULL:
		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
		break;
	case 0xffffff000ULL:
		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
		break;
	default:
		WARN_ON(1);
		return;
	}
}

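/*
 * Flush by guest physical address range. There is no hash keyed by
 * real address, so scan the complete vPTE_long table instead.
 */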
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	struct hlist_node *node;
	struct hpte_cache *pte;
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx - 0x%lx\n",
		    vcpu->arch.hpte_cache_count, pa_start, pa_end);

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
			if ((pte->pte.raddr >= pa_start) &&
			    (pte->pte.raddr < pa_end))
				invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

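/*
 * Grab a zeroed shadow PTE from the slab; once the cache fills up,
 * flush everything and start over. May return NULL if the slab
 * allocation fails, so callers need to check the result.
 */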
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	struct hpte_cache *pte;

	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
	vcpu->arch.hpte_cache_count++;

	if (vcpu->arch.hpte_cache_count == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush_all(vcpu);

	return pte;
}

void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
}

static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
	int i;

	for (i = 0; i < len; i++)
		INIT_HLIST_HEAD(&hash_list[i]);
}

int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
	/* init hpte lookup hashes */
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_pte));
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte_long,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_pte_long));
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte));
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long));

	spin_lock_init(&vcpu->arch.mmu_lock);

	return 0;
}

int kvmppc_mmu_hpte_sysinit(void)
{
	/* init hpte slab cache */
	hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
				       sizeof(struct hpte_cache), 0, NULL);
	if (!hpte_cache)
		return -ENOMEM;

	return 0;
}

void kvmppc_mmu_hpte_sysexit(void)
{
	kmem_cache_destroy(hpte_cache);
}