/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#define PTE_SIZE	12	/* 4k pages: shift an address right by this to get its page number */

/* #define DEBUG_MMU */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while (0)
#endif

static struct kmem_cache *hpte_cache;
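
/*
 * Shadow PTEs are kept on three hash lists at once: hashed by guest
 * effective address (ePTE), by guest virtual page (vPTE), and by a
 * coarser vPTE "long" hash that groups 4096 virtual pages per bucket.
 * List walks happen under RCU; list updates are serialized by
 * vcpu->arch.mmu_lock.
 */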

static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
	return hash_64((vpage & 0xffffff000ULL) >> 12,
		       HPTEG_HASH_BITS_VPTE_LONG);
}

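/*
 * Insert a shadow PTE into all three hash lists so that the flush
 * paths below can find it again by effective address or virtual page.
 */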
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index;

	spin_lock(&vcpu->arch.mmu_lock);

	/* Add to ePTE list */
	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);

	/* Add to vPTE list */
	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);

	/* Add to vPTE_long list */
	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_long,
			   &vcpu->arch.hpte_hash_vpte_long[index]);

	spin_unlock(&vcpu->arch.mmu_lock);
}

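/* RCU callback: free the entry once all concurrent list walkers are done */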
static void free_pte_rcu(struct rcu_head *head)
{
	struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);
	kmem_cache_free(hpte_cache, pte);
}

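/*
 * Drop one shadow PTE: remove the host mapping, unhash the entry from
 * all three lists under mmu_lock, release the backing page and free
 * the entry after an RCU grace period.
 */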
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	/* pte already invalidated? */
	if (hlist_unhashed(&pte->list_pte))
		return;

	dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);

	/* Different for 32 and 64 bit */
	kvmppc_mmu_invalidate_pte(vcpu, pte);

	spin_lock(&vcpu->arch.mmu_lock);

	hlist_del_init_rcu(&pte->list_pte);
	hlist_del_init_rcu(&pte->list_vpte);
	hlist_del_init_rcu(&pte->list_vpte_long);

	spin_unlock(&vcpu->arch.mmu_lock);

	if (pte->pte.may_write)
		kvm_release_pfn_dirty(pte->pfn);
	else
		kvm_release_pfn_clean(pte->pfn);

	vcpu->arch.hpte_cache_count--;
	call_rcu(&pte->rcu_head, free_pte_rcu);
}

static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
	struct hpte_cache *pte;
	struct hlist_node *node;
	int i;

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
			invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

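/* Flush every shadow PTE that maps the given guest effective page */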
static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_pte)
		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

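/*
 * Flush shadow PTEs whose effective address matches guest_ea under
 * ea_mask. Only the three masks the MMU emulation actually uses are
 * handled: single page, 32-bit page without segment, and full flush.
 */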
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	u64 i;

	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
		    vcpu->arch.hpte_cache_count, guest_ea, ea_mask);

	guest_ea &= ea_mask;

	switch (ea_mask) {
	case ~0xfffUL:
		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
		break;
	case 0x0ffff000:
		/* 32-bit flush w/o segment, go through all possible segments */
		for (i = 0; i < 0x100000000ULL; i += 0x10000000ULL)
			kvmppc_mmu_pte_flush(vcpu, guest_ea | i, ~0xfffUL);
		break;
	case 0:
		/* Doing a complete flush -> start from scratch */
		kvmppc_mmu_pte_flush_all(vcpu);
		break;
	default:
		WARN_ON(1);
		break;
	}
}

/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;
	u64 vp_mask = 0xfffffffffULL;

	list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_vpte)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffff000ULL;

	list = &vcpu->arch.hpte_hash_vpte_long[
		kvmppc_mmu_hash_vpte_long(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

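/* Flush shadow PTEs by guest virtual page, dispatching on vp_mask */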
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
		    vcpu->arch.hpte_cache_count, guest_vp, vp_mask);
	guest_vp &= vp_mask;

	switch (vp_mask) {
	case 0xfffffffffULL:
		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
		break;
	case 0xffffff000ULL:
		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
		break;
	default:
		WARN_ON(1);
		return;
	}
}

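/*
 * Flush all shadow PTEs backed by guest physical addresses in
 * [pa_start, pa_end). There is no hash by physical address, so this
 * walks every vPTE_long bucket.
 */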
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	struct hlist_node *node;
	struct hpte_cache *pte;
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx - 0x%lx\n",
		    vcpu->arch.hpte_cache_count, pa_start, pa_end);

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
			if ((pte->pte.raddr >= pa_start) &&
			    (pte->pte.raddr < pa_end))
				invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

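/*
 * Allocate a shadow PTE cache entry. Once the cache reaches
 * HPTEG_CACHE_NUM entries, everything is flushed to bound its size.
 */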
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	struct hpte_cache *pte;

	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
	if (!pte)
		return NULL;

	vcpu->arch.hpte_cache_count++;

	if (vcpu->arch.hpte_cache_count == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush_all(vcpu);

	return pte;
}

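/* Per-vcpu teardown: a flush with mask 0 drops every cached shadow PTE */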
void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
}

static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
	int i;

	for (i = 0; i < len; i++)
		INIT_HLIST_HEAD(&hash_list[i]);
}

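/* Per-vcpu setup: empty all lookup hashes and initialize the list lock */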
int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
	/* init hpte lookup hashes */
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_pte));
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte));
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long));

	spin_lock_init(&vcpu->arch.mmu_lock);

	return 0;
}

int kvmppc_mmu_hpte_sysinit(void)
{
	/* init hpte slab cache */
	hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
				       sizeof(struct hpte_cache), 0, NULL);
	if (!hpte_cache)
		return -ENOMEM;

	return 0;
}

void kvmppc_mmu_hpte_sysexit(void)
{
	kmem_cache_destroy(hpte_cache);
}