/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/mmu-44x.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_44x.h>

#include "44x_tlb.h"

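/* The host normally defines PPC44x_TLBE_SIZE to match its own page size
 * (e.g. PPC44x_TLB_16K for a 16K-page kernel); fall back to 4K shadow
 * mappings if it didn't. */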
#ifndef PPC44x_TLBE_SIZE
#define PPC44x_TLBE_SIZE	PPC44x_TLB_4K
#endif

#define PAGE_SIZE_4K (1<<12)
#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1))
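/* Used when the guest mapping is smaller than the host page size; see
 * kvmppc_mmu_map(). */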

#define PPC44x_TLB_UATTR_MASK \
	(PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3)
#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)

static unsigned int kvmppc_tlb_44x_pos;

#ifdef DEBUG
void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *tlbe;
	int i;

	printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
	printk("| %2s | %3s | %8s | %8s | %8s |\n",
			"nr", "tid", "word0", "word1", "word2");

	for (i = 0; i < PPC44x_TLB_SIZE; i++) {
		tlbe = &vcpu_44x->guest_tlb[i];
		if (tlbe->word0 & PPC44x_TLB_VALID)
			printk(" G%2d |  %02X | %08X | %08X | %08X |\n",
			       i, tlbe->tid, tlbe->word0, tlbe->word1,
			       tlbe->word2);
	}

	for (i = 0; i < PPC44x_TLB_SIZE; i++) {
		tlbe = &vcpu_44x->shadow_tlb[i];
		if (tlbe->word0 & PPC44x_TLB_VALID)
			printk(" S%2d | %02X | %08X | %08X | %08X |\n",
			       i, tlbe->tid, tlbe->word0, tlbe->word1,
			       tlbe->word2);
	}
}
#endif

static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
{
	/* We only care about the guest's permission and user bits. */
	attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_UATTR_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode, so we need to translate guest
		 * supervisor permissions into user permissions. */
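		/* With the mmu-44x.h bit layout each supervisor permission bit
		 * sits three bits below its user counterpart, so the shift
		 * maps SX->UX, SW->UW and SR->UR. */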
		attrib &= ~PPC44x_TLB_USER_PERM_MASK;
		attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
	}

	/* Make sure host can always access this memory. */
	attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;

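	/* The guest's own WIMGE attributes were masked off above; shadow
	 * mappings are always cacheable (I=0) and coherent (M=1). */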
	/* WIMGE = 0b00100 */
	attrib |= PPC44x_TLB_M;

	return attrib;
}

/* Search the guest TLB for a matching entry. */
int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
                         unsigned int as)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < PPC44x_TLB_SIZE; i++) {
		struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
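		/* TID 0 marks a shared (global) mapping that matches any PID. */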
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as)
			continue;

		return i;
	}

	return -1;
}

struct kvmppc_44x_tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu,
                                               gva_t eaddr)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	unsigned int as = !!(vcpu->arch.msr & MSR_IS);
	int index;

	index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
	if (index == -1)
		return NULL;
	return &vcpu_44x->guest_tlb[index];
}

struct kvmppc_44x_tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu,
                                               gva_t eaddr)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	unsigned int as = !!(vcpu->arch.msr & MSR_DS);
	int index;

	index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
	if (index == -1)
		return NULL;
	return &vcpu_44x->guest_tlb[index];
}

static int kvmppc_44x_tlbe_is_writable(struct kvmppc_44x_tlbe *tlbe)
{
	return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
}

static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
                                      unsigned int index)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[index];
	struct page *page = vcpu_44x->shadow_pages[index];

	if (get_tlb_v(stlbe)) {
		if (kvmppc_44x_tlbe_is_writable(stlbe))
			kvm_release_page_dirty(page);
		else
			kvm_release_page_clean(page);
	}
}

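/* Drop all page references held by the shadow TLB. tlb_44x_hwater is the
 * host's TLB high-water mark; the slots above it hold the host kernel's
 * own pinned mappings, which we never touch. */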
void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++)
		kvmppc_44x_shadow_release(vcpu, i);
}

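/* Flag a shadow TLB entry as modified so it is rewritten into the hardware
 * TLB the next time this vcpu is loaded. */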
void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);

	vcpu_44x->shadow_tlb_mod[i] = 1;
}

/**
 * kvmppc_mmu_map -- create a host mapping for guest memory
 *
 * If the guest wanted a larger page than the host supports, only the first
 * host page is mapped here and the rest are demand faulted.
 *
 * If the guest wanted a smaller page than the host page size, we map only the
 * guest-size page (i.e. not a full host page mapping).
 *
 * Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB.
 */
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid,
                    u32 flags, u32 max_bytes)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct page *new_page;
	struct kvmppc_44x_tlbe *stlbe;
	hpa_t hpaddr;
	gfn_t gfn;
	unsigned int victim;

	/* Future optimization: don't overwrite the TLB entry containing the
	 * current PC (or stack?). */
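	/* Simple round-robin replacement across the usable shadow slots. */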
	victim = kvmppc_tlb_44x_pos++;
	if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
		kvmppc_tlb_44x_pos = 0;
	stlbe = &vcpu_44x->shadow_tlb[victim];

	/* Get reference to new page. */
	gfn = gpaddr >> PAGE_SHIFT;
	new_page = gfn_to_page(vcpu->kvm, gfn);
	if (is_error_page(new_page)) {
		printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
		kvm_release_page_clean(new_page);
		return;
	}
	hpaddr = page_to_phys(new_page);

	/* Drop reference to old page. */
	kvmppc_44x_shadow_release(vcpu, victim);

	vcpu_44x->shadow_pages[victim] = new_page;

	/* XXX Make sure (va, size) doesn't overlap any other
	 * entries. 440x6 user manual says the result would be
	 * "undefined." */

	/* XXX what about AS? */

	stlbe->tid = !(asid & 0xff);

	/* Force TS=1 for all guest mappings. */
	stlbe->word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS;

	if (max_bytes >= PAGE_SIZE) {
		/* Guest mapping is larger than or equal to host page size. We can use
		 * a "native" host mapping. */
		stlbe->word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE;
	} else {
		/* Guest mapping is smaller than host page size. We must restrict the
		 * size of the mapping to be at most the smaller of the two, but for
		 * simplicity we fall back to a 4K mapping (this is probably what the
		 * guest is using anyways). */
		stlbe->word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K;

		/* 'hpaddr' is a host page, which is larger than the mapping we're
		 * inserting here. To compensate, we must add the in-page offset to the
		 * sub-page. */
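		/* For example, with 64K host pages PAGE_MASK ^ PAGE_MASK_4K is
		 * 0xf000, i.e. the bits selecting the 4K sub-page within the
		 * host page. */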
		hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K);
	}

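	/* word1 holds RPN bits 0:21 of the real address plus, in its low
	 * nibble, the ERPN bits that extend the 440 to 36-bit physical
	 * addressing. */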
	stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
	stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
	                                            vcpu->arch.msr & MSR_PR);
	kvmppc_tlbe_set_modified(vcpu, victim);

	KVMTRACE_5D(STLB_WRITE, vcpu, victim,
			stlbe->tid, stlbe->word0, stlbe->word1, stlbe->word2,
			handler);
}

static void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
                                  gva_t eend, u32 asid)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	unsigned int pid = !(asid & 0xff);
	int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i <= tlb_44x_hwater; i++) {
		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
		unsigned int tid;

		if (!get_tlb_v(stlbe))
			continue;

		if (eend < get_tlb_eaddr(stlbe))
			continue;

		if (eaddr > get_tlb_end(stlbe))
			continue;

		tid = get_tlb_tid(stlbe);
		if (tid && (tid != pid))
			continue;

		kvmppc_44x_shadow_release(vcpu, i);
		stlbe->word0 = 0;
		kvmppc_tlbe_set_modified(vcpu, i);
		KVMTRACE_5D(STLB_INVAL, vcpu, i,
				stlbe->tid, stlbe->word0, stlbe->word1,
				stlbe->word2, handler);
	}
}

void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
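	/* Host PID 0 matches only shadow TID=0 entries; PID 1 additionally
	 * matches the TID=1 entries shadowing the guest kernel's global
	 * mappings. */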
	vcpu->arch.shadow_pid = !usermode;
}

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	if (unlikely(vcpu->arch.pid == new_pid))
		return;

	vcpu->arch.pid = new_pid;

	/* Guest userspace runs with TID=0 mappings and PID=0, to make sure it
	 * can't access guest kernel mappings (TID=1). When we switch to a new
	 * guest PID, which will also use host PID=0, we must discard the old guest
	 * userspace mappings. */
	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_tlb); i++) {
		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

		if (get_tlb_tid(stlbe) == 0) {
			kvmppc_44x_shadow_release(vcpu, i);
			stlbe->word0 = 0;
			kvmppc_tlbe_set_modified(vcpu, i);
		}
	}
}

static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
                             const struct kvmppc_44x_tlbe *tlbe)
{
	gpa_t gpa;

	if (!get_tlb_v(tlbe))
		return 0;

	/* Does it match current guest AS? */
	/* XXX what about IS != DS? */
	if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
		return 0;

	gpa = get_tlb_raddr(tlbe);
	if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
		/* Mapping is not for RAM. */
		return 0;

	return 1;
}

int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	gva_t eaddr;
	u64 asid;
	struct kvmppc_44x_tlbe *tlbe;
	unsigned int index;

	index = vcpu->arch.gpr[ra];
	if (index >= PPC44x_TLB_SIZE) {
		printk("%s: index %u\n", __func__, index);
		kvmppc_dump_vcpu(vcpu);
		return EMULATE_FAIL;
	}

	tlbe = &vcpu_44x->guest_tlb[index];

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (tlbe->word0 & PPC44x_TLB_VALID) {
		eaddr = get_tlb_eaddr(tlbe);
		asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
		kvmppc_mmu_invalidate(vcpu, eaddr, get_tlb_end(tlbe), asid);
	}

	switch (ws) {
	case PPC44x_TLB_PAGEID:
		tlbe->tid = get_mmucr_stid(vcpu);
		tlbe->word0 = vcpu->arch.gpr[rs];
		break;

	case PPC44x_TLB_XLAT:
		tlbe->word1 = vcpu->arch.gpr[rs];
		break;

	case PPC44x_TLB_ATTRIB:
		tlbe->word2 = vcpu->arch.gpr[rs];
		break;

	default:
		return EMULATE_FAIL;
	}

	if (tlbe_is_host_safe(vcpu, tlbe)) {
		gpa_t gpaddr;
		u32 flags;
		u32 bytes;

		eaddr = get_tlb_eaddr(tlbe);
		gpaddr = get_tlb_raddr(tlbe);

		/* Use the advertised page size to mask effective and real addrs. */
		bytes = get_tlb_bytes(tlbe);
		eaddr &= ~(bytes - 1);
		gpaddr &= ~(bytes - 1);

		asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
		flags = tlbe->word2 & 0xffff;

		kvmppc_mmu_map(vcpu, eaddr, gpaddr, asid, flags, bytes);
	}

	KVMTRACE_5D(GTLB_WRITE, vcpu, index,
	            tlbe->tid, tlbe->word0, tlbe->word1, tlbe->word2,
	            handler);

	return EMULATE_DONE;
}

int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
{
	u32 ea;
	int index;
	unsigned int as = get_mmucr_sts(vcpu);
	unsigned int pid = get_mmucr_stid(vcpu);

	ea = vcpu->arch.gpr[rb];
	if (ra)
		ea += vcpu->arch.gpr[ra];

	index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
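	/* Record form (Rc=1): set or clear CR0[EQ] (mask 0x20000000)
	 * depending on whether a matching entry was found. */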
	if (rc) {
		if (index < 0)
			vcpu->arch.cr &= ~0x20000000;
		else
			vcpu->arch.cr |= 0x20000000;
	}
	vcpu->arch.gpr[rt] = index;

	return EMULATE_DONE;
}