/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/mmu-44x.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_44x.h>

#include "44x_tlb.h"

#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)

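/* Next shadow TLB slot to evict; advanced round-robin by kvmppc_mmu_map(). */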
static unsigned int kvmppc_tlb_44x_pos;

#ifdef DEBUG
void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *tlbe;
	int i;

	printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
	printk("| %2s | %3s | %8s | %8s | %8s |\n",
			"nr", "tid", "word0", "word1", "word2");

	for (i = 0; i < PPC44x_TLB_SIZE; i++) {
		tlbe = &vcpu_44x->guest_tlb[i];
		if (tlbe->word0 & PPC44x_TLB_VALID)
			printk(" G%2d |  %02X | %08X | %08X | %08X |\n",
			       i, tlbe->tid, tlbe->word0, tlbe->word1,
			       tlbe->word2);
	}

	for (i = 0; i < PPC44x_TLB_SIZE; i++) {
		tlbe = &vcpu_44x->shadow_tlb[i];
		if (tlbe->word0 & PPC44x_TLB_VALID)
			printk(" S%2d | %02X | %08X | %08X | %08X |\n",
			       i, tlbe->tid, tlbe->word0, tlbe->word1,
			       tlbe->word2);
	}
}
#endif

static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
{
	/* Mask off reserved bits. */
	attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_ATTR_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode, so we need to translate guest
		 * supervisor permissions into user permissions. */
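		/* SX/SW/SR sit three bits below UX/UW/UR in word2, so a
		 * left shift by 3 copies each supervisor permission onto
		 * its user counterpart. */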
		attrib &= ~PPC44x_TLB_USER_PERM_MASK;
		attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
	}

	/* Make sure host can always access this memory. */
	attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;

	return attrib;
}

/* Search the guest TLB for a matching entry. */
int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
                         unsigned int as)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < PPC44x_TLB_SIZE; i++) {
		struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as)
			continue;

		return i;
	}

	return -1;
}

struct kvmppc_44x_tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu,
                                               gva_t eaddr)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	unsigned int as = !!(vcpu->arch.msr & MSR_IS);
	unsigned int index;

	index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
	if (index == -1)
		return NULL;
	return &vcpu_44x->guest_tlb[index];
}

struct kvmppc_44x_tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu,
                                               gva_t eaddr)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	unsigned int as = !!(vcpu->arch.msr & MSR_DS);
	unsigned int index;

	index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
	if (index == -1)
		return NULL;
	return &vcpu_44x->guest_tlb[index];
}

static int kvmppc_44x_tlbe_is_writable(struct kvmppc_44x_tlbe *tlbe)
{
	return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
}

static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
                                      unsigned int index)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[index];
	struct page *page = vcpu_44x->shadow_pages[index];

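	/* The guest may have dirtied the page through a writable mapping,
	 * so choose the release path accordingly. */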
	if (get_tlb_v(stlbe)) {
		if (kvmppc_44x_tlbe_is_writable(stlbe))
			kvm_release_page_dirty(page);
		else
			kvm_release_page_clean(page);
	}
}

void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu)
{
	int i;

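	/* tlb_44x_hwater is the highest hardware TLB index available to
	 * us; the entries above it are pinned by the host kernel. */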
	for (i = 0; i <= tlb_44x_hwater; i++)
		kvmppc_44x_shadow_release(vcpu, i);
}

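/* Mark a shadow TLB entry modified so the exit path knows it must be
 * rewritten into the hardware TLB before the guest runs again. */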
void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);

	vcpu_44x->shadow_tlb_mod[i] = 1;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
                    u32 flags)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct page *new_page;
	struct kvmppc_44x_tlbe *stlbe;
	hpa_t hpaddr;
	unsigned int victim;

	/* Future optimization: don't overwrite the TLB entry containing the
	 * current PC (or stack?). */
	victim = kvmppc_tlb_44x_pos++;
	if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
		kvmppc_tlb_44x_pos = 0;
	stlbe = &vcpu_44x->shadow_tlb[victim];

	/* Get reference to new page. */
	new_page = gfn_to_page(vcpu->kvm, gfn);
	if (is_error_page(new_page)) {
		printk(KERN_ERR "Couldn't get guest page for gfn %llx!\n",
		       (unsigned long long)gfn);
		kvm_release_page_clean(new_page);
		return;
	}
	hpaddr = page_to_phys(new_page);

	/* Drop reference to old page. */
	kvmppc_44x_shadow_release(vcpu, victim);

	vcpu_44x->shadow_pages[victim] = new_page;

	/* XXX Make sure (va, size) doesn't overlap any other
	 * entries. 440x6 user manual says the result would be
	 * "undefined." */

	/* XXX what about AS? */

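	/* Guest TID 0 (supervisor) entries get shadow TID 1; everything
	 * else gets the match-all TID 0, mirroring the shadow_pid =
	 * !usermode scheme in kvmppc_mmu_priv_switch(). */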
	stlbe->tid = !(asid & 0xff);

	/* Force TS=1 for all guest mappings. */
	/* For now we hardcode 4KB mappings, but it will be important to
	 * use host large pages in the future. */
	stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS
	               | PPC44x_TLB_4K;
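	/* word1 holds the RPN; its low nibble is the ERPN, the four bits
	 * that extend the 440's physical addresses to 36 bits. */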
	stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
	stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
	                                            vcpu->arch.msr & MSR_PR);
	kvmppc_tlbe_set_modified(vcpu, victim);

	KVMTRACE_5D(STLB_WRITE, vcpu, victim,
			stlbe->tid, stlbe->word0, stlbe->word1, stlbe->word2,
			handler);
}

static void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
                                  gva_t eend, u32 asid)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	unsigned int pid = !(asid & 0xff);
	int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i <= tlb_44x_hwater; i++) {
		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
		unsigned int tid;

		if (!get_tlb_v(stlbe))
			continue;

		if (eend < get_tlb_eaddr(stlbe))
			continue;

		if (eaddr > get_tlb_end(stlbe))
			continue;

		tid = get_tlb_tid(stlbe);
		if (tid && (tid != pid))
			continue;

		kvmppc_44x_shadow_release(vcpu, i);
		stlbe->word0 = 0;
		kvmppc_tlbe_set_modified(vcpu, i);
		KVMTRACE_5D(STLB_INVAL, vcpu, i,
				stlbe->tid, stlbe->word0, stlbe->word1,
				stlbe->word2, handler);
	}
}

/* Invalidate all mappings on the privilege switch after PID has been changed.
 * The guest always runs with PID=1, so we must clear the entire TLB when
 * switching address spaces. */
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	if (vcpu->arch.swap_pid) {
		/* XXX Replace loop with fancy data structures. */
		for (i = 0; i <= tlb_44x_hwater; i++) {
			struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

			/* Future optimization: clear only userspace mappings. */
			kvmppc_44x_shadow_release(vcpu, i);
			stlbe->word0 = 0;
			kvmppc_tlbe_set_modified(vcpu, i);
			KVMTRACE_5D(STLB_INVAL, vcpu, i,
			            stlbe->tid, stlbe->word0, stlbe->word1,
			            stlbe->word2, handler);
		}
		vcpu->arch.swap_pid = 0;
	}

	vcpu->arch.shadow_pid = !usermode;
}

static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
                             const struct kvmppc_44x_tlbe *tlbe)
{
	gpa_t gpa;

	if (!get_tlb_v(tlbe))
		return 0;

	/* Does it match current guest AS? */
	/* XXX what about IS != DS? */
	if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
		return 0;

	gpa = get_tlb_raddr(tlbe);
	if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
		/* Mapping is not for RAM. */
		return 0;

	return 1;
}

int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	u64 eaddr;
	u64 raddr;
	u64 asid;
	u32 flags;
	struct kvmppc_44x_tlbe *tlbe;
	unsigned int index;

	index = vcpu->arch.gpr[ra];
	if (index >= PPC44x_TLB_SIZE) {
		printk("%s: index %u\n", __func__, index);
		kvmppc_dump_vcpu(vcpu);
		return EMULATE_FAIL;
	}

	tlbe = &vcpu_44x->guest_tlb[index];

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (tlbe->word0 & PPC44x_TLB_VALID) {
		eaddr = get_tlb_eaddr(tlbe);
		asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
		kvmppc_mmu_invalidate(vcpu, eaddr, get_tlb_end(tlbe), asid);
	}

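	/* ws selects which word tlbwe writes: PAGEID also latches the TID
	 * from MMUCR[STID]; XLAT and ATTRIB touch word1 and word2. */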
	switch (ws) {
	case PPC44x_TLB_PAGEID:
		tlbe->tid = vcpu->arch.mmucr & 0xff;
		tlbe->word0 = vcpu->arch.gpr[rs];
		break;

	case PPC44x_TLB_XLAT:
		tlbe->word1 = vcpu->arch.gpr[rs];
		break;

	case PPC44x_TLB_ATTRIB:
		tlbe->word2 = vcpu->arch.gpr[rs];
		break;

	default:
		return EMULATE_FAIL;
	}

	if (tlbe_is_host_safe(vcpu, tlbe)) {
		eaddr = get_tlb_eaddr(tlbe);
		raddr = get_tlb_raddr(tlbe);
		asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
		flags = tlbe->word2 & 0xffff;

		/* Create a 4KB mapping on the host. If the guest wanted a
		 * large page, only the first 4KB is mapped here and the rest
		 * are mapped on the fly. */
		kvmppc_mmu_map(vcpu, eaddr, raddr >> PAGE_SHIFT, asid, flags);
	}

	KVMTRACE_5D(GTLB_WRITE, vcpu, index,
	            tlbe->tid, tlbe->word0, tlbe->word1, tlbe->word2,
	            handler);

	return EMULATE_DONE;
}

int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
{
	u32 ea;
	int index;
	unsigned int as = get_mmucr_sts(vcpu);
	unsigned int pid = get_mmucr_stid(vcpu);

	ea = vcpu->arch.gpr[rb];
	if (ra)
		ea += vcpu->arch.gpr[ra];

	index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
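	/* For the record form (tlbsx.), report hit or miss in CR0[EQ]. */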
	if (rc) {
		if (index < 0)
			vcpu->arch.cr &= ~0x20000000;
		else
			vcpu->arch.cr |= 0x20000000;
	}
	vcpu->arch.gpr[rt] = index;

	return EMULATE_DONE;
}