book3s_64_vio_hv.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>

#define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))
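/* e.g. 4096 / 8 = 512 TCE entries per 4K host page, or 8192 per 64K page */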

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 *          mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

/*
 * Validates IO address.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long ioba, unsigned long npages)
{
	unsigned long mask = (1ULL << stt->page_shift) - 1;
	unsigned long idx = ioba >> stt->page_shift;

	if ((ioba & mask) || (idx < stt->offset) ||
			(idx - stt->offset + npages > stt->size) ||
			(idx + npages < idx))
		return H_PARAMETER;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_ioba_validate);
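
/*
 * Worked example with hypothetical values: for page_shift = 16 (64K
 * IOMMU pages), offset = 0 and size = 256, ioba = 0x30000 with
 * npages = 2 gives idx = 3 and passes every check above, while
 * ioba = 0x30800 fails the (ioba & mask) alignment test and returns
 * H_PARAMETER.
 */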

/*
 * Validates TCE address.
 * At the moment only the flags and the page mask are validated.
 * As the host kernel does not access those addresses (it just puts them
 * into the table and user space is supposed to process them), we can skip
 * checking other things (such as whether the TCE points at guest RAM or
 * whether the page was actually allocated).
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
{
	unsigned long page_mask = ~((1ULL << stt->page_shift) - 1);
	unsigned long mask = ~(page_mask | TCE_PCI_WRITE | TCE_PCI_READ);

	if (tce & mask)
		return H_PARAMETER;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_validate);
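
/*
 * Worked example with hypothetical values: for page_shift = 12, mask
 * works out to 0xffc, so a TCE of 0x12345002 (a 4K-aligned address plus
 * TCE_PCI_WRITE) passes, while 0x12345800 has a bit set outside both
 * the page mask and the permission bits and returns H_PARAMETER.
 */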

/* Note on the use of page_address() in real mode:
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address()
 * which returns __va(PFN_PHYS(page_to_pfn(page))); this is pure
 * arithmetic and does not access the page struct.
 *
 * Theoretically page_address() could be defined differently,
 * but then either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
 * would have to be enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64;
 * HASHED_PAGE_VIRTUAL can be enabled on ppc32 only, and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
	return (u64 *) page_address(page);
}
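
/*
 * Illustration (assuming the usual ppc64 linear map base of
 * 0xc000000000000000): for a page with pfn 0x1234 and 4K pages, the
 * function above returns __va(0x1234 << 12) = 0xc000000001234000,
 * computed purely from the pfn without dereferencing struct page
 * fields.
 */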

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Called in both real and virtual modes.
 * It cannot fail, so kvmppc_tce_validate must be called before it.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;

	idx -= stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = kvmppc_page_address(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_put);
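
/*
 * Index math example with hypothetical values: with 4K host pages
 * (TCES_PER_PAGE = 512), an adjusted idx of 1000 lands in
 * stt->pages[1] at slot 488 (1000 / 512 = 1, 1000 % 512 = 488).
 */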

long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
		unsigned long *ua, unsigned long **prmap)
{
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (prmap)
		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
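
/*
 * Example with hypothetical values: for gpa = 0x40001003 in a memslot
 * with base_gfn = 0x40000 and userspace_addr = 0x7f0000000000, ua
 * becomes 0x7f0000001000; the low bits 0x003 are dropped because they
 * are the TCE_PCI_READ and TCE_PCI_WRITE permission bits.
 */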

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	kvmppc_tce_put(stt, ioba >> stt->page_shift, tce);

	return H_SUCCESS;
}

static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
		unsigned long ua, unsigned long *phpa)
{
	pte_t *ptep, pte;
	unsigned shift = 0;

	ptep = __find_linux_pte_or_hugepte(vcpu->arch.pgdir, ua, NULL, &shift);
	if (!ptep || !pte_present(*ptep))
		return -ENXIO;
	pte = *ptep;

	if (!shift)
		shift = PAGE_SHIFT;

	/* Avoid handling anything potentially complicated in realmode */
	if (shift > PAGE_SHIFT)
		return -EAGAIN;

	if (!pte_young(pte))
		return -EAGAIN;

	/*
	 * The checks above guarantee shift == PAGE_SHIFT here, so the two
	 * offset terms below are identical: the offset of ua within its
	 * page is OR-ed into the page physical address.
	 */
	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
			(ua & ~PAGE_MASK);

	return 0;
}

long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list,	unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long *rmap = NULL;
	bool prereg = false;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The spec says that the maximum size of the list is 512 TCEs,
	 * so the whole list (512 * 8 bytes) fits within a single 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
		/*
		 * We get here if guest memory was pre-registered, which is
		 * normally the VFIO case, so the gpa->hpa translation does
		 * not depend on the HPT.
		 */
		struct mm_iommu_table_group_mem_t *mem;

		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
			return H_TOO_HARD;

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
	}

	if (!prereg) {
		/*
		 * The TCE list is not in preregistered memory, which is
		 * usually the case for a guest with emulated devices only.
		 * We do not require memory to be preregistered in this case,
		 * so lock the rmap and use __find_linux_pte_or_hugepte().
		 */
		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
			return H_TOO_HARD;

		rmap = (void *) vmalloc_to_phys(rmap);

		/*
		 * Synchronize with the MMU notifier callbacks in
		 * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.).
		 * While we have the rmap lock, code running on other CPUs
		 * cannot finish unmapping the host real page that backs
		 * this guest real page, so we are OK to access the host
		 * real page.
		 */
		lock_rmap(rmap);
		if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
	}

	for (i = 0; i < npages; ++i) {
		/* TCE list entries are big-endian per the hypercall ABI */
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	if (rmap)
		unlock_rmap(rmap);

	return ret;
}

long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only, to allow userspace to poison TCEs for debugging */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return H_SUCCESS;
}
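
/*
 * Example with hypothetical values: on a 4K-page table, ioba = 0x10000,
 * tce_value = 0 and npages = 16 clear the sixteen entries at indexes
 * 0x10..0x1f, advancing ioba by one IOMMU page per iteration.
 */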

long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = (u64 *)page_address(page);

	/* Return the TCE value to the guest in r4, per the hcall convention */
	vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */