// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2017-2018 Hisilicon Limited. * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define SVM_DEVICE_NAME "svm" #define ASID_SHIFT 48 #define SVM_IOCTL_REMAP_PROC 0xfff4 #define SVM_IOCTL_LOAD_FLAG 0xfffa #define SVM_IOCTL_PROCESS_BIND 0xffff #define CORE_SID 0 #define SVM_IOCTL_RELEASE_PHYS32 0xfff3 #define SVM_REMAP_MEM_LEN_MAX (16 * 1024 * 1024) #define MMAP_PHY32_MAX (16 * 1024 * 1024) static int probe_index; static LIST_HEAD(child_list); static DECLARE_RWSEM(svm_sem); static struct rb_root svm_process_root = RB_ROOT; static struct mutex svm_process_mutex; struct core_device { struct device dev; struct iommu_group *group; struct iommu_domain *domain; u8 smmu_bypass; struct list_head entry; }; struct svm_device { unsigned long long id; struct miscdevice miscdev; struct device *dev; phys_addr_t l2buff; unsigned long l2size; }; struct svm_bind_process { pid_t vpid; u64 ttbr; u64 tcr; int pasid; u32 flags; #define SVM_BIND_PID (1 << 0) }; /* *svm_process is released in svm_notifier_release() when mm refcnt *goes down zero. We should access svm_process only in the context *where mm_struct is valid, which means we should always get mm *refcnt first. */ struct svm_process { struct pid *pid; struct mm_struct *mm; unsigned long asid; struct rb_node rb_node; struct mmu_notifier notifier; /* For postponed release */ struct rcu_head rcu; int pasid; struct mutex mutex; struct rb_root sdma_list; struct svm_device *sdev; struct iommu_sva *sva; }; struct svm_proc_mem { u32 dev_id; u32 len; u64 pid; u64 vaddr; u64 buf; }; static char *svm_cmd_to_string(unsigned int cmd) { switch (cmd) { case SVM_IOCTL_PROCESS_BIND: return "bind"; case SVM_IOCTL_REMAP_PROC: return "remap proc"; case SVM_IOCTL_LOAD_FLAG: return "load flag"; case SVM_IOCTL_RELEASE_PHYS32: return "release phys"; default: return "unsupported"; } return NULL; } static struct svm_process *find_svm_process(unsigned long asid) { struct rb_node *node = svm_process_root.rb_node; while (node) { struct svm_process *process = NULL; process = rb_entry(node, struct svm_process, rb_node); if (asid < process->asid) node = node->rb_left; else if (asid > process->asid) node = node->rb_right; else return process; } return NULL; } static void insert_svm_process(struct svm_process *process) { struct rb_node **p = &svm_process_root.rb_node; struct rb_node *parent = NULL; while (*p) { struct svm_process *tmp_process = NULL; parent = *p; tmp_process = rb_entry(parent, struct svm_process, rb_node); if (process->asid < tmp_process->asid) p = &(*p)->rb_left; else if (process->asid > tmp_process->asid) p = &(*p)->rb_right; else { WARN_ON_ONCE("asid already in the tree"); return; } } rb_link_node(&process->rb_node, parent, p); rb_insert_color(&process->rb_node, &svm_process_root); } static void delete_svm_process(struct svm_process *process) { rb_erase(&process->rb_node, &svm_process_root); RB_CLEAR_NODE(&process->rb_node); } static struct svm_device *file_to_sdev(struct file *file) { return container_of(file->private_data, struct svm_device, miscdev); } static inline struct core_device *to_core_device(struct device *d) { return container_of(d, struct core_device, dev); } static int svm_acpi_bind_core(struct core_device *cdev, void *data) { struct task_struct *task = NULL; struct svm_process *process = data; if (cdev->smmu_bypass) return 0; task = get_pid_task(process->pid, PIDTYPE_PID); if (!task) { pr_err("failed to get task_struct\n"); return -ESRCH; } process->sva = iommu_sva_bind_device(&cdev->dev, task->mm, NULL); if (!process->sva) { pr_err("failed to bind device\n"); return PTR_ERR(process->sva); } process->pasid = task->mm->pasid; put_task_struct(task); return 0; } static int svm_dt_bind_core(struct device *dev, void *data) { struct task_struct *task = NULL; struct svm_process *process = data; struct core_device *cdev = to_core_device(dev); if (cdev->smmu_bypass) return 0; task = get_pid_task(process->pid, PIDTYPE_PID); if (!task) { pr_err("failed to get task_struct\n"); return -ESRCH; } process->sva = iommu_sva_bind_device(dev, task->mm, NULL); if (!process->sva) { pr_err("failed to bind device\n"); return PTR_ERR(process->sva); } process->pasid = task->mm->pasid; put_task_struct(task); return 0; } static void svm_dt_bind_cores(struct svm_process *process) { device_for_each_child(process->sdev->dev, process, svm_dt_bind_core); } static void svm_acpi_bind_cores(struct svm_process *process) { struct core_device *pos = NULL; list_for_each_entry(pos, &child_list, entry) { svm_acpi_bind_core(pos, process); } } static void svm_process_free(struct mmu_notifier *mn) { struct svm_process *process = NULL; process = container_of(mn, struct svm_process, notifier); arm64_mm_context_put(process->mm); kfree(process); } static void svm_process_release(struct svm_process *process) { delete_svm_process(process); put_pid(process->pid); mmu_notifier_put(&process->notifier); } static void svm_notifier_release(struct mmu_notifier *mn, struct mm_struct *mm) { struct svm_process *process = NULL; process = container_of(mn, struct svm_process, notifier); /* * No need to call svm_unbind_cores(), as iommu-sva will do the * unbind in its mm_notifier callback. */ mutex_lock(&svm_process_mutex); svm_process_release(process); mutex_unlock(&svm_process_mutex); } static struct mmu_notifier_ops svm_process_mmu_notifier = { .release = svm_notifier_release, .free_notifier = svm_process_free, }; static struct svm_process * svm_process_alloc(struct svm_device *sdev, struct pid *pid, struct mm_struct *mm, unsigned long asid) { struct svm_process *process = kzalloc(sizeof(*process), GFP_ATOMIC); if (!process) return ERR_PTR(-ENOMEM); process->sdev = sdev; process->pid = pid; process->mm = mm; process->asid = asid; process->sdma_list = RB_ROOT; //lint !e64 mutex_init(&process->mutex); process->notifier.ops = &svm_process_mmu_notifier; return process; } static struct task_struct *svm_get_task(struct svm_bind_process params) { struct task_struct *task = NULL; if (params.flags & ~SVM_BIND_PID) return ERR_PTR(-EINVAL); if (params.flags & SVM_BIND_PID) { struct mm_struct *mm = NULL; rcu_read_lock(); task = find_task_by_vpid(params.vpid); if (task) get_task_struct(task); rcu_read_unlock(); if (task == NULL) return ERR_PTR(-ESRCH); /* check the permission */ mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS); if (IS_ERR_OR_NULL(mm)) { pr_err("cannot access mm\n"); put_task_struct(task); return ERR_PTR(-ESRCH); } mmput(mm); } else { get_task_struct(current); task = current; } return task; } static int svm_process_bind(struct task_struct *task, struct svm_device *sdev, u64 *ttbr, u64 *tcr, int *pasid) { int err; unsigned long asid; struct pid *pid = NULL; struct svm_process *process = NULL; struct mm_struct *mm = NULL; if ((ttbr == NULL) || (tcr == NULL) || (pasid == NULL)) return -EINVAL; pid = get_task_pid(task, PIDTYPE_PID); if (pid == NULL) return -EINVAL; mm = get_task_mm(task); if (!mm) { err = -EINVAL; goto err_put_pid; } asid = arm64_mm_context_get(mm); if (!asid) { err = -ENOSPC; goto err_put_mm; } /* If a svm_process already exists, use it */ mutex_lock(&svm_process_mutex); process = find_svm_process(asid); if (process == NULL) { process = svm_process_alloc(sdev, pid, mm, asid); if (IS_ERR(process)) { err = PTR_ERR(process); mutex_unlock(&svm_process_mutex); goto err_put_mm_context; } err = mmu_notifier_register(&process->notifier, mm); if (err) { mutex_unlock(&svm_process_mutex); goto err_free_svm_process; } insert_svm_process(process); if (acpi_disabled) svm_dt_bind_cores(process); else svm_acpi_bind_cores(process); mutex_unlock(&svm_process_mutex); } else { mutex_unlock(&svm_process_mutex); arm64_mm_context_put(mm); put_pid(pid); } *ttbr = virt_to_phys(mm->pgd) | asid << ASID_SHIFT; *tcr = read_sysreg(tcr_el1); *pasid = process->pasid; mmput(mm); return 0; err_free_svm_process: kfree(process); err_put_mm_context: arm64_mm_context_put(mm); err_put_mm: mmput(mm); err_put_pid: put_pid(pid); return err; } static pte_t *svm_get_pte(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long *page_size, unsigned long *offset) { pte_t *pte = NULL; unsigned long size = 0; if (is_vm_hugetlb_page(vma)) { if (pud_present(*pud)) { if (pud_huge(*pud)) { pte = (pte_t *)pud; *offset = addr & (PUD_SIZE - 1); size = PUD_SIZE; } else { pte = (pte_t *)pmd_offset(pud, addr); *offset = addr & (PMD_SIZE - 1); size = PMD_SIZE; } } else { pr_err("%s:hugetlb but pud not present\n", __func__); } } else { pmd_t *pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) return NULL; if (pmd_trans_huge(*pmd)) { pte = (pte_t *)pmd; *offset = addr & (PMD_SIZE - 1); size = PMD_SIZE; } else if (pmd_trans_unstable(pmd)) { pr_warn("%s: thp unstable\n", __func__); } else { pte = pte_offset_map(pmd, addr); *offset = addr & (PAGE_SIZE - 1); size = PAGE_SIZE; } } if (page_size) *page_size = size; return pte; } /* Must be called with mmap_lock held */ static pte_t *svm_walk_pt(unsigned long addr, unsigned long *page_size, unsigned long *offset) { pgd_t *pgd = NULL; p4d_t *p4d = NULL; pud_t *pud = NULL; struct mm_struct *mm = current->mm; struct vm_area_struct *vma = NULL; vma = find_vma(mm, addr); if (!vma) return NULL; pgd = pgd_offset(mm, addr); if (pgd_none_or_clear_bad(pgd)) return NULL; p4d = p4d_offset(pgd, addr); if (p4d_none_or_clear_bad(p4d)) return NULL; pud = pud_offset(p4d, addr); if (pud_none_or_clear_bad(pud)) return NULL; return svm_get_pte(vma, pud, addr, page_size, offset); } static struct bus_type svm_bus_type = { .name = "svm_bus", }; static int svm_open(struct inode *inode, struct file *file) { return 0; } static long svm_remap_get_phys(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long *phys, unsigned long *page_size, unsigned long *offset) { long err = -EINVAL; pgd_t *pgd = NULL; p4d_t *p4d = NULL; pud_t *pud = NULL; pte_t *pte = NULL; if (mm == NULL || vma == NULL || phys == NULL || page_size == NULL || offset == NULL) return err; pgd = pgd_offset(mm, addr); if (pgd_none_or_clear_bad(pgd)) return err; p4d = p4d_offset(pgd, addr); if (p4d_none_or_clear_bad(p4d)) return err; pud = pud_offset(p4d, addr); if (pud_none_or_clear_bad(pud)) return err; pte = svm_get_pte(vma, pud, addr, page_size, offset); if (pte && pte_present(*pte)) { *phys = PFN_PHYS(pte_pfn(*pte)); return 0; } return err; } static long svm_remap_proc(unsigned long __user *arg) { long ret = -EINVAL; struct svm_proc_mem pmem; struct task_struct *ptask = NULL; struct mm_struct *pmm = NULL, *mm = current->mm; struct vm_area_struct *pvma = NULL, *vma = NULL; unsigned long end, vaddr, phys, buf, offset, pagesize; if (!acpi_disabled) return -EPERM; if (arg == NULL) { pr_err("arg is invalid.\n"); return ret; } ret = copy_from_user(&pmem, (void __user *)arg, sizeof(pmem)); if (ret) { pr_err("failed to copy args from user space.\n"); return -EFAULT; } if (pmem.buf & (PAGE_SIZE - 1)) { pr_err("address is not aligned with page size, addr:%pK.\n", (void *)pmem.buf); return -EINVAL; } rcu_read_lock(); if (pmem.pid) { ptask = find_task_by_vpid(pmem.pid); if (!ptask) { rcu_read_unlock(); pr_err("No task for this pid\n"); return -EINVAL; } } else { ptask = current; } get_task_struct(ptask); rcu_read_unlock(); pmm = ptask->mm; down_read(&mm->mmap_lock); down_read(&pmm->mmap_lock); pvma = find_vma(pmm, pmem.vaddr); if (pvma == NULL) { ret = -ESRCH; goto err; } vma = find_vma(mm, pmem.buf); if (vma == NULL) { ret = -ESRCH; goto err; } if (pmem.len > SVM_REMAP_MEM_LEN_MAX) { ret = -EINVAL; pr_err("too large length of memory.\n"); goto err; } vaddr = pmem.vaddr; end = vaddr + pmem.len; buf = pmem.buf; vma->vm_flags |= VM_SHARED; if (end > pvma->vm_end || end < vaddr) { ret = -EINVAL; pr_err("memory length is out of range, vaddr:%pK, len:%u.\n", (void *)vaddr, pmem.len); goto err; } do { ret = svm_remap_get_phys(pmm, pvma, vaddr, &phys, &pagesize, &offset); if (ret) { ret = -EINVAL; goto err; } vaddr += pagesize - offset; do { if (remap_pfn_range(vma, buf, phys >> PAGE_SHIFT, PAGE_SIZE, __pgprot(vma->vm_page_prot.pgprot | PTE_DIRTY))) { ret = -ESRCH; goto err; } offset += PAGE_SIZE; buf += PAGE_SIZE; phys += PAGE_SIZE; } while (offset < pagesize); } while (vaddr < end); err: up_read(&pmm->mmap_lock); up_read(&mm->mmap_lock); put_task_struct(ptask); return ret; } static int svm_proc_load_flag(int __user *arg) { static atomic_t l2buf_load_flag = ATOMIC_INIT(0); int flag; if (!acpi_disabled) return -EPERM; if (arg == NULL) return -EINVAL; if (0 == (atomic_cmpxchg(&l2buf_load_flag, 0, 1))) flag = 0; else flag = 1; return put_user(flag, arg); } static unsigned long svm_get_unmapped_area(struct file *file, unsigned long addr0, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long addr = addr0; struct mm_struct *mm = current->mm; struct vm_unmapped_area_info info; struct svm_device *sdev = file_to_sdev(file); if (!acpi_disabled) return -EPERM; if (flags & MAP_FIXED) { if (IS_ALIGNED(addr, len)) return addr; dev_err(sdev->dev, "MAP_FIXED but not aligned\n"); return -EINVAL; //lint !e570 } if (addr) { struct vm_area_struct *vma = NULL; addr = ALIGN(addr, len); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && (vma == NULL || addr + len <= vm_start_gap(vma))) return addr; } info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; info.low_limit = max(PAGE_SIZE, mmap_min_addr); info.high_limit = mm->mmap_base; info.align_mask = ((len >> PAGE_SHIFT) - 1) << PAGE_SHIFT; info.align_offset = pgoff << PAGE_SHIFT; addr = vm_unmapped_area(&info); if (offset_in_page(addr)) { VM_BUG_ON(addr != -ENOMEM); info.flags = 0; info.low_limit = TASK_UNMAPPED_BASE; info.high_limit = TASK_SIZE; addr = vm_unmapped_area(&info); } return addr; } static int svm_mmap(struct file *file, struct vm_area_struct *vma) { int err; struct svm_device *sdev = file_to_sdev(file); if (!acpi_disabled) return -EPERM; if (vma->vm_flags & VM_PA32BIT) { unsigned long vm_size = vma->vm_end - vma->vm_start; struct page *page = NULL; if ((vma->vm_end < vma->vm_start) || (vm_size > MMAP_PHY32_MAX)) return -EINVAL; /* vma->vm_pgoff transfer the nid */ if (vma->vm_pgoff == 0) page = alloc_pages(GFP_KERNEL | GFP_DMA32, get_order(vm_size)); else page = alloc_pages_node((int)vma->vm_pgoff, GFP_KERNEL | __GFP_THISNODE, get_order(vm_size)); if (!page) { dev_err(sdev->dev, "fail to alloc page on node 0x%lx\n", vma->vm_pgoff); return -ENOMEM; } err = remap_pfn_range(vma, vma->vm_start, page_to_pfn(page), vm_size, vma->vm_page_prot); if (err) dev_err(sdev->dev, "fail to remap 0x%pK err=%d\n", (void *)vma->vm_start, err); } else { if ((vma->vm_end < vma->vm_start) || ((vma->vm_end - vma->vm_start) > sdev->l2size)) return -EINVAL; vma->vm_page_prot = __pgprot((~PTE_SHARED) & vma->vm_page_prot.pgprot); err = remap_pfn_range(vma, vma->vm_start, sdev->l2buff >> PAGE_SHIFT, vma->vm_end - vma->vm_start, __pgprot(vma->vm_page_prot.pgprot | PTE_DIRTY)); if (err) dev_err(sdev->dev, "fail to remap 0x%pK err=%d\n", (void *)vma->vm_start, err); } return err; } static int svm_release_phys32(unsigned long __user *arg) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma = NULL; struct page *page = NULL; pte_t *pte = NULL; unsigned long phys, addr, offset; unsigned int len = 0; if (arg == NULL) return -EINVAL; if (get_user(addr, arg)) return -EFAULT; down_read(&mm->mmap_lock); pte = svm_walk_pt(addr, NULL, &offset); if (pte && pte_present(*pte)) { phys = PFN_PHYS(pte_pfn(*pte)) + offset; } else { up_read(&mm->mmap_lock); return -EINVAL; } vma = find_vma(mm, addr); if (!vma) { up_read(&mm->mmap_lock); return -EFAULT; } page = phys_to_page(phys); len = vma->vm_end - vma->vm_start; __free_pages(page, get_order(len)); up_read(&mm->mmap_lock); return 0; } static long svm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int err = -EINVAL; struct svm_bind_process params; struct svm_device *sdev = file_to_sdev(file); struct task_struct *task; if (!arg) return -EINVAL; if (cmd == SVM_IOCTL_PROCESS_BIND) { err = copy_from_user(¶ms, (void __user *)arg, sizeof(params)); if (err) { dev_err(sdev->dev, "fail to copy params %d\n", err); return -EFAULT; } } switch (cmd) { case SVM_IOCTL_PROCESS_BIND: task = svm_get_task(params); if (IS_ERR(task)) { dev_err(sdev->dev, "failed to get task\n"); return PTR_ERR(task); } err = svm_process_bind(task, sdev, ¶ms.ttbr, ¶ms.tcr, ¶ms.pasid); if (err) { put_task_struct(task); dev_err(sdev->dev, "failed to bind task %d\n", err); return err; } put_task_struct(task); err = copy_to_user((void __user *)arg, ¶ms, sizeof(params)); if (err) { dev_err(sdev->dev, "failed to copy to user!\n"); return -EFAULT; } break; case SVM_IOCTL_REMAP_PROC: err = svm_remap_proc((unsigned long __user *)arg); break; case SVM_IOCTL_LOAD_FLAG: err = svm_proc_load_flag((int __user *)arg); break; case SVM_IOCTL_RELEASE_PHYS32: err = svm_release_phys32((unsigned long __user *)arg); break; default: err = -EINVAL; } if (err) dev_err(sdev->dev, "%s: %s failed err = %d\n", __func__, svm_cmd_to_string(cmd), err); return err; } static const struct file_operations svm_fops = { .owner = THIS_MODULE, .open = svm_open, .mmap = svm_mmap, .get_unmapped_area = svm_get_unmapped_area, .unlocked_ioctl = svm_ioctl, }; static void cdev_device_release(struct device *dev) { struct core_device *cdev = to_core_device(dev); if (!acpi_disabled) list_del(&cdev->entry); kfree(cdev); } static int svm_remove_core(struct device *dev, void *data) { struct core_device *cdev = to_core_device(dev); if (!cdev->smmu_bypass) { iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); iommu_detach_group(cdev->domain, cdev->group); iommu_group_put(cdev->group); iommu_domain_free(cdev->domain); } device_unregister(&cdev->dev); return 0; } #ifdef CONFIG_ACPI static int svm_acpi_add_core(struct svm_device *sdev, struct acpi_device *children, int id) { int err; struct core_device *cdev = NULL; char *name = NULL; enum dev_dma_attr attr; name = devm_kasprintf(sdev->dev, GFP_KERNEL, "svm_child_dev%d", id); if (name == NULL) return -ENOMEM; cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); if (cdev == NULL) return -ENOMEM; cdev->dev.fwnode = &children->fwnode; cdev->dev.parent = sdev->dev; cdev->dev.bus = &svm_bus_type; cdev->dev.release = cdev_device_release; cdev->smmu_bypass = 0; list_add(&cdev->entry, &child_list); dev_set_name(&cdev->dev, "%s", name); err = device_register(&cdev->dev); if (err) { dev_info(&cdev->dev, "core_device register failed\n"); list_del(&cdev->entry); kfree(cdev); return err; } attr = acpi_get_dma_attr(children); if (attr != DEV_DMA_NOT_SUPPORTED) { err = acpi_dma_configure(&cdev->dev, attr); if (err) { dev_dbg(&cdev->dev, "acpi_dma_configure failed\n"); return err; } } err = acpi_dev_prop_read_single(children, "hisi,smmu-bypass", DEV_PROP_U8, &cdev->smmu_bypass); if (err) dev_info(&children->dev, "read smmu bypass failed\n"); cdev->group = iommu_group_get(&cdev->dev); if (IS_ERR_OR_NULL(cdev->group)) { dev_err(&cdev->dev, "smmu is not right configured\n"); return -ENXIO; } cdev->domain = iommu_domain_alloc(sdev->dev->bus); if (cdev->domain == NULL) { dev_info(&cdev->dev, "failed to alloc domain\n"); return -ENOMEM; } err = iommu_attach_group(cdev->domain, cdev->group); if (err) { dev_err(&cdev->dev, "failed group to domain\n"); return err; } err = iommu_dev_enable_feature(&cdev->dev, IOMMU_DEV_FEAT_IOPF); if (err) { dev_err(&cdev->dev, "failed to enable iopf feature, %d\n", err); return err; } err = iommu_dev_enable_feature(&cdev->dev, IOMMU_DEV_FEAT_SVA); if (err) { dev_err(&cdev->dev, "failed to enable sva feature\n"); return err; } return 0; } static int svm_acpi_init_core(struct svm_device *sdev) { int err = 0; struct device *dev = sdev->dev; struct acpi_device *adev = ACPI_COMPANION(sdev->dev); struct acpi_device *cdev = NULL; int id = 0; down_write(&svm_sem); if (!svm_bus_type.iommu_ops) { err = bus_register(&svm_bus_type); if (err) { up_write(&svm_sem); dev_err(dev, "failed to register svm_bus_type\n"); return err; } err = bus_set_iommu(&svm_bus_type, dev->bus->iommu_ops); if (err) { up_write(&svm_sem); dev_err(dev, "failed to set iommu for svm_bus_type\n"); goto err_unregister_bus; } } else if (svm_bus_type.iommu_ops != dev->bus->iommu_ops) { err = -EBUSY; up_write(&svm_sem); dev_err(dev, "iommu_ops configured, but changed!\n"); return err; } up_write(&svm_sem); list_for_each_entry(cdev, &adev->children, node) { err = svm_acpi_add_core(sdev, cdev, id++); if (err) device_for_each_child(dev, NULL, svm_remove_core); } return err; err_unregister_bus: bus_unregister(&svm_bus_type); return err; } #else static int svm_acpi_init_core(struct svm_device *sdev) { return 0; } #endif static int svm_of_add_core(struct svm_device *sdev, struct device_node *np) { int err; struct resource res; struct core_device *cdev = NULL; char *name = NULL; name = devm_kasprintf(sdev->dev, GFP_KERNEL, "svm%llu_%s", sdev->id, np->name); if (name == NULL) return -ENOMEM; cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); if (cdev == NULL) return -ENOMEM; cdev->dev.of_node = np; cdev->dev.parent = sdev->dev; cdev->dev.bus = &svm_bus_type; cdev->dev.release = cdev_device_release; cdev->smmu_bypass = of_property_read_bool(np, "hisi,smmu_bypass"); dev_set_name(&cdev->dev, "%s", name); err = device_register(&cdev->dev); if (err) { dev_info(&cdev->dev, "core_device register failed\n"); kfree(cdev); return err; } err = of_dma_configure(&cdev->dev, np, true); if (err) { dev_dbg(&cdev->dev, "of_dma_configure failed\n"); return err; } err = of_address_to_resource(np, 0, &res); if (err) { dev_info(&cdev->dev, "no reg, FW should install the sid\n"); } else { /* If the reg specified, install sid for the core */ void __iomem *core_base = NULL; int sid = cdev->dev.iommu->fwspec->ids[0]; core_base = ioremap(res.start, resource_size(&res)); if (core_base == NULL) { dev_err(&cdev->dev, "ioremap failed\n"); return -ENOMEM; } writel_relaxed(sid, core_base + CORE_SID); iounmap(core_base); } cdev->group = iommu_group_get(&cdev->dev); if (IS_ERR_OR_NULL(cdev->group)) { dev_err(&cdev->dev, "smmu is not right configured\n"); return -ENXIO; } cdev->domain = iommu_domain_alloc(sdev->dev->bus); if (cdev->domain == NULL) { dev_info(&cdev->dev, "failed to alloc domain\n"); return -ENOMEM; } err = iommu_attach_group(cdev->domain, cdev->group); if (err) { dev_err(&cdev->dev, "failed group to domain\n"); return err; } err = iommu_dev_enable_feature(&cdev->dev, IOMMU_DEV_FEAT_IOPF); if (err) { dev_err(&cdev->dev, "failed to enable iopf feature, %d\n", err); return err; } err = iommu_dev_enable_feature(&cdev->dev, IOMMU_DEV_FEAT_SVA); if (err) { dev_err(&cdev->dev, "failed to enable sva feature, %d\n", err); return err; } return 0; } static int svm_dt_init_core(struct svm_device *sdev, struct device_node *np) { int err = 0; struct device_node *child = NULL; struct device *dev = sdev->dev; down_write(&svm_sem); if (svm_bus_type.iommu_ops == NULL) { err = bus_register(&svm_bus_type); if (err) { up_write(&svm_sem); dev_err(dev, "failed to register svm_bus_type\n"); return err; } err = bus_set_iommu(&svm_bus_type, dev->bus->iommu_ops); if (err) { up_write(&svm_sem); dev_err(dev, "failed to set iommu for svm_bus_type\n"); goto err_unregister_bus; } } else if (svm_bus_type.iommu_ops != dev->bus->iommu_ops) { err = -EBUSY; up_write(&svm_sem); dev_err(dev, "iommu_ops configured, but changed!\n"); return err; } up_write(&svm_sem); for_each_available_child_of_node(np, child) { err = svm_of_add_core(sdev, child); if (err) device_for_each_child(dev, NULL, svm_remove_core); } return err; err_unregister_bus: bus_unregister(&svm_bus_type); return err; } int svm_get_pasid(pid_t vpid, int dev_id __maybe_unused) { int pasid; unsigned long asid; struct task_struct *task = NULL; struct mm_struct *mm = NULL; struct svm_process *process = NULL; struct svm_bind_process params; params.flags = SVM_BIND_PID; params.vpid = vpid; params.pasid = -1; params.ttbr = 0; params.tcr = 0; task = svm_get_task(params); if (IS_ERR(task)) return PTR_ERR(task); mm = get_task_mm(task); if (mm == NULL) { pasid = -EINVAL; goto put_task; } asid = arm64_mm_context_get(mm); if (!asid) { pasid = -ENOSPC; goto put_mm; } mutex_lock(&svm_process_mutex); process = find_svm_process(asid); mutex_unlock(&svm_process_mutex); if (process) pasid = process->pasid; else pasid = -ESRCH; arm64_mm_context_put(mm); put_mm: mmput(mm); put_task: put_task_struct(task); return pasid; } EXPORT_SYMBOL_GPL(svm_get_pasid); static int svm_dt_setup_l2buff(struct svm_device *sdev, struct device_node *np) { struct device_node *l2buff = of_parse_phandle(np, "memory-region", 0); if (l2buff) { struct resource r; int err = of_address_to_resource(l2buff, 0, &r); if (err) { of_node_put(l2buff); return err; } sdev->l2buff = r.start; sdev->l2size = resource_size(&r); } of_node_put(l2buff); return 0; } static int svm_device_probe(struct platform_device *pdev) { int err = -1; struct device *dev = &pdev->dev; struct svm_device *sdev = NULL; struct device_node *np = dev->of_node; int alias_id; if (acpi_disabled && np == NULL) return -ENODEV; if (!dev->bus) { dev_dbg(dev, "this dev bus is NULL\n"); return -EPROBE_DEFER; } if (!dev->bus->iommu_ops) { dev_dbg(dev, "defer probe svm device\n"); return -EPROBE_DEFER; } sdev = devm_kzalloc(dev, sizeof(*sdev), GFP_KERNEL); if (sdev == NULL) return -ENOMEM; if (!acpi_disabled) { err = device_property_read_u64(dev, "svmid", &sdev->id); if (err) { dev_err(dev, "failed to get this svm device id\n"); return err; } } else { alias_id = of_alias_get_id(np, "svm"); if (alias_id < 0) sdev->id = probe_index; else sdev->id = alias_id; } sdev->dev = dev; sdev->miscdev.minor = MISC_DYNAMIC_MINOR; sdev->miscdev.fops = &svm_fops; sdev->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, SVM_DEVICE_NAME"%llu", sdev->id); if (sdev->miscdev.name == NULL) return -ENOMEM; dev_set_drvdata(dev, sdev); err = misc_register(&sdev->miscdev); if (err) { dev_err(dev, "Unable to register misc device\n"); return err; } if (!acpi_disabled) { err = svm_acpi_init_core(sdev); if (err) { dev_err(dev, "failed to init acpi cores\n"); goto err_unregister_misc; } } else { /* * Get the l2buff phys address and size, if it do not exist * just warn and continue, and runtime can not use L2BUFF. */ err = svm_dt_setup_l2buff(sdev, np); if (err) dev_warn(dev, "Cannot get l2buff\n"); err = svm_dt_init_core(sdev, np); if (err) { dev_err(dev, "failed to init dt cores\n"); goto err_unregister_misc; } probe_index++; } mutex_init(&svm_process_mutex); return err; err_unregister_misc: misc_deregister(&sdev->miscdev); return err; } static int svm_device_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct svm_device *sdev = dev_get_drvdata(dev); device_for_each_child(sdev->dev, NULL, svm_remove_core); misc_deregister(&sdev->miscdev); return 0; } static const struct acpi_device_id svm_acpi_match[] = { { "HSVM1980", 0}, { } }; MODULE_DEVICE_TABLE(acpi, svm_acpi_match); static const struct of_device_id svm_of_match[] = { { .compatible = "hisilicon,svm" }, { } }; MODULE_DEVICE_TABLE(of, svm_of_match); /*svm acpi probe and remove*/ static struct platform_driver svm_driver = { .probe = svm_device_probe, .remove = svm_device_remove, .driver = { .name = SVM_DEVICE_NAME, .acpi_match_table = ACPI_PTR(svm_acpi_match), .of_match_table = svm_of_match, }, }; module_platform_driver(svm_driver); MODULE_DESCRIPTION("Hisilicon SVM driver"); MODULE_AUTHOR("Fang Lijun "); MODULE_LICENSE("GPL v2");