context.c
/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"

/*
 * Allocates space for a CXL context.
 */
struct cxl_context *cxl_context_alloc(void)
{
	return kzalloc(sizeof(struct cxl_context), GFP_KERNEL);
}

/*
 * Initialises a CXL context.
 */
int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
{
	int i;

	spin_lock_init(&ctx->sste_lock);
	ctx->afu = afu;
	ctx->master = master;
	ctx->pid = NULL; /* Set in start work ioctl */

	/*
	 * Allocate the segment table before we put it in the IDR so that we
	 * can always access it once it is dereferenced from the IDR. For the
	 * same reason, the segment table is only destroyed after the context
	 * is removed from the IDR. Access to this in the IOCTL is protected
	 * by Linux filesystem semantics (can't IOCTL until open is complete).
	 */
	i = cxl_alloc_sst(ctx);
	if (i)
		return i;

	INIT_WORK(&ctx->fault_work, cxl_handle_fault);

	init_waitqueue_head(&ctx->wq);
	spin_lock_init(&ctx->lock);

	ctx->irq_bitmap = NULL;
	ctx->pending_irq = false;
	ctx->pending_fault = false;
	ctx->pending_afu_err = false;

	/*
	 * When we have to destroy all contexts in cxl_context_detach_all() we
	 * end up with afu_release_irqs() called from inside an
	 * idr_for_each_entry(). Hence we need to make sure that anything
	 * dereferenced from this IDR is in a sane state before we allocate
	 * the IDR entry here. Clearing the IRQ ranges ensures that.
	 */
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		ctx->irqs.range[i] = 0;

	mutex_init(&ctx->status_mutex);

	ctx->status = OPENED;

	/*
	 * Allocating the IDR entry: make sure everything that can be
	 * dereferenced from it has been set up above.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&afu->contexts_lock);
	i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
		      ctx->afu->num_procs, GFP_NOWAIT);
	spin_unlock(&afu->contexts_lock);
	idr_preload_end();
	if (i < 0)
		return i;

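	/*
	 * The IDR index doubles as this context's process element (PE)
	 * handle; ctx->elem points at the matching entry in the AFU's
	 * scheduled process area (SPA).
	 */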
	ctx->pe = i;
	ctx->elem = &ctx->afu->spa[i];
	ctx->pe_inserted = false;
	return 0;
}
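
/*
 * Illustrative sketch of the expected calling sequence (hypothetical, not
 * taken from this file -- the real callers live elsewhere in the driver,
 * e.g. the file open/release paths; error handling is simplified):
 *
 *	struct cxl_context *ctx;
 *	int rc;
 *
 *	ctx = cxl_context_alloc();
 *	if (!ctx)
 *		return -ENOMEM;
 *
 *	rc = cxl_context_init(ctx, afu, false);
 *	if (rc) {
 *		kfree(ctx);
 *		return rc;
 *	}
 *
 *	... use the context, then on release:
 *
 *	cxl_context_detach(ctx);
 *	cxl_context_free(ctx);
 */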

/*
 * Map a per-context mmio space into the given vma.
 */
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
{
	u64 len = vma->vm_end - vma->vm_start;
	len = min(len, ctx->psn_size);

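	/*
	 * In dedicated process mode the single context owns the AFU's entire
	 * problem state area, so map that rather than a per-process chunk.
	 */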
	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size);
	}

	/* make sure there is a valid per process space for this AFU */
	if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
		pr_devel("AFU doesn't support mmio space\n");
		return -EINVAL;
	}

	/* Can't mmap until the AFU is enabled */
	if (!ctx->afu->enabled)
		return -EBUSY;

	pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
		 ctx->psn_phys, ctx->pe, ctx->master);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return vm_iomap_memory(vma, ctx->psn_phys, len);
}
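
/*
 * Illustrative sketch of how cxl_context_iomap() might be wired up
 * (hypothetical, not part of this file): a character device's mmap handler
 * could recover its struct cxl_context from file->private_data and hand the
 * vma straight through:
 *
 *	static int example_afu_mmap(struct file *file, struct vm_area_struct *vm)
 *	{
 *		struct cxl_context *ctx = file->private_data;
 *
 *		return cxl_context_iomap(ctx, vm);
 *	}
 */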

/*
 * Detach a context from the hardware. This disables interrupts and doesn't
 * return until all outstanding interrupts for this context have completed. The
 * hardware should no longer access *ctx after this has returned.
 */
static void __detach_context(struct cxl_context *ctx)
{
	enum cxl_context_status status;

	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != STARTED)
		return;

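	/*
	 * Tear-down order: detach the process element from the hardware,
	 * release its interrupts, make sure any in-flight fault work has
	 * finished, then wake anyone waiting on this context.
	 */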
	WARN_ON(cxl_detach_process(ctx));
	afu_release_irqs(ctx);
	flush_work(&ctx->fault_work); /* Only needed for dedicated process */
	wake_up_all(&ctx->wq);
}

/*
 * Detach the given context from the AFU. This doesn't actually
 * free the context but it should stop the context running in hardware
 * (ie. prevent this context from generating any further interrupts
 * so that it can be freed).
 */
void cxl_context_detach(struct cxl_context *ctx)
{
	__detach_context(ctx);
}

/*
 * Detach all contexts on the given AFU.
 */
void cxl_context_detach_all(struct cxl_afu *afu)
{
	struct cxl_context *ctx;
	int tmp;

	rcu_read_lock();
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp)
		/*
		 * Anything done in here needs to be set up before the IDR is
		 * created and torn down after the IDR is removed.
		 */
		__detach_context(ctx);
	rcu_read_unlock();
}

void cxl_context_free(struct cxl_context *ctx)
{
	spin_lock(&ctx->afu->contexts_lock);
	idr_remove(&ctx->afu->contexts_idr, ctx->pe);
	spin_unlock(&ctx->afu->contexts_lock);
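	/*
	 * Wait for any cxl_context_detach_all() walkers still inside their
	 * RCU read-side critical section to finish before the context's
	 * memory is freed below.
	 */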
	synchronize_rcu();

	free_page((u64)ctx->sstp);
	ctx->sstp = NULL;

	put_pid(ctx->pid);
	kfree(ctx);
}