nv10_fence.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include <core/ramht.h>
#include "nouveau_fence.h"

struct nv10_fence_chan {
	struct nouveau_fence_chan base;
};

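/* Global fence state: the VRAM buffer backing the inter-channel
 * semaphore (allocated on NV17 and later only) and the counter used
 * to hand out semaphore values.
 */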
struct nv10_fence_priv {
	struct nouveau_fence_priv base;
	struct nouveau_bo *bo;
	spinlock_t lock;
	u32 sequence;
};

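/* Emit a fence by writing its sequence number to the channel's
 * reference counter; nv10_fence_read() reads the same counter back
 * to determine how far the channel has progressed.
 */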
static int
nv10_fence_emit(struct nouveau_fence *fence)
{
	struct nouveau_channel *chan = fence->channel;
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
		OUT_RING  (chan, fence->sequence);
		FIRE_RING (chan);
	}
	return ret;
}

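/* Inter-channel synchronisation is not supported on pre-NV17
 * hardware, which lacks the semaphore mechanism used below.
 */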
static int
nv10_fence_sync(struct nouveau_fence *fence,
		struct nouveau_channel *prev, struct nouveau_channel *chan)
{
	return -ENODEV;
}

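/* Synchronise "chan" with "prev" using a semaphore in the shared
 * NvSema buffer: "prev" releases the semaphore to value + 1 once it
 * reaches this point in its pushbuffer, and "chan" waits to acquire
 * value + 1 before continuing, then releases value + 2 for the next
 * sync point.
 */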
static int
nv17_fence_sync(struct nouveau_fence *fence,
		struct nouveau_channel *prev, struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nv10_fence_priv *priv = dev_priv->fence.func;
	u32 value;
	int ret;

	if (!mutex_trylock(&prev->mutex))
		return -EBUSY;

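	/* Reserve the next two semaphore values for this sync point. */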
	spin_lock(&priv->lock);
	value = priv->sequence;
	priv->sequence += 2;
	spin_unlock(&priv->lock);

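	/* On the previous channel: wait for earlier sync points to
	 * complete (acquire value), then signal that everything queued
	 * so far has executed (release value + 1).
	 */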
	ret = RING_SPACE(prev, 5);
	if (!ret) {
		BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
		OUT_RING  (prev, NvSema);
		OUT_RING  (prev, 0);
		OUT_RING  (prev, value + 0);
		OUT_RING  (prev, value + 1);
		FIRE_RING (prev);
	}

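	/* On the target channel: wait for that signal (acquire
	 * value + 1) before running anything queued after this sync,
	 * then bump the semaphore for the next pairing (release
	 * value + 2).
	 */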
	if (!ret && !(ret = RING_SPACE(chan, 5))) {
		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
		OUT_RING  (chan, NvSema);
		OUT_RING  (chan, 0);
		OUT_RING  (chan, value + 1);
		OUT_RING  (chan, value + 2);
		FIRE_RING (chan);
	}

	mutex_unlock(&prev->mutex);
	return 0;
}

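/* Read back the channel's reference counter to find the most recent
 * sequence number written by nv10_fence_emit().
 */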
static u32
nv10_fence_read(struct nouveau_channel *chan)
{
	return nvchan_rd32(chan, 0x0048);
}

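/* Tear down the per-channel fence state. */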
static void
nv10_fence_context_del(struct nouveau_channel *chan)
{
	struct nv10_fence_chan *fctx = chan->fence;
	nouveau_fence_context_del(&fctx->base);
	chan->fence = NULL;
	kfree(fctx);
}

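/* Set up per-channel fence state.  When the shared semaphore buffer
 * exists (NV17 and later), it is also made visible to the channel so
 * that nv17_fence_sync() can use it.
 */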
static int
nv10_fence_context_new(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nv10_fence_priv *priv = dev_priv->fence.func;
	struct nv10_fence_chan *fctx;
	struct nouveau_gpuobj *obj;
	int ret = 0;

	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
	if (!fctx)
		return -ENOMEM;

	nouveau_fence_context_new(&fctx->base);

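	/* Wrap the semaphore buffer in a DMA object and hang it off the
	 * channel's RAMHT under the NvSema handle.
	 */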
	if (priv->bo) {
		struct ttm_mem_reg *mem = &priv->bo->bo.mem;

		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
					     mem->start * PAGE_SIZE, mem->size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &obj);
		if (!ret) {
			ret = nouveau_ramht_insert(chan, NvSema, obj);
			nouveau_gpuobj_ref(NULL, &obj);
		}
	}

	if (ret)
		nv10_fence_context_del(chan);
	return ret;
}

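/* Free the global fence state, dropping the semaphore buffer. */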
static void
nv10_fence_destroy(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv10_fence_priv *priv = dev_priv->fence.func;

	nouveau_bo_ref(NULL, &priv->bo);
	dev_priv->fence.func = NULL;
	kfree(priv);
}

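/* Create the global fence state.  NV17 and later additionally get a
 * one-page VRAM buffer whose first word backs the inter-channel
 * semaphore used by nv17_fence_sync().
 */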
int
nv10_fence_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv10_fence_priv *priv;
	int ret = 0;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.dtor = nv10_fence_destroy;
	priv->base.context_new = nv10_fence_context_new;
	priv->base.context_del = nv10_fence_context_del;
	priv->base.emit = nv10_fence_emit;
	priv->base.read = nv10_fence_read;
	priv->base.sync = nv10_fence_sync;
	dev_priv->fence.func = &priv->base;
	spin_lock_init(&priv->lock);

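	/* NV17+ support hardware semaphores: allocate, pin and map the
	 * backing buffer and clear the semaphore word.
	 */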
	if (dev_priv->chipset >= 0x17) {
		ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
				     0, 0x0000, NULL, &priv->bo);
		if (!ret) {
			ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
			if (!ret)
				ret = nouveau_bo_map(priv->bo);
			if (ret)
				nouveau_bo_ref(NULL, &priv->bo);
		}

		if (ret == 0) {
			nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
			priv->base.sync = nv17_fence_sync;
		}
	}

	if (ret)
		nv10_fence_destroy(dev);
	return ret;
}