// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_lmem.h"
#include "intel_region_ttm.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gt/intel_gt.h"

15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
static int init_fake_lmem_bar(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;
	struct i915_ggtt *ggtt = &i915->ggtt;
	unsigned long n;
	int ret;

	/* We want to 1:1 map the mappable aperture to our reserved region */

	mem->fake_mappable.start = 0;
	mem->fake_mappable.size = resource_size(&mem->region);
	mem->fake_mappable.color = I915_COLOR_UNEVICTABLE;

	ret = drm_mm_reserve_node(&ggtt->vm.mm, &mem->fake_mappable);
	if (ret)
		return ret;

32
	mem->remap_addr = dma_map_resource(i915->drm.dev,
33 34
					   mem->region.start,
					   mem->fake_mappable.size,
35
					   DMA_BIDIRECTIONAL,
36
					   DMA_ATTR_FORCE_CONTIGUOUS);
37
	if (dma_mapping_error(i915->drm.dev, mem->remap_addr)) {
38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56
		drm_mm_remove_node(&mem->fake_mappable);
		return -EINVAL;
	}

	for (n = 0; n < mem->fake_mappable.size >> PAGE_SHIFT; ++n) {
		ggtt->vm.insert_page(&ggtt->vm,
				     mem->remap_addr + (n << PAGE_SHIFT),
				     n << PAGE_SHIFT,
				     I915_CACHE_NONE, 0);
	}

	mem->region = (struct resource)DEFINE_RES_MEM(mem->remap_addr,
						      mem->fake_mappable.size);

	return 0;
}

static void release_fake_lmem_bar(struct intel_memory_region *mem)
{
57 58 59 60
	if (!drm_mm_node_allocated(&mem->fake_mappable))
		return;

	drm_mm_remove_node(&mem->fake_mappable);
61

62
	dma_unmap_resource(mem->i915->drm.dev,
63 64
			   mem->remap_addr,
			   mem->fake_mappable.size,
65
			   DMA_BIDIRECTIONAL,
66 67 68
			   DMA_ATTR_FORCE_CONTIGUOUS);
}

69
static int
70 71
region_lmem_release(struct intel_memory_region *mem)
{
72 73 74
	int ret;

	ret = intel_region_ttm_fini(mem);
75
	io_mapping_fini(&mem->iomap);
76
	release_fake_lmem_bar(mem);
77 78

	return ret;
79 80 81 82 83 84 85
}

static int
region_lmem_init(struct intel_memory_region *mem)
{
	int ret;

86
	if (mem->i915->params.fake_lmem_start) {
87 88 89 90
		ret = init_fake_lmem_bar(mem);
		GEM_BUG_ON(ret);
	}

91 92
	if (!io_mapping_init_wc(&mem->iomap,
				mem->io_start,
93 94 95 96
				resource_size(&mem->region))) {
		ret = -EIO;
		goto out_no_io;
	}
97

98
	ret = intel_region_ttm_init(mem);
99
	if (ret)
100 101 102 103 104 105 106 107
		goto out_no_buddy;

	return 0;

out_no_buddy:
	io_mapping_fini(&mem->iomap);
out_no_io:
	release_fake_lmem_bar(mem);
108 109 110 111

	return ret;
}

112
static const struct intel_memory_region_ops intel_region_lmem_ops = {
113 114
	.init = region_lmem_init,
	.release = region_lmem_release,
115
	.init_object = __i915_gem_ttm_object_init,
116
};
117 118

struct intel_memory_region *
119
intel_gt_setup_fake_lmem(struct intel_gt *gt)
120
{
121
	struct drm_i915_private *i915 = gt->i915;
122
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
123 124 125 126 127
	struct intel_memory_region *mem;
	resource_size_t mappable_end;
	resource_size_t io_start;
	resource_size_t start;

128 129 130 131 132 133
	if (!HAS_LMEM(i915))
		return ERR_PTR(-ENODEV);

	if (!i915->params.fake_lmem_start)
		return ERR_PTR(-ENODEV);

134 135 136 137
	GEM_BUG_ON(i915_ggtt_has_aperture(&i915->ggtt));

	/* Your mappable aperture belongs to me now! */
	mappable_end = pci_resource_len(pdev, 2);
138
	io_start = pci_resource_start(pdev, 2);
139
	start = i915->params.fake_lmem_start;
140 141 142 143 144 145

	mem = intel_memory_region_create(i915,
					 start,
					 mappable_end,
					 PAGE_SIZE,
					 io_start,
146 147
					 INTEL_MEMORY_LOCAL,
					 0,
148 149
					 &intel_region_lmem_ops);
	if (!IS_ERR(mem)) {
150 151 152 153 154 155
		drm_info(&i915->drm, "Intel graphics fake LMEM: %pR\n",
			 &mem->region);
		drm_info(&i915->drm,
			 "Intel graphics fake LMEM IO start: %llx\n",
			(u64)mem->io_start);
		drm_info(&i915->drm, "Intel graphics fake LMEM size: %llx\n",
156 157 158 159 160
			 (u64)resource_size(&mem->region));
	}

	return mem;
}
M
Matthew Auld 已提交
161

162 163 164
static bool get_legacy_lowmem_region(struct intel_uncore *uncore,
				     u64 *start, u32 *size)
{
165
	if (!IS_DG1_GRAPHICS_STEP(uncore->i915, STEP_A0, STEP_C0))
166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193
		return false;

	*start = 0;
	*size = SZ_1M;

	drm_dbg(&uncore->i915->drm, "LMEM: reserved legacy low-memory [0x%llx-0x%llx]\n",
		*start, *start + *size);

	return true;
}

/*
 * Carve any hardware-mandated legacy low-memory range out of @mem so the
 * allocator never hands it out. A no-op (returning 0) on hardware without
 * such a range; otherwise returns the reservation result.
 */
static int reserve_lowmem_region(struct intel_uncore *uncore,
				 struct intel_memory_region *mem)
{
	u64 start;
	u32 size;
	int err;

	if (!get_legacy_lowmem_region(uncore, &start, &size))
		return 0;

	err = intel_memory_region_reserve(mem, start, size);
	if (err)
		drm_err(&uncore->i915->drm, "LMEM: reserving low memory region failed\n");

	return err;
}

M
Matthew Auld 已提交
194 195 196
static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
197
	struct intel_uncore *uncore = gt->uncore;
198
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
M
Matthew Auld 已提交
199 200
	struct intel_memory_region *mem;
	resource_size_t io_start;
201
	resource_size_t lmem_size;
202
	int err;
M
Matthew Auld 已提交
203 204 205 206

	if (!IS_DGFX(i915))
		return ERR_PTR(-ENODEV);

207 208 209
	/* Stolen starts from GSMBASE on DG1 */
	lmem_size = intel_uncore_read64(uncore, GEN12_GSMBASE);

M
Matthew Auld 已提交
210
	io_start = pci_resource_start(pdev, 2);
211 212
	if (GEM_WARN_ON(lmem_size > pci_resource_len(pdev, 2)))
		return ERR_PTR(-ENODEV);
M
Matthew Auld 已提交
213 214 215

	mem = intel_memory_region_create(i915,
					 0,
216
					 lmem_size,
M
Matthew Auld 已提交
217 218
					 I915_GTT_PAGE_SIZE_4K,
					 io_start,
219 220
					 INTEL_MEMORY_LOCAL,
					 0,
M
Matthew Auld 已提交
221 222 223 224
					 &intel_region_lmem_ops);
	if (IS_ERR(mem))
		return mem;

225 226 227 228
	err = reserve_lowmem_region(uncore, mem);
	if (err)
		goto err_region_put;

M
Matthew Auld 已提交
229 230 231
	drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region);
	drm_dbg(&i915->drm, "Local memory IO start: %pa\n",
		&mem->io_start);
232 233
	drm_info(&i915->drm, "Local memory available: %pa\n",
		 &lmem_size);
M
Matthew Auld 已提交
234 235

	return mem;
236 237

err_region_put:
238
	intel_memory_region_destroy(mem);
239
	return ERR_PTR(err);
M
Matthew Auld 已提交
240 241 242 243 244 245
}

/*
 * Public entry point for LMEM setup; thin wrapper so callers outside this
 * file don't see the static setup_lmem() helper.
 */
struct intel_memory_region *intel_gt_setup_lmem(struct intel_gt *gt)
{
	return setup_lmem(gt);
}