/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{}
};

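/*
 * PCI bus/device windows scanned for northbridges; the fields are
 * { bus, dev_base, dev_limit }, with dev_limit exclusive.
 */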
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

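/*
 * Return the next PCI device after @dev that matches one of the IDs in
 * @ids.  pci_get_device() drops the reference it holds on @dev and takes
 * one on the device it returns, so feeding the result back in repeatedly
 * walks the whole device list exactly once.
 */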
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

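/*
 * Enumerate all northbridges once, cache their misc (function 3) and
 * link (function 4) PCI devices per node, and record which optional
 * features (GART, L3 index disable, L3 partitioning) this family has.
 */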
int amd_cache_northbridges(void)
{
	u16 i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	/* some CPU families (e.g. family 0x11) do not support GART */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * @device is the combined vendor/device dword as read from PCI config
 * space offset 0.  Ignores subdevice/subvendor but as far as I can
 * figure out they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid = 0;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

#ifdef CONFIG_SMP
	cuid = cpu_data(cpu).compute_unit_id;
#endif
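	/*
	 * The link function's register 0x1d4 packs one 4-bit
	 * subcache-enable field per compute unit; extract the field
	 * belonging to this CPU's compute unit.
	 */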
	return (mask >> (4 * cuid)) & 0xf;
}

int amd_set_subcaches(int cpu, int mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid = 0;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

#ifdef CONFIG_SMP
	cuid = cpu_data(cpu).compute_unit_id;
#endif
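	/*
	 * Move the new 4-bit field into place for this compute unit.
	 * The flags ORed in at bits 26+ (one per compute unit, set for
	 * every unit except the current one) appear to confine the
	 * update to this unit's field.
	 */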
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}
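
/*
 * Usage sketch: amd_set_subcaches(cpu, 0x1) would leave only subcache 0
 * enabled for @cpu's compute unit, and amd_get_subcaches(cpu) reads the
 * current 4-bit mask back.
 */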

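/*
 * Cache each northbridge's GART cache-control word (misc function
 * register 0x9c) so the flush path below does not have to read it.
 */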
static int amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/* Avoid races between AGP and IOMMU. In theory it's not needed
	   but I'm not sure if the hardware won't lose flush requests
	   when another is pending. This whole thing is so expensive anyways
	   that it doesn't matter to serialize more. -AK */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
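	/* Bit 0 of the cache-control word requests a flush; the
	   northbridge clears it again once the flush has completed. */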
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		printk(KERN_NOTICE "nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");

	if (amd_cache_gart() < 0)
		printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, GART support disabled.\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);