/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

static u32 *flush_words;

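/* PCI IDs of the northbridge miscellaneous control ("misc") functions */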
const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_LINK) },
	{}
};

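/*
 * Bus/device ranges covering the AMD northbridge devices.
 * Each entry is { bus, dev_base, dev_limit }: devices dev_base up to
 * (but not including) dev_limit on the given bus.
 */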
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

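/*
 * Return the next PCI device after @dev that matches one of the IDs in
 * @ids, or NULL when the device list is exhausted.
 */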
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

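/*
 * Count the northbridge "misc" devices, allocate one amd_northbridge entry
 * per node, cache the misc/link PCI devices for each node and set the
 * feature flags (GART, L3 index disable, L3 partitioning) based on the
 * CPU family.
 */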
int amd_cache_northbridges(void)
{
	int i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	/* some CPU families (e.g. family 0x11) do not support GART */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
int __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return 1;
	return 0;
}

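/*
 * Return the 4-bit subcache enable mask for the compute unit @cpu belongs
 * to, read from the node's link device (register 0x1d4), or 0 if L3
 * partitioning is not supported.
 */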
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
	unsigned int mask;
	int cuid = 0;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

#ifdef CONFIG_SMP
	cuid = cpu_data(cpu).compute_unit_id;
#endif
	return (mask >> (4 * cuid)) & 0xf;
}

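/*
 * Set the subcache enable mask for @cpu's compute unit, temporarily
 * dropping BAN mode while any subcache is disabled and restoring the
 * reset state once the register returns to its original value.
 */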
int amd_set_subcaches(int cpu, int mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
	unsigned int reg;
	int cuid = 0;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

#ifdef CONFIG_SMP
	cuid = cpu_data(cpu).compute_unit_id;
#endif
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

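/*
 * Cache the GART flush word (register 0x9c of each misc device) for every
 * northbridge so amd_flush_garts() can write it back with the flush bit
 * set; clears the GART feature flag on allocation failure.
 */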
int amd_cache_gart(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

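/*
 * Flush the GART TLB on every northbridge by writing each cached flush
 * word back with the flush bit (bit 0) set and waiting for the hardware
 * to clear it again.
 */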
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		printk("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

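/*
 * Enumerate the northbridges and set up the GART flush words once the
 * PCI subsystem is available.
 */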
static __init int init_amd_nbs(void)
{
	int err = 0;

	err = amd_cache_northbridges();

	if (err < 0)
		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");

	if (amd_cache_gart() < 0)
		printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
		       "GART support disabled.\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);