/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/amd_nb.h>

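/*
 * Per-node copies of the GART flush words, read from config register
 * 0x9c of each node's misc device by amd_cache_gart().
 */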
static u32 *flush_words;

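/*
 * The "misc" device is PCI function 3 of each northbridge node; the
 * "link" device below is function 4 (function assignments per the AMD
 * BKDGs; not spelled out in this file).
 */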
struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
	{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);

static struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_LINK) },
	{}
};

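/*
 * { bus, device base, device limit } ranges to scan for northbridge
 * devices: device 0x18 and up on bus 0, plus the 0xff/0xfe buses
 * used by multi-node processors.
 */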
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

struct amd_northbridge_info amd_northbridges;
EXPORT_SYMBOL(amd_northbridges);

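/*
 * Iterate over the northbridge devices matching @ids: pass NULL to
 * start, the previous device to continue. pci_get_device() releases
 * the reference on the device passed in and returns the next match
 * with a new reference held.
 */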
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

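/*
 * Count the misc devices, allocate one struct amd_northbridge per
 * node, cache each node's misc/link devices and set feature flags
 * (GART, L3 Cache Index Disable) from the CPU family. Idempotent:
 * returns immediately once the northbridges have been cached.
 */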
int amd_cache_northbridges(void)
{
	int i = 0;
	struct amd_northbridge *nb;
	struct pci_dev *misc, *link;

	if (amd_nb_num())
		return 0;

	misc = NULL;
	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
		i++;

	if (i == 0)
		return 0;

	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = i;

	link = misc = NULL;
	for (i = 0; i != amd_nb_num(); i++) {
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, amd_nb_misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, amd_nb_link_ids);
	}

	/* some CPU families (e.g. family 0x11) do not support GART */
	if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	    boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_mask >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);
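
/*
 * A minimal usage sketch for a dependent driver (illustrative only;
 * "reg"/"val" are placeholders, not part of this file):
 *
 *	if (amd_cache_northbridges() < 0)
 *		return -ENODEV;
 *	for (i = 0; i < amd_nb_num(); i++) {
 *		struct pci_dev *misc = node_to_amd_nb(i)->misc;
 *		pci_read_config_dword(misc, reg, &val);
 *	}
 */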

/*
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyway.
 */
int __init early_is_amd_nb(u32 device)
{
	struct pci_device_id *id;
	u32 vendor = device & 0xffff;
	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return 1;
	return 0;
}
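
/*
 * A sketch of the intended early-boot caller, assuming the early PCI
 * accessors from arch/x86/pci/early.c (illustrative only):
 *
 *	u32 id = read_pci_config(num, slot, func, PCI_VENDOR_ID);
 *	if (early_is_amd_nb(id))
 *		...
 *
 * The low 16 bits of the argument are the vendor ID, the high 16 bits
 * the device ID, matching the first dword of PCI config space.
 */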

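/*
 * Cache each node's GART flush word (misc device config register
 * 0x9c) so amd_flush_garts() need not re-read it on every flush.
 * Clears AMD_NB_GART on allocation failure so the feature reads as
 * absent afterwards.
 */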
int amd_cache_gart(void)
{
	int i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return 0;

	flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		return -ENOMEM;
	}

	for (i = 0; i != amd_nb_num(); i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				      &flush_words[i]);

	return 0;
}

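/*
 * Flush the GART TLB on all nodes: write each cached flush word back
 * with bit 0 set to trigger the flush, then poll until the hardware
 * clears the bit to signal completion.
 */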
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyway
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_nb_num(); i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_nb_num(); i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		printk("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

static __init int init_amd_nbs(void)
{
	int err;

	err = amd_cache_northbridges();

	if (err < 0)
		printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");

	if (amd_cache_gart() < 0)
		printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
		       "GART support disabled.\n");

	return err;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);