/*
 * vSMPowered(tm) systems specific initialization
 * Copyright (C) 2005 ScaleMP Inc.
 *
 * Use of this code is subject to the terms and conditions of the
 * GNU general public license version 2. See "COPYING" or
 * http://www.gnu.org/licenses/gpl.html
 *
 * Ravikiran Thirumalai <kiran@scalemp.com>,
 * Shai Fultheim <shai@scalemp.com>
 * Paravirt ops integration: Glauber de Oliveira Costa <gcosta@redhat.com>,
 *			     Ravikiran Thirumalai <kiran@scalemp.com>
 */

#include <linux/init.h>
#include <linux/pci_ids.h>
#include <linux/pci_regs.h>
#include <linux/smp.h>
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/pci-direct.h>
#include <asm/io.h>
#include <asm/paravirt.h>
#include <asm/setup.h>

#define TOPOLOGY_REGISTER_OFFSET 0x10

/* Flag below is initialized once during vSMP PCI initialization. */
static int irq_routing_comply = 1;

#if defined CONFIG_PCI && defined CONFIG_PARAVIRT
/*
 * Interrupt control on vSMPowered systems:
 * ~AC is a shadow of IF.  If IF is 'on' AC should be 'off'
 * and vice versa.
 */

39
asmlinkage __visible unsigned long vsmp_save_fl(void)
40 41 42 43 44 45 46
{
	unsigned long flags = native_save_fl();

	if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC))
		flags &= ~X86_EFLAGS_IF;
	return flags;
}
47
PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
48

49
__visible void vsmp_restore_fl(unsigned long flags)
50 51 52 53 54 55 56
{
	if (flags & X86_EFLAGS_IF)
		flags &= ~X86_EFLAGS_AC;
	else
		flags |= X86_EFLAGS_AC;
	native_restore_fl(flags);
}
57
PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
58

59
asmlinkage __visible void vsmp_irq_disable(void)
60 61 62 63 64
{
	unsigned long flags = native_save_fl();

	native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
}
65
PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
66

67
asmlinkage __visible void vsmp_irq_enable(void)
68 69 70 71 72
{
	unsigned long flags = native_save_fl();

	native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
}
73
PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
74

75
static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
76 77 78 79 80 81 82 83 84 85 86 87 88
				  unsigned long addr, unsigned len)
{
	switch (type) {
	case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
	case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
	case PARAVIRT_PATCH(pv_irq_ops.save_fl):
	case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
		return paravirt_patch_default(type, clobbers, ibuf, addr, len);
	default:
		return native_patch(type, clobbers, ibuf, addr, len);
	}

}
89

90
static void __init set_vsmp_pv_ops(void)
91
{
92
	void __iomem *address;
G
Glauber Costa 已提交
93
	unsigned int cap, ctl, cfg;
94 95

	/* set vSMP magic bits to indicate vSMP capable kernel */
G
Glauber Costa 已提交
96 97
	cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0);
	address = early_ioremap(cfg, 8);
98 99
	cap = readl(address);
	ctl = readl(address + 4);
100 101
	printk(KERN_INFO "vSMP CTL: capabilities:0x%08x  control:0x%08x\n",
	       cap, ctl);
102 103 104 105 106

	/* If possible, let the vSMP foundation route the interrupt optimally */
#ifdef CONFIG_SMP
	if (cap & ctl & BIT(8)) {
		ctl &= ~BIT(8);
O
Oren Twaig 已提交
107 108 109 110

		/* Interrupt routing set to ignore */
		irq_routing_comply = 0;

111 112
#ifdef CONFIG_PROC_FS
		/* Don't let users change irq affinity via procfs */
113
		no_irq_affinity = 1;
114
#endif
115 116 117
	}
#endif

118
	if (cap & ctl & (1 << 4)) {
119
		/* Setup irq ops and turn on vSMP  IRQ fastpath handling */
120 121 122 123
		pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
		pv_irq_ops.irq_enable  = PV_CALLEE_SAVE(vsmp_irq_enable);
		pv_irq_ops.save_fl  = PV_CALLEE_SAVE(vsmp_save_fl);
		pv_irq_ops.restore_fl  = PV_CALLEE_SAVE(vsmp_restore_fl);
124
		pv_init_ops.patch = vsmp_patch;
125 126
		ctl &= ~(1 << 4);
	}
127 128 129
	writel(ctl, address + 4);
	ctl = readl(address + 4);
	pr_info("vSMP CTL: control set to:0x%08x\n", ctl);
130

G
Glauber Costa 已提交
131
	early_iounmap(address, 8);
132 133 134 135 136 137 138
}
#else
static void __init set_vsmp_pv_ops(void)
{
}
#endif

139
#ifdef CONFIG_PCI
140
static int is_vsmp = -1;
141

142
static void __init detect_vsmp_box(void)
143
{
144
	is_vsmp = 0;
145 146

	if (!early_pci_allowed())
147
		return;
148

149
	/* Check if we are running on a ScaleMP vSMPowered box */
150 151
	if (read_pci_config(0, 0x1f, 0, PCI_VENDOR_ID) ==
	     (PCI_VENDOR_ID_SCALEMP | (PCI_DEVICE_ID_SCALEMP_VSMP_CTL << 16)))
152 153
		is_vsmp = 1;
}
154

155 156 157 158 159 160 161 162
int is_vsmp_box(void)
{
	if (is_vsmp != -1)
		return is_vsmp;
	else {
		WARN_ON_ONCE(1);
		return 0;
	}
163 164
}

165 166 167 168 169 170 171 172 173
#else
static void __init detect_vsmp_box(void)
{
}
int is_vsmp_box(void)
{
	return 0;
}
#endif
174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208

static void __init vsmp_cap_cpus(void)
{
#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP)
	void __iomem *address;
	unsigned int cfg, topology, node_shift, maxcpus;

	/*
	 * CONFIG_X86_VSMP is not configured, so limit the number CPUs to the
	 * ones present in the first board, unless explicitly overridden by
	 * setup_max_cpus
	 */
	if (setup_max_cpus != NR_CPUS)
		return;

	/* Read the vSMP Foundation topology register */
	cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0);
	address = early_ioremap(cfg + TOPOLOGY_REGISTER_OFFSET, 4);
	if (WARN_ON(!address))
		return;

	topology = readl(address);
	node_shift = (topology >> 16) & 0x7;
	if (!node_shift)
		/* The value 0 should be decoded as 8 */
		node_shift = 8;
	maxcpus = (topology & ((1 << node_shift) - 1)) + 1;

	pr_info("vSMP CTL: Capping CPUs to %d (CONFIG_X86_VSMP is unset)\n",
		maxcpus);
	setup_max_cpus = maxcpus;
	early_iounmap(address, 4);
#endif
}

static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
{
	return hard_smp_processor_id() >> index_msb;
}

/*
 * In vSMP, all cpus should be capable of handling interrupts, regardless of
 * the APIC used.
 */
218 219
static void fill_vector_allocation_domain(int cpu, struct cpumask *retmask,
					  const struct cpumask *mask)
220 221 222 223
{
	cpumask_setall(retmask);
}

224 225 226 227
static void vsmp_apic_post_init(void)
{
	/* need to update phys_pkg_id */
	apic->phys_pkg_id = apicid_phys_pkg_id;
O
Oren Twaig 已提交
228 229 230

	if (!irq_routing_comply)
		apic->vector_allocation_domain = fill_vector_allocation_domain;
231 232
}

233 234
void __init vsmp_init(void)
{
235
	detect_vsmp_box();
236 237 238
	if (!is_vsmp_box())
		return;

239 240
	x86_platform.apic_post_init = vsmp_apic_post_init;

241 242
	vsmp_cap_cpus();

243
	set_vsmp_pv_ops();
244
	return;
245
}