Commit ee78ad78 authored by Linus Torvalds

Merge tag 'powerpc-4.16-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "The main attraction is a fix for a bug in the new drmem code, which
  was causing an oops on boot on some versions of Qemu.

  There's also a fix for XIVE (Power9 interrupt controller) on KVM, as
  well as a few other minor fixes.

  Thanks to: Corentin Labbe, Cyril Bur, Cédric Le Goater, Daniel Black,
  Nathan Fontenot, Nicholas Piggin"

* tag 'powerpc-4.16-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/pseries: Check for zero filled ibm,dynamic-memory property
  powerpc/pseries: Add empty update_numa_cpu_lookup_table() for NUMA=n
  powerpc/powernv: IMC fix out of bounds memory access at shutdown
  powerpc/xive: Use hw CPU ids when configuring the CPU queues
  powerpc: Expose TSCR via sysfs only on powernv
@@ -81,6 +81,9 @@ static inline int numa_update_cpu_topology(bool cpus_locked)
 {
        return 0;
 }
+
+static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {}
+
 #endif /* CONFIG_NUMA */

 #if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
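The hunk above follows the common kernel convention of supplying an empty static inline stub when a configuration option is disabled, so call sites build unchanged with CONFIG_NUMA=n. A minimal standalone sketch of that convention (the names below are illustrative only, not the kernel's):

#include <stdio.h>

/* Uncomment to mimic CONFIG_NUMA=y; leave commented for CONFIG_NUMA=n. */
/* #define CONFIG_NUMA 1 */

#ifdef CONFIG_NUMA
/* Real implementation: remember which node a CPU belongs to. */
static inline void update_cpu_node(unsigned int cpu, int node)
{
        printf("cpu %u -> node %d\n", cpu, node);
}
#else
/* Empty stub: callers still compile, and the call optimizes away. */
static inline void update_cpu_node(unsigned int cpu, int node) { }
#endif

int main(void)
{
        update_cpu_node(0, 1);  /* no #ifdef needed at the call site */
        return 0;
}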
@@ -788,7 +788,8 @@ static int register_cpu_online(unsigned int cpu)
        if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
                device_create_file(s, &dev_attr_pir);

-       if (cpu_has_feature(CPU_FTR_ARCH_206))
+       if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+               !firmware_has_feature(FW_FEATURE_LPAR))
                device_create_file(s, &dev_attr_tscr);
 #endif /* CONFIG_PPC64 */

@@ -873,7 +874,8 @@ static int unregister_cpu_online(unsigned int cpu)
        if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
                device_remove_file(s, &dev_attr_pir);

-       if (cpu_has_feature(CPU_FTR_ARCH_206))
+       if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+               !firmware_has_feature(FW_FEATURE_LPAR))
                device_remove_file(s, &dev_attr_tscr);
 #endif /* CONFIG_PPC64 */
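For context on the tscr change: TSCR is a hypervisor-privileged SPR, so the attribute should only appear when running bare metal (powernv), not inside a guest/LPAR. A rough standalone illustration of gating an attribute on both a CPU feature and a firmware feature (the helper names here are made up for the sketch):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for cpu_has_feature()/firmware_has_feature(). */
static bool cpu_has_arch_206 = true;
static bool firmware_is_lpar = true;   /* running as a guest/LPAR */

static void create_attr(const char *name)
{
        printf("created sysfs attribute: %s\n", name);
}

int main(void)
{
        /* Expose the hypervisor-privileged register only on bare metal. */
        if (cpu_has_arch_206 && !firmware_is_lpar)
                create_attr("tscr");
        else
                printf("tscr not exposed (guest or unsupported CPU)\n");
        return 0;
}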
@@ -216,6 +216,8 @@ static void __init __walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm,
        u32 i, n_lmbs;

        n_lmbs = of_read_number(prop++, 1);
+       if (n_lmbs == 0)
+               return;

        for (i = 0; i < n_lmbs; i++) {
                read_drconf_v1_cell(&lmb, &prop);

@@ -245,6 +247,8 @@ static void __init __walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm,
        u32 i, j, lmb_sets;

        lmb_sets = of_read_number(prop++, 1);
+       if (lmb_sets == 0)
+               return;

        for (i = 0; i < lmb_sets; i++) {
                read_drconf_v2_cell(&dr_cell, &prop);

@@ -354,6 +358,8 @@ static void __init init_drmem_v1_lmbs(const __be32 *prop)
        struct drmem_lmb *lmb;

        drmem_info->n_lmbs = of_read_number(prop++, 1);
+       if (drmem_info->n_lmbs == 0)
+               return;

        drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
                                           GFP_KERNEL);

@@ -373,6 +379,8 @@ static void __init init_drmem_v2_lmbs(const __be32 *prop)
        int lmb_index;

        lmb_sets = of_read_number(prop++, 1);
+       if (lmb_sets == 0)
+               return;

        /* first pass, calculate the number of LMBs */
        p = prop;
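The drmem hunks all add the same guard: if the count cell read from the ibm,dynamic-memory / ibm,dynamic-memory-v2 property is zero (as seen with the zero-filled property some QEMU versions produce), bail out before walking or allocating LMB data. A small userspace sketch of the same "count, then entries" parsing pattern with the zero-count guard (structure and names are illustrative, not the kernel's drmem API):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <arpa/inet.h>   /* ntohl(): device tree cells are big-endian */

struct lmb { uint32_t drc_index; };

/* Parse "count, then count entries" out of a property-style buffer.
 * Returns a heap array of *count entries, or NULL if there are none. */
static struct lmb *parse_lmbs(const uint32_t *prop, uint32_t *count)
{
        uint32_t n = ntohl(*prop++);

        *count = n;
        if (n == 0)             /* zero-filled property: nothing to walk */
                return NULL;

        struct lmb *lmbs = calloc(n, sizeof(*lmbs));
        if (!lmbs)
                return NULL;

        for (uint32_t i = 0; i < n; i++)
                lmbs[i].drc_index = ntohl(prop[i]);
        return lmbs;
}

int main(void)
{
        /* A "zero filled" property: the leading count cell is 0. */
        uint32_t zero_prop[4] = { 0, 0, 0, 0 };
        uint32_t count;
        struct lmb *lmbs = parse_lmbs(zero_prop, &count);

        printf("parsed %u LMBs (%s)\n", count, lmbs ? "allocated" : "skipped");
        free(lmbs);
        return 0;
}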
@@ -199,9 +199,11 @@ static void disable_nest_pmu_counters(void)
        const struct cpumask *l_cpumask;

        get_online_cpus();
-       for_each_online_node(nid) {
+       for_each_node_with_cpus(nid) {
                l_cpumask = cpumask_of_node(nid);
-               cpu = cpumask_first(l_cpumask);
+               cpu = cpumask_first_and(l_cpumask, cpu_online_mask);
+               if (cpu >= nr_cpu_ids)
+                       continue;
                opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
                                       get_hard_smp_processor_id(cpu));
        }
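The IMC hunk changes the shutdown path to consider only nodes that actually have CPUs and to pick the first online CPU in each node's mask, skipping the node when there is none; otherwise an out-of-range CPU number could be handed to get_hard_smp_processor_id() and read past the end of its table. A standalone sketch of that lookup-and-skip pattern (the masks and sizes are invented for the example):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS  8
#define NR_NODES 2

/* Which CPUs belong to each node, and which CPUs are online. */
static const bool node_cpu[NR_NODES][NR_CPUS] = {
        { 1, 1, 1, 1, 0, 0, 0, 0 },   /* node 0: cpus 0-3 */
        { 0, 0, 0, 0, 1, 1, 1, 1 },   /* node 1: cpus 4-7 */
};
static const bool cpu_online[NR_CPUS] = { 1, 1, 1, 1, 0, 0, 0, 0 };

/* First CPU that is both in the node and online, or NR_CPUS if none. */
static int first_online_cpu_of_node(int nid)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if (node_cpu[nid][cpu] && cpu_online[cpu])
                        return cpu;
        return NR_CPUS;
}

int main(void)
{
        for (int nid = 0; nid < NR_NODES; nid++) {
                int cpu = first_online_cpu_of_node(nid);

                /* No online CPU on this node: skip it rather than
                 * indexing anything with the out-of-range value. */
                if (cpu >= NR_CPUS) {
                        printf("node %d: skipped\n", nid);
                        continue;
                }
                printf("node %d: stop counters via cpu %d\n", nid, cpu);
        }
        return 0;
}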
@@ -356,7 +356,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,

        rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
        if (rc) {
-               pr_err("Error %lld getting queue info prio %d\n", rc, prio);
+               pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
+                      target, prio);
                rc = -EIO;
                goto fail;
        }

@@ -370,7 +371,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
        /* Configure and enable the queue in HW */
        rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
        if (rc) {
-               pr_err("Error %lld setting queue for prio %d\n", rc, prio);
+               pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
+                      target, prio);
                rc = -EIO;
        } else {
                q->qpage = qpage;

@@ -389,8 +391,8 @@ static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
        if (IS_ERR(qpage))
                return PTR_ERR(qpage);

-       return xive_spapr_configure_queue(cpu, q, prio, qpage,
-                                         xive_queue_shift);
+       return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
+                                         q, prio, qpage, xive_queue_shift);
 }

 static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
@@ -399,10 +401,12 @@ static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
        struct xive_q *q = &xc->queue[prio];
        unsigned int alloc_order;
        long rc;
+       int hw_cpu = get_hard_smp_processor_id(cpu);

-       rc = plpar_int_set_queue_config(0, cpu, prio, 0, 0);
+       rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
        if (rc)
-               pr_err("Error %ld setting queue for prio %d\n", rc, prio);
+               pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
+                      hw_cpu, prio);

        alloc_order = xive_alloc_order(xive_queue_shift);
        free_pages((unsigned long)q->qpage, alloc_order);
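The XIVE hunks make the spapr backend hand the hypervisor the hardware CPU id (via get_hard_smp_processor_id()) rather than the Linux logical CPU number; the two numberings generally differ, so queues could be configured against the wrong target. A toy sketch of why the translation matters (the mapping table and helper below are hypothetical):

#include <stdio.h>

/* Hypothetical logical-CPU -> hardware-thread-id mapping; on pseries the
 * two numbering schemes are generally not the same. */
static const int hard_id_of[4] = { 8, 10, 12, 14 };

/* Stand-in for a firmware/hypervisor call that expects a *hardware* id. */
static int configure_queue(int hw_cpu, int prio)
{
        printf("configuring queue for hw cpu %d prio %d\n", hw_cpu, prio);
        return 0;
}

int main(void)
{
        int cpu = 2;    /* Linux logical CPU number */

        /* Bug pattern: passing the logical number targets the wrong thread:
         * configure_queue(cpu, 7); */

        /* Fixed pattern: translate to the hardware id first. */
        configure_queue(hard_id_of[cpu], 7);
        return 0;
}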