Commit 59954972 authored by Linus Torvalds

Merge tag 'powerpc-6.0-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

 - Fix handling of PCI domains in /proc on 32-bit systems using the
   recently added support for numbering buses from zero for each domain.

 - A fix and a revert for some changes to use READ/WRITE_ONCE() which
   caused problems with KASAN enabled due to sanitisation calls being
   introduced in low-level paths that can't cope with it.

 - Fix build errors on 32-bit caused by the syscall table being
   misaligned sometimes.

 - Two fixes to get IBM Cell native machines booting again, which had
   bit-rotted while my QS22 was temporarily out of action.

 - Fix the papr_scm driver to not assume the order of events returned by
   the hypervisor is stable, and a related compile fix.

Thanks to Aneesh Kumar K.V, Christophe Leroy, Jordan Niethe, Kajol Jain,
Masahiro Yamada, Nathan Chancellor, Pali Rohár, Vaibhav Jain, and Zhouyi
Zhou.

* tag 'powerpc-6.0-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/papr_scm: Ensure rc is always initialized in papr_scm_pmu_register()
  Revert "powerpc/irq: Don't open code irq_soft_mask helpers"
  powerpc: Fix hard_irq_disable() with sanitizer
  powerpc/rtas: Fix RTAS MSR[HV] handling for Cell
  Revert "powerpc: Remove unused FW_FEATURE_NATIVE references"
  powerpc: align syscall table for ppc32
  powerpc/pci: Enable PCI domains in /proc when PCI bus numbers are not unique
  powerpc/papr_scm: Fix nvdimm event mappings
@@ -83,6 +83,8 @@ enum {
         FW_FEATURE_POWERNV_ALWAYS = 0,
         FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
         FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
+        FW_FEATURE_NATIVE_POSSIBLE = 0,
+        FW_FEATURE_NATIVE_ALWAYS = 0,
         FW_FEATURE_POSSIBLE =
 #ifdef CONFIG_PPC_PSERIES
         FW_FEATURE_PSERIES_POSSIBLE |
@@ -92,6 +94,9 @@ enum {
 #endif
 #ifdef CONFIG_PPC_PS3
         FW_FEATURE_PS3_POSSIBLE |
+#endif
+#ifdef CONFIG_PPC_HASH_MMU_NATIVE
+        FW_FEATURE_NATIVE_ALWAYS |
 #endif
         0,
         FW_FEATURE_ALWAYS =
@@ -103,6 +108,9 @@ enum {
 #endif
 #ifdef CONFIG_PPC_PS3
         FW_FEATURE_PS3_ALWAYS &
+#endif
+#ifdef CONFIG_PPC_HASH_MMU_NATIVE
+        FW_FEATURE_NATIVE_ALWAYS &
 #endif
         FW_FEATURE_POSSIBLE,
...
@@ -113,7 +113,14 @@ static inline void __hard_RI_enable(void)
 static inline notrace unsigned long irq_soft_mask_return(void)
 {
-        return READ_ONCE(local_paca->irq_soft_mask);
+        unsigned long flags;
+
+        asm volatile(
+                "lbz %0,%1(13)"
+                : "=r" (flags)
+                : "i" (offsetof(struct paca_struct, irq_soft_mask)));
+
+        return flags;
 }
 
 /*
@@ -140,24 +147,46 @@ static inline notrace void irq_soft_mask_set(unsigned long mask)
         if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
                 WARN_ON(mask && !(mask & IRQS_DISABLED));
 
-        WRITE_ONCE(local_paca->irq_soft_mask, mask);
-        barrier();
+        asm volatile(
+                "stb %0,%1(13)"
+                :
+                : "r" (mask),
+                  "i" (offsetof(struct paca_struct, irq_soft_mask))
+                : "memory");
 }
 
 static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
 {
-        unsigned long flags = irq_soft_mask_return();
-
-        irq_soft_mask_set(mask);
+        unsigned long flags;
+
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+        WARN_ON(mask && !(mask & IRQS_DISABLED));
+#endif
+
+        asm volatile(
+                "lbz %0,%1(13); stb %2,%1(13)"
+                : "=&r" (flags)
+                : "i" (offsetof(struct paca_struct, irq_soft_mask)),
+                  "r" (mask)
+                : "memory");
 
         return flags;
 }
 
 static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
 {
-        unsigned long flags = irq_soft_mask_return();
-
-        irq_soft_mask_set(flags | mask);
+        unsigned long flags, tmp;
+
+        asm volatile(
+                "lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
+                : "=&r" (flags), "=r" (tmp)
+                : "i" (offsetof(struct paca_struct, irq_soft_mask)),
+                  "r" (mask)
+                : "memory");
+
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+        WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
+#endif
 
         return flags;
 }
@@ -282,7 +311,8 @@ static inline bool pmi_irq_pending(void)
         flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);           \
         local_paca->irq_happened |= PACA_IRQ_HARD_DIS;                 \
         if (!arch_irqs_disabled_flags(flags)) {                        \
-                WRITE_ONCE(local_paca->saved_r1, current_stack_pointer);\
+                asm volatile("std%X0 %1,%0" : "=m" (local_paca->saved_r1) \
+                             : "r" (current_stack_pointer));           \
                 trace_hardirqs_off();                                  \
         }                                                              \
 } while(0)
...
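The restored helpers read and write local_paca->irq_soft_mask through hand-written lbz/stb instructions on r13 (the paca pointer) instead of READ_ONCE()/WRITE_ONCE(). A rough, illustrative C equivalent of irq_soft_mask_or_return() (not the kernel code, just a sketch of what the asm does) would be:

        /* Illustrative sketch only: r13 always points at the per-CPU paca,
         * so the asm is a plain byte load/store at a fixed offset. */
        static inline unsigned long irq_soft_mask_or_return_sketch(unsigned long mask)
        {
                unsigned long flags = local_paca->irq_soft_mask;  /* lbz %0,off(13) */

                local_paca->irq_soft_mask = flags | mask;         /* or; stb        */
                return flags;
        }

The point of open-coding the access in asm is that KASAN only instruments memory accesses the compiler can see; a READ_ONCE()/WRITE_ONCE() access here picks up sanitizer calls, which these low-level interrupt-masking paths cannot tolerate.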
@@ -245,6 +245,15 @@ static int __init pcibios_init(void)
 
         printk(KERN_INFO "PCI: Probing PCI hardware\n");
 
+#ifdef CONFIG_PPC_PCI_BUS_NUM_DOMAIN_DEPENDENT
+        /*
+         * Enable PCI domains in /proc when PCI bus numbers are not unique
+         * across all PCI domains to prevent conflicts. And keep PCI domain 0
+         * backward compatible in /proc for video cards.
+         */
+        pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);
+#endif
+
         if (pci_has_flag(PCI_REASSIGN_ALL_BUS))
                 pci_assign_all_buses = 1;
...
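For context, the two flags only change how /proc/bus/pci entries are named. A rough sketch of the naming logic (based on how the generic /proc PCI code names bus directories; not part of this commit, and the exact code may differ):

        char name[16];

        if (pci_proc_domain(bus))   /* domains enabled, and not compat domain 0 */
                sprintf(name, "%04x:%02x", pci_domain_nr(bus), bus->number);
        else
                sprintf(name, "%02x", bus->number);

So with PCI_ENABLE_PROC_DOMAINS a second domain appears as, say, /proc/bus/pci/0001:00/ instead of colliding with bus 00 of domain 0, while PCI_COMPAT_DOMAIN_0 keeps domain 0 as plain /proc/bus/pci/00/ for old userspace such as X servers probing video cards.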
@@ -109,8 +109,12 @@ __enter_rtas:
          * its critical regions (as specified in PAPR+ section 7.2.1). MSR[S]
          * is not impacted by RFI_TO_KERNEL (only urfid can unset it). So if
          * MSR[S] is set, it will remain when entering RTAS.
+         * If we're in HV mode, RTAS must also run in HV mode, so extract MSR_HV
+         * from the saved MSR value and insert into the value RTAS will use.
          */
+        extrdi  r0, r6, 1, 63 - MSR_HV_LG
         LOAD_REG_IMMEDIATE(r6, MSR_ME | MSR_RI)
+        insrdi  r6, r0, 1, 63 - MSR_HV_LG
         li      r0,0
         mtmsrd  r0,1    /* disable RI before using SRR0/1 */
...
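The two added instructions extract the MSR_HV bit from the MSR saved in r6 and insert it into the MSR value RTAS will be entered with. In C terms the computation is simply (a sketch, not actual kernel code):

        rtas_msr = (MSR_ME | MSR_RI) | (saved_msr & MSR_HV);

In other words, on bare-metal (hypervisor-mode) machines such as native Cell, RTAS keeps running with MSR[HV] set rather than being dropped into a mode the firmware does not expect.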
@@ -18,6 +18,7 @@
         .p2align        3
 #define __SYSCALL(nr, entry)    .8byte entry
 #else
+        .p2align        2
 #define __SYSCALL(nr, entry)    .long entry
 #endif
...
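For reference, .p2align 2 pads the table to a 2^2 = 4-byte boundary, matching the 4-byte .long entries used on ppc32, just as the existing .p2align 3 (8 bytes) matches the .8byte entries on 64-bit. Without it the 32-bit syscall table could start misaligned, which is what produced the intermittent 32-bit build errors mentioned above.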
@@ -124,9 +124,6 @@ struct papr_scm_priv {
 
         /* The bits which needs to be overridden */
         u64 health_bitmap_inject_mask;
-
-        /* array to have event_code and stat_id mappings */
-        u8 *nvdimm_events_map;
 };
 
 static int papr_scm_pmem_flush(struct nd_region *nd_region,
@@ -350,6 +347,25 @@ static ssize_t drc_pmem_query_stats(struct papr_scm_priv *p,
 #ifdef CONFIG_PERF_EVENTS
 #define to_nvdimm_pmu(_pmu)     container_of(_pmu, struct nvdimm_pmu, pmu)
 
+static const char * const nvdimm_events_map[] = {
+        [1] = "CtlResCt",
+        [2] = "CtlResTm",
+        [3] = "PonSecs ",
+        [4] = "MemLife ",
+        [5] = "CritRscU",
+        [6] = "HostLCnt",
+        [7] = "HostSCnt",
+        [8] = "HostSDur",
+        [9] = "HostLDur",
+        [10] = "MedRCnt ",
+        [11] = "MedWCnt ",
+        [12] = "MedRDur ",
+        [13] = "MedWDur ",
+        [14] = "CchRHCnt",
+        [15] = "CchWHCnt",
+        [16] = "FastWCnt",
+};
+
 static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev, u64 *count)
 {
         struct papr_scm_perf_stat *stat;
@@ -357,11 +373,15 @@ static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev,
         struct papr_scm_priv *p = dev_get_drvdata(dev);
         int rc, size;
 
+        /* Invalid eventcode */
+        if (event->attr.config == 0 || event->attr.config >= ARRAY_SIZE(nvdimm_events_map))
+                return -EINVAL;
+
         /* Allocate request buffer enough to hold single performance stat */
         size = sizeof(struct papr_scm_perf_stats) +
                 sizeof(struct papr_scm_perf_stat);
 
-        if (!p || !p->nvdimm_events_map)
+        if (!p)
                 return -EINVAL;
 
         stats = kzalloc(size, GFP_KERNEL);
@@ -370,7 +390,7 @@ static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev,
 
         stat = &stats->scm_statistic[0];
         memcpy(&stat->stat_id,
-               &p->nvdimm_events_map[event->attr.config * sizeof(stat->stat_id)],
+               nvdimm_events_map[event->attr.config],
                sizeof(stat->stat_id));
         stat->stat_val = 0;
 
@@ -458,56 +478,6 @@ static void papr_scm_pmu_del(struct perf_event *event, int flags)
         papr_scm_pmu_read(event);
 }
 
-static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu *nd_pmu)
-{
-        struct papr_scm_perf_stat *stat;
-        struct papr_scm_perf_stats *stats;
-        u32 available_events;
-        int index, rc = 0;
-
-        if (!p->stat_buffer_len)
-                return -ENOENT;
-
-        available_events = (p->stat_buffer_len - sizeof(struct papr_scm_perf_stats))
-                        / sizeof(struct papr_scm_perf_stat);
-        if (available_events == 0)
-                return -EOPNOTSUPP;
-
-        /* Allocate the buffer for phyp where stats are written */
-        stats = kzalloc(p->stat_buffer_len, GFP_KERNEL);
-        if (!stats) {
-                rc = -ENOMEM;
-                return rc;
-        }
-
-        /* Called to get list of events supported */
-        rc = drc_pmem_query_stats(p, stats, 0);
-        if (rc)
-                goto out;
-
-        /*
-         * Allocate memory and populate nvdimm_event_map.
-         * Allocate an extra element for NULL entry
-         */
-        p->nvdimm_events_map = kcalloc(available_events + 1,
-                                       sizeof(stat->stat_id),
-                                       GFP_KERNEL);
-        if (!p->nvdimm_events_map) {
-                rc = -ENOMEM;
-                goto out;
-        }
-
-        /* Copy all stat_ids to event map */
-        for (index = 0, stat = stats->scm_statistic;
-             index < available_events; index++, ++stat) {
-                memcpy(&p->nvdimm_events_map[index * sizeof(stat->stat_id)],
-                       &stat->stat_id, sizeof(stat->stat_id));
-        }
-out:
-        kfree(stats);
-        return rc;
-}
-
 static void papr_scm_pmu_register(struct papr_scm_priv *p)
 {
         struct nvdimm_pmu *nd_pmu;
@@ -519,9 +489,10 @@ static void papr_scm_pmu_register(struct papr_scm_priv *p)
                 goto pmu_err_print;
         }
 
-        rc = papr_scm_pmu_check_events(p, nd_pmu);
-        if (rc)
+        if (!p->stat_buffer_len) {
+                rc = -ENOENT;
                 goto pmu_check_events_err;
+        }
 
         nd_pmu->pmu.task_ctx_nr = perf_invalid_context;
         nd_pmu->pmu.name = nvdimm_name(p->nvdimm);
@@ -539,7 +510,7 @@ static void papr_scm_pmu_register(struct papr_scm_priv *p)
 
         rc = register_nvdimm_pmu(nd_pmu, p->pdev);
         if (rc)
-                goto pmu_register_err;
+                goto pmu_check_events_err;
 
         /*
          * Set archdata.priv value to nvdimm_pmu structure, to handle the
@@ -548,8 +519,6 @@ static void papr_scm_pmu_register(struct papr_scm_priv *p)
         p->pdev->archdata.priv = nd_pmu;
         return;
 
-pmu_register_err:
-        kfree(p->nvdimm_events_map);
 pmu_check_events_err:
         kfree(nd_pmu);
 pmu_err_print:
@@ -1560,7 +1529,6 @@ static int papr_scm_remove(struct platform_device *pdev)
                 unregister_nvdimm_pmu(pdev->archdata.priv);
 
         pdev->archdata.priv = NULL;
-        kfree(p->nvdimm_events_map);
         kfree(p->bus_desc.provider_name);
         kfree(p);
...
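The underlying bug here: the removed papr_scm_pmu_check_events() built the event-code-to-stat_id map from whatever order the hypervisor returned the statistics, so a given perf event code could refer to different statistics on different boots or machines. With the static nvdimm_events_map[] table above, an event code is simply an index into a fixed list of stat_id strings, roughly (a sketch of the lookup, not verbatim from the patch):

        if (config == 0 || config >= ARRAY_SIZE(nvdimm_events_map))
                return -EINVAL;   /* unknown event code */
        memcpy(&stat->stat_id, nvdimm_events_map[config], sizeof(stat->stat_id));

This also removes the per-device nvdimm_events_map allocation, which is why the kfree() calls in the register error path and in papr_scm_remove() go away.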