提交 a3b5998d 编写于 作者: Y Yang Yingliang 提交者: Xie XiuQi

Revert "irq-gic-v3: Add support to init ts core GICR"

ascend inclusion
category: feature
bugzilla: NA
CVE: NA

------------

It's specific code for ascend, so remove it.

This reverts commit 62995ab3ec74ccec18b9e4523a74a331e7efa00d.
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
上级 245285bb
......@@ -3078,7 +3078,7 @@ CONFIG_ARM_GIC_V3=y
CONFIG_ARM_GIC_V3_ITS=y
CONFIG_ARM_GIC_V3_ITS_PCI=y
CONFIG_HISILICON_IRQ_MBIGEN=y
CONFIG_ASCEND_INIT_ALL_GICR=y
CONFIG_INIT_ALL_GICR=y
CONFIG_PARTITION_PERCPU=y
# CONFIG_IPACK_BUS is not set
CONFIG_RESET_CONTROLLER=y
......
......@@ -4683,7 +4683,6 @@ CONFIG_ARM_GIC_V3=y
CONFIG_ARM_GIC_V3_ITS=y
CONFIG_ARM_GIC_V3_ITS_PCI=y
CONFIG_HISILICON_IRQ_MBIGEN=y
# CONFIG_ASCEND_INIT_ALL_GICR is not set
CONFIG_PARTITION_PERCPU=y
CONFIG_QCOM_IRQ_COMBINER=y
# CONFIG_QCOM_PDC is not set
......
......@@ -2615,7 +2615,7 @@ CONFIG_ARM_GIC_V3=y
CONFIG_ARM_GIC_V3_ITS=y
CONFIG_ARM_GIC_V3_ITS_PCI=y
CONFIG_HISILICON_IRQ_MBIGEN=y
# CONFIG_ASCEND_INIT_ALL_GICR is not set
# CONFIG_INIT_ALL_GICR is not set
CONFIG_PARTITION_PERCPU=y
# CONFIG_IPACK_BUS is not set
CONFIG_RESET_CONTROLLER=y
......
......@@ -2244,7 +2244,6 @@ CONFIG_ARM_GIC_V3=y
CONFIG_ARM_GIC_V3_ITS=y
CONFIG_ARM_GIC_V3_ITS_PCI=y
CONFIG_HISILICON_IRQ_MBIGEN=y
# CONFIG_ASCEND_INIT_ALL_GICR is not set
CONFIG_PARTITION_PERCPU=y
# CONFIG_IPACK_BUS is not set
CONFIG_RESET_CONTROLLER=y
......
......@@ -4655,7 +4655,6 @@ CONFIG_ARM_GIC_V3=y
CONFIG_ARM_GIC_V3_ITS=y
CONFIG_ARM_GIC_V3_ITS_PCI=y
CONFIG_HISILICON_IRQ_MBIGEN=y
# CONFIG_ASCEND_INIT_ALL_GICR is not set
CONFIG_PARTITION_PERCPU=y
CONFIG_QCOM_IRQ_COMBINER=y
# CONFIG_QCOM_PDC is not set
......
......@@ -145,13 +145,6 @@ config HISILICON_IRQ_MBIGEN
select ARM_GIC_V3
select ARM_GIC_V3_ITS
config ASCEND_INIT_ALL_GICR
bool "Enable init all GICR for Ascend"
depends on ARM_GIC_V3
depends on ARM_GIC_V3_ITS
depends on ARCH_ASCEND
default n
config IMGPDC_IRQ
bool
select GENERIC_IRQ_CHIP
......
......@@ -1171,15 +1171,9 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
}
}
#ifdef CONFIG_INIT_ALL_GICR
cpu = find_first_bit(cpumask_bits(mask_val), NR_CPUS);
if (cpu >= gic_rdists->nr_gicr)
#else
cpu = cpumask_any_and(mask_val, cpu_mask);
if (cpu >= nr_cpu_ids)
#endif
return -EINVAL;
/* don't set the affinity when the target cpu is same as current one */
......@@ -2018,19 +2012,8 @@ static int its_alloc_collections(struct its_node *its)
{
int i;
#ifdef CONFIG_INIT_ALL_GICR
int size = gic_rdists->nr_gicr;
if (size < nr_cpu_ids) {
pr_err("Number of GICR is smaller than nr_cpu_ids.\n");
return -EINVAL;
}
its->collections = kcalloc(size, sizeof(*its->collections),
GFP_KERNEL);
#else
its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
GFP_KERNEL);
#endif
if (!its->collections)
return -ENOMEM;
......@@ -2309,169 +2292,6 @@ static void its_cpu_init_collections(void)
raw_spin_unlock(&its_lock);
}
#ifdef CONFIG_INIT_ALL_GICR
static void its_cpu_init_lpis_others(void __iomem *rbase, int cpu)
{
struct page *pend_page;
phys_addr_t paddr;
u64 val, tmp;
val = readl_relaxed(rbase + GICR_CTLR);
if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
(val & GICR_CTLR_ENABLE_LPIS)) {
/*
* Check that we get the same property table on all
* RDs. If we don't, this is hopeless.
*/
paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
paddr &= GENMASK_ULL(51, 12);
if (WARN_ON(gic_rdists->prop_table_pa != paddr))
add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
paddr &= GENMASK_ULL(51, 16);
WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
its_free_pending_table(gic_data_rdist()->pend_page);
gic_data_rdist()->pend_page = NULL;
goto out;
}
/* If we didn't allocate the pending table yet, do it now */
pend_page = its_allocate_pending_table(GFP_NOWAIT);
if (!pend_page) {
pr_err("Failed to allocate PENDBASE for GICR:%p\n", rbase);
return;
}
paddr = page_to_phys(pend_page);
pr_info("GICR:%p using LPI pending table @%pa\n",
rbase, &paddr);
WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
/* Disable LPIs */
val = readl_relaxed(rbase + GICR_CTLR);
val &= ~GICR_CTLR_ENABLE_LPIS;
writel_relaxed(val, rbase + GICR_CTLR);
/*
* Make sure any change to the table is observable by the GIC.
*/
dsb(sy);
/* set PROPBASE */
val = (gic_rdists->prop_table_pa |
GICR_PROPBASER_InnerShareable |
GICR_PROPBASER_RaWaWb |
((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
gicr_write_propbaser(val, rbase + GICR_PROPBASER);
tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
/*
* The HW reports non-shareable, we must
* remove the cacheability attributes as
* well.
*/
val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
GICR_PROPBASER_CACHEABILITY_MASK);
val |= GICR_PROPBASER_nC;
gicr_write_propbaser(val, rbase + GICR_PROPBASER);
}
pr_info_once("GIC: using cache flushing for LPI property table\n");
gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
}
/* set PENDBASE */
val = (page_to_phys(pend_page) |
GICR_PENDBASER_InnerShareable |
GICR_PENDBASER_RaWaWb);
gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
/*
* The HW reports non-shareable, we must remove the
* cacheability attributes as well.
*/
val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
GICR_PENDBASER_CACHEABILITY_MASK);
val |= GICR_PENDBASER_nC;
gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
}
/* Enable LPIs */
val = readl_relaxed(rbase + GICR_CTLR);
val |= GICR_CTLR_ENABLE_LPIS;
writel_relaxed(val, rbase + GICR_CTLR);
/* Make sure the GIC has seen the above */
dsb(sy);
out:
pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
cpu, pend_page ? "allocated" : "reserved", &paddr);
}
static void its_cpu_init_collection_others(void __iomem *rbase,
phys_addr_t phys_base, int cpu)
{
struct its_node *its;
raw_spin_lock(&its_lock);
list_for_each_entry(its, &its_nodes, entry) {
u64 target;
/*
* We now have to bind each collection to its target
* redistributor.
*/
if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
/*
* This ITS wants the physical address of the
* redistributor.
*/
target = phys_base;
} else {
/*
* This ITS wants a linear CPU number.
*/
target = gic_read_typer(rbase + GICR_TYPER);
target = GICR_TYPER_CPU_NUMBER(target) << 16;
}
/* Perform collection mapping */
its->collections[cpu].target_address = target;
its->collections[cpu].col_id = cpu;
its_send_mapc(its, &its->collections[cpu], 1);
its_send_invall(its, &its->collections[cpu]);
}
raw_spin_unlock(&its_lock);
}
int its_cpu_init_others(void __iomem *base, phys_addr_t phys_base, int cpu)
{
if (!list_empty(&its_nodes)) {
if (!(gic_read_typer(base + GICR_TYPER) & GICR_TYPER_PLPIS)) {
pr_err("GICR:%p: LPIs not supported\n", base);
return -ENXIO;
}
its_cpu_init_lpis_others(base, cpu);
its_cpu_init_collection_others(base, phys_base, cpu);
}
return 0;
}
#endif
static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
{
struct its_device *its_dev = NULL, *tmp;
......@@ -4167,9 +3987,6 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
bool has_v4 = false;
int err;
#ifdef CONFIG_INIT_ALL_GICR
gic_rdists = rdists;
#endif
its_parent = parent_domain;
of_node = to_of_node(handle);
if (of_node)
......
......@@ -882,115 +882,6 @@ static void gic_cpu_init(void)
gic_cpu_sys_reg_init();
}
#ifdef CONFIG_INIT_ALL_GICR
static void gic_compute_nr_gicr(void)
{
int i;
for (i = 0; i < gic_data.nr_redist_regions; i++) {
u64 typer;
void __iomem *ptr = gic_data.redist_regions[i].redist_base;
do {
typer = gic_read_typer(ptr + GICR_TYPER);
gic_data.rdists.nr_gicr++;
if (gic_data.redist_regions[i].single_redist)
break;
if (gic_data.redist_stride) {
ptr += gic_data.redist_stride;
} else {
ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
if (typer & GICR_TYPER_VLPIS)
/* Skip VLPI_base + reserved page */
ptr += SZ_64K * 2;
}
} while (!(typer & GICR_TYPER_LAST));
}
}
static void gic_enable_redist_others(void __iomem *rbase, bool enable)
{
u32 count = 1000000; /* 1s! */
u32 val;
val = readl_relaxed(rbase + GICR_WAKER);
if (enable)
/* Wake up this CPU redistributor */
val &= ~GICR_WAKER_ProcessorSleep;
else
val |= GICR_WAKER_ProcessorSleep;
writel_relaxed(val, rbase + GICR_WAKER);
if (!enable) { /* Check that GICR_WAKER is writeable */
val = readl_relaxed(rbase + GICR_WAKER);
if (!(val & GICR_WAKER_ProcessorSleep))
return; /* No PM support in this redistributor */
}
while (--count) {
val = readl_relaxed(rbase + GICR_WAKER);
if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
break;
cpu_relax();
udelay(1);
};
if (!count)
pr_err_ratelimited("redistributor failed to %s...\n",
enable ? "wakeup" : "sleep");
}
static void gic_cpu_init_others(void)
{
int i, cpu = 0;
for (i = 0; i < gic_data.nr_redist_regions; i++) {
u64 typer;
void __iomem *redist_base = gic_data.redist_regions[i].redist_base;
phys_addr_t phys_base = gic_data.redist_regions[i].phys_base;
do {
typer = gic_read_typer(redist_base + GICR_TYPER);
if (cpu >= nr_cpu_ids) {
if (cpu < gic_data.rdists.nr_gicr) {
gic_enable_redist_others(redist_base, true);
if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
its_cpu_init_others(redist_base, phys_base, cpu);
} else {
pr_err("CPU number is larger than GICR number.\n");
}
}
cpu++;
if (gic_data.redist_regions[i].single_redist)
break;
if (gic_data.redist_stride) {
redist_base += gic_data.redist_stride;
phys_base += gic_data.redist_stride;
} else {
/* Skip RD_base + SGI_base */
redist_base += SZ_64K * 2;
phys_base += SZ_64K * 2;
if (typer & GICR_TYPER_VLPIS) {
/* Skip VLPI_base + reserved page */
redist_base += SZ_64K * 2;
phys_base += SZ_64K * 2;
}
}
} while (!(typer & GICR_TYPER_LAST));
}
}
#else
static inline void gic_compute_nr_gicr(void) {}
static inline void gic_cpu_init_others(void) {}
#endif
#ifdef CONFIG_SMP
#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
......@@ -1434,7 +1325,6 @@ static int __init gic_init_bases(void __iomem *dist_base,
gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
gic_data.rdists.has_vlpis = true;
gic_data.rdists.has_direct_lpi = true;
gic_compute_nr_gicr();
if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
err = -ENOMEM;
......@@ -1476,8 +1366,6 @@ static int __init gic_init_bases(void __iomem *dist_base,
its_cpu_init();
}
gic_cpu_init_others();
return 0;
out_free:
......
......@@ -593,17 +593,11 @@ struct rdists {
u32 gicd_typer;
bool has_vlpis;
bool has_direct_lpi;
#ifdef CONFIG_INIT_ALL_GICR
int nr_gicr;
#endif
};
struct irq_domain;
struct fwnode_handle;
int its_cpu_init(void);
#ifdef CONFIG_INIT_ALL_GICR
int its_cpu_init_others(void __iomem *base, phys_addr_t phys_base, int idx);
#endif
int its_init(struct fwnode_handle *handle, struct rdists *rdists,
struct irq_domain *domain);
int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent);
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册