Commit c2d5a08f authored by Yang Yingliang, committed by Yang Yingliang

irq-gic-v3: Add support to init ts core GICR

ascend inclusion
category: feature
bugzilla: NA
CVE: NA

------------

On the Ascend platform, redistributors (GICRs) that are not managed by the
OS also need to be initialized by the OS.
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Xu Qiang <xuqiang36@huawei.com>
Signed-off-by: Lijun Fang <fanglijun3@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Xu Qiang <xuqiang36@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 3a82251a
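At a glance, the patch wires the new path into GIC init as follows (a call-flow sketch reconstructed from the diff below; it assumes CONFIG_ASCEND_INIT_ALL_GICR=y and a matching HiSilicon MADT entry):

/*
 * gic_init_bases()
 *   gic_check_hisi_workaround()      MADT OEM match -> its_enable_init_all_gicr()
 *   gic_compute_nr_gicr()            count all GICR frames -> its_set_gicr_nr()
 *   ...
 *   its_cpu_init()                   per-CPU redistributors, unchanged
 *   gic_cpu_init_others()            remaining (non-CPU) redistributors:
 *     gic_enable_redist_others()       wake the GICR
 *     its_cpu_init_others()
 *       its_cpu_init_lpis_others()       set PROPBASER/PENDBASER, enable LPIs
 *       its_cpu_init_collection_others() MAPC each ITS collection to this GICR
 */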
drivers/irqchip/Kconfig
@@ -145,6 +145,16 @@ config HISILICON_IRQ_MBIGEN
select ARM_GIC_V3
select ARM_GIC_V3_ITS
if ASCEND_FEATURES
config ASCEND_INIT_ALL_GICR
bool "Enable init all GICR for Ascend"
depends on ARM_GIC_V3
depends on ARM_GIC_V3_ITS
default n
endif
config IMGPDC_IRQ
bool
select GENERIC_IRQ_CHIP
drivers/irqchip/irq-gic-v3-its.c
@@ -185,6 +185,14 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock);
static DEFINE_IDA(its_vpeid_ida);
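/*
 * With CONFIG_ASCEND_INIT_ALL_GICR=n these fall back to compile-time
 * constants, so the init_all_gicr paths below compile away.
 */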
#ifdef CONFIG_ASCEND_INIT_ALL_GICR
static bool init_all_gicr;
static int nr_gicr;
#else
#define init_all_gicr false
#define nr_gicr 0
#endif
#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
@@ -1156,6 +1164,7 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
unsigned int cpu;
unsigned int max_cpu;
const struct cpumask *cpu_mask = cpu_online_mask;
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
struct its_collection *target_col;
@@ -1175,8 +1184,14 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
}
cpu = cpumask_any_and(mask_val, cpu_mask);
max_cpu = nr_cpu_ids;
if (init_all_gicr) {
cpu = find_first_bit(cpumask_bits(mask_val), NR_CPUS);
max_cpu = nr_gicr;
}
- if (cpu >= nr_cpu_ids)
+ if (cpu >= max_cpu)
return -EINVAL;
/* don't set the affinity when the target cpu is same as current one */
@@ -2046,8 +2061,12 @@ static int its_alloc_tables(struct its_node *its)
static int its_alloc_collections(struct its_node *its)
{
int i;
int cpu_nr = nr_cpu_ids;
- its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
if (init_all_gicr)
cpu_nr = nr_gicr;
+ its->collections = kcalloc(cpu_nr, sizeof(*its->collections),
GFP_KERNEL);
if (!its->collections)
return -ENOMEM;
@@ -2327,6 +2346,195 @@ static void its_cpu_init_collections(void)
raw_spin_unlock(&its_lock);
}
#ifdef CONFIG_ASCEND_INIT_ALL_GICR
void its_set_gicr_nr(int nr)
{
nr_gicr = nr;
}
int its_gicr_nr(void)
{
return nr_gicr;
}
void its_enable_init_all_gicr(void)
{
init_all_gicr = true;
}
bool its_init_all_gicr(void)
{
return init_all_gicr;
}
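/*
 * Like its_cpu_init_lpis(), but for a redistributor that is not bound to
 * any logical CPU: @rbase is the GICR frame, @cpu its collection index.
 */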
static void its_cpu_init_lpis_others(void __iomem *rbase, int cpu)
{
struct page *pend_page = NULL; /* stays NULL when the pre-allocated path jumps to 'out' */
phys_addr_t paddr;
u64 val, tmp;
if (!init_all_gicr)
return;
val = readl_relaxed(rbase + GICR_CTLR);
if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
(val & GICR_CTLR_ENABLE_LPIS)) {
/*
* Check that we get the same property table on all
* RDs. If we don't, this is hopeless.
*/
paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
paddr &= GENMASK_ULL(51, 12);
if (WARN_ON(gic_rdists->prop_table_pa != paddr))
add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
paddr &= GENMASK_ULL(51, 16);
WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
its_free_pending_table(gic_data_rdist()->pend_page);
gic_data_rdist()->pend_page = NULL;
goto out;
}
/* If we didn't allocate the pending table yet, do it now */
pend_page = its_allocate_pending_table(GFP_NOWAIT);
if (!pend_page) {
pr_err("Failed to allocate PENDBASE for GICR:%p\n", rbase);
return;
}
paddr = page_to_phys(pend_page);
pr_info("GICR:%p using LPI pending table @%pa\n",
rbase, &paddr);
WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
/* Disable LPIs */
val = readl_relaxed(rbase + GICR_CTLR);
val &= ~GICR_CTLR_ENABLE_LPIS;
writel_relaxed(val, rbase + GICR_CTLR);
/*
* Make sure any change to the table is observable by the GIC.
*/
dsb(sy);
/* set PROPBASE */
val = (gic_rdists->prop_table_pa |
GICR_PROPBASER_InnerShareable |
GICR_PROPBASER_RaWaWb |
((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
gicr_write_propbaser(val, rbase + GICR_PROPBASER);
tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
/*
* The HW reports non-shareable, we must
* remove the cacheability attributes as
* well.
*/
val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
GICR_PROPBASER_CACHEABILITY_MASK);
val |= GICR_PROPBASER_nC;
gicr_write_propbaser(val, rbase + GICR_PROPBASER);
}
pr_info_once("GIC: using cache flushing for LPI property table\n");
gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
}
/* set PENDBASE */
val = (page_to_phys(pend_page) |
GICR_PENDBASER_InnerShareable |
GICR_PENDBASER_RaWaWb);
gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
/*
* The HW reports non-shareable, we must remove the
* cacheability attributes as well.
*/
val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
GICR_PENDBASER_CACHEABILITY_MASK);
val |= GICR_PENDBASER_nC;
gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
}
/* Enable LPIs */
val = readl_relaxed(rbase + GICR_CTLR);
val |= GICR_CTLR_ENABLE_LPIS;
writel_relaxed(val, rbase + GICR_CTLR);
/* Make sure the GIC has seen the above */
dsb(sy);
out:
pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
cpu, pend_page ? "allocated" : "reserved", &paddr);
}
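/*
 * Like its_cpu_init_collection(), but binds collection @cpu on every ITS
 * to the redistributor at @phys_base instead of the current CPU's GICR.
 */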
static void its_cpu_init_collection_others(void __iomem *rbase,
phys_addr_t phys_base, int cpu)
{
struct its_node *its;
if (!init_all_gicr)
return;
raw_spin_lock(&its_lock);
list_for_each_entry(its, &its_nodes, entry) {
u64 target;
/*
* We now have to bind each collection to its target
* redistributor.
*/
if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
/*
* This ITS wants the physical address of the
* redistributor.
*/
target = phys_base;
} else {
/*
* This ITS wants a linear CPU number.
*/
target = gic_read_typer(rbase + GICR_TYPER);
target = GICR_TYPER_CPU_NUMBER(target) << 16;
}
/* Perform collection mapping */
its->collections[cpu].target_address = target;
its->collections[cpu].col_id = cpu;
its_send_mapc(its, &its->collections[cpu], 1);
its_send_invall(its, &its->collections[cpu]);
}
raw_spin_unlock(&its_lock);
}
int its_cpu_init_others(void __iomem *base, phys_addr_t phys_base, int cpu)
{
if (!list_empty(&its_nodes)) {
if (!(gic_read_typer(base + GICR_TYPER) & GICR_TYPER_PLPIS)) {
pr_err("GICR:%p: LPIs not supported\n", base);
return -ENXIO;
}
its_cpu_init_lpis_others(base, cpu);
its_cpu_init_collection_others(base, phys_base, cpu);
}
return 0;
}
#endif
static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
{
struct its_device *its_dev = NULL, *tmp;
drivers/irqchip/irq-gic-v3.c
@@ -891,6 +891,208 @@ static void gic_cpu_init(void)
gic_cpu_sys_reg_init();
}
#ifdef CONFIG_ASCEND_INIT_ALL_GICR
struct workaround_oem_info {
char oem_id[ACPI_OEM_ID_SIZE + 1];
char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
u32 oem_revision;
};
static struct workaround_oem_info gicr_wkrd_info[] = {
{
.oem_id = "HISI ",
.oem_table_id = "HIP08 ",
.oem_revision = 0x300,
}, {
.oem_id = "HISI ",
.oem_table_id = "HIP08 ",
.oem_revision = 0x301,
}, {
.oem_id = "HISI ",
.oem_table_id = "HIP08 ",
.oem_revision = 0x400,
}, {
.oem_id = "HISI ",
.oem_table_id = "HIP08 ",
.oem_revision = 0x401,
}, {
.oem_id = "HISI ",
.oem_table_id = "HIP08 ",
.oem_revision = 0x402,
}
};
static void gic_check_hisi_workaround(void)
{
struct acpi_table_header *tbl;
acpi_status status = AE_OK;
int i;
status = acpi_get_table(ACPI_SIG_MADT, 0, &tbl);
if (ACPI_FAILURE(status) || !tbl)
return;
for (i = 0; i < ARRAY_SIZE(gicr_wkrd_info); i++) {
if (!memcmp(gicr_wkrd_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
!memcmp(gicr_wkrd_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
gicr_wkrd_info[i].oem_revision == tbl->oem_revision) {
its_enable_init_all_gicr();
break;
}
}
}
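/*
 * Count every GICR frame in all redistributor regions (including frames
 * that back no logical CPU) and hand the total to the ITS layer.
 */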
static void gic_compute_nr_gicr(void)
{
int i;
int sum = 0;
for (i = 0; i < gic_data.nr_redist_regions; i++) {
u64 typer;
void __iomem *ptr = gic_data.redist_regions[i].redist_base;
do {
typer = gic_read_typer(ptr + GICR_TYPER);
sum++;
if (gic_data.redist_regions[i].single_redist)
break;
if (gic_data.redist_stride) {
ptr += gic_data.redist_stride;
} else {
ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
if (typer & GICR_TYPER_VLPIS)
/* Skip VLPI_base + reserved page */
ptr += SZ_64K * 2;
}
} while (!(typer & GICR_TYPER_LAST));
}
its_set_gicr_nr(sum);
}
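/*
 * Same wake-up sequence as gic_enable_redist(), but applied to an
 * arbitrary GICR frame rather than the current CPU's redistributor.
 */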
static void gic_enable_redist_others(void __iomem *rbase, bool enable)
{
u32 count = 1000000; /* 1s! */
u32 val;
val = readl_relaxed(rbase + GICR_WAKER);
if (enable)
/* Wake up this CPU redistributor */
val &= ~GICR_WAKER_ProcessorSleep;
else
val |= GICR_WAKER_ProcessorSleep;
writel_relaxed(val, rbase + GICR_WAKER);
if (!enable) { /* Check that GICR_WAKER is writeable */
val = readl_relaxed(rbase + GICR_WAKER);
if (!(val & GICR_WAKER_ProcessorSleep))
return; /* No PM support in this redistributor */
}
while (--count) {
val = readl_relaxed(rbase + GICR_WAKER);
if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
break;
cpu_relax();
udelay(1);
};
if (!count)
pr_err_ratelimited("redistributor failed to %s...\n",
enable ? "wakeup" : "sleep");
}
static int gic_rdist_cpu(void __iomem *ptr, unsigned int cpu)
{
unsigned long mpidr = cpu_logical_map(cpu);
u64 typer;
u32 aff;
/*
* Convert affinity to a 32bit value that can be matched to
* GICR_TYPER bits [63:32].
*/
aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
MPIDR_AFFINITY_LEVEL(mpidr, 0));
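/* e.g. Aff3.Aff2.Aff1.Aff0 = 01.02.03.04 packs to aff = 0x01020304 */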
typer = gic_read_typer(ptr + GICR_TYPER);
if ((typer >> 32) == aff)
return 0;
return 1;
}
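/*
 * Returns 0 if the GICR at @ptr matches the MPIDR affinity of some
 * logical CPU, 1 if it belongs to none of them (a "ts core" GICR that
 * must be initialized here).
 */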
static int gic_rdist_cpus(void __iomem *ptr)
{
unsigned int i;
for (i = 0; i < nr_cpu_ids; i++) {
if (gic_rdist_cpu(ptr, i) == 0)
return 0;
}
return 1;
}
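/*
 * Bring up every redistributor left over after the per-CPU init. The
 * collection indices for these GICRs start at nr_cpu_ids and run up to
 * its_gicr_nr().
 */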
static void gic_cpu_init_others(void)
{
int i, cpu = nr_cpu_ids;
int gicr_nr = its_gicr_nr();
if (!its_init_all_gicr())
return;
for (i = 0; i < gic_data.nr_redist_regions; i++) {
u64 typer;
void __iomem *redist_base =
gic_data.redist_regions[i].redist_base;
phys_addr_t phys_base = gic_data.redist_regions[i].phys_base;
do {
typer = gic_read_typer(redist_base + GICR_TYPER);
if (gic_rdist_cpus(redist_base) == 1) {
if (cpu >= gicr_nr) {
pr_err("CPU over GICR number.\n");
break;
}
gic_enable_redist_others(redist_base, true);
if (gic_dist_supports_lpis())
its_cpu_init_others(redist_base, phys_base, cpu);
cpu++;
}
if (gic_data.redist_regions[i].single_redist)
break;
if (gic_data.redist_stride) {
redist_base += gic_data.redist_stride;
phys_base += gic_data.redist_stride;
} else {
/* Skip RD_base + SGI_base */
redist_base += SZ_64K * 2;
phys_base += SZ_64K * 2;
if (typer & GICR_TYPER_VLPIS) {
/* Skip VLPI_base + reserved page */
redist_base += SZ_64K * 2;
phys_base += SZ_64K * 2;
}
}
} while (!(typer & GICR_TYPER_LAST));
}
}
#else
static inline void gic_check_hisi_workaround(void) {}
static inline void gic_compute_nr_gicr(void) {}
static inline void gic_cpu_init_others(void) {}
#endif
#ifdef CONFIG_SMP
#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
@@ -1345,6 +1547,8 @@ static int __init gic_init_bases(void __iomem *dist_base,
gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
gic_data.rdists.has_vlpis = true;
gic_data.rdists.has_direct_lpi = true;
gic_check_hisi_workaround();
gic_compute_nr_gicr();
if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
err = -ENOMEM;
@@ -1386,6 +1590,8 @@ static int __init gic_init_bases(void __iomem *dist_base,
its_cpu_init();
}
gic_cpu_init_others();
return 0;
out_free:
include/linux/irqchip/arm-gic-v3.h
@@ -608,6 +608,13 @@ struct rdists {
struct irq_domain;
struct fwnode_handle;
int its_cpu_init(void);
#ifdef CONFIG_ASCEND_INIT_ALL_GICR
void its_set_gicr_nr(int nr);
int its_gicr_nr(void);
void its_enable_init_all_gicr(void);
bool its_init_all_gicr(void);
int its_cpu_init_others(void __iomem *base, phys_addr_t phys_base, int idx);
#endif
int its_init(struct fwnode_handle *handle, struct rdists *rdists,
struct irq_domain *domain);
int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent);