Commit 17a870be, authored by Russell King


Merge branches 'fixes' and 'misc'; commit 'kuser^{/add CPU_THUMB_CAPABLE to indicate}' into for-linus
@@ -12,7 +12,7 @@ DOCBOOKS := z8530book.xml \
kernel-api.xml filesystems.xml lsm.xml kgdb.xml \
gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
- 80211.xml sh.xml regulator.xml w1.xml \
+ sh.xml regulator.xml w1.xml \
writing_musb_glue_layer.xml iio.xml
ifeq ($(DOCBOOKS),)
......
@@ -151,7 +151,7 @@ bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
#else
const u16 *a = (const u16 *)addr1;
const u16 *b = (const u16 *)addr2;
- return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
#endif
}
......
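The ether_addr_equal() hunk above flips the final comparison: two MAC addresses are equal exactly when the OR of the three 16-bit XORs is zero, so the fixed code returns == 0 rather than != 0. A minimal stand-alone sketch of the corrected logic, assuming plain C outside the kernel (the memcpy sidesteps the aligned u16 * casts the kernel variant relies on):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-alone model of the fixed comparison: addresses are equal iff
 * all three 16-bit words XOR to zero, i.e. the OR of the XORs is zero. */
static bool mac_equal(const uint8_t *addr1, const uint8_t *addr2)
{
        uint16_t a[3], b[3];

        memcpy(a, addr1, 6);    /* avoid the kernel's aligned u16 * casts */
        memcpy(b, addr2, 6);
        return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
}

int main(void)
{
        const uint8_t x[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };
        const uint8_t y[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x02 };

        printf("x==x: %d, x==y: %d\n", mac_equal(x, x), mac_equal(x, y)); /* 1, 0 */
        return 0;
}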
VERSION = 4
PATCHLEVEL = 10
SUBLEVEL = 0
- EXTRAVERSION = -rc1
+ EXTRAVERSION = -rc2
NAME = Roaring Lionus
# *DOCUMENTATION*
......
@@ -2,6 +2,7 @@ config ARM
bool
default y
select ARCH_CLOCKSOURCE_DATA
+ select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
......
@@ -34,8 +34,7 @@ config PROCESSOR_ID
used instead of the auto-probing which utilizes the register.
config REMAP_VECTORS_TO_RAM
- bool 'Install vectors to the beginning of RAM' if DRAM_BASE
- depends on DRAM_BASE
+ bool 'Install vectors to the beginning of RAM'
help
The kernel needs to change the hardware exception vectors.
In nommu mode, the hardware exception vectors are normally
......
@@ -32,6 +32,7 @@ extern void error(char *);
/* Not needed, but used in some headers pulled in by decompressors */
extern char * strstr(const char * s1, const char *s2);
+ extern size_t strlen(const char *s);
#ifdef CONFIG_KERNEL_GZIP
#include "../../../../lib/decompress_inflate.c"
......
@@ -144,7 +144,7 @@ extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
{
- unsigned long val = ptr ? virt_to_phys(ptr) : 0;
+ unsigned long val = ptr ? __pa_symbol(ptr) : 0;
mcpm_entry_vectors[cluster][cpu] = val;
sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}
@@ -299,8 +299,8 @@ void mcpm_cpu_power_down(void)
* the kernel as if the power_up method just had deasserted reset
* on the CPU.
*/
- phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
- phys_reset(virt_to_phys(mcpm_entry_point));
+ phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
+ phys_reset(__pa_symbol(mcpm_entry_point));
/* should never get here */
BUG();
@@ -388,8 +388,8 @@ static int __init nocache_trampoline(unsigned long _arg)
__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
__mcpm_cpu_down(cpu, cluster);
- phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
- phys_reset(virt_to_phys(mcpm_entry_point));
+ phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
+ phys_reset(__pa_symbol(mcpm_entry_point));
BUG();
}
@@ -449,7 +449,7 @@ int __init mcpm_sync_init(
sync_cache_w(&mcpm_sync);
if (power_up_setup) {
- mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
+ mcpm_power_up_setup_phys = __pa_symbol(power_up_setup);
sync_cache_w(&mcpm_power_up_setup_phys);
}
......
@@ -16,7 +16,7 @@
#ifndef __CACHE_UNIPHIER_H
#define __CACHE_UNIPHIER_H
- #include <linux/types.h>
+ #include <linux/errno.h>
#ifdef CONFIG_CACHE_UNIPHIER
int uniphier_cache_init(void);
......
@@ -83,8 +83,15 @@
#define IOREMAP_MAX_ORDER 24
#endif
+ #define VECTORS_BASE UL(0xffff0000)
#else /* CONFIG_MMU */
+ #ifndef __ASSEMBLY__
+ extern unsigned long vectors_base;
+ #define VECTORS_BASE vectors_base
+ #endif
/*
* The limitation of user task size can grow up to the end of free ram region.
* It is difficult to define and perhaps will never meet the original meaning
@@ -111,6 +118,13 @@
#endif /* !CONFIG_MMU */
+ #ifdef CONFIG_XIP_KERNEL
+ #define KERNEL_START _sdata
+ #else
+ #define KERNEL_START _stext
+ #endif
+ #define KERNEL_END _end
/*
* We fix the TCM memories max 32 KiB ITCM resp DTCM at these
* locations
@@ -206,7 +220,7 @@ extern const void *__pv_table_begin, *__pv_table_end;
: "r" (x), "I" (__PV_BITS_31_24) \
: "cc")
- static inline phys_addr_t __virt_to_phys(unsigned long x)
+ static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
{
phys_addr_t t;
@@ -238,7 +252,7 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
#define PHYS_OFFSET PLAT_PHYS_OFFSET
#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
- static inline phys_addr_t __virt_to_phys(unsigned long x)
+ static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
{
return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
}
@@ -254,6 +268,16 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
PHYS_PFN_OFFSET)
+ #define __pa_symbol_nodebug(x) __virt_to_phys_nodebug((x))
+ #ifdef CONFIG_DEBUG_VIRTUAL
+ extern phys_addr_t __virt_to_phys(unsigned long x);
+ extern phys_addr_t __phys_addr_symbol(unsigned long x);
+ #else
+ #define __virt_to_phys(x) __virt_to_phys_nodebug(x)
+ #define __phys_addr_symbol(x) __pa_symbol_nodebug(x)
+ #endif
/*
* These are *only* valid on the kernel direct mapped RAM memory.
* Note: Drivers should NOT use these. They are the wrong
@@ -276,6 +300,7 @@ static inline void *phys_to_virt(phys_addr_t x)
* Drivers should NOT use these either.
*/
#define __pa(x) __virt_to_phys((unsigned long)(x))
+ #define __pa_symbol(x) __phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT)
......
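The memory.h hunk above splits translation into an unchecked __virt_to_phys_nodebug() fast path plus, under CONFIG_DEBUG_VIRTUAL, out-of-line checked versions, with __pa_symbol() reserved for kernel-image symbols. A compressed user-space model of that macro layering, assuming made-up offsets and collapsing the symbol and linear-map checks into one function purely for illustration:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t phys_addr_t;

/* Illustrative constants only; the real values come from the platform. */
#define PAGE_OFFSET 0xc0000000UL
#define PHYS_OFFSET 0x80000000UL

/* Fast path: pure arithmetic, no checking (the *_nodebug flavour). */
static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
{
        return (phys_addr_t)(x - PAGE_OFFSET + PHYS_OFFSET);
}

#ifdef CONFIG_DEBUG_VIRTUAL
/* Checked path: kept out of line in the kernel so it can warn on misuse. */
static phys_addr_t __virt_to_phys(unsigned long x)
{
        if (x < PAGE_OFFSET)
                fprintf(stderr, "virt_to_phys on non-linear address %#lx\n", x);
        return __virt_to_phys_nodebug(x);
}
#define __phys_addr_symbol(x) __virt_to_phys(x)
#else
#define __virt_to_phys(x) __virt_to_phys_nodebug(x)
#define __phys_addr_symbol(x) __virt_to_phys_nodebug(x)
#endif

#define __pa(x) __virt_to_phys((unsigned long)(x))
#define __pa_symbol(x) __phys_addr_symbol((unsigned long)(x))

int main(void)
{
        unsigned long va = 0xc0100000UL; /* pretend linear-map / image address */

        printf("__pa(%#lx) = %#x\n", va, (unsigned)__pa(va));
        printf("__pa_symbol(%#lx) = %#x\n", va, (unsigned)__pa_symbol(va));
        return 0;
}

In the real patch the symbol variant additionally goes through RELOC_HIDE() and a separate __phys_addr_symbol(), so CONFIG_DEBUG_VIRTUAL can apply different bounds checks to linear-map addresses and kernel-image symbols; the sketch only shows the fallback structure of the macros.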
@@ -63,9 +63,9 @@ typedef pte_t *pte_addr_t;
/*
* Mark the prot value as uncacheable and unbufferable.
*/
- #define pgprot_noncached(prot) __pgprot(0)
- #define pgprot_writecombine(prot) __pgprot(0)
- #define pgprot_dmacoherent(prot) __pgprot(0)
+ #define pgprot_noncached(prot) (prot)
+ #define pgprot_writecombine(prot) (prot)
+ #define pgprot_dmacoherent(prot) (prot)
/*
......
@@ -151,11 +151,6 @@ __after_proc_init:
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
bic r0, r0, #CR_I
- #endif
- #ifdef CONFIG_CPU_HIGH_VECTOR
- orr r0, r0, #CR_V
- #else
- bic r0, r0, #CR_V
#endif
mcr p15, 0, r0, c1, c0, 0 @ write control reg
#elif defined (CONFIG_CPU_V7M)
......
@@ -155,8 +155,17 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
break;
case R_ARM_PREL31:
- offset = *(u32 *)loc + sym->st_value - loc;
- *(u32 *)loc = offset & 0x7fffffff;
+ offset = (*(s32 *)loc << 1) >> 1; /* sign extend */
+ offset += sym->st_value - loc;
+ if (offset >= 0x40000000 || offset < -0x40000000) {
+ pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
+ module->name, relindex, i, symname,
+ ELF32_R_TYPE(rel->r_info), loc,
+ sym->st_value);
+ return -ENOEXEC;
+ }
+ *(u32 *)loc &= 0x80000000;
+ *(u32 *)loc |= offset & 0x7fffffff;
break;
case R_ARM_MOVW_ABS_NC:
......
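The new R_ARM_PREL31 handling first sign-extends the 31-bit addend already stored at the relocation site, adds the symbol-relative offset, and rejects anything outside the +/-1 GiB range the relocation can encode. A small host-side sketch of just the sign extension and range check, with an illustrative value and relying (like the kernel code) on two's-complement arithmetic right shift:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* A PREL31 addend lives in bits 0..30 of the word at the relocation
         * site; bit 30 is its sign bit. Pick a value with that bit set. */
        uint32_t loc_word = 0x40000123u;

        /* Shift the field up to the top of the register and arithmetic-
         * shift it back down, mirroring (*(s32 *)loc << 1) >> 1. */
        int32_t offset = (int32_t)(loc_word << 1) >> 1;

        printf("raw 0x%08x -> signed %d\n", loc_word, offset);

        /* PREL31 can only express +/-1 GiB; the patch rejects anything else. */
        if (offset >= 0x40000000 || offset < -0x40000000)
                printf("out of range for R_ARM_PREL31\n");
        return 0;
}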
@@ -81,7 +81,7 @@ __setup("fpe=", fpe_setup);
extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *);
- extern void sanity_check_meminfo(void);
+ extern void adjust_lowmem_bounds(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);
@@ -1093,8 +1093,14 @@ void __init setup_arch(char **cmdline_p)
setup_dma_zone(mdesc);
xen_early_init();
efi_init();
- sanity_check_meminfo();
+ /*
+ * Make sure the calculation for lowmem/highmem is set appropriately
+ * before reserving/allocating any memory
+ */
+ adjust_lowmem_bounds();
arm_memblock_init(mdesc);
+ /* Memory may have been removed so recalculate the bounds. */
+ adjust_lowmem_bounds();
early_ioremap_reset();
......
@@ -251,7 +251,7 @@ void __cpu_die(unsigned int cpu)
pr_err("CPU%u: cpu didn't die\n", cpu);
return;
}
- pr_notice("CPU%u: shutdown\n", cpu);
+ pr_debug("CPU%u: shutdown\n", cpu);
/*
* platform_cpu_kill() is generally expected to do the powering off
......
@@ -27,7 +27,7 @@ static int alpine_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
phys_addr_t addr;
- addr = virt_to_phys(secondary_startup);
+ addr = __pa_symbol(secondary_startup);
if (addr > (phys_addr_t)(uint32_t)(-1)) {
pr_err("FAIL: resume address over 32bit (%pa)", &addr);
......
@@ -25,7 +25,7 @@
static void write_release_addr(u32 release_phys)
{
u32 *virt = (u32 *) phys_to_virt(release_phys);
- writel_relaxed(virt_to_phys(secondary_startup), virt);
+ writel_relaxed(__pa_symbol(secondary_startup), virt);
/* Make sure this store is visible to other CPUs */
smp_wmb();
__cpuc_flush_dcache_area(virt, sizeof(u32));
......
@@ -135,7 +135,7 @@ static int bcm63138_smp_boot_secondary(unsigned int cpu,
}
/* Write the secondary init routine to the BootLUT reset vector */
- val = virt_to_phys(secondary_startup);
+ val = __pa_symbol(secondary_startup);
writel_relaxed(val, bootlut_base + BOOTLUT_RESET_VECT);
/* Power up the core, will jump straight to its reset vector when we
......
@@ -151,7 +151,7 @@ static void brcmstb_cpu_boot(u32 cpu)
* Set the reset vector to point to the secondary_startup
* routine
*/
- cpu_set_boot_addr(cpu, virt_to_phys(secondary_startup));
+ cpu_set_boot_addr(cpu, __pa_symbol(secondary_startup));
/* Unhalt the cpu */
cpu_rst_cfg_set(cpu, 0);
......
@@ -116,7 +116,7 @@ static int nsp_write_lut(unsigned int cpu)
return -ENOMEM;
}
- secondary_startup_phy = virt_to_phys(secondary_startup);
+ secondary_startup_phy = __pa_symbol(secondary_startup);
BUG_ON(secondary_startup_phy > (phys_addr_t)U32_MAX);
writel_relaxed(secondary_startup_phy, sku_rom_lut);
@@ -189,7 +189,7 @@ static int kona_boot_secondary(unsigned int cpu, struct task_struct *idle)
* Secondary cores will start in secondary_startup(),
* defined in "arch/arm/kernel/head.S"
*/
- boot_func = virt_to_phys(secondary_startup);
+ boot_func = __pa_symbol(secondary_startup);
BUG_ON(boot_func & BOOT_ADDR_CPUID_MASK);
BUG_ON(boot_func > (phys_addr_t)U32_MAX);
......
@@ -15,6 +15,7 @@
#include <asm/cacheflush.h>
#include <asm/cp15.h>
+ #include <asm/memory.h>
#include <asm/smp_plat.h>
#include <asm/smp_scu.h>
@@ -75,7 +76,7 @@ static void __init berlin_smp_prepare_cpus(unsigned int max_cpus)
if (!cpu_ctrl)
goto unmap_scu;
- vectors_base = ioremap(CONFIG_VECTORS_BASE, SZ_32K);
+ vectors_base = ioremap(VECTORS_BASE, SZ_32K);
if (!vectors_base)
goto unmap_scu;
@@ -92,7 +93,7 @@ static void __init berlin_smp_prepare_cpus(unsigned int max_cpus)
* Write the secondary startup address into the SW reset address
* vector. This is used by boot_inst.
*/
- writel(virt_to_phys(secondary_startup), vectors_base + SW_RESET_ADDR);
+ writel(__pa_symbol(secondary_startup), vectors_base + SW_RESET_ADDR);
iounmap(vectors_base);
unmap_scu:
......
@@ -41,7 +41,7 @@ static int exynos_do_idle(unsigned long mode)
case FW_DO_IDLE_AFTR:
if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
exynos_save_cp15();
- writel_relaxed(virt_to_phys(exynos_cpu_resume_ns),
+ writel_relaxed(__pa_symbol(exynos_cpu_resume_ns),
sysram_ns_base_addr + 0x24);
writel_relaxed(EXYNOS_AFTR_MAGIC, sysram_ns_base_addr + 0x20);
if (soc_is_exynos3250()) {
@@ -135,7 +135,7 @@ static int exynos_suspend(void)
exynos_save_cp15();
writel(EXYNOS_SLEEP_MAGIC, sysram_ns_base_addr + EXYNOS_BOOT_FLAG);
- writel(virt_to_phys(exynos_cpu_resume_ns),
+ writel(__pa_symbol(exynos_cpu_resume_ns),
sysram_ns_base_addr + EXYNOS_BOOT_ADDR);
return cpu_suspend(0, exynos_cpu_suspend);
......
@@ -221,7 +221,7 @@ static void exynos_mcpm_setup_entry_point(void)
*/
__raw_writel(0xe59f0000, ns_sram_base_addr); /* ldr r0, [pc, #0] */
__raw_writel(0xe12fff10, ns_sram_base_addr + 4); /* bx r0 */
- __raw_writel(virt_to_phys(mcpm_entry_point), ns_sram_base_addr + 8);
+ __raw_writel(__pa_symbol(mcpm_entry_point), ns_sram_base_addr + 8);
}
static struct syscore_ops exynos_mcpm_syscore_ops = {
......
@@ -353,7 +353,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
smp_rmb();
- boot_addr = virt_to_phys(exynos4_secondary_startup);
+ boot_addr = __pa_symbol(exynos4_secondary_startup);
ret = exynos_set_boot_addr(core_id, boot_addr);
if (ret)
@@ -443,7 +443,7 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
mpidr = cpu_logical_map(i);
core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- boot_addr = virt_to_phys(exynos4_secondary_startup);
+ boot_addr = __pa_symbol(exynos4_secondary_startup);
ret = exynos_set_boot_addr(core_id, boot_addr);
if (ret)
......
@@ -132,7 +132,7 @@ static void exynos_set_wakeupmask(long mask)
static void exynos_cpu_set_boot_vector(long flags)
{
- writel_relaxed(virt_to_phys(exynos_cpu_resume),
+ writel_relaxed(__pa_symbol(exynos_cpu_resume),
exynos_boot_vector_addr());
writel_relaxed(flags, exynos_boot_vector_flag());
}
@@ -238,7 +238,7 @@ static int exynos_cpu0_enter_aftr(void)
abort:
if (cpu_online(1)) {
- unsigned long boot_addr = virt_to_phys(exynos_cpu_resume);
+ unsigned long boot_addr = __pa_symbol(exynos_cpu_resume);
/*
* Set the boot vector to something non-zero
@@ -330,7 +330,7 @@ static int exynos_cpu1_powerdown(void)
static void exynos_pre_enter_aftr(void)
{
- unsigned long boot_addr = virt_to_phys(exynos_cpu_resume);
+ unsigned long boot_addr = __pa_symbol(exynos_cpu_resume);
(void)exynos_set_boot_addr(1, boot_addr);
}
......
@@ -344,7 +344,7 @@ static void exynos_pm_prepare(void)
exynos_pm_enter_sleep_mode();
/* ensure at least INFORM0 has the resume address */
- pmu_raw_writel(virt_to_phys(exynos_cpu_resume), S5P_INFORM0);
+ pmu_raw_writel(__pa_symbol(exynos_cpu_resume), S5P_INFORM0);
}
static void exynos3250_pm_prepare(void)
@@ -361,7 +361,7 @@ static void exynos3250_pm_prepare(void)
exynos_pm_enter_sleep_mode();
/* ensure at least INFORM0 has the resume address */
- pmu_raw_writel(virt_to_phys(exynos_cpu_resume), S5P_INFORM0);
+ pmu_raw_writel(__pa_symbol(exynos_cpu_resume), S5P_INFORM0);
}
static void exynos5420_pm_prepare(void)
@@ -386,7 +386,7 @@ static void exynos5420_pm_prepare(void)
/* ensure at least INFORM0 has the resume address */
if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM))
- pmu_raw_writel(virt_to_phys(mcpm_entry_point), S5P_INFORM0);
+ pmu_raw_writel(__pa_symbol(mcpm_entry_point), S5P_INFORM0);
tmp = pmu_raw_readl(EXYNOS5_ARM_L2_OPTION);
tmp &= ~EXYNOS5_USE_RETENTION;
......
@@ -327,7 +327,7 @@ static int __init hip04_smp_init(void)
*/
writel_relaxed(hip04_boot_method[0], relocation);
writel_relaxed(0xa5a5a5a5, relocation + 4); /* magic number */
- writel_relaxed(virt_to_phys(secondary_startup), relocation + 8);
+ writel_relaxed(__pa_symbol(secondary_startup), relocation + 8);
writel_relaxed(0, relocation + 12);
iounmap(relocation);
......
@@ -28,7 +28,7 @@ void hi3xxx_set_cpu_jump(int cpu, void *jump_addr)
cpu = cpu_logical_map(cpu);
if (!cpu || !ctrl_base)
return;
- writel_relaxed(virt_to_phys(jump_addr), ctrl_base + ((cpu - 1) << 2));
+ writel_relaxed(__pa_symbol(jump_addr), ctrl_base + ((cpu - 1) << 2));
}
int hi3xxx_get_cpu_jump(int cpu)
@@ -118,7 +118,7 @@ static int hix5hd2_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
phys_addr_t jumpaddr;
- jumpaddr = virt_to_phys(secondary_startup);
+ jumpaddr = __pa_symbol(secondary_startup);
hix5hd2_set_scu_boot_addr(HIX5HD2_BOOT_ADDRESS, jumpaddr);
hix5hd2_set_cpu(cpu, true);
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
@@ -156,7 +156,7 @@ static int hip01_boot_secondary(unsigned int cpu, struct task_struct *idle)
struct device_node *node;
- jumpaddr = virt_to_phys(secondary_startup);
+ jumpaddr = __pa_symbol(secondary_startup);
hip01_set_boot_addr(HIP01_BOOT_ADDRESS, jumpaddr);
node = of_find_compatible_node(NULL, NULL, "hisilicon,hip01-sysctrl");
......
@@ -117,7 +117,7 @@ static void __init ls1021a_smp_prepare_cpus(unsigned int max_cpus)
dcfg_base = of_iomap(np, 0);
BUG_ON(!dcfg_base);
- paddr = virt_to_phys(secondary_startup);
+ paddr = __pa_symbol(secondary_startup);
writel_relaxed(cpu_to_be32(paddr), dcfg_base + DCFG_CCSR_SCRATCHRW1);
iounmap(dcfg_base);
......
@@ -499,7 +499,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
memset(suspend_ocram_base, 0, sizeof(*pm_info));
pm_info = suspend_ocram_base;
pm_info->pbase = ocram_pbase;
- pm_info->resume_addr = virt_to_phys(v7_cpu_resume);
+ pm_info->resume_addr = __pa_symbol(v7_cpu_resume);
pm_info->pm_info_size = sizeof(*pm_info);
/*
......
@@ -99,7 +99,7 @@ void imx_enable_cpu(int cpu, bool enable)
void imx_set_cpu_jump(int cpu, void *jump_addr)
{
cpu = cpu_logical_map(cpu);
- writel_relaxed(virt_to_phys(jump_addr),
+ writel_relaxed(__pa_symbol(jump_addr),
src_base + SRC_GPR1 + cpu * 8);
}
......
@@ -122,7 +122,7 @@ static void __init __mtk_smp_prepare_cpus(unsigned int max_cpus, int trustzone)
* write the address of slave startup address into the system-wide
* jump register
*/
- writel_relaxed(virt_to_phys(secondary_startup_arm),
+ writel_relaxed(__pa_symbol(secondary_startup_arm),
mtk_smp_base + mtk_smp_info->jump_reg);
}
......
@@ -110,7 +110,7 @@ static void mvebu_pm_store_armadaxp_bootinfo(u32 *store_addr)
{
phys_addr_t resume_pc;
- resume_pc = virt_to_phys(armada_370_xp_cpu_resume);
+ resume_pc = __pa_symbol(armada_370_xp_cpu_resume);
/*
* The bootloader expects the first two words to be a magic
......
@@ -112,7 +112,7 @@ static const struct of_device_id of_pmsu_table[] = {
void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
{
- writel(virt_to_phys(boot_addr), pmsu_mp_base +
+ writel(__pa_symbol(boot_addr), pmsu_mp_base +
PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
}
......
@@ -153,7 +153,7 @@ void mvebu_system_controller_set_cpu_boot_addr(void *boot_addr)
if (of_machine_is_compatible("marvell,armada375"))
mvebu_armada375_smp_wa_init();
- writel(virt_to_phys(boot_addr), system_controller_base +
+ writel(__pa_symbol(boot_addr), system_controller_base +
mvebu_sc->resume_boot_addr);
}
#endif
......
@@ -315,15 +315,15 @@ void omap3_save_scratchpad_contents(void)
scratchpad_contents.boot_config_ptr = 0x0;
if (cpu_is_omap3630())
scratchpad_contents.public_restore_ptr =
- virt_to_phys(omap3_restore_3630);
+ __pa_symbol(omap3_restore_3630);
else if (omap_rev() != OMAP3430_REV_ES3_0 &&
omap_rev() != OMAP3430_REV_ES3_1 &&
omap_rev() != OMAP3430_REV_ES3_1_2)
scratchpad_contents.public_restore_ptr =
- virt_to_phys(omap3_restore);
+ __pa_symbol(omap3_restore);
else
scratchpad_contents.public_restore_ptr =
- virt_to_phys(omap3_restore_es3);
+ __pa_symbol(omap3_restore_es3);
if (omap_type() == OMAP2_DEVICE_TYPE_GP)
scratchpad_contents.secure_ram_restore_ptr = 0x0;
@@ -395,7 +395,7 @@ void omap3_save_scratchpad_contents(void)
sdrc_block_contents.flags = 0x0;
sdrc_block_contents.block_size = 0x0;
- arm_context_addr = virt_to_phys(omap3_arm_context);
+ arm_context_addr = __pa_symbol(omap3_arm_context);
/* Copy all the contents to the scratchpad location */
scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD);
......
@@ -273,7 +273,7 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
cpu_clear_prev_logic_pwrst(cpu);
pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
pwrdm_set_logic_retst(pm_info->pwrdm, cpu_logic_state);
- set_cpu_wakeup_addr(cpu, virt_to_phys(omap_pm_ops.resume));
+ set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.resume));
omap_pm_ops.scu_prepare(cpu, power_state);
l2x0_pwrst_prepare(cpu, save_state);
@@ -325,7 +325,7 @@ int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
- set_cpu_wakeup_addr(cpu, virt_to_phys(omap_pm_ops.hotplug_restart));
+ set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.hotplug_restart));
omap_pm_ops.scu_prepare(cpu, power_state);
/*
@@ -467,13 +467,13 @@ void __init omap4_mpuss_early_init(void)
sar_base = omap4_get_sar_ram_base();
if (cpu_is_omap443x())
- startup_pa = virt_to_phys(omap4_secondary_startup);
+ startup_pa = __pa_symbol(omap4_secondary_startup);
else if (cpu_is_omap446x())
- startup_pa = virt_to_phys(omap4460_secondary_startup);
+ startup_pa = __pa_symbol(omap4460_secondary_startup);
else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
- startup_pa = virt_to_phys(omap5_secondary_hyp_startup);
+ startup_pa = __pa_symbol(omap5_secondary_hyp_startup);
else
- startup_pa = virt_to_phys(omap5_secondary_startup);
+ startup_pa = __pa_symbol(omap5_secondary_startup);
if (cpu_is_omap44xx())
writel_relaxed(startup_pa, sar_base +
......
@@ -316,9 +316,9 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
* A barrier is added to ensure that write buffer is drained
*/
if (omap_secure_apis_support())
- omap_auxcoreboot_addr(virt_to_phys(cfg.startup_addr));
+ omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr));
else
- writel_relaxed(virt_to_phys(cfg.startup_addr),
+ writel_relaxed(__pa_symbol(cfg.startup_addr),
base + OMAP_AUX_CORE_BOOT_1);
}
......
@@ -65,7 +65,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
* waiting for. This would wake up the secondary core from WFE
*/
#define SIRFSOC_CPU1_JUMPADDR_OFFSET 0x2bc
- __raw_writel(virt_to_phys(sirfsoc_secondary_startup),
+ __raw_writel(__pa_symbol(sirfsoc_secondary_startup),
clk_base + SIRFSOC_CPU1_JUMPADDR_OFFSET);
#define SIRFSOC_CPU1_WAKEMAGIC_OFFSET 0x2b8
......
@@ -54,7 +54,7 @@ static void sirfsoc_set_sleep_mode(u32 mode)
static int sirfsoc_pre_suspend_power_off(void)
{
- u32 wakeup_entry = virt_to_phys(cpu_resume);
+ u32 wakeup_entry = __pa_symbol(cpu_resume);
sirfsoc_rtc_iobrg_writel(wakeup_entry, sirfsoc_pwrc_base +
SIRFSOC_PWRC_SCRATCH_PAD1);
......
@@ -249,7 +249,7 @@ static int palmz72_pm_suspend(void)
store_ptr = *PALMZ72_SAVE_DWORD;
/* Setting PSPR to a proper value */
- PSPR = virt_to_phys(&palmz72_resume_info);
+ PSPR = __pa_symbol(&palmz72_resume_info);
return 0;
}
......
@@ -85,7 +85,7 @@ static void pxa25x_cpu_pm_enter(suspend_state_t state)
static int pxa25x_cpu_pm_prepare(void)
{
/* set resume return address */
- PSPR = virt_to_phys(cpu_resume);
+ PSPR = __pa_symbol(cpu_resume);
return 0;
}
......
@@ -168,7 +168,7 @@ static int pxa27x_cpu_pm_valid(suspend_state_t state)
static int pxa27x_cpu_pm_prepare(void)
{
/* set resume return address */
- PSPR = virt_to_phys(cpu_resume);
+ PSPR = __pa_symbol(cpu_resume);
return 0;
}
......
@@ -123,7 +123,7 @@ static void pxa3xx_cpu_pm_suspend(void)
PSPR = 0x5c014000;
/* overwrite with the resume address */
- *p = virt_to_phys(cpu_resume);
+ *p = __pa_symbol(cpu_resume);
cpu_suspend(0, pxa3xx_finish_suspend);
......
@@ -76,7 +76,7 @@ static void __init realview_smp_prepare_cpus(unsigned int max_cpus)
}
/* Put the boot address in this magic register */
regmap_write(map, REALVIEW_SYS_FLAGSSET_OFFSET,
- virt_to_phys(versatile_secondary_startup));
+ __pa_symbol(versatile_secondary_startup));
}
static const struct smp_operations realview_dt_smp_ops __initconst = {
......
@@ -156,7 +156,7 @@ static int rockchip_boot_secondary(unsigned int cpu, struct task_struct *idle)
*/
mdelay(1); /* ensure the cpus other than cpu0 to startup */
- writel(virt_to_phys(secondary_startup), sram_base_addr + 8);
+ writel(__pa_symbol(secondary_startup), sram_base_addr + 8);
writel(0xDEADBEAF, sram_base_addr + 4);
dsb_sev();
}
@@ -195,7 +195,7 @@ static int __init rockchip_smp_prepare_sram(struct device_node *node)
}
/* set the boot function for the sram code */
- rockchip_boot_fn = virt_to_phys(secondary_startup);
+ rockchip_boot_fn = __pa_symbol(secondary_startup);
/* copy the trampoline to sram, that runs during startup of the core */
memcpy(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz);
......
@@ -62,7 +62,7 @@ static inline u32 rk3288_l2_config(void)
static void rk3288_config_bootdata(void)
{
rkpm_bootdata_cpusp = rk3288_bootram_phy + (SZ_4K - 8);
- rkpm_bootdata_cpu_code = virt_to_phys(cpu_resume);
+ rkpm_bootdata_cpu_code = __pa_symbol(cpu_resume);
rkpm_bootdata_l2ctlr_f = 1;
rkpm_bootdata_l2ctlr = rk3288_l2_config();
......
@@ -484,7 +484,7 @@ static int jive_pm_suspend(void)
* correct address to resume from. */
__raw_writel(0x2BED, S3C2412_INFORM0);
- __raw_writel(virt_to_phys(s3c_cpu_resume), S3C2412_INFORM1);
+ __raw_writel(__pa_symbol(s3c_cpu_resume), S3C2412_INFORM1);
return 0;
}
......
@@ -45,7 +45,7 @@ static void s3c2410_pm_prepare(void)
{
/* ensure at least GSTATUS3 has the resume address */
- __raw_writel(virt_to_phys(s3c_cpu_resume), S3C2410_GSTATUS3);
+ __raw_writel(__pa_symbol(s3c_cpu_resume), S3C2410_GSTATUS3);
S3C_PMDBG("GSTATUS3 0x%08x\n", __raw_readl(S3C2410_GSTATUS3));
S3C_PMDBG("GSTATUS4 0x%08x\n", __raw_readl(S3C2410_GSTATUS4));
......
@@ -48,7 +48,7 @@ static void s3c2416_pm_prepare(void)
* correct address to resume from.
*/
__raw_writel(0x2BED, S3C2412_INFORM0);
- __raw_writel(virt_to_phys(s3c_cpu_resume), S3C2412_INFORM1);
+ __raw_writel(__pa_symbol(s3c_cpu_resume), S3C2412_INFORM1);
}
static int s3c2416_pm_add(struct device *dev, struct subsys_interface *sif)
......
@@ -304,7 +304,7 @@ static void s3c64xx_pm_prepare(void)
wake_irqs, ARRAY_SIZE(wake_irqs));
/* store address of resume. */
- __raw_writel(virt_to_phys(s3c_cpu_resume), S3C64XX_INFORM0);
+ __raw_writel(__pa_symbol(s3c_cpu_resume), S3C64XX_INFORM0);
/* ensure previous wakeup state is cleared before sleeping */
__raw_writel(__raw_readl(S3C64XX_WAKEUP_STAT), S3C64XX_WAKEUP_STAT);
......
@@ -69,7 +69,7 @@ static void s5pv210_pm_prepare(void)
__raw_writel(s5pv210_irqwake_intmask, S5P_WAKEUP_MASK);
/* ensure at least INFORM0 has the resume address */
- __raw_writel(virt_to_phys(s5pv210_cpu_resume), S5P_INFORM0);
+ __raw_writel(__pa_symbol(s5pv210_cpu_resume), S5P_INFORM0);
tmp = __raw_readl(S5P_SLEEP_CFG);
tmp &= ~(S5P_SLEEP_CFG_OSC_EN | S5P_SLEEP_CFG_USBOSC_EN);
......
@@ -73,7 +73,7 @@ static int sa11x0_pm_enter(suspend_state_t state)
RCSR = RCSR_HWR | RCSR_SWR | RCSR_WDR | RCSR_SMR;
/* set resume return address */
- PSPR = virt_to_phys(cpu_resume);
+ PSPR = __pa_symbol(cpu_resume);
/* go zzz */
cpu_suspend(0, sa1100_finish_suspend);
......
@@ -171,7 +171,7 @@ static void apmu_parse_dt(void (*fn)(struct resource *res, int cpu, int bit))
static void __init shmobile_smp_apmu_setup_boot(void)
{
/* install boot code shared by all CPUs */
- shmobile_boot_fn = virt_to_phys(shmobile_smp_boot);
+ shmobile_boot_fn = __pa_symbol(shmobile_smp_boot);
}
void __init shmobile_smp_apmu_prepare_cpus(unsigned int max_cpus,
@@ -185,7 +185,7 @@ void __init shmobile_smp_apmu_prepare_cpus(unsigned int max_cpus,
int shmobile_smp_apmu_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
/* For this particular CPU register boot vector */
- shmobile_smp_hook(cpu, virt_to_phys(secondary_startup), 0);
+ shmobile_smp_hook(cpu, __pa_symbol(secondary_startup), 0);
return apmu_wrap(cpu, apmu_power_on);
}
@@ -301,7 +301,7 @@ int shmobile_smp_apmu_cpu_kill(unsigned int cpu)
#if defined(CONFIG_SUSPEND)
static int shmobile_smp_apmu_do_suspend(unsigned long cpu)
{
- shmobile_smp_hook(cpu, virt_to_phys(cpu_resume), 0);
+ shmobile_smp_hook(cpu, __pa_symbol(cpu_resume), 0);
shmobile_smp_apmu_cpu_shutdown(cpu);
cpu_do_idle(); /* WFI selects Core Standby */
return 1;
......
@@ -24,7 +24,7 @@ static void __iomem *shmobile_scu_base;
static int shmobile_scu_cpu_prepare(unsigned int cpu)
{
/* For this particular CPU register SCU SMP boot vector */
- shmobile_smp_hook(cpu, virt_to_phys(shmobile_boot_scu),
+ shmobile_smp_hook(cpu, __pa_symbol(shmobile_boot_scu),
shmobile_scu_base_phys);
return 0;
}
@@ -33,7 +33,7 @@ void __init shmobile_smp_scu_prepare_cpus(phys_addr_t scu_base_phys,
unsigned int max_cpus)
{
/* install boot code shared by all CPUs */
- shmobile_boot_fn = virt_to_phys(shmobile_smp_boot);
+ shmobile_boot_fn = __pa_symbol(shmobile_smp_boot);
/* enable SCU and cache coherency on booting CPU */
shmobile_scu_base_phys = scu_base_phys;
......
@@ -40,7 +40,7 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
- writel(virt_to_phys(secondary_startup),
+ writel(__pa_symbol(secondary_startup),
sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
flush_cache_all();
@@ -63,7 +63,7 @@ static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle
SOCFPGA_A10_RSTMGR_MODMPURST);
memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
- writel(virt_to_phys(secondary_startup),
+ writel(__pa_symbol(secondary_startup),
sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff));
flush_cache_all();
......
@@ -117,7 +117,7 @@ static void __init spear13xx_smp_prepare_cpus(unsigned int max_cpus)
* (presently it is in SRAM). The BootMonitor waits until it receives a
* soft interrupt, and then the secondary CPU branches to this address.
*/
- __raw_writel(virt_to_phys(spear13xx_secondary_startup), SYS_LOCATION);
+ __raw_writel(__pa_symbol(spear13xx_secondary_startup), SYS_LOCATION);
}
const struct smp_operations spear13xx_smp_ops __initconst = {
......
@@ -103,7 +103,7 @@ static void __init sti_smp_prepare_cpus(unsigned int max_cpus)
u32 __iomem *cpu_strt_ptr;
u32 release_phys;
int cpu;
- unsigned long entry_pa = virt_to_phys(sti_secondary_startup);
+ unsigned long entry_pa = __pa_symbol(sti_secondary_startup);
np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
......
@@ -80,7 +80,7 @@ static int sun6i_smp_boot_secondary(unsigned int cpu,
spin_lock(&cpu_lock);
/* Set CPU boot address */
- writel(virt_to_phys(secondary_startup),
+ writel(__pa_symbol(secondary_startup),
cpucfg_membase + CPUCFG_PRIVATE0_REG);
/* Assert the CPU core in reset */
@@ -162,7 +162,7 @@ static int sun8i_smp_boot_secondary(unsigned int cpu,
spin_lock(&cpu_lock);
/* Set CPU boot address */
- writel(virt_to_phys(secondary_startup),
+ writel(__pa_symbol(secondary_startup),
cpucfg_membase + CPUCFG_PRIVATE0_REG);
/* Assert the CPU core in reset */
......
@@ -5,7 +5,7 @@
static int tango_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- tango_set_aux_boot_addr(virt_to_phys(secondary_startup));
+ tango_set_aux_boot_addr(__pa_symbol(secondary_startup));
tango_start_aux_core(cpu);
return 0;
}
......
@@ -5,7 +5,7 @@
static int tango_pm_powerdown(unsigned long arg)
{
- tango_suspend(virt_to_phys(cpu_resume));
+ tango_suspend(__pa_symbol(cpu_resume));
return -EIO; /* tango_suspend has failed */
}
......
@@ -94,14 +94,14 @@ void __init tegra_cpu_reset_handler_init(void)
__tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_PRESENT] =
*((u32 *)cpu_possible_mask);
__tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_SECONDARY] =
- virt_to_phys((void *)secondary_startup);
+ __pa_symbol((void *)secondary_startup);
#endif
#ifdef CONFIG_PM_SLEEP
__tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_LP1] =
TEGRA_IRAM_LPx_RESUME_AREA;
__tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_LP2] =
- virt_to_phys((void *)tegra_resume);
+ __pa_symbol((void *)tegra_resume);
#endif
tegra_cpu_reset_handler_enable();
......
@@ -54,7 +54,7 @@ static void wakeup_secondary(void)
* backup ram register at offset 0x1FF0, which is what boot rom code
* is waiting for. This will wake up the secondary core from WFE.
*/
- writel(virt_to_phys(secondary_startup),
+ writel(__pa_symbol(secondary_startup),
backupram + UX500_CPU1_JUMPADDR_OFFSET);
writel(0xA1FEED01,
backupram + UX500_CPU1_WAKEMAGIC_OFFSET);
......
@@ -166,7 +166,7 @@ static int __init dcscb_init(void)
* Future entries into the kernel can now go
* through the cluster entry vectors.
*/
- vexpress_flags_set(virt_to_phys(mcpm_entry_point));
+ vexpress_flags_set(__pa_symbol(mcpm_entry_point));
return 0;
}
......
@@ -79,7 +79,7 @@ static void __init vexpress_smp_dt_prepare_cpus(unsigned int max_cpus)
* until it receives a soft interrupt, and then the
* secondary CPU branches to this address.
*/
- vexpress_flags_set(virt_to_phys(versatile_secondary_startup));
+ vexpress_flags_set(__pa_symbol(versatile_secondary_startup));
}
const struct smp_operations vexpress_smp_dt_ops __initconst = {
......
@@ -54,7 +54,7 @@ static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster)
if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
return -EINVAL;
ve_spc_set_resume_addr(cluster, cpu,
- virt_to_phys(mcpm_entry_point));
+ __pa_symbol(mcpm_entry_point));
ve_spc_cpu_wakeup_irq(cluster, cpu, true);
return 0;
}
@@ -159,7 +159,7 @@ static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
static void tc2_pm_cpu_suspend_prepare(unsigned int cpu, unsigned int cluster)
{
- ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point));
+ ve_spc_set_resume_addr(cluster, cpu, __pa_symbol(mcpm_entry_point));
}
static void tc2_pm_cpu_is_up(unsigned int cpu, unsigned int cluster)
......
@@ -76,7 +76,7 @@ void __init zx_smp_prepare_cpus(unsigned int max_cpus)
* until it receives a soft interrupt, and then the
* secondary CPU branches to this address.
*/
- __raw_writel(virt_to_phys(zx_secondary_startup),
+ __raw_writel(__pa_symbol(zx_secondary_startup),
aonsysctrl_base + AON_SYS_CTRL_RESERVED1);
iounmap(aonsysctrl_base);
@@ -94,7 +94,7 @@ void __init zx_smp_prepare_cpus(unsigned int max_cpus)
/* Map the first 4 KB IRAM for suspend usage */
sys_iram = __arm_ioremap_exec(ZX_IRAM_BASE, PAGE_SIZE, false);
- zx_secondary_startup_pa = virt_to_phys(zx_secondary_startup);
+ zx_secondary_startup_pa = __pa_symbol(zx_secondary_startup);
fncpy(sys_iram, &zx_resume_jump, zx_suspend_iram_sz);
}
......
@@ -89,7 +89,7 @@ EXPORT_SYMBOL(zynq_cpun_start);
static int zynq_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- return zynq_cpun_start(virt_to_phys(secondary_startup), cpu);
+ return zynq_cpun_start(__pa_symbol(secondary_startup), cpu);
}
/*
......
@@ -29,6 +29,7 @@ config CPU_ARM720T
select CPU_COPY_V4WT if MMU
select CPU_CP15_MMU
select CPU_PABRT_LEGACY
+ select CPU_THUMB_CAPABLE
select CPU_TLB_V4WT if MMU
help
A 32-bit RISC processor with 8kByte Cache, Write Buffer and
@@ -46,6 +47,7 @@ config CPU_ARM740T
select CPU_CACHE_V4
select CPU_CP15_MPU
select CPU_PABRT_LEGACY
+ select CPU_THUMB_CAPABLE
help
A 32-bit RISC processor with 8KB cache or 4KB variants,
write buffer and MPU(Protection Unit) built around
@@ -79,6 +81,7 @@ config CPU_ARM920T
select CPU_COPY_V4WB if MMU
select CPU_CP15_MMU
select CPU_PABRT_LEGACY
+ select CPU_THUMB_CAPABLE
select CPU_TLB_V4WBI if MMU
help
The ARM920T is licensed to be produced by numerous vendors,
@@ -97,6 +100,7 @@ config CPU_ARM922T
select CPU_COPY_V4WB if MMU
select CPU_CP15_MMU
select CPU_PABRT_LEGACY
+ select CPU_THUMB_CAPABLE
select CPU_TLB_V4WBI if MMU
help
The ARM922T is a version of the ARM920T, but with smaller
@@ -116,6 +120,7 @@ config CPU_ARM925T
select CPU_COPY_V4WB if MMU
select CPU_CP15_MMU
select CPU_PABRT_LEGACY
+ select CPU_THUMB_CAPABLE
select CPU_TLB_V4WBI if MMU
help
The ARM925T is a mix between the ARM920T and ARM926T, but with
@@ -134,6 +139,7 @@ config CPU_ARM926T
select CPU_COPY_V4WB if MMU
select CPU_CP15_MMU
select CPU_PABRT_LEGACY
+ select CPU_THUMB_CAPABLE
select CPU_TLB_V4WBI if MMU
help
This is a variant of the ARM920. It has slightly different
@@ -170,6 +176,7 @@ config CPU_ARM940T
select CPU_CACHE_VIVT
select CPU_CP15_MPU
select CPU_PABRT_LEGACY
+ select CPU_THUMB_CAPABLE
help
ARM940T is a member of the ARM9TDMI family of general-
purpose microprocessors with MPU and separate 4KB
@@ -188,6 +195,7 @@ config CPU_ARM946E
select CPU_CACHE_VIVT
select CPU_CP15_MPU
select CPU_PABRT_LEGACY
+ select CPU_THUMB_CAPABLE
help
ARM946E-S is a member of the ARM9E-S family of high-
performance, 32-bit system-on-chip processor solutions.
@@ -206,6 +214,7 @@ config CPU_ARM1020
select CPU_COPY_V4WB if MMU
select CPU_CP15_MMU
select CPU_PABRT_LEGACY
+ select CPU_THUMB_CAPABLE
select CPU_TLB_V4WBI if MMU
help
The ARM1020 is the 32K cached version of the ARM10 processor,
@@ -225,6 +234,7 @@ config CPU_ARM1020E
select CPU_COPY_V4WB if MMU
select CPU_CP15_MMU
select CPU_PABRT_LEGACY
+ select CPU_THUMB_CAPABLE
select CPU_TLB_V4WBI if MMU
# ARM1022E
@@ -236,6 +246,7 @@ config CPU_ARM1022
select CPU_COPY_V4WB if MMU # can probably do better
select CPU_CP15_MMU
select CPU_PABRT_LEGACY
+ select CPU_THUMB_CAPABLE
select CPU_TLB_V4WBI if MMU
help
The ARM1022E is an implementation of the ARMv5TE architecture
@@ -254,6 +265,7 @@ config CPU_ARM1026
select CPU_COPY_V4WB if MMU # can probably do better
select CPU_CP15_MMU
select CPU_PABRT_LEGACY
+ select CPU_THUMB_CAPABLE
select CPU_TLB_V4WBI if MMU
help
The ARM1026EJ-S is an implementation of the ARMv5TEJ architecture
@@ -302,6 +314,7 @@ config CPU_XSCALE
select CPU_CACHE_VIVT
select CPU_CP15_MMU
select CPU_PABRT_LEGACY
select CPU_THUMB_CAPABLE
select CPU_TLB_V4WBI if MMU select CPU_TLB_V4WBI if MMU
# XScale Core Version 3 # XScale Core Version 3
...@@ -312,6 +325,7 @@ config CPU_XSC3 ...@@ -312,6 +325,7 @@ config CPU_XSC3
select CPU_CACHE_VIVT select CPU_CACHE_VIVT
select CPU_CP15_MMU select CPU_CP15_MMU
select CPU_PABRT_LEGACY select CPU_PABRT_LEGACY
select CPU_THUMB_CAPABLE
select CPU_TLB_V4WBI if MMU select CPU_TLB_V4WBI if MMU
select IO_36 select IO_36
...@@ -324,6 +338,7 @@ config CPU_MOHAWK ...@@ -324,6 +338,7 @@ config CPU_MOHAWK
select CPU_COPY_V4WB if MMU select CPU_COPY_V4WB if MMU
select CPU_CP15_MMU select CPU_CP15_MMU
select CPU_PABRT_LEGACY select CPU_PABRT_LEGACY
select CPU_THUMB_CAPABLE
select CPU_TLB_V4WBI if MMU select CPU_TLB_V4WBI if MMU
# Feroceon # Feroceon
...@@ -335,6 +350,7 @@ config CPU_FEROCEON ...@@ -335,6 +350,7 @@ config CPU_FEROCEON
select CPU_COPY_FEROCEON if MMU select CPU_COPY_FEROCEON if MMU
select CPU_CP15_MMU select CPU_CP15_MMU
select CPU_PABRT_LEGACY select CPU_PABRT_LEGACY
select CPU_THUMB_CAPABLE
select CPU_TLB_FEROCEON if MMU select CPU_TLB_FEROCEON if MMU
config CPU_FEROCEON_OLD_ID config CPU_FEROCEON_OLD_ID
...@@ -367,6 +383,7 @@ config CPU_V6 ...@@ -367,6 +383,7 @@ config CPU_V6
select CPU_CP15_MMU select CPU_CP15_MMU
select CPU_HAS_ASID if MMU select CPU_HAS_ASID if MMU
select CPU_PABRT_V6 select CPU_PABRT_V6
select CPU_THUMB_CAPABLE
select CPU_TLB_V6 if MMU select CPU_TLB_V6 if MMU
# ARMv6k # ARMv6k
...@@ -381,6 +398,7 @@ config CPU_V6K ...@@ -381,6 +398,7 @@ config CPU_V6K
select CPU_CP15_MMU select CPU_CP15_MMU
select CPU_HAS_ASID if MMU select CPU_HAS_ASID if MMU
select CPU_PABRT_V6 select CPU_PABRT_V6
select CPU_THUMB_CAPABLE
select CPU_TLB_V6 if MMU select CPU_TLB_V6 if MMU
# ARMv7 # ARMv7
...@@ -396,6 +414,7 @@ config CPU_V7 ...@@ -396,6 +414,7 @@ config CPU_V7
select CPU_CP15_MPU if !MMU select CPU_CP15_MPU if !MMU
select CPU_HAS_ASID if MMU select CPU_HAS_ASID if MMU
select CPU_PABRT_V7 select CPU_PABRT_V7
select CPU_THUMB_CAPABLE
select CPU_TLB_V7 if MMU select CPU_TLB_V7 if MMU
# ARMv7M # ARMv7M
...@@ -410,11 +429,17 @@ config CPU_V7M ...@@ -410,11 +429,17 @@ config CPU_V7M
config CPU_THUMBONLY config CPU_THUMBONLY
bool bool
select CPU_THUMB_CAPABLE
# There are no CPUs available with MMU that don't implement an ARM ISA: # There are no CPUs available with MMU that don't implement an ARM ISA:
depends on !MMU depends on !MMU
help help
Select this if your CPU doesn't support the 32 bit ARM instructions. Select this if your CPU doesn't support the 32 bit ARM instructions.
config CPU_THUMB_CAPABLE
bool
help
Select this if your CPU can support Thumb mode.
# Figure out what processor architecture version we should be using. # Figure out what processor architecture version we should be using.
# This defines the compiler instruction set which depends on the machine type. # This defines the compiler instruction set which depends on the machine type.
config CPU_32v3 config CPU_32v3
...@@ -655,11 +680,7 @@ config ARCH_DMA_ADDR_T_64BIT ...@@ -655,11 +680,7 @@ config ARCH_DMA_ADDR_T_64BIT
config ARM_THUMB config ARM_THUMB
bool "Support Thumb user binaries" if !CPU_THUMBONLY bool "Support Thumb user binaries" if !CPU_THUMBONLY
depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || \ depends on CPU_THUMB_CAPABLE
CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || \
CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || \
CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || \
CPU_V7 || CPU_FEROCEON || CPU_V7M
default y default y
help help
Say Y if you want to include kernel support for running user space Say Y if you want to include kernel support for running user space
......
...@@ -14,6 +14,7 @@ endif ...@@ -14,6 +14,7 @@ endif
obj-$(CONFIG_ARM_PTDUMP) += dump.o obj-$(CONFIG_ARM_PTDUMP) += dump.o
obj-$(CONFIG_MODULES) += proc-syms.o obj-$(CONFIG_MODULES) += proc-syms.o
obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
obj-$(CONFIG_HIGHMEM) += highmem.o obj-$(CONFIG_HIGHMEM) += highmem.o
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#define pr_fmt(fmt) "uniphier: " fmt #define pr_fmt(fmt) "uniphier: " fmt
#include <linux/bitops.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/log2.h> #include <linux/log2.h>
...@@ -71,8 +72,7 @@ ...@@ -71,8 +72,7 @@
* @ctrl_base: virtual base address of control registers * @ctrl_base: virtual base address of control registers
* @rev_base: virtual base address of revision registers * @rev_base: virtual base address of revision registers
* @op_base: virtual base address of operation registers * @op_base: virtual base address of operation registers
* @way_present_mask: each bit specifies if the way is present * @way_mask: each bit specifies if the way is present
* @way_locked_mask: each bit specifies if the way is locked
* @nsets: number of associativity sets * @nsets: number of associativity sets
* @line_size: line size in bytes * @line_size: line size in bytes
* @range_op_max_size: max size that can be handled by a single range operation * @range_op_max_size: max size that can be handled by a single range operation
...@@ -83,8 +83,7 @@ struct uniphier_cache_data { ...@@ -83,8 +83,7 @@ struct uniphier_cache_data {
void __iomem *rev_base; void __iomem *rev_base;
void __iomem *op_base; void __iomem *op_base;
void __iomem *way_ctrl_base; void __iomem *way_ctrl_base;
u32 way_present_mask; u32 way_mask;
u32 way_locked_mask;
u32 nsets; u32 nsets;
u32 line_size; u32 line_size;
u32 range_op_max_size; u32 range_op_max_size;
...@@ -234,17 +233,13 @@ static void __uniphier_cache_enable(struct uniphier_cache_data *data, bool on) ...@@ -234,17 +233,13 @@ static void __uniphier_cache_enable(struct uniphier_cache_data *data, bool on)
writel_relaxed(val, data->ctrl_base + UNIPHIER_SSCC); writel_relaxed(val, data->ctrl_base + UNIPHIER_SSCC);
} }
static void __init __uniphier_cache_set_locked_ways( static void __init __uniphier_cache_set_active_ways(
struct uniphier_cache_data *data, struct uniphier_cache_data *data)
u32 way_mask)
{ {
unsigned int cpu; unsigned int cpu;
data->way_locked_mask = way_mask & data->way_present_mask;
for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
writel_relaxed(~data->way_locked_mask & data->way_present_mask, writel_relaxed(data->way_mask, data->way_ctrl_base + 4 * cpu);
data->way_ctrl_base + 4 * cpu);
} }
static void uniphier_cache_maint_range(unsigned long start, unsigned long end, static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
...@@ -307,7 +302,7 @@ static void __init uniphier_cache_enable(void) ...@@ -307,7 +302,7 @@ static void __init uniphier_cache_enable(void)
list_for_each_entry(data, &uniphier_cache_list, list) { list_for_each_entry(data, &uniphier_cache_list, list) {
__uniphier_cache_enable(data, true); __uniphier_cache_enable(data, true);
__uniphier_cache_set_locked_ways(data, 0); __uniphier_cache_set_active_ways(data);
} }
} }
...@@ -382,8 +377,8 @@ static int __init __uniphier_cache_init(struct device_node *np, ...@@ -382,8 +377,8 @@ static int __init __uniphier_cache_init(struct device_node *np,
goto err; goto err;
} }
data->way_present_mask = data->way_mask = GENMASK(cache_size / data->nsets / data->line_size - 1,
((u32)1 << cache_size / data->nsets / data->line_size) - 1; 0);
data->ctrl_base = of_iomap(np, 0); data->ctrl_base = of_iomap(np, 0);
if (!data->ctrl_base) { if (!data->ctrl_base) {
......
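The way-mask computation above is switched from an open-coded shift to GENMASK(). As a sanity check (a standalone userspace sketch, not driver code), the two forms agree for any way count below 32, and GENMASK() additionally avoids the undefined 1 << 32 shift at the upper bound:

/* Standalone check: GENMASK(n - 1, 0) matches the old ((u32)1 << n) - 1
 * form for way counts 1..31. GENMASK32 is a simplified local definition.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l) \
	((UINT32_MAX << (l)) & (UINT32_MAX >> (31 - (h))))

int main(void)
{
	for (unsigned int n = 1; n < 32; n++) {
		uint32_t old_mask = ((uint32_t)1 << n) - 1;
		uint32_t new_mask = GENMASK32(n - 1, 0);

		assert(old_mask == new_mask);
	}
	printf("GENMASK(n - 1, 0) == (1u << n) - 1 for n in [1, 31]\n");
	return 0;
}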
...@@ -868,6 +868,9 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, ...@@ -868,6 +868,9 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
vma->vm_end - vma->vm_start, vma->vm_end - vma->vm_start,
vma->vm_page_prot); vma->vm_page_prot);
} }
#else
ret = vm_iomap_memory(vma, vma->vm_start,
(vma->vm_end - vma->vm_start));
#endif /* CONFIG_MMU */ #endif /* CONFIG_MMU */
return ret; return ret;
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <asm/fixmap.h> #include <asm/fixmap.h>
#include <asm/memory.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
struct addr_marker { struct addr_marker {
...@@ -31,8 +32,8 @@ static struct addr_marker address_markers[] = { ...@@ -31,8 +32,8 @@ static struct addr_marker address_markers[] = {
{ 0, "vmalloc() Area" }, { 0, "vmalloc() Area" },
{ VMALLOC_END, "vmalloc() End" }, { VMALLOC_END, "vmalloc() End" },
{ FIXADDR_START, "Fixmap Area" }, { FIXADDR_START, "Fixmap Area" },
{ CONFIG_VECTORS_BASE, "Vectors" }, { VECTORS_BASE, "Vectors" },
{ CONFIG_VECTORS_BASE + PAGE_SIZE * 2, "Vectors End" }, { VECTORS_BASE + PAGE_SIZE * 2, "Vectors End" },
{ -1, NULL }, { -1, NULL },
}; };
......
...@@ -327,6 +327,12 @@ void flush_dcache_page(struct page *page) ...@@ -327,6 +327,12 @@ void flush_dcache_page(struct page *page)
if (page == ZERO_PAGE(0)) if (page == ZERO_PAGE(0))
return; return;
if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
if (test_bit(PG_dcache_clean, &page->flags))
clear_bit(PG_dcache_clean, &page->flags);
return;
}
mapping = page_mapping(page); mapping = page_mapping(page);
if (!cache_ops_need_broadcast() && if (!cache_ops_need_broadcast() &&
......
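The added early return in flush_dcache_page() defers the actual flush for VIPT non-aliasing caches: the page is only marked as possibly dirty by clearing PG_dcache_clean, and the flush happens later when the mapping code finds the bit clear. A minimal standalone sketch of that lazy-flush idea, using made-up names (fake_page, dcache_flush_page) rather than the kernel's helpers:

#include <stdio.h>

#define PAGE_DCACHE_CLEAN (1u << 0)

struct fake_page {
	unsigned int flags;
};

static void dcache_flush_page(struct fake_page *p)
{
	printf("flushing dcache for page %p\n", (void *)p);
	p->flags |= PAGE_DCACHE_CLEAN;
}

/* Cheap path: just record that the cache may now be dirty. */
static void mark_page_dirty(struct fake_page *p)
{
	p->flags &= ~PAGE_DCACHE_CLEAN;
}

/* Expensive path: flush only if someone dirtied the page since last flush. */
static void map_page_into_user(struct fake_page *p)
{
	if (!(p->flags & PAGE_DCACHE_CLEAN))
		dcache_flush_page(p);
}

int main(void)
{
	struct fake_page p = { .flags = PAGE_DCACHE_CLEAN };

	mark_page_dirty(&p);     /* kernel wrote to the page */
	map_page_into_user(&p);  /* flush happens here, once */
	map_page_into_user(&p);  /* already clean: no flush */
	return 0;
}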
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include <asm/cp15.h> #include <asm/cp15.h>
#include <asm/mach-types.h> #include <asm/mach-types.h>
#include <asm/memblock.h> #include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h> #include <asm/prom.h>
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/setup.h> #include <asm/setup.h>
...@@ -227,41 +228,59 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align) ...@@ -227,41 +228,59 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
return phys; return phys;
} }
void __init arm_memblock_init(const struct machine_desc *mdesc) static void __init arm_initrd_init(void)
{ {
/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
memblock_reserve(__pa(_sdata), _end - _sdata);
#else
memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD #ifdef CONFIG_BLK_DEV_INITRD
phys_addr_t start;
unsigned long size;
/* FDT scan will populate initrd_start */ /* FDT scan will populate initrd_start */
if (initrd_start && !phys_initrd_size) { if (initrd_start && !phys_initrd_size) {
phys_initrd_start = __virt_to_phys(initrd_start); phys_initrd_start = __virt_to_phys(initrd_start);
phys_initrd_size = initrd_end - initrd_start; phys_initrd_size = initrd_end - initrd_start;
} }
initrd_start = initrd_end = 0; initrd_start = initrd_end = 0;
if (phys_initrd_size &&
!memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { if (!phys_initrd_size)
return;
/*
* Round the memory region to page boundaries as per free_initrd_mem().
* This allows us to detect whether the pages overlapping the initrd
* are in use, but more importantly, reserves the entire set of pages
* as we don't want these pages allocated for other purposes.
*/
start = round_down(phys_initrd_start, PAGE_SIZE);
size = phys_initrd_size + (phys_initrd_start - start);
size = round_up(size, PAGE_SIZE);
if (!memblock_is_region_memory(start, size)) {
pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n", pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
(u64)phys_initrd_start, phys_initrd_size); (u64)start, size);
phys_initrd_start = phys_initrd_size = 0; return;
} }
if (phys_initrd_size &&
memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { if (memblock_is_region_reserved(start, size)) {
pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n", pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
(u64)phys_initrd_start, phys_initrd_size); (u64)start, size);
phys_initrd_start = phys_initrd_size = 0; return;
} }
if (phys_initrd_size) {
memblock_reserve(phys_initrd_start, phys_initrd_size); memblock_reserve(start, size);
/* Now convert initrd to virtual addresses */ /* Now convert initrd to virtual addresses */
initrd_start = __phys_to_virt(phys_initrd_start); initrd_start = __phys_to_virt(phys_initrd_start);
initrd_end = initrd_start + phys_initrd_size; initrd_end = initrd_start + phys_initrd_size;
}
#endif #endif
}
void __init arm_memblock_init(const struct machine_desc *mdesc)
{
/* Register the kernel text, kernel data and initrd with memblock. */
memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);
arm_initrd_init();
arm_mm_memblock_reserve(); arm_mm_memblock_reserve();
...@@ -521,8 +540,7 @@ void __init mem_init(void) ...@@ -521,8 +540,7 @@ void __init mem_init(void)
" .data : 0x%p" " - 0x%p" " (%4td kB)\n" " .data : 0x%p" " - 0x%p" " (%4td kB)\n"
" .bss : 0x%p" " - 0x%p" " (%4td kB)\n", " .bss : 0x%p" " - 0x%p" " (%4td kB)\n",
MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE),
(PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM #ifdef CONFIG_HAVE_TCM
MLK(DTCM_OFFSET, (unsigned long) dtcm_end), MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
MLK(ITCM_OFFSET, (unsigned long) itcm_end), MLK(ITCM_OFFSET, (unsigned long) itcm_end),
......
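arm_initrd_init() now widens the reserved region to whole pages, as the new comment explains. A small standalone sketch of the same arithmetic, assuming a 4 KiB page size and an example (made-up) initrd placement:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE        ((uint64_t)4096)
#define round_down(x, a) ((x) & ~((a) - 1))
#define round_up(x, a)   (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint64_t phys_initrd_start = 0x80A01200;   /* example, not page aligned */
	uint64_t phys_initrd_size  = 0x00301000;

	/* Pull the start back to a page boundary and grow the size by the slack. */
	uint64_t start = round_down(phys_initrd_start, PAGE_SIZE);
	uint64_t size  = phys_initrd_size + (phys_initrd_start - start);

	/* Then round the size up so the last partial page is reserved too. */
	size = round_up(size, PAGE_SIZE);

	printf("reserve [%#llx, %#llx)\n",
	       (unsigned long long)start,
	       (unsigned long long)(start + size));
	return 0;
}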
...@@ -1152,13 +1152,12 @@ early_param("vmalloc", early_vmalloc); ...@@ -1152,13 +1152,12 @@ early_param("vmalloc", early_vmalloc);
phys_addr_t arm_lowmem_limit __initdata = 0; phys_addr_t arm_lowmem_limit __initdata = 0;
void __init sanity_check_meminfo(void) void __init adjust_lowmem_bounds(void)
{ {
phys_addr_t memblock_limit = 0; phys_addr_t memblock_limit = 0;
int highmem = 0;
u64 vmalloc_limit; u64 vmalloc_limit;
struct memblock_region *reg; struct memblock_region *reg;
bool should_use_highmem = false; phys_addr_t lowmem_limit = 0;
/* /*
* Let's use our own (unoptimized) equivalent of __pa() that is * Let's use our own (unoptimized) equivalent of __pa() that is
...@@ -1172,43 +1171,18 @@ void __init sanity_check_meminfo(void) ...@@ -1172,43 +1171,18 @@ void __init sanity_check_meminfo(void)
for_each_memblock(memory, reg) { for_each_memblock(memory, reg) {
phys_addr_t block_start = reg->base; phys_addr_t block_start = reg->base;
phys_addr_t block_end = reg->base + reg->size; phys_addr_t block_end = reg->base + reg->size;
phys_addr_t size_limit = reg->size;
if (reg->base >= vmalloc_limit) if (reg->base < vmalloc_limit) {
highmem = 1; if (block_end > lowmem_limit)
else /*
size_limit = vmalloc_limit - reg->base; * Compare as u64 to ensure vmalloc_limit does
* not get truncated. block_end should always
* fit in phys_addr_t so there should be no
if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) { * issue with assignment.
*/
if (highmem) { lowmem_limit = min_t(u64,
pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n", vmalloc_limit,
&block_start, &block_end); block_end);
memblock_remove(reg->base, reg->size);
should_use_highmem = true;
continue;
}
if (reg->size > size_limit) {
phys_addr_t overlap_size = reg->size - size_limit;
pr_notice("Truncating RAM at %pa-%pa",
&block_start, &block_end);
block_end = vmalloc_limit;
pr_cont(" to -%pa", &block_end);
memblock_remove(vmalloc_limit, overlap_size);
should_use_highmem = true;
}
}
if (!highmem) {
if (block_end > arm_lowmem_limit) {
if (reg->size > size_limit)
arm_lowmem_limit = vmalloc_limit;
else
arm_lowmem_limit = block_end;
}
/* /*
* Find the first non-pmd-aligned page, and point * Find the first non-pmd-aligned page, and point
...@@ -1227,14 +1201,13 @@ void __init sanity_check_meminfo(void) ...@@ -1227,14 +1201,13 @@ void __init sanity_check_meminfo(void)
if (!IS_ALIGNED(block_start, PMD_SIZE)) if (!IS_ALIGNED(block_start, PMD_SIZE))
memblock_limit = block_start; memblock_limit = block_start;
else if (!IS_ALIGNED(block_end, PMD_SIZE)) else if (!IS_ALIGNED(block_end, PMD_SIZE))
memblock_limit = arm_lowmem_limit; memblock_limit = lowmem_limit;
} }
} }
} }
if (should_use_highmem) arm_lowmem_limit = lowmem_limit;
pr_notice("Consider using a HIGHMEM enabled kernel.\n");
high_memory = __va(arm_lowmem_limit - 1) + 1; high_memory = __va(arm_lowmem_limit - 1) + 1;
...@@ -1248,6 +1221,18 @@ void __init sanity_check_meminfo(void) ...@@ -1248,6 +1221,18 @@ void __init sanity_check_meminfo(void)
if (!memblock_limit) if (!memblock_limit)
memblock_limit = arm_lowmem_limit; memblock_limit = arm_lowmem_limit;
if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
if (memblock_end_of_DRAM() > arm_lowmem_limit) {
phys_addr_t end = memblock_end_of_DRAM();
pr_notice("Ignoring RAM at %pa-%pa\n",
&memblock_limit, &end);
pr_notice("Consider using a HIGHMEM enabled kernel.\n");
memblock_remove(memblock_limit, end - memblock_limit);
}
}
memblock_set_current_limit(memblock_limit); memblock_set_current_limit(memblock_limit);
} }
...@@ -1437,11 +1422,7 @@ static void __init kmap_init(void) ...@@ -1437,11 +1422,7 @@ static void __init kmap_init(void)
static void __init map_lowmem(void) static void __init map_lowmem(void)
{ {
struct memblock_region *reg; struct memblock_region *reg;
#ifdef CONFIG_XIP_KERNEL phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE);
phys_addr_t kernel_x_start = round_down(__pa(_sdata), SECTION_SIZE);
#else
phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
#endif
phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
/* Map all the lowmem memory banks. */ /* Map all the lowmem memory banks. */
......
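The rewritten loop in adjust_lowmem_bounds() insists on comparing in u64, as its comment notes, because narrowing vmalloc_limit to a 32-bit phys_addr_t before the comparison could truncate it. A standalone sketch of that hazard with simplified stand-in types (not kernel code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t phys_addr_t;            /* no LPAE: 32-bit physical addresses */

#define min_u64(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	uint64_t vmalloc_limit = UINT64_C(1) << 32;   /* exactly 4 GiB */
	phys_addr_t block_end = 0xF0000000u;          /* fits in 32 bits */

	/* Wrong: narrowing the limit first truncates it to 0. */
	phys_addr_t bad = min_u64((phys_addr_t)vmalloc_limit, block_end);

	/* Right: compare in 64 bits, assign only the (in-range) smaller value. */
	phys_addr_t good = (phys_addr_t)min_u64(vmalloc_limit, (uint64_t)block_end);

	printf("bad  = %#" PRIx32 "\n", bad);    /* 0x0        */
	printf("good = %#" PRIx32 "\n", good);   /* 0xf0000000 */
	return 0;
}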
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/setup.h> #include <asm/setup.h>
...@@ -22,6 +23,8 @@ ...@@ -22,6 +23,8 @@
#include "mm.h" #include "mm.h"
unsigned long vectors_base;
#ifdef CONFIG_ARM_MPU #ifdef CONFIG_ARM_MPU
struct mpu_rgn_info mpu_rgn_info; struct mpu_rgn_info mpu_rgn_info;
...@@ -85,7 +88,7 @@ static unsigned long irbar_read(void) ...@@ -85,7 +88,7 @@ static unsigned long irbar_read(void)
} }
/* MPU initialisation functions */ /* MPU initialisation functions */
void __init sanity_check_meminfo_mpu(void) void __init adjust_lowmem_bounds_mpu(void)
{ {
phys_addr_t phys_offset = PHYS_OFFSET; phys_addr_t phys_offset = PHYS_OFFSET;
phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size; phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size;
...@@ -274,19 +277,64 @@ void __init mpu_setup(void) ...@@ -274,19 +277,64 @@ void __init mpu_setup(void)
} }
} }
#else #else
static void sanity_check_meminfo_mpu(void) {} static void adjust_lowmem_bounds_mpu(void) {}
static void __init mpu_setup(void) {} static void __init mpu_setup(void) {}
#endif /* CONFIG_ARM_MPU */ #endif /* CONFIG_ARM_MPU */
#ifdef CONFIG_CPU_CP15
#ifdef CONFIG_CPU_HIGH_VECTOR
static unsigned long __init setup_vectors_base(void)
{
unsigned long reg = get_cr();
set_cr(reg | CR_V);
return 0xffff0000;
}
#else /* CONFIG_CPU_HIGH_VECTOR */
/* Write exception base address to VBAR */
static inline void set_vbar(unsigned long val)
{
asm("mcr p15, 0, %0, c12, c0, 0" : : "r" (val) : "cc");
}
/*
* Security extensions, bits[7:4], permitted values,
* 0b0000 - not implemented, 0b0001/0b0010 - implemented
*/
static inline bool security_extensions_enabled(void)
{
return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
}
static unsigned long __init setup_vectors_base(void)
{
unsigned long base = 0, reg = get_cr();
set_cr(reg & ~CR_V);
if (security_extensions_enabled()) {
if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM))
base = CONFIG_DRAM_BASE;
set_vbar(base);
} else if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM)) {
if (CONFIG_DRAM_BASE != 0)
pr_err("Security extensions not enabled, vectors cannot be remapped to RAM, vectors base will be 0x00000000\n");
}
return base;
}
#endif /* CONFIG_CPU_HIGH_VECTOR */
#endif /* CONFIG_CPU_CP15 */
void __init arm_mm_memblock_reserve(void) void __init arm_mm_memblock_reserve(void)
{ {
#ifndef CONFIG_CPU_V7M #ifndef CONFIG_CPU_V7M
vectors_base = IS_ENABLED(CONFIG_CPU_CP15) ? setup_vectors_base() : 0;
/* /*
* Register the exception vector page. * Register the exception vector page.
* some architectures which the DRAM is the exception vector to trap, * some architectures which the DRAM is the exception vector to trap,
* alloc_page breaks with error, although it is not NULL, but "0." * alloc_page breaks with error, although it is not NULL, but "0."
*/ */
memblock_reserve(CONFIG_VECTORS_BASE, 2 * PAGE_SIZE); memblock_reserve(vectors_base, 2 * PAGE_SIZE);
#else /* ifndef CONFIG_CPU_V7M */ #else /* ifndef CONFIG_CPU_V7M */
/* /*
* There is no dedicated vector page on V7-M. So nothing needs to be * There is no dedicated vector page on V7-M. So nothing needs to be
...@@ -295,10 +343,10 @@ void __init arm_mm_memblock_reserve(void) ...@@ -295,10 +343,10 @@ void __init arm_mm_memblock_reserve(void)
#endif #endif
} }
void __init sanity_check_meminfo(void) void __init adjust_lowmem_bounds(void)
{ {
phys_addr_t end; phys_addr_t end;
sanity_check_meminfo_mpu(); adjust_lowmem_bounds_mpu();
end = memblock_end_of_DRAM(); end = memblock_end_of_DRAM();
high_memory = __va(end - 1) + 1; high_memory = __va(end - 1) + 1;
memblock_set_current_limit(end); memblock_set_current_limit(end);
...@@ -310,7 +358,7 @@ void __init sanity_check_meminfo(void) ...@@ -310,7 +358,7 @@ void __init sanity_check_meminfo(void)
*/ */
void __init paging_init(const struct machine_desc *mdesc) void __init paging_init(const struct machine_desc *mdesc)
{ {
early_trap_init((void *)CONFIG_VECTORS_BASE); early_trap_init((void *)vectors_base);
mpu_setup(); mpu_setup();
bootmem_init(); bootmem_init();
} }
......
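setup_vectors_base() keys off cpuid_feature_extract(CPUID_EXT_PFR1, 4), i.e. bits [7:4] of ID_PFR1, to decide whether VBAR can be programmed. A simplified, unsigned sketch of that 4-bit field extraction; the register value in main() is a made-up example:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified, unsigned version of a 4-bit ID-register field extraction. */
static unsigned int feature_extract(uint32_t reg, unsigned int field)
{
	return (reg >> field) & 0xf;
}

static bool security_extensions_present(uint32_t id_pfr1)
{
	/* Bits [7:4]: 0b0000 = not implemented, 0b0001/0b0010 = implemented. */
	return feature_extract(id_pfr1, 4) != 0;
}

int main(void)
{
	uint32_t id_pfr1 = 0x00000011;   /* made-up example register value */

	printf("security extensions: %s\n",
	       security_extensions_present(id_pfr1) ? "yes" : "no");
	return 0;
}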
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mmdebug.h>
#include <linux/mm.h>
#include <asm/sections.h>
#include <asm/memory.h>
#include <asm/fixmap.h>
#include <asm/dma.h>
#include "mm.h"
static inline bool __virt_addr_valid(unsigned long x)
{
/*
* high_memory does not get immediately defined, and there
* are early callers of __pa() against PAGE_OFFSET
*/
if (!high_memory && x >= PAGE_OFFSET)
return true;
if (high_memory && x >= PAGE_OFFSET && x < (unsigned long)high_memory)
return true;
/*
* MAX_DMA_ADDRESS is a virtual address that may not correspond to an
* actual physical address. Enough code relies on __pa(MAX_DMA_ADDRESS)
* that we just need to work around it and always return true.
*/
if (x == MAX_DMA_ADDRESS)
return true;
return false;
}
phys_addr_t __virt_to_phys(unsigned long x)
{
WARN(!__virt_addr_valid(x),
"virt_to_phys used for non-linear address: %pK (%pS)\n",
(void *)x, (void *)x);
return __virt_to_phys_nodebug(x);
}
EXPORT_SYMBOL(__virt_to_phys);
phys_addr_t __phys_addr_symbol(unsigned long x)
{
/* This is bounds checking against the kernel image only.
* __pa_symbol should only be used on kernel symbol addresses.
*/
VIRTUAL_BUG_ON(x < (unsigned long)KERNEL_START ||
x > (unsigned long)KERNEL_END);
return __pa_symbol_nodebug(x);
}
EXPORT_SYMBOL(__phys_addr_symbol);
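The new physaddr.c above gives virt_to_phys() and __pa_symbol() separate bounds checks under CONFIG_DEBUG_VIRTUAL. A hedged usage sketch (kernel-style code for an ARM build with DEBUG_VIRTUAL enabled, not a complete module) showing which helper fits which kind of address:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/memory.h>
#include <asm/sections.h>

static int __init physaddr_demo_init(void)
{
	void *buf = kmalloc(64, GFP_KERNEL);   /* linear-map (lowmem) address */
	phys_addr_t pa_buf, pa_text;

	if (!buf)
		return -ENOMEM;

	/* Linear-map address: checked by __virt_to_phys()'s WARN. */
	pa_buf = virt_to_phys(buf);

	/* Kernel image symbol: checked by __phys_addr_symbol()'s bounds test. */
	pa_text = __pa_symbol(_stext);

	pr_info("buf %pa, _stext %pa\n", &pa_buf, &pa_text);

	kfree(buf);
	return 0;
}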
#ifndef __ASM_ASM_UACCESS_H
#define __ASM_ASM_UACCESS_H
#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>
#include <asm/assembler.h>
/*
* User access enabling/disabling macros.
*/
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
.macro __uaccess_ttbr0_disable, tmp1
mrs \tmp1, ttbr1_el1 // swapper_pg_dir
add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
isb
.endm
.macro __uaccess_ttbr0_enable, tmp1
get_thread_info \tmp1
ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
isb
.endm
.macro uaccess_ttbr0_disable, tmp1
alternative_if_not ARM64_HAS_PAN
__uaccess_ttbr0_disable \tmp1
alternative_else_nop_endif
.endm
.macro uaccess_ttbr0_enable, tmp1, tmp2
alternative_if_not ARM64_HAS_PAN
save_and_disable_irq \tmp2 // avoid preemption
__uaccess_ttbr0_enable \tmp1
restore_irq \tmp2
alternative_else_nop_endif
.endm
#else
.macro uaccess_ttbr0_disable, tmp1
.endm
.macro uaccess_ttbr0_enable, tmp1, tmp2
.endm
#endif
/*
* These macros are no-ops when UAO is present.
*/
.macro uaccess_disable_not_uao, tmp1
uaccess_ttbr0_disable \tmp1
alternative_if ARM64_ALT_PAN_NOT_UAO
SET_PSTATE_PAN(1)
alternative_else_nop_endif
.endm
.macro uaccess_enable_not_uao, tmp1, tmp2
uaccess_ttbr0_enable \tmp1, \tmp2
alternative_if ARM64_ALT_PAN_NOT_UAO
SET_PSTATE_PAN(0)
alternative_else_nop_endif
.endm
#endif
...@@ -22,8 +22,6 @@ ...@@ -22,8 +22,6 @@
#include <asm/kernel-pgtable.h> #include <asm/kernel-pgtable.h>
#include <asm/sysreg.h> #include <asm/sysreg.h>
#ifndef __ASSEMBLY__
/* /*
* User space memory access functions * User space memory access functions
*/ */
...@@ -424,66 +422,4 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count); ...@@ -424,66 +422,4 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str); extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n); extern __must_check long strnlen_user(const char __user *str, long n);
#else /* __ASSEMBLY__ */
#include <asm/assembler.h>
/*
* User access enabling/disabling macros.
*/
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
.macro __uaccess_ttbr0_disable, tmp1
mrs \tmp1, ttbr1_el1 // swapper_pg_dir
add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
isb
.endm
.macro __uaccess_ttbr0_enable, tmp1
get_thread_info \tmp1
ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
isb
.endm
.macro uaccess_ttbr0_disable, tmp1
alternative_if_not ARM64_HAS_PAN
__uaccess_ttbr0_disable \tmp1
alternative_else_nop_endif
.endm
.macro uaccess_ttbr0_enable, tmp1, tmp2
alternative_if_not ARM64_HAS_PAN
save_and_disable_irq \tmp2 // avoid preemption
__uaccess_ttbr0_enable \tmp1
restore_irq \tmp2
alternative_else_nop_endif
.endm
#else
.macro uaccess_ttbr0_disable, tmp1
.endm
.macro uaccess_ttbr0_enable, tmp1, tmp2
.endm
#endif
/*
* These macros are no-ops when UAO is present.
*/
.macro uaccess_disable_not_uao, tmp1
uaccess_ttbr0_disable \tmp1
alternative_if ARM64_ALT_PAN_NOT_UAO
SET_PSTATE_PAN(1)
alternative_else_nop_endif
.endm
.macro uaccess_enable_not_uao, tmp1, tmp2
uaccess_ttbr0_enable \tmp1, \tmp2
alternative_if ARM64_ALT_PAN_NOT_UAO
SET_PSTATE_PAN(0)
alternative_else_nop_endif
.endm
#endif /* __ASSEMBLY__ */
#endif /* __ASM_UACCESS_H */ #endif /* __ASM_UACCESS_H */
...@@ -31,7 +31,7 @@ ...@@ -31,7 +31,7 @@
#include <asm/memory.h> #include <asm/memory.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <linux/uaccess.h> #include <asm/asm-uaccess.h>
#include <asm/unistd.h> #include <asm/unistd.h>
/* /*
......
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/uaccess.h> #include <asm/asm-uaccess.h>
.text .text
......
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/cache.h> #include <asm/cache.h>
#include <linux/uaccess.h> #include <asm/asm-uaccess.h>
/* /*
* Copy from user space to a kernel buffer (alignment handled by the hardware) * Copy from user space to a kernel buffer (alignment handled by the hardware)
......
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/cache.h> #include <asm/cache.h>
#include <linux/uaccess.h> #include <asm/asm-uaccess.h>
/* /*
* Copy from user space to user space (alignment handled by the hardware) * Copy from user space to user space (alignment handled by the hardware)
......
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/cache.h> #include <asm/cache.h>
#include <linux/uaccess.h> #include <asm/asm-uaccess.h>
/* /*
* Copy to user space from a kernel buffer (alignment handled by the hardware) * Copy to user space from a kernel buffer (alignment handled by the hardware)
......
...@@ -23,7 +23,7 @@ ...@@ -23,7 +23,7 @@
#include <asm/assembler.h> #include <asm/assembler.h>
#include <asm/cpufeature.h> #include <asm/cpufeature.h>
#include <asm/alternative.h> #include <asm/alternative.h>
#include <linux/uaccess.h> #include <asm/asm-uaccess.h>
/* /*
* flush_icache_range(start,end) * flush_icache_range(start,end)
......
...@@ -49,7 +49,7 @@ ...@@ -49,7 +49,7 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/assembler.h> #include <asm/assembler.h>
#include <linux/uaccess.h> #include <asm/asm-uaccess.h>
#include <xen/interface/xen.h> #include <xen/interface/xen.h>
......
...@@ -46,6 +46,7 @@ config X86 ...@@ -46,6 +46,7 @@ config X86
select ARCH_CLOCKSOURCE_DATA select ARCH_CLOCKSOURCE_DATA
select ARCH_DISCARD_MEMBLOCK select ARCH_DISCARD_MEMBLOCK
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FAST_MULTIPLIER select ARCH_HAS_FAST_MULTIPLIER
......
...@@ -139,6 +139,19 @@ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) ...@@ -139,6 +139,19 @@ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
} }
static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
bool negative;
asm volatile(LOCK_PREFIX "andb %2,%1\n\t"
CC_SET(s)
: CC_OUT(s) (negative), ADDR
: "ir" ((char) ~(1 << nr)) : "memory");
return negative;
}
// Let everybody know we have it
#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
/* /*
* __clear_bit_unlock - Clears a bit in memory * __clear_bit_unlock - Clears a bit in memory
* @nr: Bit to clear * @nr: Bit to clear
......
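The new x86 helper clears a bit with a locked andb and reports the sign flag of the resulting byte. A hedged, generic C sketch of the same contract using GCC __atomic builtins; it is an illustration only, not the kernel's generic fallback, and it takes a byte pointer with nr limited to 0..7:

#include <stdbool.h>
#include <stdio.h>

static bool clear_bit_is_negative_byte(long nr, volatile unsigned char *byte)
{
	unsigned char mask = (unsigned char)~(1u << nr);   /* nr must be 0..7 here */
	unsigned char old = __atomic_fetch_and(byte, mask, __ATOMIC_RELEASE);

	/* Sign of the byte after the clear, i.e. whether bit 7 is still set. */
	return (signed char)(old & mask) < 0;
}

int main(void)
{
	volatile unsigned char flags = 0x81;   /* bits 7 and 0 set */

	/* Clearing bit 0 leaves bit 7 set, so the byte is still "negative". */
	printf("%d\n", clear_bit_is_negative_byte(0, &flags));   /* 1 */

	/* Clearing bit 7 makes the byte non-negative. */
	printf("%d\n", clear_bit_is_negative_byte(7, &flags));   /* 0 */
	return 0;
}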
...@@ -1182,6 +1182,9 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank) ...@@ -1182,6 +1182,9 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
const char *name = get_name(bank, NULL); const char *name = get_name(bank, NULL);
int err = 0; int err = 0;
if (!dev)
return -ENODEV;
if (is_shared_bank(bank)) { if (is_shared_bank(bank)) {
nb = node_to_amd_nb(amd_get_nb_id(cpu)); nb = node_to_amd_nb(amd_get_nb_id(cpu));
......
...@@ -1461,16 +1461,25 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, ...@@ -1461,16 +1461,25 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
for (i = 0; i < ctcount; i++) { for (i = 0; i < ctcount; i++) {
unsigned int dlen = COMP_BUF_SIZE; unsigned int dlen = COMP_BUF_SIZE;
int ilen = ctemplate[i].inlen; int ilen = ctemplate[i].inlen;
void *input_vec;
input_vec = kmalloc(ilen, GFP_KERNEL);
if (!input_vec) {
ret = -ENOMEM;
goto out;
}
memcpy(input_vec, ctemplate[i].input, ilen);
memset(output, 0, dlen); memset(output, 0, dlen);
init_completion(&result.completion); init_completion(&result.completion);
sg_init_one(&src, ctemplate[i].input, ilen); sg_init_one(&src, input_vec, ilen);
sg_init_one(&dst, output, dlen); sg_init_one(&dst, output, dlen);
req = acomp_request_alloc(tfm); req = acomp_request_alloc(tfm);
if (!req) { if (!req) {
pr_err("alg: acomp: request alloc failed for %s\n", pr_err("alg: acomp: request alloc failed for %s\n",
algo); algo);
kfree(input_vec);
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
} }
...@@ -1483,6 +1492,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, ...@@ -1483,6 +1492,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
if (ret) { if (ret) {
pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n", pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret); i + 1, algo, -ret);
kfree(input_vec);
acomp_request_free(req); acomp_request_free(req);
goto out; goto out;
} }
...@@ -1491,6 +1501,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, ...@@ -1491,6 +1501,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n", pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
i + 1, algo, req->dlen); i + 1, algo, req->dlen);
ret = -EINVAL; ret = -EINVAL;
kfree(input_vec);
acomp_request_free(req); acomp_request_free(req);
goto out; goto out;
} }
...@@ -1500,26 +1511,37 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, ...@@ -1500,26 +1511,37 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
i + 1, algo); i + 1, algo);
hexdump(output, req->dlen); hexdump(output, req->dlen);
ret = -EINVAL; ret = -EINVAL;
kfree(input_vec);
acomp_request_free(req); acomp_request_free(req);
goto out; goto out;
} }
kfree(input_vec);
acomp_request_free(req); acomp_request_free(req);
} }
for (i = 0; i < dtcount; i++) { for (i = 0; i < dtcount; i++) {
unsigned int dlen = COMP_BUF_SIZE; unsigned int dlen = COMP_BUF_SIZE;
int ilen = dtemplate[i].inlen; int ilen = dtemplate[i].inlen;
void *input_vec;
input_vec = kmalloc(ilen, GFP_KERNEL);
if (!input_vec) {
ret = -ENOMEM;
goto out;
}
memcpy(input_vec, dtemplate[i].input, ilen);
memset(output, 0, dlen); memset(output, 0, dlen);
init_completion(&result.completion); init_completion(&result.completion);
sg_init_one(&src, dtemplate[i].input, ilen); sg_init_one(&src, input_vec, ilen);
sg_init_one(&dst, output, dlen); sg_init_one(&dst, output, dlen);
req = acomp_request_alloc(tfm); req = acomp_request_alloc(tfm);
if (!req) { if (!req) {
pr_err("alg: acomp: request alloc failed for %s\n", pr_err("alg: acomp: request alloc failed for %s\n",
algo); algo);
kfree(input_vec);
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
} }
...@@ -1532,6 +1554,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, ...@@ -1532,6 +1554,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
if (ret) { if (ret) {
pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n", pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
i + 1, algo, -ret); i + 1, algo, -ret);
kfree(input_vec);
acomp_request_free(req); acomp_request_free(req);
goto out; goto out;
} }
...@@ -1540,6 +1563,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, ...@@ -1540,6 +1563,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n", pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
i + 1, algo, req->dlen); i + 1, algo, req->dlen);
ret = -EINVAL; ret = -EINVAL;
kfree(input_vec);
acomp_request_free(req); acomp_request_free(req);
goto out; goto out;
} }
...@@ -1549,10 +1573,12 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, ...@@ -1549,10 +1573,12 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
i + 1, algo); i + 1, algo);
hexdump(output, req->dlen); hexdump(output, req->dlen);
ret = -EINVAL; ret = -EINVAL;
kfree(input_vec);
acomp_request_free(req); acomp_request_free(req);
goto out; goto out;
} }
kfree(input_vec);
acomp_request_free(req); acomp_request_free(req);
} }
......
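The testmgr changes copy every template input into a kmalloc'd buffer before sg_init_one(), and free it on each exit path. A hedged sketch of that allocate-copy-free pattern factored into a hypothetical helper (run_one_vec() is not part of testmgr.c); kmemdup() combines the kmalloc()+memcpy() pair used in the hunk:

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>

static int run_one_vec(const void *template, unsigned int ilen,
		       int (*do_request)(struct scatterlist *src))
{
	struct scatterlist src;
	void *input_vec;
	int ret;

	/* Copy the (possibly rodata) test vector somewhere sg-safe. */
	input_vec = kmemdup(template, ilen, GFP_KERNEL);
	if (!input_vec)
		return -ENOMEM;

	sg_init_one(&src, input_vec, ilen);
	ret = do_request(&src);      /* compression or decompression step */

	kfree(input_vec);            /* single exit path frees the copy */
	return ret;
}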
...@@ -273,7 +273,8 @@ struct mv_cesa_op_ctx { ...@@ -273,7 +273,8 @@ struct mv_cesa_op_ctx {
#define CESA_TDMA_SRC_IN_SRAM BIT(30) #define CESA_TDMA_SRC_IN_SRAM BIT(30)
#define CESA_TDMA_END_OF_REQ BIT(29) #define CESA_TDMA_END_OF_REQ BIT(29)
#define CESA_TDMA_BREAK_CHAIN BIT(28) #define CESA_TDMA_BREAK_CHAIN BIT(28)
#define CESA_TDMA_TYPE_MSK GENMASK(27, 0) #define CESA_TDMA_SET_STATE BIT(27)
#define CESA_TDMA_TYPE_MSK GENMASK(26, 0)
#define CESA_TDMA_DUMMY 0 #define CESA_TDMA_DUMMY 0
#define CESA_TDMA_DATA 1 #define CESA_TDMA_DATA 1
#define CESA_TDMA_OP 2 #define CESA_TDMA_OP 2
......
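Bit 27 is carved out of the old 28-bit type mask for the new CESA_TDMA_SET_STATE flag. A standalone sketch (values copied from the hunk) that checks the reshuffled flag bits never overlap the descriptor-type field:

#include <assert.h>
#include <stdint.h>

#define BIT(n)          (1u << (n))
#define GENMASK32(h, l) ((UINT32_MAX << (l)) & (UINT32_MAX >> (31 - (h))))

#define CESA_TDMA_SRC_IN_SRAM  BIT(30)
#define CESA_TDMA_END_OF_REQ   BIT(29)
#define CESA_TDMA_BREAK_CHAIN  BIT(28)
#define CESA_TDMA_SET_STATE    BIT(27)
#define CESA_TDMA_TYPE_MSK     GENMASK32(26, 0)

int main(void)
{
	uint32_t flags = CESA_TDMA_SRC_IN_SRAM | CESA_TDMA_END_OF_REQ |
			 CESA_TDMA_BREAK_CHAIN | CESA_TDMA_SET_STATE;

	/* No flag bit may fall inside the descriptor-type field. */
	assert((flags & CESA_TDMA_TYPE_MSK) == 0);
	return 0;
}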
...@@ -280,13 +280,32 @@ static void mv_cesa_ahash_std_prepare(struct ahash_request *req) ...@@ -280,13 +280,32 @@ static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
sreq->offset = 0; sreq->offset = 0;
} }
static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
struct mv_cesa_req *base = &creq->base;
/* We must explicitly set the digest state. */
if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
struct mv_cesa_engine *engine = base->engine;
int i;
/* Set the hash state in the IVDIG regs. */
for (i = 0; i < ARRAY_SIZE(creq->state); i++)
writel_relaxed(creq->state[i], engine->regs +
CESA_IVDIG(i));
}
mv_cesa_dma_step(base);
}
static void mv_cesa_ahash_step(struct crypto_async_request *req) static void mv_cesa_ahash_step(struct crypto_async_request *req)
{ {
struct ahash_request *ahashreq = ahash_request_cast(req); struct ahash_request *ahashreq = ahash_request_cast(req);
struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
mv_cesa_dma_step(&creq->base); mv_cesa_ahash_dma_step(ahashreq);
else else
mv_cesa_ahash_std_step(ahashreq); mv_cesa_ahash_std_step(ahashreq);
} }
...@@ -584,12 +603,16 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) ...@@ -584,12 +603,16 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
struct mv_cesa_ahash_dma_iter iter; struct mv_cesa_ahash_dma_iter iter;
struct mv_cesa_op_ctx *op = NULL; struct mv_cesa_op_ctx *op = NULL;
unsigned int frag_len; unsigned int frag_len;
bool set_state = false;
int ret; int ret;
u32 type; u32 type;
basereq->chain.first = NULL; basereq->chain.first = NULL;
basereq->chain.last = NULL; basereq->chain.last = NULL;
if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
set_state = true;
if (creq->src_nents) { if (creq->src_nents) {
ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents, ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
DMA_TO_DEVICE); DMA_TO_DEVICE);
...@@ -683,6 +706,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) ...@@ -683,6 +706,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
if (type != CESA_TDMA_RESULT) if (type != CESA_TDMA_RESULT)
basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN; basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;
if (set_state) {
/*
* Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
* let the step logic know that the IVDIG registers should be
* explicitly set before launching a TDMA chain.
*/
basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
}
return 0; return 0;
err_free_tdma: err_free_tdma:
......
...@@ -109,7 +109,14 @@ void mv_cesa_tdma_chain(struct mv_cesa_engine *engine, ...@@ -109,7 +109,14 @@ void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
last->next = dreq->chain.first; last->next = dreq->chain.first;
engine->chain.last = dreq->chain.last; engine->chain.last = dreq->chain.last;
if (!(last->flags & CESA_TDMA_BREAK_CHAIN)) /*
* Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
* the last element of the current chain, or if the request * being queued needs the IV regs to be set before launching
* being queued needs the IV regs to be set before launching * the request.
* the request.
*/
if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
!(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
last->next_dma = dreq->chain.first->cur_dma; last->next_dma = dreq->chain.first->cur_dma;
} }
} }
......
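The chaining rule in mv_cesa_tdma_chain() now has two conditions, as the added comment describes. A hedged restatement of that rule as a standalone predicate, with the descriptor reduced to its flags word:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CESA_TDMA_BREAK_CHAIN  (1u << 28)
#define CESA_TDMA_SET_STATE    (1u << 27)

struct tdma_desc {
	uint32_t flags;
};

/* Two queued chains may only be linked when the previous chain does not ask
 * for a break and the new chain does not need the IV/digest state programmed
 * first.
 */
static bool can_link_chains(const struct tdma_desc *last,
			    const struct tdma_desc *next_first)
{
	return !(last->flags & CESA_TDMA_BREAK_CHAIN) &&
	       !(next_first->flags & CESA_TDMA_SET_STATE);
}

int main(void)
{
	struct tdma_desc last  = { .flags = 0 };
	struct tdma_desc first = { .flags = CESA_TDMA_SET_STATE };

	printf("link allowed: %d\n", can_link_chains(&last, &first));  /* 0 */
	return 0;
}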
...@@ -383,7 +383,7 @@ static int psci_suspend_finisher(unsigned long index) ...@@ -383,7 +383,7 @@ static int psci_suspend_finisher(unsigned long index)
u32 *state = __this_cpu_read(psci_power_state); u32 *state = __this_cpu_read(psci_power_state);
return psci_ops.cpu_suspend(state[index - 1], return psci_ops.cpu_suspend(state[index - 1],
virt_to_phys(cpu_resume)); __pa_symbol(cpu_resume));
} }
int psci_cpu_suspend_enter(unsigned long index) int psci_cpu_suspend_enter(unsigned long index)
......
...@@ -75,18 +75,18 @@ static char module_name[] = "lart"; ...@@ -75,18 +75,18 @@ static char module_name[] = "lart";
/* blob */ /* blob */
#define NUM_BLOB_BLOCKS FLASH_NUMBLOCKS_16m_PARAM #define NUM_BLOB_BLOCKS FLASH_NUMBLOCKS_16m_PARAM
#define BLOB_START 0x00000000 #define PART_BLOB_START 0x00000000
#define BLOB_LEN (NUM_BLOB_BLOCKS * FLASH_BLOCKSIZE_PARAM) #define PART_BLOB_LEN (NUM_BLOB_BLOCKS * FLASH_BLOCKSIZE_PARAM)
/* kernel */ /* kernel */
#define NUM_KERNEL_BLOCKS 7 #define NUM_KERNEL_BLOCKS 7
#define KERNEL_START (BLOB_START + BLOB_LEN) #define PART_KERNEL_START (PART_BLOB_START + PART_BLOB_LEN)
#define KERNEL_LEN (NUM_KERNEL_BLOCKS * FLASH_BLOCKSIZE_MAIN) #define PART_KERNEL_LEN (NUM_KERNEL_BLOCKS * FLASH_BLOCKSIZE_MAIN)
/* initial ramdisk */ /* initial ramdisk */
#define NUM_INITRD_BLOCKS 24 #define NUM_INITRD_BLOCKS 24
#define INITRD_START (KERNEL_START + KERNEL_LEN) #define PART_INITRD_START (PART_KERNEL_START + PART_KERNEL_LEN)
#define INITRD_LEN (NUM_INITRD_BLOCKS * FLASH_BLOCKSIZE_MAIN) #define PART_INITRD_LEN (NUM_INITRD_BLOCKS * FLASH_BLOCKSIZE_MAIN)
/* /*
* See section 4.0 in "3 Volt Fast Boot Block Flash Memory" Intel Datasheet * See section 4.0 in "3 Volt Fast Boot Block Flash Memory" Intel Datasheet
...@@ -587,20 +587,20 @@ static struct mtd_partition lart_partitions[] = { ...@@ -587,20 +587,20 @@ static struct mtd_partition lart_partitions[] = {
/* blob */ /* blob */
{ {
.name = "blob", .name = "blob",
.offset = BLOB_START, .offset = PART_BLOB_START,
.size = BLOB_LEN, .size = PART_BLOB_LEN,
}, },
/* kernel */ /* kernel */
{ {
.name = "kernel", .name = "kernel",
.offset = KERNEL_START, /* MTDPART_OFS_APPEND */ .offset = PART_KERNEL_START, /* MTDPART_OFS_APPEND */
.size = KERNEL_LEN, .size = PART_KERNEL_LEN,
}, },
/* initial ramdisk / file system */ /* initial ramdisk / file system */
{ {
.name = "file system", .name = "file system",
.offset = INITRD_START, /* MTDPART_OFS_APPEND */ .offset = PART_INITRD_START, /* MTDPART_OFS_APPEND */
.size = INITRD_LEN, /* MTDPART_SIZ_FULL */ .size = PART_INITRD_LEN, /* MTDPART_SIZ_FULL */
} }
}; };
#define NUM_PARTITIONS ARRAY_SIZE(lart_partitions) #define NUM_PARTITIONS ARRAY_SIZE(lart_partitions)
......
...@@ -900,10 +900,10 @@ static void korina_restart_task(struct work_struct *work) ...@@ -900,10 +900,10 @@ static void korina_restart_task(struct work_struct *work)
DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR, DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
&lp->rx_dma_regs->dmasm); &lp->rx_dma_regs->dmasm);
korina_free_ring(dev);
napi_disable(&lp->napi); napi_disable(&lp->napi);
korina_free_ring(dev);
if (korina_init(dev) < 0) { if (korina_init(dev) < 0) {
printk(KERN_ERR "%s: cannot restart device\n", dev->name); printk(KERN_ERR "%s: cannot restart device\n", dev->name);
return; return;
...@@ -1064,12 +1064,12 @@ static int korina_close(struct net_device *dev) ...@@ -1064,12 +1064,12 @@ static int korina_close(struct net_device *dev)
tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR; tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
writel(tmp, &lp->rx_dma_regs->dmasm); writel(tmp, &lp->rx_dma_regs->dmasm);
korina_free_ring(dev);
napi_disable(&lp->napi); napi_disable(&lp->napi);
cancel_work_sync(&lp->restart_task); cancel_work_sync(&lp->restart_task);
korina_free_ring(dev);
free_irq(lp->rx_irq, dev); free_irq(lp->rx_irq, dev);
free_irq(lp->tx_irq, dev); free_irq(lp->tx_irq, dev);
free_irq(lp->ovr_irq, dev); free_irq(lp->ovr_irq, dev);
......
...@@ -1638,7 +1638,8 @@ int mlx4_en_start_port(struct net_device *dev) ...@@ -1638,7 +1638,8 @@ int mlx4_en_start_port(struct net_device *dev)
/* Configure tx cq's and rings */ /* Configure tx cq's and rings */
for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) { for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
u8 num_tx_rings_p_up = t == TX ? priv->num_tx_rings_p_up : 1; u8 num_tx_rings_p_up = t == TX ?
priv->num_tx_rings_p_up : priv->tx_ring_num[t];
for (i = 0; i < priv->tx_ring_num[t]; i++) { for (i = 0; i < priv->tx_ring_num[t]; i++) {
/* Configure cq */ /* Configure cq */
......
...@@ -326,6 +326,7 @@ enum cfg_version { ...@@ -326,6 +326,7 @@ enum cfg_version {
static const struct pci_device_id rtl8169_pci_tbl[] = { static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
......
...@@ -116,7 +116,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, ...@@ -116,7 +116,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
unsigned int mii_address = priv->hw->mii.addr; unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data; unsigned int mii_data = priv->hw->mii.data;
u32 value = MII_WRITE | MII_BUSY; u32 value = MII_BUSY;
value |= (phyaddr << priv->hw->mii.addr_shift) value |= (phyaddr << priv->hw->mii.addr_shift)
& priv->hw->mii.addr_mask; & priv->hw->mii.addr_mask;
...@@ -126,6 +126,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, ...@@ -126,6 +126,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
& priv->hw->mii.clk_csr_mask; & priv->hw->mii.clk_csr_mask;
if (priv->plat->has_gmac4) if (priv->plat->has_gmac4)
value |= MII_GMAC4_WRITE; value |= MII_GMAC4_WRITE;
else
value |= MII_WRITE;
/* Wait until any existing MII operation is complete */ /* Wait until any existing MII operation is complete */
if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
......
...@@ -99,6 +99,11 @@ struct ipvl_port { ...@@ -99,6 +99,11 @@ struct ipvl_port {
int count; int count;
}; };
struct ipvl_skb_cb {
bool tx_pkt;
};
#define IPVL_SKB_CB(_skb) ((struct ipvl_skb_cb *)&((_skb)->cb[0]))
static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d) static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
{ {
return rcu_dereference(d->rx_handler_data); return rcu_dereference(d->rx_handler_data);
......
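IPVL_SKB_CB() overlays private per-packet state on skb->cb, the 48-byte control block each layer may use while it owns the skb. A hedged kernel-style sketch of that pattern with illustrative names (demo_skb_cb); the BUILD_BUG_ON() size check is an extra precaution in this sketch, not something the hunk itself adds:

#include <linux/bug.h>
#include <linux/skbuff.h>

struct demo_skb_cb {
	bool tx_pkt;
};

#define DEMO_SKB_CB(_skb) ((struct demo_skb_cb *)&((_skb)->cb[0]))

static void demo_mark_tx(struct sk_buff *skb, bool tx_pkt)
{
	/* Private state must fit inside the 48-byte control block. */
	BUILD_BUG_ON(sizeof(struct demo_skb_cb) > sizeof(skb->cb));
	DEMO_SKB_CB(skb)->tx_pkt = tx_pkt;
}

static bool demo_is_tx(struct sk_buff *skb)
{
	return DEMO_SKB_CB(skb)->tx_pkt;
}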
...@@ -198,7 +198,7 @@ void ipvlan_process_multicast(struct work_struct *work) ...@@ -198,7 +198,7 @@ void ipvlan_process_multicast(struct work_struct *work)
unsigned int mac_hash; unsigned int mac_hash;
int ret; int ret;
u8 pkt_type; u8 pkt_type;
bool hlocal, dlocal; bool tx_pkt;
__skb_queue_head_init(&list); __skb_queue_head_init(&list);
...@@ -207,8 +207,11 @@ void ipvlan_process_multicast(struct work_struct *work) ...@@ -207,8 +207,11 @@ void ipvlan_process_multicast(struct work_struct *work)
spin_unlock_bh(&port->backlog.lock); spin_unlock_bh(&port->backlog.lock);
while ((skb = __skb_dequeue(&list)) != NULL) { while ((skb = __skb_dequeue(&list)) != NULL) {
struct net_device *dev = skb->dev;
bool consumed = false;
ethh = eth_hdr(skb); ethh = eth_hdr(skb);
hlocal = ether_addr_equal(ethh->h_source, port->dev->dev_addr); tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
mac_hash = ipvlan_mac_hash(ethh->h_dest); mac_hash = ipvlan_mac_hash(ethh->h_dest);
if (ether_addr_equal(ethh->h_dest, port->dev->broadcast)) if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
...@@ -216,41 +219,45 @@ void ipvlan_process_multicast(struct work_struct *work) ...@@ -216,41 +219,45 @@ void ipvlan_process_multicast(struct work_struct *work)
else else
pkt_type = PACKET_MULTICAST; pkt_type = PACKET_MULTICAST;
dlocal = false;
rcu_read_lock(); rcu_read_lock();
list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) { list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
if (hlocal && (ipvlan->dev == skb->dev)) { if (tx_pkt && (ipvlan->dev == skb->dev))
dlocal = true;
continue; continue;
}
if (!test_bit(mac_hash, ipvlan->mac_filters)) if (!test_bit(mac_hash, ipvlan->mac_filters))
continue; continue;
if (!(ipvlan->dev->flags & IFF_UP))
continue;
ret = NET_RX_DROP; ret = NET_RX_DROP;
len = skb->len + ETH_HLEN; len = skb->len + ETH_HLEN;
nskb = skb_clone(skb, GFP_ATOMIC); nskb = skb_clone(skb, GFP_ATOMIC);
if (!nskb) local_bh_disable();
goto acct; if (nskb) {
consumed = true;
nskb->pkt_type = pkt_type; nskb->pkt_type = pkt_type;
nskb->dev = ipvlan->dev; nskb->dev = ipvlan->dev;
if (hlocal) if (tx_pkt)
ret = dev_forward_skb(ipvlan->dev, nskb); ret = dev_forward_skb(ipvlan->dev, nskb);
else else
ret = netif_rx(nskb); ret = netif_rx(nskb);
acct: }
ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true); ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
local_bh_enable();
} }
rcu_read_unlock(); rcu_read_unlock();
if (dlocal) { if (tx_pkt) {
/* If the packet originated here, send it out. */ /* If the packet originated here, send it out. */
skb->dev = port->dev; skb->dev = port->dev;
skb->pkt_type = pkt_type; skb->pkt_type = pkt_type;
dev_queue_xmit(skb); dev_queue_xmit(skb);
} else { } else {
if (consumed)
consume_skb(skb);
else
kfree_skb(skb); kfree_skb(skb);
} }
if (dev)
dev_put(dev);
} }
} }
...@@ -470,15 +477,24 @@ static int ipvlan_process_outbound(struct sk_buff *skb) ...@@ -470,15 +477,24 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
} }
static void ipvlan_multicast_enqueue(struct ipvl_port *port, static void ipvlan_multicast_enqueue(struct ipvl_port *port,
struct sk_buff *skb) struct sk_buff *skb, bool tx_pkt)
{ {
if (skb->protocol == htons(ETH_P_PAUSE)) { if (skb->protocol == htons(ETH_P_PAUSE)) {
kfree_skb(skb); kfree_skb(skb);
return; return;
} }
/* Record whether the deferred packet came from the TX or the RX path.
* Deciding based on the packet's MAC addresses would lead to erroneous
* decisions. (This would be the case with loopback mode on the master
* device or hairpin mode on the switch.)
*/
IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;
spin_lock(&port->backlog.lock); spin_lock(&port->backlog.lock);
if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) { if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
if (skb->dev)
dev_hold(skb->dev);
__skb_queue_tail(&port->backlog, skb); __skb_queue_tail(&port->backlog, skb);
spin_unlock(&port->backlog.lock); spin_unlock(&port->backlog.lock);
schedule_work(&port->wq); schedule_work(&port->wq);
...@@ -537,7 +553,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) ...@@ -537,7 +553,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
} else if (is_multicast_ether_addr(eth->h_dest)) { } else if (is_multicast_ether_addr(eth->h_dest)) {
ipvlan_skb_crossing_ns(skb, NULL); ipvlan_skb_crossing_ns(skb, NULL);
ipvlan_multicast_enqueue(ipvlan->port, skb); ipvlan_multicast_enqueue(ipvlan->port, skb, true);
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
} }
...@@ -634,7 +650,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb, ...@@ -634,7 +650,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
*/ */
if (nskb) { if (nskb) {
ipvlan_skb_crossing_ns(nskb, NULL); ipvlan_skb_crossing_ns(nskb, NULL);
ipvlan_multicast_enqueue(port, nskb); ipvlan_multicast_enqueue(port, nskb, false);
} }
} }
} else { } else {
......
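Taken together, the ipvlan changes make the enqueue side record the packet's path in skb->cb and pin the originating device, so the deferred worker no longer guesses from MAC addresses and drops the reference when done. A hedged sketch of that producer/consumer contract with illustrative names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct demo_cb { bool tx_pkt; };
#define DEMO_CB(skb) ((struct demo_cb *)&((skb)->cb[0]))

static void demo_enqueue(struct sk_buff_head *backlog, struct sk_buff *skb,
			 bool tx_pkt)
{
	DEMO_CB(skb)->tx_pkt = tx_pkt;
	if (skb->dev)
		dev_hold(skb->dev);    /* keep the device alive while queued */
	skb_queue_tail(backlog, skb);
}

static void demo_process_one(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	bool tx_pkt = DEMO_CB(skb)->tx_pkt;

	/* ... deliver the skb based on tx_pkt, not on its MAC addresses ... */
	(void)tx_pkt;

	consume_skb(skb);
	if (dev)
		dev_put(dev);          /* balance the dev_hold() at enqueue */
}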
...@@ -135,6 +135,7 @@ static int ipvlan_port_create(struct net_device *dev) ...@@ -135,6 +135,7 @@ static int ipvlan_port_create(struct net_device *dev)
static void ipvlan_port_destroy(struct net_device *dev) static void ipvlan_port_destroy(struct net_device *dev)
{ {
struct ipvl_port *port = ipvlan_port_get_rtnl(dev); struct ipvl_port *port = ipvlan_port_get_rtnl(dev);
struct sk_buff *skb;
dev->priv_flags &= ~IFF_IPVLAN_MASTER; dev->priv_flags &= ~IFF_IPVLAN_MASTER;
if (port->mode == IPVLAN_MODE_L3S) { if (port->mode == IPVLAN_MODE_L3S) {
...@@ -144,7 +145,11 @@ static void ipvlan_port_destroy(struct net_device *dev) ...@@ -144,7 +145,11 @@ static void ipvlan_port_destroy(struct net_device *dev)
} }
netdev_rx_handler_unregister(dev); netdev_rx_handler_unregister(dev);
cancel_work_sync(&port->wq); cancel_work_sync(&port->wq);
__skb_queue_purge(&port->backlog); while ((skb = __skb_dequeue(&port->backlog)) != NULL) {
if (skb->dev)
dev_put(skb->dev);
kfree_skb(skb);
}
kfree(port); kfree(port);
} }
......
...@@ -451,16 +451,37 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping, ...@@ -451,16 +451,37 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping,
__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key); __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
} }
static int __dax_invalidate_mapping_entry(struct address_space *mapping,
pgoff_t index, bool trunc)
{
int ret = 0;
void *entry;
struct radix_tree_root *page_tree = &mapping->page_tree;
spin_lock_irq(&mapping->tree_lock);
entry = get_unlocked_mapping_entry(mapping, index, NULL);
if (!entry || !radix_tree_exceptional_entry(entry))
goto out;
if (!trunc &&
(radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
goto out;
radix_tree_delete(page_tree, index);
mapping->nrexceptional--;
ret = 1;
out:
put_unlocked_mapping_entry(mapping, index, entry);
spin_unlock_irq(&mapping->tree_lock);
return ret;
}
/* /*
* Delete exceptional DAX entry at @index from @mapping. Wait for radix tree * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
* entry to get unlocked before deleting it. * entry to get unlocked before deleting it.
*/ */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{ {
void *entry; int ret = __dax_invalidate_mapping_entry(mapping, index, true);
spin_lock_irq(&mapping->tree_lock);
entry = get_unlocked_mapping_entry(mapping, index, NULL);
/* /*
* This gets called from truncate / punch_hole path. As such, the caller * This gets called from truncate / punch_hole path. As such, the caller
* must hold locks protecting against concurrent modifications of the * must hold locks protecting against concurrent modifications of the
...@@ -468,16 +489,46 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) ...@@ -468,16 +489,46 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
* caller has seen exceptional entry for this index, we better find it * caller has seen exceptional entry for this index, we better find it
* at that index as well... * at that index as well...
*/ */
if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry))) { WARN_ON_ONCE(!ret);
spin_unlock_irq(&mapping->tree_lock); return ret;
return 0; }
}
radix_tree_delete(&mapping->page_tree, index); /*
* Invalidate exceptional DAX entry if easily possible. This handles DAX
* entries for invalidate_inode_pages() so we evict the entry only if we can
* do so without blocking.
*/
int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
{
int ret = 0;
void *entry, **slot;
struct radix_tree_root *page_tree = &mapping->page_tree;
spin_lock_irq(&mapping->tree_lock);
entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
if (!entry || !radix_tree_exceptional_entry(entry) ||
slot_locked(mapping, slot))
goto out;
if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
goto out;
radix_tree_delete(page_tree, index);
mapping->nrexceptional--; mapping->nrexceptional--;
ret = 1;
out:
spin_unlock_irq(&mapping->tree_lock); spin_unlock_irq(&mapping->tree_lock);
if (ret)
dax_wake_mapping_entry_waiter(mapping, index, entry, true); dax_wake_mapping_entry_waiter(mapping, index, entry, true);
return ret;
}
return 1; /*
* Invalidate exceptional DAX entry if it is clean.
*/
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index)
{
return __dax_invalidate_mapping_entry(mapping, index, false);
} }
/* /*
...@@ -488,15 +539,16 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) ...@@ -488,15 +539,16 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
* otherwise it will simply fall out of the page cache under memory * otherwise it will simply fall out of the page cache under memory
* pressure without ever having been dirtied. * pressure without ever having been dirtied.
*/ */
static int dax_load_hole(struct address_space *mapping, void *entry, static int dax_load_hole(struct address_space *mapping, void **entry,
struct vm_fault *vmf) struct vm_fault *vmf)
{ {
struct page *page; struct page *page;
int ret;
/* Hole page already exists? Return it... */ /* Hole page already exists? Return it... */
if (!radix_tree_exceptional_entry(entry)) { if (!radix_tree_exceptional_entry(*entry)) {
vmf->page = entry; page = *entry;
return VM_FAULT_LOCKED; goto out;
} }
/* This will replace locked radix tree entry with a hole page */ /* This will replace locked radix tree entry with a hole page */
...@@ -504,8 +556,17 @@ static int dax_load_hole(struct address_space *mapping, void *entry, ...@@ -504,8 +556,17 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
vmf->gfp_mask | __GFP_ZERO); vmf->gfp_mask | __GFP_ZERO);
if (!page) if (!page)
return VM_FAULT_OOM; return VM_FAULT_OOM;
out:
vmf->page = page; vmf->page = page;
return VM_FAULT_LOCKED; ret = finish_fault(vmf);
vmf->page = NULL;
*entry = page;
if (!ret) {
/* Grab reference for PTE that is now referencing the page */
get_page(page);
return VM_FAULT_NOPAGE;
}
return ret;
} }
static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size, static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
...@@ -934,6 +995,17 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data, ...@@ -934,6 +995,17 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED)) if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
return -EIO; return -EIO;
/*
* Write can allocate a block for an area which has a hole page mapped
* into page tables. We have to tear down these mappings so that data
* written by write(2) is visible in mmap.
*/
if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
invalidate_inode_pages2_range(inode->i_mapping,
pos >> PAGE_SHIFT,
(end - 1) >> PAGE_SHIFT);
}
while (pos < end) { while (pos < end) {
unsigned offset = pos & (PAGE_SIZE - 1); unsigned offset = pos & (PAGE_SIZE - 1);
struct blk_dax_ctl dax = { 0 }; struct blk_dax_ctl dax = { 0 };
...@@ -992,23 +1064,6 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, ...@@ -992,23 +1064,6 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
if (iov_iter_rw(iter) == WRITE) if (iov_iter_rw(iter) == WRITE)
flags |= IOMAP_WRITE; flags |= IOMAP_WRITE;
/*
* Yes, even DAX files can have page cache attached to them: A zeroed
* page is inserted into the pagecache when we have to serve a write
* fault on a hole. It should never be dirtied and can simply be
* dropped from the pagecache once we get real data for the page.
*
* XXX: This is racy against mmap, and there's nothing we can do about
* it. We'll eventually need to shift this down even further so that
* we can check if we allocated blocks over a hole first.
*/
if (mapping->nrpages) {
ret = invalidate_inode_pages2_range(mapping,
pos >> PAGE_SHIFT,
(pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT);
WARN_ON_ONCE(ret);
}
while (iov_iter_count(iter)) { while (iov_iter_count(iter)) {
ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops, ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
iter, dax_iomap_actor); iter, dax_iomap_actor);
...@@ -1023,6 +1078,15 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, ...@@ -1023,6 +1078,15 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
} }
EXPORT_SYMBOL_GPL(dax_iomap_rw); EXPORT_SYMBOL_GPL(dax_iomap_rw);
static int dax_fault_return(int error)
{
if (error == 0)
return VM_FAULT_NOPAGE;
if (error == -ENOMEM)
return VM_FAULT_OOM;
return VM_FAULT_SIGBUS;
}
/** /**
* dax_iomap_fault - handle a page fault on a DAX file * dax_iomap_fault - handle a page fault on a DAX file
* @vma: The virtual memory area where the fault occurred * @vma: The virtual memory area where the fault occurred
...@@ -1055,12 +1119,6 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, ...@@ -1055,12 +1119,6 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
if (pos >= i_size_read(inode)) if (pos >= i_size_read(inode))
return VM_FAULT_SIGBUS; return VM_FAULT_SIGBUS;
entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
if (IS_ERR(entry)) {
error = PTR_ERR(entry);
goto out;
}
if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page) if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
flags |= IOMAP_WRITE; flags |= IOMAP_WRITE;
...@@ -1071,9 +1129,15 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, ...@@ -1071,9 +1129,15 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
*/ */
error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap); error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
if (error) if (error)
goto unlock_entry; return dax_fault_return(error);
if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) { if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
error = -EIO; /* fs corruption? */ vmf_ret = dax_fault_return(-EIO); /* fs corruption? */
goto finish_iomap;
}
entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
if (IS_ERR(entry)) {
vmf_ret = dax_fault_return(PTR_ERR(entry));
goto finish_iomap; goto finish_iomap;
} }
...@@ -1096,13 +1160,13 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, ...@@ -1096,13 +1160,13 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
} }
if (error) if (error)
goto finish_iomap; goto error_unlock_entry;
__SetPageUptodate(vmf->cow_page); __SetPageUptodate(vmf->cow_page);
vmf_ret = finish_fault(vmf); vmf_ret = finish_fault(vmf);
if (!vmf_ret) if (!vmf_ret)
vmf_ret = VM_FAULT_DONE_COW; vmf_ret = VM_FAULT_DONE_COW;
goto finish_iomap; goto unlock_entry;
} }
switch (iomap.type) { switch (iomap.type) {
...@@ -1114,12 +1178,15 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, ...@@ -1114,12 +1178,15 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
} }
error = dax_insert_mapping(mapping, iomap.bdev, sector, error = dax_insert_mapping(mapping, iomap.bdev, sector,
PAGE_SIZE, &entry, vma, vmf); PAGE_SIZE, &entry, vma, vmf);
/* -EBUSY is fine, somebody else faulted on the same PTE */
if (error == -EBUSY)
error = 0;
break; break;
case IOMAP_UNWRITTEN: case IOMAP_UNWRITTEN:
case IOMAP_HOLE: case IOMAP_HOLE:
if (!(vmf->flags & FAULT_FLAG_WRITE)) { if (!(vmf->flags & FAULT_FLAG_WRITE)) {
vmf_ret = dax_load_hole(mapping, entry, vmf); vmf_ret = dax_load_hole(mapping, &entry, vmf);
break; goto unlock_entry;
} }
/*FALLTHRU*/ /*FALLTHRU*/
default: default:
...@@ -1128,31 +1195,25 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, ...@@ -1128,31 +1195,25 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
break; break;
} }
error_unlock_entry:
vmf_ret = dax_fault_return(error) | major;
unlock_entry:
put_locked_mapping_entry(mapping, vmf->pgoff, entry);
finish_iomap: finish_iomap:
if (ops->iomap_end) { if (ops->iomap_end) {
if (error || (vmf_ret & VM_FAULT_ERROR)) { int copied = PAGE_SIZE;
/* keep previous error */
ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags, if (vmf_ret & VM_FAULT_ERROR)
&iomap); copied = 0;
} else { /*
error = ops->iomap_end(inode, pos, PAGE_SIZE, * The fault is done by now and there's no way back (other
PAGE_SIZE, flags, &iomap); * thread may be already happily using PTE we have installed).
} * Just ignore error from ->iomap_end since we cannot do much
* with it.
*/
ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
} }
unlock_entry:
if (vmf_ret != VM_FAULT_LOCKED || error)
put_locked_mapping_entry(mapping, vmf->pgoff, entry);
out:
if (error == -ENOMEM)
return VM_FAULT_OOM | major;
/* -EBUSY is fine, somebody else faulted on the same PTE */
if (error < 0 && error != -EBUSY)
return VM_FAULT_SIGBUS | major;
if (vmf_ret) {
WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */
return vmf_ret; return vmf_ret;
}
return VM_FAULT_NOPAGE | major;
} }
EXPORT_SYMBOL_GPL(dax_iomap_fault); EXPORT_SYMBOL_GPL(dax_iomap_fault);
...@@ -1276,16 +1337,6 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address, ...@@ -1276,16 +1337,6 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
if ((pgoff | PG_PMD_COLOUR) > max_pgoff) if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
goto fallback; goto fallback;
/*
* grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
* PMD or a HZP entry. If it can't (because a 4k page is already in
* the tree, for instance), it will return -EEXIST and we just fall
* back to 4k entries.
*/
entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
if (IS_ERR(entry))
goto fallback;
/* /*
* Note that we don't use iomap_apply here. We aren't doing I/O, only * Note that we don't use iomap_apply here. We aren't doing I/O, only
* setting up a mapping, so really we're using iomap_begin() as a way * setting up a mapping, so really we're using iomap_begin() as a way
...@@ -1294,10 +1345,21 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address, ...@@ -1294,10 +1345,21 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
pos = (loff_t)pgoff << PAGE_SHIFT; pos = (loff_t)pgoff << PAGE_SHIFT;
error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap); error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
if (error) if (error)
goto unlock_entry; goto fallback;
if (iomap.offset + iomap.length < pos + PMD_SIZE) if (iomap.offset + iomap.length < pos + PMD_SIZE)
goto finish_iomap; goto finish_iomap;
/*
* grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
* PMD or a HZP entry. If it can't (because a 4k page is already in
* the tree, for instance), it will return -EEXIST and we just fall
* back to 4k entries.
*/
entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
if (IS_ERR(entry))
goto finish_iomap;
vmf.pgoff = pgoff; vmf.pgoff = pgoff;
vmf.flags = flags; vmf.flags = flags;
vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO; vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO;
...@@ -1310,7 +1372,7 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address, ...@@ -1310,7 +1372,7 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
case IOMAP_UNWRITTEN: case IOMAP_UNWRITTEN:
case IOMAP_HOLE: case IOMAP_HOLE:
if (WARN_ON_ONCE(write)) if (WARN_ON_ONCE(write))
goto finish_iomap; goto unlock_entry;
result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap, result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap,
&entry); &entry);
break; break;
...@@ -1319,20 +1381,23 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address, ...@@ -1319,20 +1381,23 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
break; break;
} }
unlock_entry:
put_locked_mapping_entry(mapping, pgoff, entry);
finish_iomap: finish_iomap:
if (ops->iomap_end) { if (ops->iomap_end) {
if (result == VM_FAULT_FALLBACK) { int copied = PMD_SIZE;
ops->iomap_end(inode, pos, PMD_SIZE, 0, iomap_flags,
if (result == VM_FAULT_FALLBACK)
copied = 0;
/*
* The fault is done by now and there's no way back (other
* thread may be already happily using PMD we have installed).
* Just ignore error from ->iomap_end since we cannot do much
* with it.
*/
ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
&iomap); &iomap);
} else {
error = ops->iomap_end(inode, pos, PMD_SIZE, PMD_SIZE,
iomap_flags, &iomap);
if (error)
result = VM_FAULT_FALLBACK;
}
} }
unlock_entry:
put_locked_mapping_entry(mapping, pgoff, entry);
fallback: fallback:
if (result == VM_FAULT_FALLBACK) { if (result == VM_FAULT_FALLBACK) {
split_huge_pmd(vma, pmd, address); split_huge_pmd(vma, pmd, address);
......
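The new __dax_invalidate_mapping_entry() serves both callers through a single trunc flag: truncate (dax_delete_mapping_entry()) removes the entry unconditionally, while dax_invalidate_mapping_entry_sync() refuses to drop entries that are still dirty or tagged for writeback. A minimal sketch of that shared-helper shape follows, with plain flags standing in for the radix-tree tags; nothing below is kernel code.

/* Shared helper: trunc == true always drops the entry, trunc == false keeps
 * entries that still need flushing. */
#include <stdbool.h>
#include <stdio.h>

struct entry { bool present, dirty, towrite; };

static int invalidate_entry(struct entry *e, bool trunc)
{
    if (!e->present)
        return 0;                           /* nothing to do */
    if (!trunc && (e->dirty || e->towrite))
        return 0;                           /* keep entries awaiting writeback */
    e->present = false;
    return 1;                               /* entry removed */
}

int main(void)
{
    struct entry clean = { .present = true };
    struct entry dirty = { .present = true, .dirty = true };

    printf("invalidate clean: %d\n", invalidate_entry(&clean, false)); /* 1 */
    printf("invalidate dirty: %d\n", invalidate_entry(&dirty, false)); /* 0 */
    printf("truncate dirty:   %d\n", invalidate_entry(&dirty, true));  /* 1 */
    return 0;
}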
...@@ -751,9 +751,8 @@ static int ext2_get_blocks(struct inode *inode, ...@@ -751,9 +751,8 @@ static int ext2_get_blocks(struct inode *inode,
mutex_unlock(&ei->truncate_mutex); mutex_unlock(&ei->truncate_mutex);
goto cleanup; goto cleanup;
} }
} else {
*new = true;
} }
*new = true;
ext2_splice_branch(inode, iblock, partial, indirect_blks, count); ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
mutex_unlock(&ei->truncate_mutex); mutex_unlock(&ei->truncate_mutex);
......
...@@ -258,7 +258,6 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) ...@@ -258,7 +258,6 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{ {
int result; int result;
handle_t *handle = NULL;
struct inode *inode = file_inode(vma->vm_file); struct inode *inode = file_inode(vma->vm_file);
struct super_block *sb = inode->i_sb; struct super_block *sb = inode->i_sb;
bool write = vmf->flags & FAULT_FLAG_WRITE; bool write = vmf->flags & FAULT_FLAG_WRITE;
...@@ -266,24 +265,12 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -266,24 +265,12 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (write) { if (write) {
sb_start_pagefault(sb); sb_start_pagefault(sb);
file_update_time(vma->vm_file); file_update_time(vma->vm_file);
}
down_read(&EXT4_I(inode)->i_mmap_sem); down_read(&EXT4_I(inode)->i_mmap_sem);
handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
EXT4_DATA_TRANS_BLOCKS(sb));
} else
down_read(&EXT4_I(inode)->i_mmap_sem);
if (IS_ERR(handle))
result = VM_FAULT_SIGBUS;
else
result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops); result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);
if (write) {
if (!IS_ERR(handle))
ext4_journal_stop(handle);
up_read(&EXT4_I(inode)->i_mmap_sem); up_read(&EXT4_I(inode)->i_mmap_sem);
if (write)
sb_end_pagefault(sb); sb_end_pagefault(sb);
} else
up_read(&EXT4_I(inode)->i_mmap_sem);
return result; return result;
} }
...@@ -292,7 +279,6 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, ...@@ -292,7 +279,6 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, unsigned int flags) pmd_t *pmd, unsigned int flags)
{ {
int result; int result;
handle_t *handle = NULL;
struct inode *inode = file_inode(vma->vm_file); struct inode *inode = file_inode(vma->vm_file);
struct super_block *sb = inode->i_sb; struct super_block *sb = inode->i_sb;
bool write = flags & FAULT_FLAG_WRITE; bool write = flags & FAULT_FLAG_WRITE;
...@@ -300,27 +286,13 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, ...@@ -300,27 +286,13 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
if (write) { if (write) {
sb_start_pagefault(sb); sb_start_pagefault(sb);
file_update_time(vma->vm_file); file_update_time(vma->vm_file);
}
down_read(&EXT4_I(inode)->i_mmap_sem); down_read(&EXT4_I(inode)->i_mmap_sem);
handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
ext4_chunk_trans_blocks(inode,
PMD_SIZE / PAGE_SIZE));
} else
down_read(&EXT4_I(inode)->i_mmap_sem);
if (IS_ERR(handle))
result = VM_FAULT_SIGBUS;
else {
result = dax_iomap_pmd_fault(vma, addr, pmd, flags, result = dax_iomap_pmd_fault(vma, addr, pmd, flags,
&ext4_iomap_ops); &ext4_iomap_ops);
}
if (write) {
if (!IS_ERR(handle))
ext4_journal_stop(handle);
up_read(&EXT4_I(inode)->i_mmap_sem); up_read(&EXT4_I(inode)->i_mmap_sem);
if (write)
sb_end_pagefault(sb); sb_end_pagefault(sb);
} else
up_read(&EXT4_I(inode)->i_mmap_sem);
return result; return result;
} }
......
...@@ -41,6 +41,9 @@ ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, ...@@ -41,6 +41,9 @@ ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
struct iomap_ops *ops); struct iomap_ops *ops);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);
void dax_wake_mapping_entry_waiter(struct address_space *mapping, void dax_wake_mapping_entry_waiter(struct address_space *mapping,
pgoff_t index, void *entry, bool wake_all); pgoff_t index, void *entry, bool wake_all);
......
...@@ -610,7 +610,6 @@ bool bpf_helper_changes_pkt_data(void *func); ...@@ -610,7 +610,6 @@ bool bpf_helper_changes_pkt_data(void *func);
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len); const struct bpf_insn *patch, u32 len);
void bpf_warn_invalid_xdp_action(u32 act); void bpf_warn_invalid_xdp_action(u32 act);
void bpf_warn_invalid_xdp_buffer(void);
#ifdef CONFIG_BPF_JIT #ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable; extern int bpf_jit_enable;
......
...@@ -76,6 +76,10 @@ extern int mmap_rnd_compat_bits __read_mostly; ...@@ -76,6 +76,10 @@ extern int mmap_rnd_compat_bits __read_mostly;
#define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x))) #define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x)))
#endif #endif
#ifndef lm_alias
#define lm_alias(x) __va(__pa_symbol(x))
#endif
/* /*
* To prevent common memory management code establishing * To prevent common memory management code establishing
* a zero page mapping on a read fault. * a zero page mapping on a read fault.
......
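lm_alias(x) simply round-trips a kernel-image address through __pa_symbol() and __va(), yielding the linear-map (direct-map) alias of the symbol. Below is a toy user-space model of that translation; the base addresses and helper functions are invented for illustration only and do not correspond to any real layout.

/* Toy model: translate an image-mapping address to its linear-map alias. */
#include <stdint.h>
#include <stdio.h>

#define IMAGE_BASE  0xffffffff80000000ull   /* where the kernel image is mapped (invented) */
#define LINEAR_BASE 0xffff888000000000ull   /* start of the direct/linear map (invented)   */
#define PHYS_BASE   0x0000000001000000ull   /* physical load address of the image          */

static uint64_t pa_symbol(uint64_t image_va) { return image_va - IMAGE_BASE + PHYS_BASE; }
static uint64_t va(uint64_t pa)              { return pa + LINEAR_BASE; }

#define lm_alias(x) va(pa_symbol(x))

int main(void)
{
    uint64_t sym = IMAGE_BASE + 0x2000;     /* some symbol inside the image */

    printf("image VA %#llx -> linear alias %#llx\n",
           (unsigned long long)sym, (unsigned long long)lm_alias(sym));
    return 0;
}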
...@@ -73,13 +73,13 @@ ...@@ -73,13 +73,13 @@
*/ */
enum pageflags { enum pageflags {
PG_locked, /* Page is locked. Don't touch. */ PG_locked, /* Page is locked. Don't touch. */
PG_waiters, /* Page has waiters, check its waitqueue */
PG_error, PG_error,
PG_referenced, PG_referenced,
PG_uptodate, PG_uptodate,
PG_dirty, PG_dirty,
PG_lru, PG_lru,
PG_active, PG_active,
PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
PG_slab, PG_slab,
PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/ PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/
PG_arch_1, PG_arch_1,
......
...@@ -110,6 +110,7 @@ struct netns_ipv4 { ...@@ -110,6 +110,7 @@ struct netns_ipv4 {
int sysctl_tcp_orphan_retries; int sysctl_tcp_orphan_retries;
int sysctl_tcp_fin_timeout; int sysctl_tcp_fin_timeout;
unsigned int sysctl_tcp_notsent_lowat; unsigned int sysctl_tcp_notsent_lowat;
int sysctl_tcp_tw_reuse;
int sysctl_igmp_max_memberships; int sysctl_igmp_max_memberships;
int sysctl_igmp_max_msf; int sysctl_igmp_max_msf;
......
...@@ -252,7 +252,6 @@ extern int sysctl_tcp_wmem[3]; ...@@ -252,7 +252,6 @@ extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3]; extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win; extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale; extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto; extern int sysctl_tcp_frto;
extern int sysctl_tcp_low_latency; extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_nometrics_save; extern int sysctl_tcp_nometrics_save;
......
...@@ -1471,6 +1471,7 @@ int __cpuhp_setup_state(enum cpuhp_state state, ...@@ -1471,6 +1471,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
bool multi_instance) bool multi_instance)
{ {
int cpu, ret = 0; int cpu, ret = 0;
bool dynstate;
if (cpuhp_cb_check(state) || !name) if (cpuhp_cb_check(state) || !name)
return -EINVAL; return -EINVAL;
...@@ -1480,6 +1481,12 @@ int __cpuhp_setup_state(enum cpuhp_state state, ...@@ -1480,6 +1481,12 @@ int __cpuhp_setup_state(enum cpuhp_state state,
ret = cpuhp_store_callbacks(state, name, startup, teardown, ret = cpuhp_store_callbacks(state, name, startup, teardown,
multi_instance); multi_instance);
dynstate = state == CPUHP_AP_ONLINE_DYN;
if (ret > 0 && dynstate) {
state = ret;
ret = 0;
}
if (ret || !invoke || !startup) if (ret || !invoke || !startup)
goto out; goto out;
...@@ -1508,7 +1515,7 @@ int __cpuhp_setup_state(enum cpuhp_state state, ...@@ -1508,7 +1515,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
* If the requested state is CPUHP_AP_ONLINE_DYN, return the * If the requested state is CPUHP_AP_ONLINE_DYN, return the
* dynamically allocated state in case of success. * dynamically allocated state in case of success.
*/ */
if (!ret && state == CPUHP_AP_ONLINE_DYN) if (!ret && dynstate)
return state; return state;
return ret; return ret;
} }
......
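The cpu-hotplug fix records whether the caller asked for CPUHP_AP_ONLINE_DYN before cpuhp_store_callbacks() overwrites the requested state with the dynamically allocated one, and then consults the saved flag when deciding what to return. A small stand-alone sketch of that capture-before-reuse pattern; the ids and helper below are invented, not the cpuhp API.

/* Remember the "allocate one for me" request before the variable is reused. */
#include <stdbool.h>
#include <stdio.h>

#define ID_DYNAMIC (-1)

static int next_free = 100;

/* returns >0 allocated id when asked for ID_DYNAMIC, 0 on plain success */
static int store_callbacks(int id)
{
    return id == ID_DYNAMIC ? next_free++ : 0;
}

static int setup_state(int id)
{
    bool dynamic = (id == ID_DYNAMIC);      /* capture before 'id' is reused */
    int ret = store_callbacks(id);

    if (ret > 0 && dynamic) {
        id = ret;                           /* adopt the allocated id */
        ret = 0;
    }
    if (ret)
        return ret;
    /* ... invoke callbacks for 'id' on each CPU ... */
    return dynamic ? id : 0;                /* dynamic callers get the id back */
}

int main(void)
{
    printf("fixed: %d, dynamic: %d\n", setup_state(7), setup_state(ID_DYNAMIC));
    return 0;
}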
...@@ -1399,7 +1399,7 @@ void __weak arch_crash_save_vmcoreinfo(void) ...@@ -1399,7 +1399,7 @@ void __weak arch_crash_save_vmcoreinfo(void)
phys_addr_t __weak paddr_vmcoreinfo_note(void) phys_addr_t __weak paddr_vmcoreinfo_note(void)
{ {
return __pa((unsigned long)(char *)&vmcoreinfo_note); return __pa_symbol((unsigned long)(char *)&vmcoreinfo_note);
} }
static int __init crash_save_vmcoreinfo_init(void) static int __init crash_save_vmcoreinfo_init(void)
......
...@@ -622,9 +622,12 @@ config DEBUG_VM_PGFLAGS ...@@ -622,9 +622,12 @@ config DEBUG_VM_PGFLAGS
If unsure, say N. If unsure, say N.
config ARCH_HAS_DEBUG_VIRTUAL
bool
config DEBUG_VIRTUAL config DEBUG_VIRTUAL
bool "Debug VM translations" bool "Debug VM translations"
depends on DEBUG_KERNEL && X86 depends on DEBUG_KERNEL && ARCH_HAS_DEBUG_VIRTUAL
help help
Enable some costly sanity checks in virtual to page code. This can Enable some costly sanity checks in virtual to page code. This can
catch mistakes with virt_to_page() and friends. catch mistakes with virt_to_page() and friends.
......
...@@ -235,18 +235,13 @@ int __init cma_declare_contiguous(phys_addr_t base, ...@@ -235,18 +235,13 @@ int __init cma_declare_contiguous(phys_addr_t base,
phys_addr_t highmem_start; phys_addr_t highmem_start;
int ret = 0; int ret = 0;
#ifdef CONFIG_X86
/* /*
* high_memory isn't direct mapped memory so retrieving its physical * We can't use __pa(high_memory) directly, since high_memory
* address isn't appropriate. But it would be useful to check the * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
* physical address of the highmem boundary so it's justifiable to get * complain. Find the boundary by adding one to the last valid
* the physical address from it. On x86 there is a validation check for * address.
* this case, so the following workaround is needed to avoid it.
*/ */
highmem_start = __pa_nodebug(high_memory); highmem_start = __pa(high_memory - 1) + 1;
#else
highmem_start = __pa(high_memory);
#endif
pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n", pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
__func__, &size, &base, &limit, &alignment); __func__, &size, &base, &limit, &alignment);
......
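The cma change computes the highmem boundary as __pa(high_memory - 1) + 1 because high_memory points one byte past the last direct-mapped address, so a DEBUG_VIRTUAL-checked __pa(high_memory) would (correctly) complain. A user-space sketch of the same off-by-one boundary trick, with invented addresses and a bounds-checked translation:

/* Translate the last valid byte and add one instead of translating the
 * one-past-the-end pointer itself. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define LINEAR_BASE 0xffff888000000000ull
#define LINEAR_SIZE 0x0000000040000000ull   /* pretend 1 GiB is direct-mapped */

static uint64_t pa_checked(uint64_t va)
{
    /* DEBUG_VIRTUAL-style check: only accept addresses inside the linear map */
    assert(va >= LINEAR_BASE && va < LINEAR_BASE + LINEAR_SIZE);
    return va - LINEAR_BASE;
}

int main(void)
{
    uint64_t high_memory = LINEAR_BASE + LINEAR_SIZE;       /* one past the end */
    uint64_t highmem_start = pa_checked(high_memory - 1) + 1;

    printf("highmem starts at phys %#llx\n", (unsigned long long)highmem_start);
    return 0;
}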
...@@ -912,6 +912,29 @@ void add_page_wait_queue(struct page *page, wait_queue_t *waiter) ...@@ -912,6 +912,29 @@ void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
} }
EXPORT_SYMBOL_GPL(add_page_wait_queue); EXPORT_SYMBOL_GPL(add_page_wait_queue);
#ifndef clear_bit_unlock_is_negative_byte
/*
* PG_waiters is the high bit in the same byte as PG_locked.
*
* On x86 (and on many other architectures), we can clear PG_locked and
* test the sign bit at the same time. But if the architecture does
* not support that special operation, we just do this all by hand
* instead.
*
* The read of PG_waiters has to be after (or concurrently with) PG_locked
* being cleared, but a memory barrier should be unnecessary since it is
* in the same byte as PG_locked.
*/
static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
{
clear_bit_unlock(nr, mem);
/* smp_mb__after_atomic(); */
return test_bit(PG_waiters, mem);
}
#endif
/** /**
* unlock_page - unlock a locked page * unlock_page - unlock a locked page
* @page: the page * @page: the page
...@@ -921,16 +944,19 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue); ...@@ -921,16 +944,19 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue);
* mechanism between PageLocked pages and PageWriteback pages is shared. * mechanism between PageLocked pages and PageWriteback pages is shared.
* But that's OK - sleepers in wait_on_page_writeback() just go back to sleep. * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
* *
* The mb is necessary to enforce ordering between the clear_bit and the read * Note that this depends on PG_waiters being the sign bit in the byte
* of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()). * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
* clear the PG_locked bit and test PG_waiters at the same time fairly
* portably (architectures that do LL/SC can test any bit, while x86 can
* test the sign bit).
*/ */
void unlock_page(struct page *page) void unlock_page(struct page *page)
{ {
BUILD_BUG_ON(PG_waiters != 7);
page = compound_head(page); page = compound_head(page);
VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(!PageLocked(page), page);
clear_bit_unlock(PG_locked, &page->flags); if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
smp_mb__after_atomic(); wake_up_page_bit(page, PG_locked);
wake_up_page(page, PG_locked);
} }
EXPORT_SYMBOL(unlock_page); EXPORT_SYMBOL(unlock_page);
......
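The unlock_page() rewrite relies on PG_waiters sitting in bit 7 of the same byte as PG_locked (bit 0), so an architecture such as x86 can clear the lock bit and test the byte's sign bit in a single atomic operation; the generic fallback clears the bit and then tests PG_waiters without an extra barrier. Here is a plain user-space sketch of that fallback using compiler atomics on an ordinary flags byte; it models the idea only and is not the kernel's bitops.

/* Clear the lock bit, then report whether the waiter bit in the same byte
 * is set. */
#include <stdbool.h>
#include <stdio.h>

#define PG_locked  0
#define PG_waiters 7

static bool clear_lock_is_negative_byte(unsigned char *flags)
{
    __atomic_and_fetch(flags, (unsigned char)~(1u << PG_locked), __ATOMIC_RELEASE);
    return (*flags >> PG_waiters) & 1;  /* same byte, so no extra barrier needed */
}

int main(void)
{
    unsigned char flags = (1u << PG_locked) | (1u << PG_waiters);

    if (clear_lock_is_negative_byte(&flags))
        printf("someone is waiting: wake them up\n");
    printf("flags now %#x\n", flags);
    return 0;
}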
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include <linux/kasan.h> #include <linux/kasan.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/memblock.h> #include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pfn.h> #include <linux/pfn.h>
#include <asm/page.h> #include <asm/page.h>
...@@ -49,7 +50,7 @@ static void __init zero_pte_populate(pmd_t *pmd, unsigned long addr, ...@@ -49,7 +50,7 @@ static void __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
pte_t *pte = pte_offset_kernel(pmd, addr); pte_t *pte = pte_offset_kernel(pmd, addr);
pte_t zero_pte; pte_t zero_pte;
zero_pte = pfn_pte(PFN_DOWN(__pa(kasan_zero_page)), PAGE_KERNEL); zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_zero_page)), PAGE_KERNEL);
zero_pte = pte_wrprotect(zero_pte); zero_pte = pte_wrprotect(zero_pte);
while (addr + PAGE_SIZE <= end) { while (addr + PAGE_SIZE <= end) {
...@@ -69,7 +70,7 @@ static void __init zero_pmd_populate(pud_t *pud, unsigned long addr, ...@@ -69,7 +70,7 @@ static void __init zero_pmd_populate(pud_t *pud, unsigned long addr,
next = pmd_addr_end(addr, end); next = pmd_addr_end(addr, end);
if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) { if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte); pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
continue; continue;
} }
...@@ -92,9 +93,9 @@ static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr, ...@@ -92,9 +93,9 @@ static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) { if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
pmd_t *pmd; pmd_t *pmd;
pud_populate(&init_mm, pud, kasan_zero_pmd); pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
pmd = pmd_offset(pud, addr); pmd = pmd_offset(pud, addr);
pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte); pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
continue; continue;
} }
...@@ -135,11 +136,11 @@ void __init kasan_populate_zero_shadow(const void *shadow_start, ...@@ -135,11 +136,11 @@ void __init kasan_populate_zero_shadow(const void *shadow_start,
* puds,pmds, so pgd_populate(), pud_populate() * puds,pmds, so pgd_populate(), pud_populate()
* is noops. * is noops.
*/ */
pgd_populate(&init_mm, pgd, kasan_zero_pud); pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_pud));
pud = pud_offset(pgd, addr); pud = pud_offset(pgd, addr);
pud_populate(&init_mm, pud, kasan_zero_pmd); pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
pmd = pmd_offset(pud, addr); pmd = pmd_offset(pud, addr);
pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte); pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
continue; continue;
} }
......
...@@ -24,20 +24,12 @@ ...@@ -24,20 +24,12 @@
#include <linux/rmap.h> #include <linux/rmap.h>
#include "internal.h" #include "internal.h"
static void clear_exceptional_entry(struct address_space *mapping, static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
pgoff_t index, void *entry) void *entry)
{ {
struct radix_tree_node *node; struct radix_tree_node *node;
void **slot; void **slot;
/* Handled by shmem itself */
if (shmem_mapping(mapping))
return;
if (dax_mapping(mapping)) {
dax_delete_mapping_entry(mapping, index);
return;
}
spin_lock_irq(&mapping->tree_lock); spin_lock_irq(&mapping->tree_lock);
/* /*
* Regular page slots are stabilized by the page lock even * Regular page slots are stabilized by the page lock even
...@@ -55,6 +47,56 @@ static void clear_exceptional_entry(struct address_space *mapping, ...@@ -55,6 +47,56 @@ static void clear_exceptional_entry(struct address_space *mapping,
spin_unlock_irq(&mapping->tree_lock); spin_unlock_irq(&mapping->tree_lock);
} }
/*
* Unconditionally remove exceptional entry. Usually called from truncate path.
*/
static void truncate_exceptional_entry(struct address_space *mapping,
pgoff_t index, void *entry)
{
/* Handled by shmem itself */
if (shmem_mapping(mapping))
return;
if (dax_mapping(mapping)) {
dax_delete_mapping_entry(mapping, index);
return;
}
clear_shadow_entry(mapping, index, entry);
}
/*
* Invalidate exceptional entry if easily possible. This handles exceptional
* entries for invalidate_inode_pages() so for DAX it evicts only unlocked and
* clean entries.
*/
static int invalidate_exceptional_entry(struct address_space *mapping,
pgoff_t index, void *entry)
{
/* Handled by shmem itself */
if (shmem_mapping(mapping))
return 1;
if (dax_mapping(mapping))
return dax_invalidate_mapping_entry(mapping, index);
clear_shadow_entry(mapping, index, entry);
return 1;
}
/*
* Invalidate exceptional entry if clean. This handles exceptional entries for
* invalidate_inode_pages2() so for DAX it evicts only clean entries.
*/
static int invalidate_exceptional_entry2(struct address_space *mapping,
pgoff_t index, void *entry)
{
/* Handled by shmem itself */
if (shmem_mapping(mapping))
return 1;
if (dax_mapping(mapping))
return dax_invalidate_mapping_entry_sync(mapping, index);
clear_shadow_entry(mapping, index, entry);
return 1;
}
/** /**
* do_invalidatepage - invalidate part or all of a page * do_invalidatepage - invalidate part or all of a page
* @page: the page which is affected * @page: the page which is affected
...@@ -262,7 +304,8 @@ void truncate_inode_pages_range(struct address_space *mapping, ...@@ -262,7 +304,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
break; break;
if (radix_tree_exceptional_entry(page)) { if (radix_tree_exceptional_entry(page)) {
clear_exceptional_entry(mapping, index, page); truncate_exceptional_entry(mapping, index,
page);
continue; continue;
} }
...@@ -351,7 +394,8 @@ void truncate_inode_pages_range(struct address_space *mapping, ...@@ -351,7 +394,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
} }
if (radix_tree_exceptional_entry(page)) { if (radix_tree_exceptional_entry(page)) {
clear_exceptional_entry(mapping, index, page); truncate_exceptional_entry(mapping, index,
page);
continue; continue;
} }
...@@ -470,7 +514,8 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping, ...@@ -470,7 +514,8 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
break; break;
if (radix_tree_exceptional_entry(page)) { if (radix_tree_exceptional_entry(page)) {
clear_exceptional_entry(mapping, index, page); invalidate_exceptional_entry(mapping, index,
page);
continue; continue;
} }
...@@ -592,7 +637,9 @@ int invalidate_inode_pages2_range(struct address_space *mapping, ...@@ -592,7 +637,9 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
break; break;
if (radix_tree_exceptional_entry(page)) { if (radix_tree_exceptional_entry(page)) {
clear_exceptional_entry(mapping, index, page); if (!invalidate_exceptional_entry2(mapping,
index, page))
ret = -EBUSY;
continue; continue;
} }
......
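mm/truncate.c now splits the old clear_exceptional_entry() into three thin wrappers: truncate removes unconditionally, invalidate_inode_pages() drops only unlocked clean DAX entries, and invalidate_inode_pages2() drops only clean ones, all falling back to clearing a plain shadow entry. The sketch below shows the common shape of those wrappers, handle shmem and DAX specially, otherwise clear the shadow entry, with stand-in mapping kinds and return codes.

/* One wrapper shape: special-case shmem and DAX, then clear a shadow entry. */
#include <stdio.h>

enum mapping_kind { PLAIN, SHMEM, DAX };

static int dax_invalidate(int clean_only) { return clean_only ? 0 : 1; } /* pretend the entry is dirty */
static void clear_shadow(void)            { /* drop the shadow entry */ }

static int invalidate_exceptional(enum mapping_kind kind, int clean_only)
{
    if (kind == SHMEM)
        return 1;                           /* shmem cleans up after itself */
    if (kind == DAX)
        return dax_invalidate(clean_only);  /* may refuse if the entry is dirty */
    clear_shadow();
    return 1;
}

int main(void)
{
    printf("plain: %d, shmem: %d, dax(dirty, clean-only): %d\n",
           invalidate_exceptional(PLAIN, 1),
           invalidate_exceptional(SHMEM, 1),
           invalidate_exceptional(DAX, 1));
    return 0;
}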
...@@ -108,13 +108,13 @@ static inline const char *check_kernel_text_object(const void *ptr, ...@@ -108,13 +108,13 @@ static inline const char *check_kernel_text_object(const void *ptr,
* __pa() is not just the reverse of __va(). This can be detected * __pa() is not just the reverse of __va(). This can be detected
* and checked: * and checked:
*/ */
textlow_linear = (unsigned long)__va(__pa(textlow)); textlow_linear = (unsigned long)lm_alias(textlow);
/* No different mapping: we're done. */ /* No different mapping: we're done. */
if (textlow_linear == textlow) if (textlow_linear == textlow)
return NULL; return NULL;
/* Check the secondary mapping... */ /* Check the secondary mapping... */
texthigh_linear = (unsigned long)__va(__pa(texthigh)); texthigh_linear = (unsigned long)lm_alias(texthigh);
if (overlaps(ptr, n, textlow_linear, texthigh_linear)) if (overlaps(ptr, n, textlow_linear, texthigh_linear))
return "<linear kernel text>"; return "<linear kernel text>";
......
...@@ -2972,12 +2972,6 @@ void bpf_warn_invalid_xdp_action(u32 act) ...@@ -2972,12 +2972,6 @@ void bpf_warn_invalid_xdp_action(u32 act)
} }
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action); EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
void bpf_warn_invalid_xdp_buffer(void)
{
WARN_ONCE(1, "Illegal XDP buffer encountered, expect throughput degradation\n");
}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_buffer);
static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg, static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg,
int src_reg, int ctx_off, int src_reg, int ctx_off,
struct bpf_insn *insn_buf, struct bpf_insn *insn_buf,
......
...@@ -432,13 +432,6 @@ static struct ctl_table ipv4_table[] = { ...@@ -432,13 +432,6 @@ static struct ctl_table ipv4_table[] = {
.extra1 = &tcp_adv_win_scale_min, .extra1 = &tcp_adv_win_scale_min,
.extra2 = &tcp_adv_win_scale_max, .extra2 = &tcp_adv_win_scale_max,
}, },
{
.procname = "tcp_tw_reuse",
.data = &sysctl_tcp_tw_reuse,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{ {
.procname = "tcp_frto", .procname = "tcp_frto",
.data = &sysctl_tcp_frto, .data = &sysctl_tcp_frto,
...@@ -960,6 +953,13 @@ static struct ctl_table ipv4_net_table[] = { ...@@ -960,6 +953,13 @@ static struct ctl_table ipv4_net_table[] = {
.mode = 0644, .mode = 0644,
.proc_handler = proc_dointvec, .proc_handler = proc_dointvec,
}, },
{
.procname = "tcp_tw_reuse",
.data = &init_net.ipv4.sysctl_tcp_tw_reuse,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
},
#ifdef CONFIG_IP_ROUTE_MULTIPATH #ifdef CONFIG_IP_ROUTE_MULTIPATH
{ {
.procname = "fib_multipath_use_neigh", .procname = "fib_multipath_use_neigh",
......
...@@ -84,7 +84,6 @@ ...@@ -84,7 +84,6 @@
#include <crypto/hash.h> #include <crypto/hash.h>
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly; int sysctl_tcp_low_latency __read_mostly;
#ifdef CONFIG_TCP_MD5SIG #ifdef CONFIG_TCP_MD5SIG
...@@ -120,7 +119,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) ...@@ -120,7 +119,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
and use initial timestamp retrieved from peer table. and use initial timestamp retrieved from peer table.
*/ */
if (tcptw->tw_ts_recent_stamp && if (tcptw->tw_ts_recent_stamp &&
(!twp || (sysctl_tcp_tw_reuse && (!twp || (sock_net(sk)->ipv4.sysctl_tcp_tw_reuse &&
get_seconds() - tcptw->tw_ts_recent_stamp > 1))) { get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2; tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
if (tp->write_seq == 0) if (tp->write_seq == 0)
...@@ -2456,6 +2455,7 @@ static int __net_init tcp_sk_init(struct net *net) ...@@ -2456,6 +2455,7 @@ static int __net_init tcp_sk_init(struct net *net)
net->ipv4.sysctl_tcp_orphan_retries = 0; net->ipv4.sysctl_tcp_orphan_retries = 0;
net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT; net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX; net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
net->ipv4.sysctl_tcp_tw_reuse = 0;
return 0; return 0;
fail: fail:
......
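tcp_tw_reuse stops being a single global and becomes per-network-namespace state: the sysctl entry moves into ipv4_net_table, tcp_sk_init() sets the default, and tcp_twsk_unique() reads it through sock_net(sk). A plain C sketch of that global-to-per-namespace shape, with invented struct definitions rather than the kernel's types:

/* Read a tunable through the socket's owning namespace, not a global. */
#include <stdio.h>

struct netns { int sysctl_tcp_tw_reuse; };
struct sock  { struct netns *net; };

static struct netns *sock_net(const struct sock *sk) { return sk->net; }

static int may_reuse_timewait(const struct sock *sk)
{
    return sock_net(sk)->sysctl_tcp_tw_reuse;   /* per-netns, not global */
}

int main(void)
{
    struct netns init_net  = { .sysctl_tcp_tw_reuse = 0 };  /* default off */
    struct netns container = { .sysctl_tcp_tw_reuse = 1 };  /* enabled here only */
    struct sock a = { .net = &init_net }, b = { .net = &container };

    printf("init_net reuse=%d, container reuse=%d\n",
           may_reuse_timewait(&a), may_reuse_timewait(&b));
    return 0;
}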
...@@ -606,7 +606,6 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) ...@@ -606,7 +606,6 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
rcu_assign_pointer(flow->sf_acts, acts); rcu_assign_pointer(flow->sf_acts, acts);
packet->priority = flow->key.phy.priority; packet->priority = flow->key.phy.priority;
packet->mark = flow->key.phy.skb_mark; packet->mark = flow->key.phy.skb_mark;
packet->protocol = flow->key.eth.type;
rcu_read_lock(); rcu_read_lock();
dp = get_dp_rcu(net, ovs_header->dp_ifindex); dp = get_dp_rcu(net, ovs_header->dp_ifindex);
......
...@@ -312,7 +312,8 @@ static bool icmp6hdr_ok(struct sk_buff *skb) ...@@ -312,7 +312,8 @@ static bool icmp6hdr_ok(struct sk_buff *skb)
* Returns 0 if it encounters a non-vlan or incomplete packet. * Returns 0 if it encounters a non-vlan or incomplete packet.
* Returns 1 after successfully parsing vlan tag. * Returns 1 after successfully parsing vlan tag.
*/ */
static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh) static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh,
bool untag_vlan)
{ {
struct vlan_head *vh = (struct vlan_head *)skb->data; struct vlan_head *vh = (struct vlan_head *)skb->data;
...@@ -330,7 +331,20 @@ static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh) ...@@ -330,7 +331,20 @@ static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh)
key_vh->tci = vh->tci | htons(VLAN_TAG_PRESENT); key_vh->tci = vh->tci | htons(VLAN_TAG_PRESENT);
key_vh->tpid = vh->tpid; key_vh->tpid = vh->tpid;
if (unlikely(untag_vlan)) {
int offset = skb->data - skb_mac_header(skb);
u16 tci;
int err;
__skb_push(skb, offset);
err = __skb_vlan_pop(skb, &tci);
__skb_pull(skb, offset);
if (err)
return err;
__vlan_hwaccel_put_tag(skb, key_vh->tpid, tci);
} else {
__skb_pull(skb, sizeof(struct vlan_head)); __skb_pull(skb, sizeof(struct vlan_head));
}
return 1; return 1;
} }
...@@ -351,13 +365,13 @@ static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key) ...@@ -351,13 +365,13 @@ static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
key->eth.vlan.tpid = skb->vlan_proto; key->eth.vlan.tpid = skb->vlan_proto;
} else { } else {
/* Parse outer vlan tag in the non-accelerated case. */ /* Parse outer vlan tag in the non-accelerated case. */
res = parse_vlan_tag(skb, &key->eth.vlan); res = parse_vlan_tag(skb, &key->eth.vlan, true);
if (res <= 0) if (res <= 0)
return res; return res;
} }
/* Parse inner vlan tag. */ /* Parse inner vlan tag. */
res = parse_vlan_tag(skb, &key->eth.cvlan); res = parse_vlan_tag(skb, &key->eth.cvlan, false);
if (res <= 0) if (res <= 0)
return res; return res;
...@@ -800,29 +814,15 @@ int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr, ...@@ -800,29 +814,15 @@ int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
if (err) if (err)
return err; return err;
if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
/* key_extract assumes that skb->protocol is set-up for /* key_extract assumes that skb->protocol is set-up for
* layer 3 packets which is the case for other callers, * layer 3 packets which is the case for other callers,
* in particular packets recieved from the network stack. * in particular packets received from the network stack.
* Here the correct value can be set from the metadata * Here the correct value can be set from the metadata
* extracted above. * extracted above.
* For L2 packet key eth type would be zero. skb protocol
* would be set to correct value later during key-extact.
*/ */
skb->protocol = key->eth.type;
} else {
struct ethhdr *eth;
skb_reset_mac_header(skb);
eth = eth_hdr(skb);
/* Normally, setting the skb 'protocol' field would be
* handled by a call to eth_type_trans(), but it assumes
* there's a sending device, which we may not have.
*/
if (eth_proto_is_802_3(eth->h_proto))
skb->protocol = eth->h_proto;
else
skb->protocol = htons(ETH_P_802_2);
}
skb->protocol = key->eth.type;
return key_extract(skb, key); return key_extract(skb, key);
} }
...@@ -148,13 +148,15 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) ...@@ -148,13 +148,15 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
unsigned long cl; unsigned long cl;
unsigned long fh; unsigned long fh;
int err; int err;
int tp_created = 0; int tp_created;
if ((n->nlmsg_type != RTM_GETTFILTER) && if ((n->nlmsg_type != RTM_GETTFILTER) &&
!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
return -EPERM; return -EPERM;
replay: replay:
tp_created = 0;
err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL); err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL);
if (err < 0) if (err < 0)
return err; return err;
......
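The cls_api fix moves the tp_created initialisation below the replay: label, so the flag is reset on every retry instead of carrying a stale value from the previous attempt into the error-unwind path. A tiny stand-alone sketch of that reset-after-the-label pattern; the retry condition and error code here are invented for illustration.

/* State that must start fresh on each attempt is initialised after the label. */
#include <stdio.h>

static int attempts;

static int try_once(int *created)
{
    *created = 1;                         /* pretend we created something this pass */
    return ++attempts < 3 ? -11 /* retry */ : 0;
}

int main(void)
{
    int created;
    int err;

replay:
    created = 0;                          /* reset per attempt, like tp_created */
    err = try_once(&created);
    if (err) {
        /* undo anything this attempt created before retrying */
        if (created)
            printf("attempt %d: rolling back partial work\n", attempts);
        goto replay;
    }
    printf("succeeded after %d attempts\n", attempts);
    return 0;
}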
...@@ -441,7 +441,8 @@ static void __tipc_shutdown(struct socket *sock, int error) ...@@ -441,7 +441,8 @@ static void __tipc_shutdown(struct socket *sock, int error)
while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (TIPC_SKB_CB(skb)->bytes_read) { if (TIPC_SKB_CB(skb)->bytes_read) {
kfree_skb(skb); kfree_skb(skb);
} else { continue;
}
if (!tipc_sk_type_connectionless(sk) && if (!tipc_sk_type_connectionless(sk) &&
sk->sk_state != TIPC_DISCONNECTING) { sk->sk_state != TIPC_DISCONNECTING) {
tipc_set_sk_state(sk, TIPC_DISCONNECTING); tipc_set_sk_state(sk, TIPC_DISCONNECTING);
...@@ -449,7 +450,10 @@ static void __tipc_shutdown(struct socket *sock, int error) ...@@ -449,7 +450,10 @@ static void __tipc_shutdown(struct socket *sock, int error)
} }
tipc_sk_respond(sk, skb, error); tipc_sk_respond(sk, skb, error);
} }
}
if (tipc_sk_type_connectionless(sk))
return;
if (sk->sk_state != TIPC_DISCONNECTING) { if (sk->sk_state != TIPC_DISCONNECTING) {
skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode, TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
...@@ -457,11 +461,9 @@ static void __tipc_shutdown(struct socket *sock, int error) ...@@ -457,11 +461,9 @@ static void __tipc_shutdown(struct socket *sock, int error)
tsk->portid, error); tsk->portid, error);
if (skb) if (skb)
tipc_node_xmit_skb(net, skb, dnode, tsk->portid); tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
if (!tipc_sk_type_connectionless(sk)) {
tipc_node_remove_conn(net, dnode, tsk->portid); tipc_node_remove_conn(net, dnode, tsk->portid);
tipc_set_sk_state(sk, TIPC_DISCONNECTING); tipc_set_sk_state(sk, TIPC_DISCONNECTING);
} }
}
} }
/** /**
......