Commit d6ac4ffc authored by Linus Torvalds

Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:
 "These are late by a week; they should have been merged during the
  merge window, but unfortunately, the ARM kernel build/boot farms were
  indicating random failures, and it wasn't clear whether the cause was
  something in these changes or something during the merge window.

  This is a set of merge window fixes with some documentation additions"

* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: avoid unwanted GCC memset()/memcpy() optimisations for IO variants
  ARM: pgtable: document mapping types
  ARM: io: convert ioremap*() to functions
  ARM: io: fix ioremap_wt() implementation
  ARM: io: document ARM specific behaviour of ioremap*() implementations
  ARM: fix lockdep unannotated irqs-off warning
  ARM: 8397/1: fix vdsomunge not to depend on glibc specific error.h
  ARM: add helpful message when truncating physical memory
  ARM: add help text for HIGHPTE configuration entry
  ARM: fix DEBUG_SET_MODULE_RONX build dependencies
  ARM: 8396/1: use phys_addr_t in pfn_to_kaddr()
  ARM: 8394/1: update memblock limit after mapping lowmem
  ARM: 8393/1: smp: Fix suspicious RCU usage with ipi tracepoints
@@ -1693,6 +1693,12 @@ config HIGHMEM
 config HIGHPTE
 	bool "Allocate 2nd-level pagetables from highmem"
 	depends on HIGHMEM
+	help
+	  The VM uses one page of physical memory for each page table.
+	  For systems with a lot of processes, this can use a lot of
+	  precious low memory, eventually leading to low memory being
+	  consumed by page tables.  Setting this option will allow
+	  user-space 2nd level page tables to reside in high memory.
 
 config HW_PERF_EVENTS
 	bool "Enable hardware performance counter support for perf events"
......
@@ -1635,7 +1635,7 @@ config PID_IN_CONTEXTIDR
 config DEBUG_SET_MODULE_RONX
 	bool "Set loadable kernel module data as NX and text as RO"
-	depends on MODULES
+	depends on MODULES && MMU
 	---help---
 	  This option helps catch unintended modifications to loadable
 	  kernel module's text and read-only data. It also prevents execution
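The added MMU dependency reflects that these protections are applied with the set_memory_*() page-attribute helpers, which exist only on MMU-enabled ARM kernels. A minimal sketch of the kind of calls involved (a simplified illustration, not the actual module-loader code; the function name is hypothetical):

    #include <asm/cacheflush.h>	/* declares set_memory_ro()/set_memory_nx() on ARM */

    /* Mark a module's text read-only and its data non-executable.
     * Addresses are page-aligned; counts are in pages. */
    static void example_protect_module(unsigned long text, int text_pages,
    				   unsigned long data, int data_pages)
    {
    	set_memory_ro(text, text_pages);
    	set_memory_nx(data, data_pages);
    }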
......
@@ -140,16 +140,11 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
  * The _caller variety takes a __builtin_return_address(0) value for
  * /proc/vmalloc to use - and should only be used in non-inline functions.
  */
-extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
-	size_t, unsigned int, void *);
 extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
 	void *);
-extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int);
 extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
 extern void __iounmap(volatile void __iomem *addr);
-extern void __arm_iounmap(volatile void __iomem *addr);
 extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
 	unsigned int, void *);
@@ -321,21 +316,24 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 static inline void memset_io(volatile void __iomem *dst, unsigned c,
 	size_t count)
 {
-	memset((void __force *)dst, c, count);
+	extern void mmioset(void *, unsigned int, size_t);
+	mmioset((void __force *)dst, c, count);
 }
+#define memset_io(dst,c,count) memset_io(dst,c,count)
 
 static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
 	size_t count)
 {
-	memcpy(to, (const void __force *)from, count);
+	extern void mmiocpy(void *, const void *, size_t);
+	mmiocpy(to, (const void __force *)from, count);
 }
+#define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count)
 
 static inline void memcpy_toio(volatile void __iomem *to, const void *from,
 	size_t count)
 {
-	memcpy((void __force *)to, from, count);
+	extern void mmiocpy(void *, const void *, size_t);
+	mmiocpy((void __force *)to, from, count);
 }
+#define memcpy_toio(to,from,count) memcpy_toio(to,from,count)
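Routing these inlines through the out-of-line mmioset()/mmiocpy() matters because GCC pattern-matches calls to standard memset()/memcpy() and may rewrite them into inlined byte-wise or unaligned sequences that ARM device mappings cannot tolerate. A hedged usage sketch (the device, window size and message length are made up):

    #include <linux/io.h>	/* memset_io(), memcpy_toio() */

    /* Hypothetical: clear then fill a 256-byte device mailbox window. */
    static void example_fill_mailbox(void __iomem *mbox, const void *msg,
    				 size_t len)
    {
    	/* Plain memset()/memcpy() on __iomem pointers would let the
    	 * compiler emit accesses that are "unpredictable" on Device
    	 * memory; the _io variants go through mmioset()/mmiocpy(). */
    	memset_io(mbox, 0, 256);
    	memcpy_toio(mbox, msg, len);
    }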
@@ -348,18 +346,61 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from,
 #endif	/* readl */
 
 /*
- * ioremap and friends.
+ * ioremap() and friends.
  *
+ * ioremap() takes a resource address, and size.  Due to the ARM memory
+ * types, it is important to use the correct ioremap() function as each
+ * mapping has specific properties.
+ *
+ * Function		Memory type	Cacheability	Cache hint
+ * ioremap()		Device		n/a		n/a
+ * ioremap_nocache()	Device		n/a		n/a
+ * ioremap_cache()	Normal		Writeback	Read allocate
+ * ioremap_wc()		Normal		Non-cacheable	n/a
+ * ioremap_wt()		Normal		Non-cacheable	n/a
+ *
+ * All device mappings have the following properties:
+ * - no access speculation
+ * - no repetition (eg, on return from an exception)
+ * - number, order and size of accesses are maintained
+ * - unaligned accesses are "unpredictable"
+ * - writes may be delayed before they hit the endpoint device
+ *
- * ioremap takes a PCI memory address, as specified in
- * Documentation/io-mapping.txt.
+ * ioremap_nocache() is the same as ioremap() as there are too many device
+ * drivers using this for device registers, and documentation which tells
+ * people to use it for such for this to be any different.  This is not a
+ * safe fallback for memory-like mappings, or memory regions where the
+ * compiler may generate unaligned accesses - eg, via inlining its own
+ * memcpy.
+ *
+ * All normal memory mappings have the following properties:
+ * - reads can be repeated with no side effects
+ * - repeated reads return the last value written
+ * - reads can fetch additional locations without side effects
+ * - writes can be repeated (in certain cases) with no side effects
+ * - writes can be merged before accessing the target
+ * - unaligned accesses can be supported
+ * - ordering is not guaranteed without explicit dependencies or barrier
+ *   instructions
+ * - writes may be delayed before they hit the endpoint memory
+ *
+ * The cache hint is only a performance hint: CPUs may alias these hints.
+ * Eg, a CPU not implementing read allocate but implementing write allocate
+ * will provide a write allocate mapping instead.
  */
-#define ioremap(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_nocache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_cache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
-#define ioremap_wc(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE_WC)
-#define ioremap_wt(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE)
-#define iounmap				__arm_iounmap
+void __iomem *ioremap(resource_size_t res_cookie, size_t size);
+#define ioremap ioremap
+#define ioremap_nocache ioremap
+
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
+#define ioremap_cache ioremap_cache
+
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
+#define ioremap_wc ioremap_wc
+#define ioremap_wt ioremap_wc
+
+void iounmap(volatile void __iomem *iomem_cookie);
+#define iounmap iounmap
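In short, drivers pick the variant matching what they map: ioremap() for registers, ioremap_wc() for framebuffer-like buffers, ioremap_cache() for memory-like regions. A hedged sketch of typical use (physical addresses and sizes are invented):

    #include <linux/io.h>
    #include <linux/sizes.h>

    #define EXAMPLE_REGS_PHYS	0x10000000	/* hypothetical device */
    #define EXAMPLE_FB_PHYS	0x20000000	/* hypothetical framebuffer */

    static int example_map(void)
    {
    	void __iomem *regs, *fb;

    	regs = ioremap(EXAMPLE_REGS_PHYS, SZ_4K);	/* Device memory */
    	if (!regs)
    		return -ENOMEM;

    	fb = ioremap_wc(EXAMPLE_FB_PHYS, SZ_1M);	/* Normal, non-cacheable */
    	if (!fb) {
    		iounmap(regs);
    		return -ENOMEM;
    	}

    	writel(1, regs);	/* ordered, non-speculated register write */

    	iounmap(fb);
    	iounmap(regs);
    	return 0;
    }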
 /*
  * io{read,write}{16,32}be() macros
......
@@ -275,7 +275,7 @@ static inline void *phys_to_virt(phys_addr_t x)
  */
 #define __pa(x)			__virt_to_phys((unsigned long)(x))
 #define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
-#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
+#define pfn_to_kaddr(pfn)	__va((phys_addr_t)(pfn) << PAGE_SHIFT)
 
 extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
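The cast matters under LPAE, where phys_addr_t is 64-bit while unsigned long is 32-bit: for a PFN at or above the 4 GiB boundary, the old macro shifted in 32 bits and silently truncated. A standalone demonstration (plain C, not kernel code; PAGE_SHIFT and the PFN value are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
    	uint32_t pfn = 0x100000;	/* first page at the 4 GiB mark */

    	uint32_t bad  = pfn << PAGE_SHIFT;		/* wraps to 0 */
    	uint64_t good = (uint64_t)pfn << PAGE_SHIFT;	/* 0x100000000 */

    	printf("bad=%#x good=%#llx\n", bad, (unsigned long long)good);
    	return 0;
    }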
......
@@ -129,7 +129,36 @@
 /*
  * These are the memory types, defined to be compatible with
- * pre-ARMv6 CPUs cacheable and bufferable bits:   XXCB
+ * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
+ * ARMv6+ without TEX remapping, they are a table index.
+ * ARMv6+ with TEX remapping, they correspond to n/a,TEX(0),C,B
+ *
+ * MT type		Pre-ARMv6	ARMv6+ type / cacheable status
+ * UNCACHED		Uncached	Strongly ordered
+ * BUFFERABLE		Bufferable	Normal memory / non-cacheable
+ * WRITETHROUGH		Writethrough	Normal memory / write through
+ * WRITEBACK		Writeback	Normal memory / write back, read alloc
+ * MINICACHE		Minicache	N/A
+ * WRITEALLOC		Writeback	Normal memory / write back, write alloc
+ * DEV_SHARED		Uncached	Device memory (shared)
+ * DEV_NONSHARED	Uncached	Device memory (non-shared)
+ * DEV_WC		Bufferable	Normal memory / non-cacheable
+ * DEV_CACHED		Writeback	Normal memory / write back, read alloc
+ * VECTORS		Variable	Normal memory / variable
+ *
+ * All normal memory mappings have the following properties:
+ * - reads can be repeated with no side effects
+ * - repeated reads return the last value written
+ * - reads can fetch additional locations without side effects
+ * - writes can be repeated (in certain cases) with no side effects
+ * - writes can be merged before accessing the target
+ * - unaligned accesses can be supported
+ *
+ * All device mappings have the following properties:
+ * - no access speculation
+ * - no repetition (eg, on return from an exception)
+ * - number, order and size of accesses are maintained
+ * - unaligned accesses are "unpredictable"
  */
 #define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0x00) << 2)	/* 0000 */
 #define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 0x01) << 2)	/* 0001 */
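As the two kept definitions show, the memory type is a 4-bit software field held in PTE bits [5:2]. A small standalone sketch of packing and unpacking that field (mirrors the kernel's shift-by-2 encoding; the names are copied for illustration only):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t pteval_t;

    #define L_PTE_MT_UNCACHED	((pteval_t)0x00 << 2)	/* 0000 */
    #define L_PTE_MT_BUFFERABLE	((pteval_t)0x01 << 2)	/* 0001 */
    #define L_PTE_MT_MASK	((pteval_t)0x0f << 2)

    int main(void)
    {
    	pteval_t pte = 0x1000 | L_PTE_MT_BUFFERABLE;

    	/* recover the memory-type index from bits [5:2] */
    	printf("mt index = %u\n", (unsigned)((pte & L_PTE_MT_MASK) >> 2));
    	return 0;
    }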
......
@@ -50,6 +50,9 @@ extern void __aeabi_ulcmp(void);
 
 extern void fpundefinstr(void);
 
+void mmioset(void *, unsigned int, size_t);
+void mmiocpy(void *, const void *, size_t);
+
 	/* platform dependent support */
 EXPORT_SYMBOL(arm_delay_ops);
@@ -88,6 +91,9 @@ EXPORT_SYMBOL(memmove);
 EXPORT_SYMBOL(memchr);
 EXPORT_SYMBOL(__memzero);
 
+EXPORT_SYMBOL(mmioset);
+EXPORT_SYMBOL(mmiocpy);
+
 #ifdef CONFIG_MMU
 EXPORT_SYMBOL(copy_page);
......
@@ -410,7 +410,7 @@ ENDPROC(__fiq_abt)
 	zero_fp
 
 	.if	\trace
-#ifdef CONFIG_IRQSOFF_TRACER
+#ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
 #endif
 	ct_user_exit save = 0
......
@@ -578,7 +578,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
 	if ((unsigned)ipinr < NR_IPI) {
-		trace_ipi_entry(ipi_types[ipinr]);
+		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
 		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
 	}
@@ -637,7 +637,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 	}
 
 	if ((unsigned)ipinr < NR_IPI)
-		trace_ipi_exit(ipi_types[ipinr]);
+		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
 
 	set_irq_regs(old_regs);
 }
......
......@@ -61,8 +61,10 @@
/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
ENTRY(mmiocpy)
ENTRY(memcpy)
#include "copy_template.S"
ENDPROC(memcpy)
ENDPROC(mmiocpy)
@@ -16,6 +16,7 @@
 	.text
 	.align	5
 
+ENTRY(mmioset)
 ENTRY(memset)
 UNWIND( .fnstart         )
 	ands	r3, r0, #3	@ 1 unaligned?
@@ -133,3 +134,4 @@ UNWIND( .fnstart )
 	b	1b
 UNWIND( .fnend   )
 ENDPROC(memset)
+ENDPROC(mmioset)
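ENTRY(mmiocpy)/ENTRY(memcpy) and ENTRY(mmioset)/ENTRY(memset) simply place two global labels on one implementation, giving the IO helpers real out-of-line entry points that the compiler will not transform back into its own memcpy/memset idioms. A user-space C approximation of the same two-names-one-body technique, using the GCC/Clang alias attribute (function names are illustrative):

    #include <stddef.h>

    void *my_memcpy(void *dst, const void *src, size_t n)
    {
    	unsigned char *d = dst;
    	const unsigned char *s = src;

    	while (n--)
    		*d++ = *s++;
    	return dst;
    }

    /* second entry point bound to the same code, like ENTRY(mmiocpy) */
    void *my_mmiocpy(void *dst, const void *src, size_t n)
    	__attribute__((alias("my_memcpy")));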
@@ -255,7 +255,7 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
 }
 #endif
 
-void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
+static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	unsigned long offset, size_t size, unsigned int mtype, void *caller)
 {
 	const struct mem_type *type;
@@ -363,7 +363,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 	unsigned int mtype)
 {
 	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
-			__builtin_return_address(0));
+					__builtin_return_address(0));
 }
 EXPORT_SYMBOL(__arm_ioremap_pfn);
@@ -371,13 +371,26 @@ void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
 				      unsigned int, void *) =
 	__arm_ioremap_caller;
 
-void __iomem *
-__arm_ioremap(phys_addr_t phys_addr, size_t size, unsigned int mtype)
+void __iomem *ioremap(resource_size_t res_cookie, size_t size)
+{
+	return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
+				   __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap);
+
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+{
+	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
+				   __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
+
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
 {
-	return arch_ioremap_caller(phys_addr, size, mtype,
-		__builtin_return_address(0));
+	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
+				   __builtin_return_address(0));
 }
-EXPORT_SYMBOL(__arm_ioremap);
+EXPORT_SYMBOL(ioremap_wc);
 
 /*
  * Remap an arbitrary physical address space into the kernel virtual
@@ -431,11 +444,11 @@ void __iounmap(volatile void __iomem *io_addr)
 
 void (*arch_iounmap)(volatile void __iomem *) = __iounmap;
 
-void __arm_iounmap(volatile void __iomem *io_addr)
+void iounmap(volatile void __iomem *cookie)
 {
-	arch_iounmap(io_addr);
+	arch_iounmap(cookie);
 }
-EXPORT_SYMBOL(__arm_iounmap);
+EXPORT_SYMBOL(iounmap);
 
 #ifdef CONFIG_PCI
 static int pci_ioremap_mem_type = MT_DEVICE;
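Since ioremap() and friends now dispatch through the arch_ioremap_caller function pointer, SoC code can still interpose a custom mapper before falling back to the generic path. A hedged sketch of that override pattern (the platform names and the static-mapping helper are invented for illustration):

    /* hypothetical platform hook; __arm_ioremap_caller is the generic path */
    static void __iomem *example_soc_ioremap_caller(phys_addr_t phys,
    		size_t size, unsigned int mtype, void *caller)
    {
    	void __iomem *va = example_find_static_mapping(phys, size);

    	if (va)		/* reuse a fixed boot-time mapping if it covers us */
    		return va;

    	return __arm_ioremap_caller(phys, size, mtype, caller);
    }

    static void __init example_soc_map_io(void)
    {
    	arch_ioremap_caller = example_soc_ioremap_caller;
    }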
......
@@ -1072,6 +1072,7 @@ void __init sanity_check_meminfo(void)
 	int highmem = 0;
 	phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
 	struct memblock_region *reg;
+	bool should_use_highmem = false;
 
 	for_each_memblock(memory, reg) {
 		phys_addr_t block_start = reg->base;
@@ -1090,6 +1091,7 @@ void __init sanity_check_meminfo(void)
 			pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
 				  &block_start, &block_end);
 			memblock_remove(reg->base, reg->size);
+			should_use_highmem = true;
 			continue;
 		}
@@ -1100,6 +1102,7 @@ void __init sanity_check_meminfo(void)
 					  &block_start, &block_end, &vmalloc_limit);
 				memblock_remove(vmalloc_limit, overlap_size);
 				block_end = vmalloc_limit;
+				should_use_highmem = true;
 			}
 		}
@@ -1134,6 +1137,9 @@ void __init sanity_check_meminfo(void)
 		}
 	}
 
+	if (should_use_highmem)
+		pr_notice("Consider using a HIGHMEM enabled kernel.\n");
+
 	high_memory = __va(arm_lowmem_limit - 1) + 1;
 
 	/*
@@ -1494,6 +1500,7 @@ void __init paging_init(const struct machine_desc *mdesc)
 	build_mem_type_table();
 	prepare_page_table();
 	map_lowmem();
+	memblock_set_current_limit(arm_lowmem_limit);
 	dma_contiguous_remap();
 	devicemaps_init(mdesc);
 	kmap_init();
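The meminfo change follows a clamp-and-flag pattern: every region is trimmed to the lowmem limit inside the loop, and a single notice is printed afterwards if anything was dropped. A standalone sketch of that pattern (plain C; the region list and limit values are illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct region { uint64_t base, size; };

    /* trim regions to 'limit'; report whether any RAM was dropped */
    static bool clamp_to_lowmem(struct region *r, int n, uint64_t limit)
    {
    	bool dropped = false;

    	for (int i = 0; i < n; i++) {
    		uint64_t end = r[i].base + r[i].size;

    		if (r[i].base >= limit) {		/* wholly above limit */
    			r[i].size = 0;
    			dropped = true;
    		} else if (end > limit) {		/* straddles limit */
    			r[i].size = limit - r[i].base;
    			dropped = true;
    		}
    	}
    	return dropped;
    }

    int main(void)
    {
    	struct region ram[] = { { 0x80000000, 0x40000000 } };	/* 1 GiB */

    	if (clamp_to_lowmem(ram, 1, 0xb0000000))	/* 768 MiB lowmem */
    		puts("Consider using a HIGHMEM enabled kernel.");
    	return 0;
    }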
......
@@ -351,30 +351,43 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
 }
 EXPORT_SYMBOL(__arm_ioremap_pfn);
 
-void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
-				       size_t size, unsigned int mtype, void *caller)
+void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
+				   unsigned int mtype, void *caller)
 {
-	return __arm_ioremap_pfn(pfn, offset, size, mtype);
+	return (void __iomem *)phys_addr;
 }
 
-void __iomem *__arm_ioremap(phys_addr_t phys_addr, size_t size,
-			    unsigned int mtype)
+void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
+
+void __iomem *ioremap(resource_size_t res_cookie, size_t size)
 {
-	return (void __iomem *)phys_addr;
+	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE,
+				    __builtin_return_address(0));
 }
-EXPORT_SYMBOL(__arm_ioremap);
+EXPORT_SYMBOL(ioremap);
 
-void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+{
+	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
+				    __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
 
-void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
-				   unsigned int mtype, void *caller)
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
 {
-	return __arm_ioremap(phys_addr, size, mtype);
-}
+	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
+				    __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_wc);
 
 void __iounmap(volatile void __iomem *addr)
 {
 }
 EXPORT_SYMBOL(__iounmap);
 
 void (*arch_iounmap)(volatile void __iomem *);
 
-void __arm_iounmap(volatile void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
 {
 }
-EXPORT_SYMBOL(__arm_iounmap);
+EXPORT_SYMBOL(iounmap);
@@ -45,13 +45,11 @@
  * it does.
  */
 
-#define _GNU_SOURCE
-
 #include <byteswap.h>
 #include <elf.h>
 #include <errno.h>
-#include <error.h>
 #include <fcntl.h>
+#include <stdarg.h>
 #include <stdbool.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -82,11 +80,25 @@
 #define EF_ARM_ABI_FLOAT_HARD 0x400
 #endif
 
+static int failed;
+static const char *argv0;
 static const char *outfile;
 
+static void fail(const char *fmt, ...)
+{
+	va_list ap;
+
+	failed = 1;
+	fprintf(stderr, "%s: ", argv0);
+	va_start(ap, fmt);
+	vfprintf(stderr, fmt, ap);
+	va_end(ap);
+	exit(EXIT_FAILURE);
+}
+
 static void cleanup(void)
 {
-	if (error_message_count > 0 && outfile != NULL)
+	if (failed && outfile != NULL)
 		unlink(outfile);
 }
@@ -119,68 +131,66 @@ int main(int argc, char **argv)
 	int infd;
 
 	atexit(cleanup);
+	argv0 = argv[0];
 
 	if (argc != 3)
-		error(EXIT_FAILURE, 0, "Usage: %s [infile] [outfile]", argv[0]);
+		fail("Usage: %s [infile] [outfile]\n", argv[0]);
 
 	infile = argv[1];
 	outfile = argv[2];
 
 	infd = open(infile, O_RDONLY);
 	if (infd < 0)
-		error(EXIT_FAILURE, errno, "Cannot open %s", infile);
+		fail("Cannot open %s: %s\n", infile, strerror(errno));
 
 	if (fstat(infd, &stat) != 0)
-		error(EXIT_FAILURE, errno, "Failed stat for %s", infile);
+		fail("Failed stat for %s: %s\n", infile, strerror(errno));
 
 	inbuf = mmap(NULL, stat.st_size, PROT_READ, MAP_PRIVATE, infd, 0);
 	if (inbuf == MAP_FAILED)
-		error(EXIT_FAILURE, errno, "Failed to map %s", infile);
+		fail("Failed to map %s: %s\n", infile, strerror(errno));
 
 	close(infd);
 
 	inhdr = inbuf;
 
 	if (memcmp(&inhdr->e_ident, ELFMAG, SELFMAG) != 0)
-		error(EXIT_FAILURE, 0, "Not an ELF file");
+		fail("Not an ELF file\n");
 
 	if (inhdr->e_ident[EI_CLASS] != ELFCLASS32)
-		error(EXIT_FAILURE, 0, "Unsupported ELF class");
+		fail("Unsupported ELF class\n");
 
 	swap = inhdr->e_ident[EI_DATA] != HOST_ORDER;
 
 	if (read_elf_half(inhdr->e_type, swap) != ET_DYN)
-		error(EXIT_FAILURE, 0, "Not a shared object");
+		fail("Not a shared object\n");
 
-	if (read_elf_half(inhdr->e_machine, swap) != EM_ARM) {
-		error(EXIT_FAILURE, 0, "Unsupported architecture %#x",
-		      inhdr->e_machine);
-	}
+	if (read_elf_half(inhdr->e_machine, swap) != EM_ARM)
+		fail("Unsupported architecture %#x\n", inhdr->e_machine);
 
 	e_flags = read_elf_word(inhdr->e_flags, swap);
 
 	if (EF_ARM_EABI_VERSION(e_flags) != EF_ARM_EABI_VER5) {
-		error(EXIT_FAILURE, 0, "Unsupported EABI version %#x",
-		      EF_ARM_EABI_VERSION(e_flags));
+		fail("Unsupported EABI version %#x\n",
+		     EF_ARM_EABI_VERSION(e_flags));
 	}
 
 	if (e_flags & EF_ARM_ABI_FLOAT_HARD)
-		error(EXIT_FAILURE, 0,
-		      "Unexpected hard-float flag set in e_flags");
+		fail("Unexpected hard-float flag set in e_flags\n");
 
 	clear_soft_float = !!(e_flags & EF_ARM_ABI_FLOAT_SOFT);
 
 	outfd = open(outfile, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
 	if (outfd < 0)
-		error(EXIT_FAILURE, errno, "Cannot open %s", outfile);
+		fail("Cannot open %s: %s\n", outfile, strerror(errno));
 
 	if (ftruncate(outfd, stat.st_size) != 0)
-		error(EXIT_FAILURE, errno, "Cannot truncate %s", outfile);
+		fail("Cannot truncate %s: %s\n", outfile, strerror(errno));
 
 	outbuf = mmap(NULL, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 		      outfd, 0);
 	if (outbuf == MAP_FAILED)
-		error(EXIT_FAILURE, errno, "Failed to map %s", outfile);
+		fail("Failed to map %s: %s\n", outfile, strerror(errno));
 
 	close(outfd);
@@ -195,7 +205,7 @@ int main(int argc, char **argv)
 	}
 
 	if (msync(outbuf, stat.st_size, MS_SYNC) != 0)
-		error(EXIT_FAILURE, errno, "Failed to sync %s", outfile);
+		fail("Failed to sync %s: %s\n", outfile, strerror(errno));
 
 	return EXIT_SUCCESS;
 }