- 21 May 2016, 1 commit
-
-
By Zhaoxiu Zeng

The binary GCD algorithm is based on the following facts:

1. If a and b are both even, then gcd(a,b) = 2 * gcd(a/2, b/2)
2. If a is even and b is odd, then gcd(a,b) = gcd(a/2, b)
3. If a and b are both odd, then gcd(a,b) = gcd((a-b)/2, b) = gcd((a+b)/2, b)

Even on x86 machines with reasonable division hardware, the binary
algorithm runs about 25% faster (80% the execution time) than the
division-based Euclidean algorithm.

On platforms like Alpha and ARMv6 where division is a function call to
emulation code, it's even more significant.

There are two variants of the code here, depending on whether a fast
__ffs (find least significant set bit) instruction is available.  This
allows the unpredictable branches in the bit-at-a-time shifting loop to
be eliminated.

If fast __ffs is not available, the "even/odd" GCD variant is used.

I use the following code to benchmark:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#define swap(a, b) \
do { \
    a ^= b; \
    b ^= a; \
    a ^= b; \
} while (0)

unsigned long gcd0(unsigned long a, unsigned long b)
{
    unsigned long r;

    if (a < b) {
        swap(a, b);
    }

    if (b == 0)
        return a;

    while ((r = a % b) != 0) {
        a = b;
        b = r;
    }

    return b;
}

unsigned long gcd1(unsigned long a, unsigned long b)
{
    unsigned long r = a | b;

    if (!a || !b)
        return r;

    b >>= __builtin_ctzl(b);

    for (;;) {
        a >>= __builtin_ctzl(a);
        if (a == b)
            return a << __builtin_ctzl(r);

        if (a < b)
            swap(a, b);
        a -= b;
    }
}

unsigned long gcd2(unsigned long a, unsigned long b)
{
    unsigned long r = a | b;

    if (!a || !b)
        return r;

    r &= -r;

    while (!(b & r))
        b >>= 1;

    for (;;) {
        while (!(a & r))
            a >>= 1;
        if (a == b)
            return a;

        if (a < b)
            swap(a, b);
        a -= b;
        a >>= 1;
        if (a & r)
            a += b;
        a >>= 1;
    }
}

unsigned long gcd3(unsigned long a, unsigned long b)
{
    unsigned long r = a | b;

    if (!a || !b)
        return r;

    b >>= __builtin_ctzl(b);
    if (b == 1)
        return r & -r;

    for (;;) {
        a >>= __builtin_ctzl(a);
        if (a == 1)
            return r & -r;
        if (a == b)
            return a << __builtin_ctzl(r);

        if (a < b)
            swap(a, b);
        a -= b;
    }
}

unsigned long gcd4(unsigned long a, unsigned long b)
{
    unsigned long r = a | b;

    if (!a || !b)
        return r;

    r &= -r;

    while (!(b & r))
        b >>= 1;
    if (b == r)
        return r;

    for (;;) {
        while (!(a & r))
            a >>= 1;
        if (a == r)
            return r;
        if (a == b)
            return a;

        if (a < b)
            swap(a, b);
        a -= b;
        a >>= 1;
        if (a & r)
            a += b;
        a >>= 1;
    }
}

static unsigned long (*gcd_func[])(unsigned long a, unsigned long b) = {
    gcd0, gcd1, gcd2, gcd3, gcd4,
};

#define TEST_ENTRIES (sizeof(gcd_func) / sizeof(gcd_func[0]))

#if defined(__x86_64__)

#define rdtscll(val) do { \
    unsigned long __a,__d; \
    __asm__ __volatile__("rdtsc" : "=a" (__a), "=d" (__d)); \
    (val) = ((unsigned long long)__a) | (((unsigned long long)__d)<<32); \
} while(0)

static unsigned long long benchmark_gcd_func(unsigned long (*gcd)(unsigned long, unsigned long),
                                             unsigned long a, unsigned long b, unsigned long *res)
{
    unsigned long long start, end;
    unsigned long long ret;
    unsigned long gcd_res;

    rdtscll(start);
    gcd_res = gcd(a, b);
    rdtscll(end);

    if (end >= start)
        ret = end - start;
    else
        ret = ~0ULL - start + 1 + end;

    *res = gcd_res;
    return ret;
}

#else

static inline struct timespec read_time(void)
{
    struct timespec time;
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time);
    return time;
}

static inline unsigned long long diff_time(struct timespec start, struct timespec end)
{
    struct timespec temp;

    if ((end.tv_nsec - start.tv_nsec) < 0) {
        temp.tv_sec = end.tv_sec - start.tv_sec - 1;
        temp.tv_nsec = 1000000000ULL + end.tv_nsec - start.tv_nsec;
    } else {
        temp.tv_sec = end.tv_sec - start.tv_sec;
        temp.tv_nsec = end.tv_nsec - start.tv_nsec;
    }

    return temp.tv_sec * 1000000000ULL + temp.tv_nsec;
}

static unsigned long long benchmark_gcd_func(unsigned long (*gcd)(unsigned long, unsigned long),
                                             unsigned long a, unsigned long b, unsigned long *res)
{
    struct timespec start, end;
    unsigned long gcd_res;

    start = read_time();
    gcd_res = gcd(a, b);
    end = read_time();

    *res = gcd_res;
    return diff_time(start, end);
}

#endif

static inline unsigned long get_rand()
{
    if (sizeof(long) == 8)
        return (unsigned long)rand() << 32 | rand();
    else
        return rand();
}

int main(int argc, char **argv)
{
    unsigned int seed = time(0);
    int loops = 100;
    int repeats = 1000;
    unsigned long (*res)[TEST_ENTRIES];
    unsigned long long elapsed[TEST_ENTRIES];
    int i, j, k;

    for (;;) {
        int opt = getopt(argc, argv, "n:r:s:");
        /* End condition always first */
        if (opt == -1)
            break;

        switch (opt) {
        case 'n':
            loops = atoi(optarg);
            break;
        case 'r':
            repeats = atoi(optarg);
            break;
        case 's':
            seed = strtoul(optarg, NULL, 10);
            break;
        default:
            /* You won't actually get here. */
            break;
        }
    }

    res = malloc(sizeof(unsigned long) * TEST_ENTRIES * loops);
    memset(elapsed, 0, sizeof(elapsed));

    srand(seed);
    for (j = 0; j < loops; j++) {
        unsigned long a = get_rand();
        /* Do we have args? */
        unsigned long b = argc > optind ? strtoul(argv[optind], NULL, 10) : get_rand();
        unsigned long long min_elapsed[TEST_ENTRIES];
        for (k = 0; k < repeats; k++) {
            for (i = 0; i < TEST_ENTRIES; i++) {
                unsigned long long tmp = benchmark_gcd_func(gcd_func[i], a, b, &res[j][i]);
                if (k == 0 || min_elapsed[i] > tmp)
                    min_elapsed[i] = tmp;
            }
        }
        for (i = 0; i < TEST_ENTRIES; i++)
            elapsed[i] += min_elapsed[i];
    }

    for (i = 0; i < TEST_ENTRIES; i++)
        printf("gcd%d: elapsed %llu\n", i, elapsed[i]);

    k = 0;
    srand(seed);
    for (j = 0; j < loops; j++) {
        unsigned long a = get_rand();
        unsigned long b = argc > optind ? strtoul(argv[optind], NULL, 10) : get_rand();

        for (i = 1; i < TEST_ENTRIES; i++) {
            if (res[j][i] != res[j][0])
                break;
        }
        if (i < TEST_ENTRIES) {
            if (k == 0) {
                k = 1;
                fprintf(stderr, "Error:\n");
            }
            fprintf(stderr, "gcd(%lu, %lu): ", a, b);
            for (i = 0; i < TEST_ENTRIES; i++)
                fprintf(stderr, "%ld%s", res[j][i],
                        i < TEST_ENTRIES - 1 ? ", " : "\n");
        }
    }

    if (k == 0)
        fprintf(stderr, "PASS\n");

    free(res);

    return 0;
}

Compiled with "-O2", on "VirtualBox 4.4.0-22-generic #38-Ubuntu x86_64" got:

zhaoxiuzeng@zhaoxiuzeng-VirtualBox:~/develop$ ./gcd -r 500000 -n 10
gcd0: elapsed 10174
gcd1: elapsed 2120
gcd2: elapsed 2902
gcd3: elapsed 2039
gcd4: elapsed 2812
PASS
zhaoxiuzeng@zhaoxiuzeng-VirtualBox:~/develop$ ./gcd -r 500000 -n 10
gcd0: elapsed 9309
gcd1: elapsed 2280
gcd2: elapsed 2822
gcd3: elapsed 2217
gcd4: elapsed 2710
PASS
zhaoxiuzeng@zhaoxiuzeng-VirtualBox:~/develop$ ./gcd -r 500000 -n 10
gcd0: elapsed 9589
gcd1: elapsed 2098
gcd2: elapsed 2815
gcd3: elapsed 2030
gcd4: elapsed 2718
PASS
zhaoxiuzeng@zhaoxiuzeng-VirtualBox:~/develop$ ./gcd -r 500000 -n 10
gcd0: elapsed 9914
gcd1: elapsed 2309
gcd2: elapsed 2779
gcd3: elapsed 2228
gcd4: elapsed 2709
PASS

[akpm@linux-foundation.org: avoid #defining a CONFIG_ variable]
Signed-off-by: Zhaoxiu Zeng <zhaoxiu.zeng@gmail.com>
Signed-off-by: George Spelvin <linux@horizon.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-
- 22 Feb 2016, 1 commit
-
-
By Arnd Bergmann

When CONFIG_DEBUG_ALIGN_RODATA is set, we get a link error:

arch/arm/mm/built-in.o:(.data+0x4bc): undefined reference to `__start_rodata_section_aligned'

However, this combination is useless, as XIP_KERNEL implies that all
the RODATA is already marked readonly, so both CONFIG_DEBUG_RODATA and
CONFIG_DEBUG_ALIGN_RODATA (which depends on the other) are not needed
with XIP_KERNEL, and this patch enforces that using a Kconfig
dependency.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Fixes: 25362dc4 ("ARM: 8501/1: mm: flip priority of CONFIG_DEBUG_RODATA")
Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-
- 08 Feb 2016, 1 commit
-
-
By Kees Cook

The use of CONFIG_DEBUG_RODATA is generally seen as an essential part
of kernel self-protection:
http://www.openwall.com/lists/kernel-hardening/2015/11/30/13
Additionally, its name has grown to mean things beyond just rodata. To
get ARM closer to this, we ought to rearrange the names of the configs
that control how the kernel protects its memory. What was called
CONFIG_ARM_KERNMEM_PERMS is really doing the work that other
architectures call CONFIG_DEBUG_RODATA.

This redefines CONFIG_DEBUG_RODATA to actually do the bulk of the
ROing (and NXing). In the place of the old CONFIG_DEBUG_RODATA, use
CONFIG_DEBUG_ALIGN_RODATA, since that's what the option does: adds
section alignment for making rodata explicitly NX, as arm does not
split the page tables like arm64 does without _ALIGN_RODATA.

Also adds human readable names to the sections so I could more easily
debug my typos, and makes CONFIG_DEBUG_RODATA default "y" for CPU_V7.

Results in /sys/kernel/debug/kernel_page_tables for each config state:

# CONFIG_DEBUG_RODATA is not set
# CONFIG_DEBUG_ALIGN_RODATA is not set

---[ Kernel Mapping ]---
0x80000000-0x80900000     9M  RW x  SHD
0x80900000-0xa0000000   503M  RW NX SHD

CONFIG_DEBUG_RODATA=y
CONFIG_DEBUG_ALIGN_RODATA=y

---[ Kernel Mapping ]---
0x80000000-0x80100000     1M  RW NX SHD
0x80100000-0x80700000     6M  ro x  SHD
0x80700000-0x80a00000     3M  ro NX SHD
0x80a00000-0xa0000000   502M  RW NX SHD

CONFIG_DEBUG_RODATA=y
# CONFIG_DEBUG_ALIGN_RODATA is not set

---[ Kernel Mapping ]---
0x80000000-0x80100000     1M  RW NX SHD
0x80100000-0x80a00000     9M  ro x  SHD
0x80a00000-0xa0000000   502M  RW NX SHD

Signed-off-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Laura Abbott <labbott@fedoraproject.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-
- 18 Dec 2015, 1 commit
-
-
By Arnd Bergmann

Now that realview and integrator always select the correct CPU type
themselves based on the core tiles, there is no need to still have
them user-visible in arch/arm/mm/Kconfig. The ARM925T symbol has been
selected by the only user for many years, so that can be removed along
with the realview and integrator specific ones.

This also solves randconfig build problems on realview.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Cc: Russell King <linux@arm.linux.org.uk>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
-
- 15 Dec 2015, 1 commit
-
-
By Arnd Bergmann

Commit 42c4dafe ("ARM: 6202/1: Do not ARM_DMA_MEM_BUFFERABLE on
RealView boards with L210/L220") changed the generic setting for
ARM_DMA_MEM_BUFFERABLE to be disabled on any RealView kernel that
includes support for any of the ARM11 variations. Doing this was
required to allow doing DMA without a lockup in the l2x0 cache
controller on the RealView platform.

Unfortunately, in a kernel that also contains support for any ARMv7
based machine, the same change makes it impossible to do DMA on ARMv7,
which gets in the way of enabling multiplatform support on RealView.

As confirmed by Catalin Marinas and Linus Walleij, the current code
for RealView that we have in the kernel does not actually perform any
DMA, and this is unlikely to change in the future. Therefore we can
revert 42c4dafe without introducing regressions, but we must never
start using DMA on this platform in the future.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Cc: Russell King <linux@arm.linux.org.uk>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
-
- 27 Oct 2015, 1 commit
-
-
By Masahiro Yamada

This commit adds support for the UniPhier outer cache controller. All
the UniPhier SoCs are equipped with the L2 cache, while the L3 cache
is currently only integrated on the PH1-Pro5 SoC.

Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
Acked-by: Rob Herring <robh@kernel.org>
Signed-off-by: Olof Johansson <olof@lixom.net>
-
- 03 Oct 2015, 1 commit
-
-
By Russell King

Mark Brand reports that a NEEDS_SYSCALL_FOR_CMPXCHG enabled kernel
would open a security hole in the ghost syscall used to implement
cmpxchg, as it fails to validate the user pointer.

However, in order for this option to be enabled, you'd need to be
building a pre-ARMv6 kernel with SMP support. There is only one system
known which fits that, which is an early ARM SMP FPGA implementation
based on the ARM926T.

In any case, the Kconfig does not allow SMP to be enabled for
pre-ARMv6 systems. Moreover, even if NEEDS_SYSCALL_FOR_CMPXCHG were to
be enabled, the kernel would not build as __ARM_NR_cmpxchg64 is not
defined.

The simple answer is to remove the buggy code.

Reported-by: Mark Brand <markbrand@google.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
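The hole described above is the classic one: a syscall dereferencing a
user-supplied pointer without validating it first. As a hedged sketch
(hypothetical function name, not the removed kernel code, which also
had to take care of atomicity), the safe pattern looks like this:

#include <linux/uaccess.h>  /* get_user, put_user */
#include <linux/errno.h>

/*
 * Sketch only: get_user/put_user check that the pointer targets user
 * memory and fault gracefully, returning -EFAULT instead of letting
 * the kernel read or write an arbitrary kernel address on behalf of
 * userspace.
 */
static long cmpxchg_syscall_sketch(unsigned long oldval,
                                   unsigned long newval,
                                   unsigned long __user *ptr)
{
    unsigned long val;

    if (get_user(val, ptr))         /* validates ptr before the read */
        return -EFAULT;
    if (val == oldval && put_user(newval, ptr))
        return -EFAULT;             /* validated again before the write */
    return val;
}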
-
- 25 Jul 2015, 1 commit
-
-
By Russell King

The existing memory barrier macro causes a significant amount of code
to be inserted inline at every call site. For example, in
gpio_set_irq_type(), we have this for mb():

c0344c08:  f57ff04e  dsb  st
c0344c0c:  e59f8190  ldr  r8, [pc, #400]  ; c0344da4 <gpio_set_irq_type+0x230>
c0344c10:  e3590004  cmp  r9, #4
c0344c14:  e5983014  ldr  r3, [r8, #20]
c0344c18:  0a000054  beq  c0344d70 <gpio_set_irq_type+0x1fc>
c0344c1c:  e3530000  cmp  r3, #0
c0344c20:  0a000004  beq  c0344c38 <gpio_set_irq_type+0xc4>
c0344c24:  e50b2030  str  r2, [fp, #-48]  ; 0xffffffd0
c0344c28:  e50bc034  str  ip, [fp, #-52]  ; 0xffffffcc
c0344c2c:  e12fff33  blx  r3
c0344c30:  e51bc034  ldr  ip, [fp, #-52]  ; 0xffffffcc
c0344c34:  e51b2030  ldr  r2, [fp, #-48]  ; 0xffffffd0
c0344c38:  e5963004  ldr  r3, [r6, #4]

Moving the outer_cache_sync() call out of line reduces the impact of
the barrier:

c0344968:  f57ff04e  dsb  st
c034496c:  e35a0004  cmp  sl, #4
c0344970:  e50b2030  str  r2, [fp, #-48]  ; 0xffffffd0
c0344974:  0a000044  beq  c0344a8c <gpio_set_irq_type+0x1b8>
c0344978:  ebf363dd  bl   c001d8f4 <arm_heavy_mb>
c034497c:  e5953004  ldr  r3, [r5, #4]

This should reduce the cache footprint of this code. Overall, this
results in a reduction of around 20K in the kernel size:

    text    data      bss      dec     hex filename
10773970  667392 10369656 21811018 14ccf4a ../build/imx6/vmlinux-old
10754219  667392 10369656 21791267 14c8223 ../build/imx6/vmlinux-new

Another advantage to this approach is that we can finally resolve the
issue of SoCs which have their own memory barrier requirements within
multiplatform kernels (such as OMAP.) Here, the bus interconnects need
additional handling to ensure that writes become visible in the
correct order (eg, between dma_map() operations, writes to DMA
coherent memory, and MMIO accesses.)

Acked-by: Tony Lindgren <tony@atomide.com>
Acked-by: Richard Woodruff <r-woodruff2@ti.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
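The shape of the change, heavily simplified (the arm_heavy_mb name is
taken from the disassembly above; the real kernel version carries extra
config conditionals): the dsb stays inline, while the outer-cache sync
indirection is pushed into one shared out-of-line function.

/* One copy in the whole kernel instead of one per call site. */
void arm_heavy_mb(void)
{
    if (outer_cache.sync)       /* indirect call, e.g. L2C-310 sync */
        outer_cache.sync();
}

/* The inline part of the barrier is now a dsb plus a short call. */
#define mb()    do { dsb(st); arm_heavy_mb(); } while (0)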
-
- 02 Jun 2015, 2 commits
-
-
By Stefan Agner

The Vybrid SoC has 112 peripheral interrupts which can be routed to
the Cortex-M4's NVIC interrupt controller.

Signed-off-by: Stefan Agner <stefan@agner.ch>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-
By Russell King

Re-implement the physical address space switching to be
architecturally compliant. This involves flushing the caches,
disabling the MMU, and only then updating the page tables. Once that
is complete, the system can be brought back up again.

Since we disable the MMU, we need to do the update in assembly code.
Luckily, the entries which need updating are fairly trivial, and are
all set up by the early assembly code. We can merely adjust each entry
by the delta required.

Not only does this fix the code to be architecturally compliant, but
it fixes a couple of bugs too:

1. The original code would only ever update the first L2 entry
   covering a fraction of the kernel; the remainder were left
   untouched.
2. The L2 entries covering the DTB blob were likewise untouched.

This solution fixes up all entries.

Tested-by: Murali Karicheri <m-karicheri2@ti.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
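Conceptually the per-entry fixup is simple; the difficulty is entirely
that it must run with the MMU off. A rough C rendering of the idea
(hypothetical names; as the commit says, the real update is written in
assembly because no C environment is available at that point):

#include <asm/pgtable.h>    /* pmd_t, __pmd, pmd_val */

/*
 * Sketch: every early section mapping is corrected by the distance
 * between the old and the new physical address space base.  Sequence:
 * flush caches, disable MMU, run this fixup, re-enable MMU.
 */
static void phys_switch_fixup_sketch(pmd_t *pmd, unsigned int nr_entries,
                                     unsigned long pa_delta)
{
    unsigned int i;

    for (i = 0; i < nr_entries; i++)
        pmd[i] = __pmd(pmd_val(pmd[i]) + pa_delta);
}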
-
- 28 May 2015, 1 commit
-
-
By Arnd Bergmann

Atmel at91x40 is gone, so we no longer have any platform using either
of these two, and we get randconfig failures on NOMMU kernels if they
accidentally get enabled on something that conflicts with ARMv4T.

This stops short of removing the entire CPU support for now, but as
nothing selects these, it is basically dead code.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-
- 08 May 2015, 1 commit
-
-
By Maxime Coquelin stm32

From the Cortex-M reference manuals, the NVIC supports up to 240
interrupts, so the number of entries in the vectors table is up to
256.

This patch adds a new config flag to specify the number of external
interrupts. Some ifdefs are added in order to respect the natural
alignment without wasting too much space on smaller systems.

Acked-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Acked-by: Stefan Agner <stefan@agner.ch>
Tested-by: Chanwoo Choi <cw00.choi@samsung.com>
Signed-off-by: Maxime Coquelin <mcoquelin.stm32@gmail.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
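As a worked example of the sizing rule (illustrative macro names, not
the kernel's): the table holds the 16 architected exception entries
plus the configured external IRQs, and the Cortex-M vector table base
must be aligned to its byte size rounded up to the next power of two,
which is why picking the count per SoC saves alignment padding.

#include <linux/log2.h>     /* roundup_pow_of_two */

#define NVIC_ARCH_VECTORS   16      /* architected exception entries */
#define NVIC_EXT_IRQS       112     /* e.g. the Vybrid value above   */
#define NVIC_ENTRIES        (NVIC_ARCH_VECTORS + NVIC_EXT_IRQS)

/* 4 bytes per entry: 128 entries -> 512-byte table and alignment,
 * instead of the 1024 bytes a fixed 240-IRQ table would demand. */
#define NVIC_TABLE_ALIGN    roundup_pow_of_two(NVIC_ENTRIES * 4)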
-
- 18 Apr 2015, 1 commit
-
-
By Nathan Lynch

When targeting ARMv3 (e.g. rpc) and enabling CONFIG_VDSO we get:

arch/arm/vdso/datapage.S:13: Error: selected processor does not support ARM mode `bx lr'

One fix considered was to use 'mov pc, lr' for such configurations,
but since the VDSO is unlikely to be useful for pre-v7 hardware, just
make it depend on CONFIG_CPU_V7.

Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Nathan Lynch <nathan_lynch@mentor.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-
- 02 Apr 2015, 1 commit
-
-
By Florian Fainelli

Enabling CPU_DCACHE_DISABLE on an SMP capable system will prevent the
kernel from booting because of the following ldrex instruction in
arch_spin_lock:

(gdb) x/10i $pc
=> 0xc053cfa8 <_raw_spin_lock+4>:  ldrex  r3, [r0]
   0xc053cfac <_raw_spin_lock+8>:  add    r2, r3, #65536  ; 0x10000

which is taken by the very first printk call:

    at /home/fainelli/work/linux/arch/arm/include/asm/spinlock.h:65
    fmt=0xc0637650 "\001\066Booting Linux on physical CPU 0x%x\n",
    args=<incomplete type>) at kernel/printk/printk.c:1525
    fmt=0xc05370f4 <printk+52> "...") at kernel/printk/printk.c:1688

ldrex requires exclusive monitor(s) (local or global) which are no
longer working when the Data cache is disabled in CP15 and will just
hang the CPU there.

Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
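A stripped-down illustration of why the exclusive monitor matters
(sketch, not kernel code): every ldrex/strex sequence, including the
spinlock above, relies on the monitor to detect whether the location
was touched between the load and the store.

/*
 * Atomic increment via exclusives: ldrex marks the address in the
 * exclusive monitor; strex writes only if the mark is still valid,
 * otherwise it returns non-zero and the sequence retries.  With the
 * D-cache disabled the monitor no longer functions, so strex never
 * succeeds and the loop spins forever, which is the hang described.
 */
static inline void atomic_inc_sketch(int *v)
{
    int tmp, res;

    __asm__ __volatile__(
"1: ldrex   %0, [%2]\n"
"   add     %0, %0, #1\n"
"   strex   %1, %0, [%2]\n"
"   teq     %1, #0\n"
"   bne     1b"
    : "=&r" (tmp), "=&r" (res)
    : "r" (v)
    : "cc", "memory");
}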
-
- 28 Mar 2015, 1 commit
-
-
By Nathan Lynch

Allow users to enable the vdso in Kconfig; include the vdso in the
build if CONFIG_VDSO is enabled. Add a 'vdso_install' target.

Signed-off-by: Nathan Lynch <nathan_lynch@mentor.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-
- 18 Feb 2015, 1 commit
-
-
By Paul Bolle

Commit 20e783e3 ("ARM: 8296/1: cache-l2x0: clean up aurora cache
handling") removed the only user of the Kconfig symbol CACHE_PL310.
Setting CACHE_PL310 is now pointless. Remove its Kconfig entry, and
the one select of this symbol.

Signed-off-by: Paul Bolle <pebolle@tiscali.nl>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
-
- 29 Jan 2015, 1 commit
-
-
By Arnd Bergmann

The recently added ARM_KERNMEM_PERMS feature works by manipulating the
kernel page tables, which obviously requires an MMU. Trying to enable
this feature when the MMU is disabled results in a lot of compile
errors in mm/init.c, so let's add a Kconfig dependency to avoid that
case.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-
- 14 Nov 2014, 1 commit
-
-
By Nathan Lynch

The kuser helpers page is not set up on non-MMU systems, so it does
not make sense to allow CONFIG_KUSER_HELPERS to be enabled when
CONFIG_MMU=n. Allowing it to be set on !MMU results in an oops in
set_tls (used in execve and the arm_syscall trap handler):

Unhandled exception: IPSR = 00000005 LR = fffffff1
CPU: 0 PID: 1 Comm: swapper Not tainted 3.18.0-rc1-00041-ga30465a #216
task: 8b838000 ti: 8b82a000 task.ti: 8b82a000
PC is at flush_thread+0x32/0x40
LR is at flush_thread+0x21/0x40
pc : [<8f00157a>]  lr : [<8f001569>]  psr: 4100000b
sp : 8b82be20  ip : 00000000  fp : 8b83c000
r10: 00000001  r9 : 88018c84  r8 : 8bb85000
r7 : 8b838000  r6 : 00000000  r5 : 8bb77400  r4 : 8b82a000
r3 : ffff0ff0  r2 : 8b82a000  r1 : 00000000  r0 : 88020354
xPSR: 4100000b
CPU: 0 PID: 1 Comm: swapper Not tainted 3.18.0-rc1-00041-ga30465a #216
[<8f002bc1>] (unwind_backtrace) from [<8f002033>] (show_stack+0xb/0xc)
[<8f002033>] (show_stack) from [<8f00265b>] (__invalid_entry+0x4b/0x4c)

As best I can tell this issue existed for the set_tls ARM syscall
before commit fbfb872f "ARM: 8148/1: flush TLS and thumbee register
state during exec" consolidated the TLS manipulation code into the
set_tls helper function, but now that we're using it to flush register
state during execve, !MMU users encounter the oops at the first exec.

Prevent CONFIG_MMU=n configurations from enabling
CONFIG_KUSER_HELPERS.

Fixes: fbfb872f (ARM: 8148/1: flush TLS and thumbee register state during exec)
Signed-off-by: Nathan Lynch <nathan_lynch@mentor.com>
Reported-by: Stefan Agner <stefan@agner.ch>
Acked-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Cc: stable@vger.kernel.org
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-
- 12 Nov 2014, 1 commit
-
-
By Linus Walleij

Commit 68f3b875 "ARM: integrator: make the Integrator multiplatform"
broke allmodconfig like this:

>> arch/arm/include/asm/cmpxchg.h:114:2: error: #error "SMP is not supported on this platform"
(etc)

This is due to the fact that as we turned on multiplatform for the
Integrator, this enabled a lot of non-applicable CPUs to be selected
for its multiplatform images, due to a lot of "depends on
ARCH_INTEGRATOR" restrictions in arch/arm/mm/Kconfig for the different
ARM CPU types.

Fix this by restricting the CPU selections to the respective
multiplatform config, which now becomes a subset of the possible
Integrator configurations, or alternatively the non-multiplatform
config plus ARCH_INTEGRATOR, i.e.:

if (!ARCH_MULTIPLATFORM || ARCH_MULTI_Vx) &&
   (ARCH_INTEGRATOR || ARCH_FOO ...)

Since the Integrator has been converted to multiplatform, this will
often take the short form:

if (ARCH_MULTI_Vx && ARCH_INTEGRATOR)

if no other non-multiplatform platforms are eligible.

Reported-by: Build bot for Mark Brown <broonie@kernel.org>
Reported-by: Kbuild test robot <fengguang.wu@intel.com>
Suggested-by: Russell King <linux@arm.linux.org.uk>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
-
- 17 Oct 2014, 2 commits
-
-
By Kees Cook

This introduces CONFIG_DEBUG_RODATA, making kernel text and rodata
read-only. Additionally, this splits rodata from text so that rodata
can also be NX, which may lead to wasted memory when aligning to
SECTION_SIZE.

The read-only areas are made writable during ftrace updates and kexec.

Signed-off-by: Kees Cook <keescook@chromium.org>
Tested-by: Laura Abbott <lauraa@codeaurora.org>
Acked-by: Nicolas Pitre <nico@linaro.org>
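The ftrace/kexec carve-out mentioned above follows a common pattern,
sketched here with the kernel's generic set_memory_* helpers
(illustrative function, not the actual ARM implementation; header
locations for these helpers vary by kernel version):

#include <linux/mm.h>           /* PAGE_MASK */
#include <linux/cacheflush.h>   /* flush_icache_range */

/*
 * Briefly lift the write protection around a text patch, then
 * restore it and flush so the new instruction gets fetched.
 */
static void patch_text_sketch(void *addr, u32 insn)
{
    unsigned long page = (unsigned long)addr & PAGE_MASK;

    set_memory_rw(page, 1);     /* one page back to read-write */
    *(u32 *)addr = insn;        /* apply the patch */
    flush_icache_range((unsigned long)addr,
                       (unsigned long)addr + sizeof(insn));
    set_memory_ro(page, 1);     /* restore read-only */
}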
-
By Kees Cook

Adds CONFIG_ARM_KERNMEM_PERMS to separate the kernel memory regions
into section-sized areas that can have different permissions. Performs
the NX permission changes during free_initmem, so that init memory can
be reclaimed.

This uses section size instead of PMD size to reduce memory lost to
padding on non-LPAE systems.

Based on work by Brad Spengler, Larry Bassel, and Laura Abbott.

Signed-off-by: Kees Cook <keescook@chromium.org>
Tested-by: Laura Abbott <lauraa@codeaurora.org>
Acked-by: Nicolas Pitre <nico@linaro.org>
-
- 18 Jul 2014, 2 commits
-
-
By Russell King

SWP is deprecated in ARMv6 and ARMv7 CPUs, but more importantly, when
running on an SMP system, SWP doesn't guarantee atomicity. This means
it can't really be used (by userspace) for locking purposes in an SMP
environment.

Currently, many configurations leave the SWP emulation disabled, which
means we never know if userspace executes this instruction on ARMv7
hardware. Rectify this by enabling SWP emulation for ARMv7 with SMP
(where we can trap the instruction.)

Tested-by: Tony Lindgren <tony@atomide.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
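The emulation works by leaving SWP undefined so that it traps, then
replaying the swap with an exclusive load/store sequence, which is
SMP-safe. A condensed sketch of the replay step (the real handler also
decodes the trapped instruction and validates the user address):

/* Swap *ptr with newval atomically using exclusives instead of SWP. */
static unsigned long swp_replay_sketch(unsigned long newval,
                                       unsigned long *ptr)
{
    unsigned long oldval;
    int res;

    do {
        __asm__ __volatile__(
        "ldrex  %0, [%3]\n"     /* read old value, set monitor   */
        "strex  %1, %2, [%3]\n" /* write newval if still exclusive */
        : "=&r" (oldval), "=&r" (res)
        : "r" (newval), "r" (ptr)
        : "memory");
    } while (res);              /* retry if the store was interrupted */

    return oldval;
}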
-
By Shawn Guo

Add revision info for PL310_ERRATA_588369 and PL310_ERRATA_727915 to
help people understand if they need to enable the errata for their
hardware.

Signed-off-by: Shawn Guo <shawn.guo@freescale.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-
- 14 Jul 2014, 1 commit
-
-
By Andrew Lunn

Now that all boards have been converted to DT and all the support code
lives in mach-mvebu, we can remove mach-kirkwood.

Signed-off-by: Andrew Lunn <andrew@lunn.ch>
Link: https://lkml.kernel.org/r/1405028192-9623-2-git-send-email-andrew@lunn.ch
Signed-off-by: Jason Cooper <jason@lakedaemon.net>
-
- 19 Jun 2014, 1 commit
-
-
By Russell King

A number of configurations spit out warnings similar to:

warning: (SOC_IMX6 && SOC_VF610 && ARCH_OMAP4) selects PL310_ERRATA_588369 which has unmet direct dependencies (CACHE_L2X0)
warning: (SOC_IMX6 && SOC_VF610 && ARCH_OMAP4) selects PL310_ERRATA_727915 which has unmet direct dependencies (CACHE_L2X0)

Clean up the dependencies here:

* PL310 symbols should only be selected when CACHE_L2X0 is enabled.
* Since the cache-l2x0 code detects PL310 presence at runtime, and we
  will eventually get rid of CACHE_PL310, surround these errata options
  with an if CACHE_L2X0 conditional rather than repeating the
  dependency against each.

Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-
- 30 May 2014, 1 commit
-
-
By Russell King

Move the L2C-310 errata configuration options to arch/arm/mm/Kconfig,
alongside the option which enables support for this device.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-
- 23 Apr 2014, 1 commit
-
-
By Russell King

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-
- 22 Mar 2014, 1 commit
-
-
By Arnd Bergmann

ARCH_RPC no longer supports other CPUs aside from StrongARM110, so we
can make the option implicitly selected by the platform and no longer
give the option of building a kernel without CPU support.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Cc: Russell King <linux@arm.linux.org.uk>
-
- 23 Feb 2014, 1 commit
-
-
By Andrew Lunn

Kirkwood, which uses the Feroceon L2 cache controller, will soon be
moving into mach-mvebu. Allow the cache controller to be built in this
situation.

Signed-off-by: Andrew Lunn <andrew@lunn.ch>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Tested-by: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
Signed-off-by: Jason Cooper <jason@lakedaemon.net>
-
- 10 Feb 2014, 1 commit
-
-
By Will Deacon

CPU_32v6 currently selects CPU_USE_DOMAINS if CPU_V6 and MMU. This is
because ARM 1136 r0pX CPUs lack the v6k extensions, and therefore do
not have hardware thread registers. The lack of these registers
requires the kernel to update the vectors page at each context switch
in order to write a new TLS pointer. This write must be done via the
userspace mapping, since aliasing caches can lead to expensive
flushing when using kmap. Finally, this requires the vectors page to
be mapped r/w for kernel and r/o for user, which has implications for
things like put_user which must trigger CoW appropriately when
targeting user pages.

The upshot of all this is that a v6/v7 kernel makes use of domains to
segregate kernel and user memory accesses. This has the nasty
side-effect of making device mappings executable, which has been
observed to cause subtle bugs on recent cores (e.g. Cortex-A15
performing a speculative instruction fetch from the GIC and acking an
interrupt in the process).

This patch solves this problem by removing the remaining domain
support from ARMv6. A new memory type is added specifically for the
vectors page which allows that page (and only that page) to be mapped
as user r/o, kernel r/w. All other user r/o pages are also mapped as
kernel r/o.

Patch co-developed with Russell King.

Cc: <stable@vger.kernel.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
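The TLS detail driving all of this, in sketch form: on v6k and later
the thread pointer lives in a CP15 register, while on 1136 r0pX it has
to be read from a fixed word in the vectors page (0xffff0ff0), which
is why that page needs a kernel-writable, user-readable mapping in the
first place. The macro name below is hypothetical; the addresses and
the CP15 encoding are the documented ones.

/* How a TLS read differs between the two cases (illustrative). */
static inline unsigned long get_tp_sketch(void)
{
#ifdef HAS_TLS_REGISTER                 /* v6k and later */
    unsigned long tp;

    __asm__("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp));
    return tp;
#else                                   /* e.g. ARM1136 r0pX */
    return *(volatile unsigned long *)0xffff0ff0;
#endif
}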
-
- 20 Oct 2013, 1 commit
-
-
By Ben Dooks

The Kconfig for arch/arm/mach-ixp4xx has a local definition of
ARCH_SUPPORTS_BIG_ENDIAN which could be used elsewhere. This means
that if IXP4xx is selected and this symbol is selected elsewhere, then
a warning is produced.

Clean the following warning up by making the symbol be selected by the
main ARCH_IXP4XX definition and have a common definition in
arch/arm/mm/Kconfig:

warning: (ARCH_xxx) selects ARCH_SUPPORTS_BIG_ENDIAN which has unmet direct dependencies (ARCH_IXP4XX)

Signed-off-by: Ben Dooks <ben.dooks@codethink.co.uk>
-
- 20 Aug 2013, 1 commit
-
-
By Nicolas Pitre

Commit f6f91b0d ("ARM: allow kuser helpers to be removed from the
vector page") introduced some help text for the CONFIG_KUSER_HELPERS
option which is rather contradictory. Let's fix that, and improve it a
little.

Cc: <stable@vger.kernel.org>
Signed-off-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-
- 01 Aug 2013, 1 commit
-
-
By Russell King

Provide a kernel configuration option to allow the kernel user helpers
to be removed from the vector page, thereby preventing their use with
ROP (return oriented programming) attacks. This option is only visible
for CPU architectures which natively support all the operations which
kernel user helpers would normally provide, and must be enabled with
caution.

Cc: <stable@vger.kernel.org>
Acked-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
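What those helpers look like from userspace, and why they make such a
convenient ROP target: they sit at fixed, well-known addresses in every
process. For example, the cmpxchg helper, per the ABI documented in
Documentation/arm/kernel_user_helpers.txt (the wrapper function below
is just for illustration):

/* Kernel-provided helper at a fixed address: returns 0 if *ptr was
 * oldval and has been replaced by newval, non-zero otherwise. */
typedef int (*kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kuser_cmpxchg ((kuser_cmpxchg_t)0xffff0fc0)

static int atomic_cas(volatile int *ptr, int oldval, int newval)
{
    return __kuser_cmpxchg(oldval, newval, ptr) == 0;
}

A binary built for an architecture whose instruction set already covers
these operations never needs the fixed addresses, which is what makes
removing the page safe there.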
-
- 08 Jun 2013, 1 commit
-
-
By Jonathan Austin

Currently CPU_V7 selects CPU_CP15_MMU; however, in the case of a V7
CPU implementing the PMSA, such as the Cortex-R7, the CP15_MMU
operations are not available. Selecting CPU_CP15_MPU is appropriate in
this case.

This patch makes CPU_CP15_MMU dependent on the use of the MMU,
selecting CPU_CP15_MPU for v7 processors when !MMU is chosen.

Signed-off-by: Jonathan Austin <jonathan.austin@arm.com>
-
- 18 Apr 2013, 1 commit
-
-
By Uwe Kleine-König

This patch modifies the required Kconfig and Makefile files to allow
the building of the kernel for Cortex-M3.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Jonathan Austin <jonathan.austin@arm.com>
Tested-by: Jonathan Austin <jonathan.austin@arm.com>
Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
-
- 26 Mar 2013, 1 commit
-
-
By Will Deacon

This is only used by the 740T, which is a v4 core and (by my reading
of the datasheet for the CPU) ignores CRm for the cp15 cache flush
operation, making the v4 cache implementation in cache-v4.S sufficient
for this CPU.

Tested with a 740T core-tile on an Integrator/AP baseboard.

Acked-by: Hyok S. Choi <hyok.choi@samsung.com>
Acked-by: Greg Ungerer <gerg@uclinux.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
-
- 01 Feb 2013, 1 commit
-
-
By Uwe Kleine-König

Some ARM cores are not capable of running in ARM mode (e.g.
Cortex-M3), so these obviously cannot enter the kernel in ARM mode.
Make an exception for them and let them enter in THUMB mode.

Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Message-Id: 1358162123-30113-1-git-send-email-u.kleine-koenig@pengutronix.de
Acked-by: Nicolas Pitre <nico@linaro.org>
-
- 11 Jan 2013, 1 commit
-
-
By Will Deacon

ARM_VIRT_EXT is a property of CPU_V7, but does not adversely affect
other CPUs that can be built into the same kernel image (i.e. ARMv6+).

This patch defaults ARM_VIRT_EXT to y if CPU_V7, allowing hypervisors
such as KVM to make better use of the option and to rely on hyp-mode
boot support.

Signed-off-by: Will Deacon <will.deacon@arm.com>
-
- 21 Nov 2012, 1 commit
-
-
By Gregory CLEMENT

PJ4B is an implementation of the ARMv7 architecture (like the
Cortex-A9, for example) released by Marvell. This CPU is currently
found in Armada 370 and Armada XP SoCs. This patch provides support
for the specific initialization of this CPU.

Signed-off-by: Yehuda Yitschak <yehuday@marvell.com>
Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
-
- 14 Oct 2012, 1 commit
-
-
By Russell King

As suggested by Andrew Morton:

  This is a pet peeve of mine.  Any time there's a long list of items
  (header file inclusions, kconfig entries, array initialisers, etc)
  and someone wants to add a new item, they *always* go and stick it
  at the end of the list.

  Guys, don't do this.  Either put the new item into a randomly-chosen
  position or, probably better, alphanumerically sort the list.

let's sort all our select statements alphanumerically.  This commit
was created by the following perl:

while (<>) {
    while (/\\\s*$/) {
        $_ .= <>;
    }
    undef %selects if /^\s*config\s+/;
    if (/^\s+select\s+(\w+).*/) {
        if (defined($selects{$1})) {
            if ($selects{$1} eq $_) {
                print STDERR "Warning: removing duplicated $1 entry\n";
            } else {
                print STDERR "Error: $1 differently selected\n".
                    "\tOld: $selects{$1}\n".
                    "\tNew: $_\n";
                exit 1;
            }
        }
        $selects{$1} = $_;
        next;
    }
    if (%selects and (/^\s*$/ or /^\s+help/ or /^\s+---help---/
              or /^endif/ or /^endchoice/)) {
        foreach $k (sort (keys %selects)) {
            print "$selects{$k}";
        }
        undef %selects;
    }
    print;
}
if (%selects) {
    foreach $k (sort (keys %selects)) {
        print "$selects{$k}";
    }
}

It found two duplicates:

Warning: removing duplicated S5P_SETUP_MIPIPHY entry
Warning: removing duplicated HARDIRQS_SW_RESEND entry

and they are identical duplicates, hence the shrinkage in the diffstat
of two lines.

We have four testers reporting success of this change (Tony, Stephen,
Linus and Sekhar.)

Acked-by: Jason Cooper <jason@lakedaemon.net>
Acked-by: Tony Lindgren <tony@atomide.com>
Acked-by: Stephen Warren <swarren@nvidia.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Sekhar Nori <nsekhar@ti.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-