1. 21 May 2016, 1 commit
      lib/gcd.c: use binary GCD algorithm instead of Euclidean · fff7fb0b
      Committed by Zhaoxiu Zeng
      The binary GCD algorithm is based on the following facts:
      	1. If a and b are both even, then gcd(a,b) = 2 * gcd(a/2, b/2)
      	2. If a is even and b is odd, then gcd(a,b) = gcd(a/2, b)
      	3. If a and b are both odd, then gcd(a,b) = gcd((a-b)/2, b) = gcd((a+b)/2, b)
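
      For reference, a direct (unoptimized) C rendering of these three facts,
      plus the trivial termination cases, might look like the sketch below
      (illustrative only; "bgcd" is a made-up name, not the kernel function):

      	/* Recursive binary GCD built directly from facts 1-3 above. */
      	unsigned long bgcd(unsigned long a, unsigned long b)
      	{
      		if (a == b || b == 0)
      			return a;
      		if (a == 0)
      			return b;
      		if (!(a & 1) && !(b & 1))	/* fact 1: both even */
      			return bgcd(a >> 1, b >> 1) << 1;
      		if (!(a & 1))			/* fact 2: a even, b odd */
      			return bgcd(a >> 1, b);
      		if (!(b & 1))			/* fact 2, mirrored */
      			return bgcd(a, b >> 1);
      		/* fact 3: both odd, recurse on half the difference */
      		return a > b ? bgcd((a - b) >> 1, b)
      			     : bgcd((b - a) >> 1, a);
      	}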
      
      Even on x86 machines with reasonable division hardware, the binary
      algorithm runs about 25% faster (80% of the execution time) than the
      division-based Euclidean algorithm.
      
      On platforms like Alpha and ARMv6 where division is a function call to
      emulation code, it's even more significant.
      
      There are two variants of the code here, depending on whether a fast
      __ffs (find least significant set bit) instruction is available.  This
      allows the unpredictable branches in the bit-at-a-time shifting loop to
      be eliminated.
      
      If fast __ffs is not available, the "even/odd" GCD variant is used.
      
      I use the following code to benchmark:
      
      	#include <stdio.h>
      	#include <stdlib.h>
      	#include <stdint.h>
      	#include <string.h>
      	#include <time.h>
      	#include <unistd.h>
      
      	#define swap(a, b) \
      		do { \
      			a ^= b; \
      			b ^= a; \
      			a ^= b; \
      		} while (0)
      
      	unsigned long gcd0(unsigned long a, unsigned long b)
      	{
      		unsigned long r;
      
      		if (a < b) {
      			swap(a, b);
      		}
      
      		if (b == 0)
      			return a;
      
      		while ((r = a % b) != 0) {
      			a = b;
      			b = r;
      		}
      
      		return b;
      	}
      
      	unsigned long gcd1(unsigned long a, unsigned long b)
      	{
      		unsigned long r = a | b;
      
      		if (!a || !b)
      			return r;
      
      		b >>= __builtin_ctzl(b);
      
      		for (;;) {
      			a >>= __builtin_ctzl(a);
      			if (a == b)
      				return a << __builtin_ctzl(r);
      
      			if (a < b)
      				swap(a, b);
      			a -= b;
      		}
      	}
      
      	unsigned long gcd2(unsigned long a, unsigned long b)
      	{
      		unsigned long r = a | b;
      
      		if (!a || !b)
      			return r;
      
      		r &= -r;
      
      		while (!(b & r))
      			b >>= 1;
      
      		for (;;) {
      			while (!(a & r))
      				a >>= 1;
      			if (a == b)
      				return a;
      
      			if (a < b)
      				swap(a, b);
      			a -= b;
      			a >>= 1;
      			if (a & r)
      				a += b;
      			a >>= 1;
      		}
      	}
      
      	unsigned long gcd3(unsigned long a, unsigned long b)
      	{
      		unsigned long r = a | b;
      
      		if (!a || !b)
      			return r;
      
      		b >>= __builtin_ctzl(b);
      		if (b == 1)
      			return r & -r;
      
      		for (;;) {
      			a >>= __builtin_ctzl(a);
      			if (a == 1)
      				return r & -r;
      			if (a == b)
      				return a << __builtin_ctzl(r);
      
      			if (a < b)
      				swap(a, b);
      			a -= b;
      		}
      	}
      
      	unsigned long gcd4(unsigned long a, unsigned long b)
      	{
      		unsigned long r = a | b;
      
      		if (!a || !b)
      			return r;
      
      		r &= -r;
      
      		while (!(b & r))
      			b >>= 1;
      		if (b == r)
      			return r;
      
      		for (;;) {
      			while (!(a & r))
      				a >>= 1;
      			if (a == r)
      				return r;
      			if (a == b)
      				return a;
      
      			if (a < b)
      				swap(a, b);
      			a -= b;
      			a >>= 1;
      			if (a & r)
      				a += b;
      			a >>= 1;
      		}
      	}
      
      	static unsigned long (*gcd_func[])(unsigned long a, unsigned long b) = {
      		gcd0, gcd1, gcd2, gcd3, gcd4,
      	};
      
      	#define TEST_ENTRIES (sizeof(gcd_func) / sizeof(gcd_func[0]))
      
      	#if defined(__x86_64__)
      
      	#define rdtscll(val) do { \
      		unsigned long __a,__d; \
      		__asm__ __volatile__("rdtsc" : "=a" (__a), "=d" (__d)); \
      		(val) = ((unsigned long long)__a) | (((unsigned long long)__d)<<32); \
      	} while(0)
      
      	static unsigned long long benchmark_gcd_func(unsigned long (*gcd)(unsigned long, unsigned long),
      								unsigned long a, unsigned long b, unsigned long *res)
      	{
      		unsigned long long start, end;
      		unsigned long long ret;
      		unsigned long gcd_res;
      
      		rdtscll(start);
      		gcd_res = gcd(a, b);
      		rdtscll(end);
      
      		if (end >= start)
      			ret = end - start;
      		else
      			ret = ~0ULL - start + 1 + end;
      
      		*res = gcd_res;
      		return ret;
      	}
      
      	#else
      
      	static inline struct timespec read_time(void)
      	{
      		struct timespec time;
      		clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time);
      		return time;
      	}
      
      	static inline unsigned long long diff_time(struct timespec start, struct timespec end)
      	{
      		struct timespec temp;
      
      		if ((end.tv_nsec - start.tv_nsec) < 0) {
      			temp.tv_sec = end.tv_sec - start.tv_sec - 1;
      			temp.tv_nsec = 1000000000ULL + end.tv_nsec - start.tv_nsec;
      		} else {
      			temp.tv_sec = end.tv_sec - start.tv_sec;
      			temp.tv_nsec = end.tv_nsec - start.tv_nsec;
      		}
      
      		return temp.tv_sec * 1000000000ULL + temp.tv_nsec;
      	}
      
      	static unsigned long long benchmark_gcd_func(unsigned long (*gcd)(unsigned long, unsigned long),
      								unsigned long a, unsigned long b, unsigned long *res)
      	{
      		struct timespec start, end;
      		unsigned long gcd_res;
      
      		start = read_time();
      		gcd_res = gcd(a, b);
      		end = read_time();
      
      		*res = gcd_res;
      		return diff_time(start, end);
      	}
      
      	#endif
      
      	static inline unsigned long get_rand()
      	{
      		if (sizeof(long) == 8)
      			return (unsigned long)rand() << 32 | rand();
      		else
      			return rand();
      	}
      
      	int main(int argc, char **argv)
      	{
      		unsigned int seed = time(0);
      		int loops = 100;
      		int repeats = 1000;
      		unsigned long (*res)[TEST_ENTRIES];
      		unsigned long long elapsed[TEST_ENTRIES];
      		int i, j, k;
      
      		for (;;) {
      			int opt = getopt(argc, argv, "n:r:s:");
      			/* End condition always first */
      			if (opt == -1)
      				break;
      
      			switch (opt) {
      			case 'n':
      				loops = atoi(optarg);
      				break;
      			case 'r':
      				repeats = atoi(optarg);
      				break;
      			case 's':
      				seed = strtoul(optarg, NULL, 10);
      				break;
      			default:
      				/* You won't actually get here. */
      				break;
      			}
      		}
      
      		res = malloc(sizeof(unsigned long) * TEST_ENTRIES * loops);
      		memset(elapsed, 0, sizeof(elapsed));
      
      		srand(seed);
      		for (j = 0; j < loops; j++) {
      			unsigned long a = get_rand();
      			/* Do we have args? */
      			unsigned long b = argc > optind ? strtoul(argv[optind], NULL, 10) : get_rand();
      			unsigned long long min_elapsed[TEST_ENTRIES];
      			for (k = 0; k < repeats; k++) {
      				for (i = 0; i < TEST_ENTRIES; i++) {
      					unsigned long long tmp = benchmark_gcd_func(gcd_func[i], a, b, &res[j][i]);
      					if (k == 0 || min_elapsed[i] > tmp)
      						min_elapsed[i] = tmp;
      				}
      			}
      			for (i = 0; i < TEST_ENTRIES; i++)
      				elapsed[i] += min_elapsed[i];
      		}
      
      		for (i = 0; i < TEST_ENTRIES; i++)
      			printf("gcd%d: elapsed %llu\n", i, elapsed[i]);
      
      		k = 0;
      		srand(seed);
      		for (j = 0; j < loops; j++) {
      			unsigned long a = get_rand();
      			unsigned long b = argc > optind ? strtoul(argv[optind], NULL, 10) : get_rand();
      			for (i = 1; i < TEST_ENTRIES; i++) {
      				if (res[j][i] != res[j][0])
      					break;
      			}
      			if (i < TEST_ENTRIES) {
      				if (k == 0) {
      					k = 1;
      					fprintf(stderr, "Error:\n");
      				}
      				fprintf(stderr, "gcd(%lu, %lu): ", a, b);
      				for (i = 0; i < TEST_ENTRIES; i++)
      					fprintf(stderr, "%ld%s", res[j][i], i < TEST_ENTRIES - 1 ? ", " : "\n");
      			}
      		}
      
      		if (k == 0)
      			fprintf(stderr, "PASS\n");
      
      		free(res);
      
      		return 0;
      	}
      
      Compiled with "-O2" on "VirtualBox 4.4.0-22-generic #38-Ubuntu x86_64", I got:
      
        zhaoxiuzeng@zhaoxiuzeng-VirtualBox:~/develop$ ./gcd -r 500000 -n 10
        gcd0: elapsed 10174
        gcd1: elapsed 2120
        gcd2: elapsed 2902
        gcd3: elapsed 2039
        gcd4: elapsed 2812
        PASS
        zhaoxiuzeng@zhaoxiuzeng-VirtualBox:~/develop$ ./gcd -r 500000 -n 10
        gcd0: elapsed 9309
        gcd1: elapsed 2280
        gcd2: elapsed 2822
        gcd3: elapsed 2217
        gcd4: elapsed 2710
        PASS
        zhaoxiuzeng@zhaoxiuzeng-VirtualBox:~/develop$ ./gcd -r 500000 -n 10
        gcd0: elapsed 9589
        gcd1: elapsed 2098
        gcd2: elapsed 2815
        gcd3: elapsed 2030
        gcd4: elapsed 2718
        PASS
        zhaoxiuzeng@zhaoxiuzeng-VirtualBox:~/develop$ ./gcd -r 500000 -n 10
        gcd0: elapsed 9914
        gcd1: elapsed 2309
        gcd2: elapsed 2779
        gcd3: elapsed 2228
        gcd4: elapsed 2709
        PASS
      
      [akpm@linux-foundation.org: avoid #defining a CONFIG_ variable]
      Signed-off-by: Zhaoxiu Zeng <zhaoxiu.zeng@gmail.com>
      Signed-off-by: George Spelvin <linux@horizon.com>
      Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
      Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
  2. 22 Feb 2016, 1 commit
  3. 08 Feb 2016, 1 commit
      ARM: 8501/1: mm: flip priority of CONFIG_DEBUG_RODATA · 25362dc4
      Committed by Kees Cook
      The use of CONFIG_DEBUG_RODATA is generally seen as an essential part of
      kernel self-protection:
      http://www.openwall.com/lists/kernel-hardening/2015/11/30/13
      Additionally, its name has grown to mean things beyond just rodata. To
      get ARM closer to this, we ought to rearrange the names of the configs
      that control how the kernel protects its memory. What was called
      CONFIG_ARM_KERNMEM_PERMS is really doing the work that other architectures
      call CONFIG_DEBUG_RODATA.
      
      This redefines CONFIG_DEBUG_RODATA to actually do the bulk of the
      ROing (and NXing). In the place of the old CONFIG_DEBUG_RODATA, use
      CONFIG_DEBUG_ALIGN_RODATA, since that's what the option does: adds
      section alignment for making rodata explicitly NX, as arm does not split
      the page tables like arm64 does without _ALIGN_RODATA.
      
      Also adds human readable names to the sections so I could more easily
      debug my typos, and makes CONFIG_DEBUG_RODATA default "y" for CPU_V7.
      
      Results in /sys/kernel/debug/kernel_page_tables for each config state:
      
       # CONFIG_DEBUG_RODATA is not set
       # CONFIG_DEBUG_ALIGN_RODATA is not set
      
      ---[ Kernel Mapping ]---
      0x80000000-0x80900000           9M     RW x  SHD
      0x80900000-0xa0000000         503M     RW NX SHD
      
       CONFIG_DEBUG_RODATA=y
       CONFIG_DEBUG_ALIGN_RODATA=y
      
      ---[ Kernel Mapping ]---
      0x80000000-0x80100000           1M     RW NX SHD
      0x80100000-0x80700000           6M     ro x  SHD
      0x80700000-0x80a00000           3M     ro NX SHD
      0x80a00000-0xa0000000         502M     RW NX SHD
      
       CONFIG_DEBUG_RODATA=y
       # CONFIG_DEBUG_ALIGN_RODATA is not set
      
      ---[ Kernel Mapping ]---
      0x80000000-0x80100000           1M     RW NX SHD
      0x80100000-0x80a00000           9M     ro x  SHD
      0x80a00000-0xa0000000         502M     RW NX SHD
      Signed-off-by: Kees Cook <keescook@chromium.org>
      Reviewed-by: Laura Abbott <labbott@fedoraproject.org>
      Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
  4. 18 Dec 2015, 1 commit
  5. 15 Dec 2015, 1 commit
      ARM: no longer force unbuffered DMA for realview · 38541bf4
      Committed by Arnd Bergmann
      Commit 42c4dafe ("ARM: 6202/1: Do not ARM_DMA_MEM_BUFFERABLE
      on RealView boards with L210/L220") changed the generic setting for
      ARM_DMA_MEM_BUFFERABLE to be disabled on any Realview kernel that includes
      support for any of the ARM11 variations. Doing this was required to
      allow doing DMA without a lockup in the l2x0 cache controller on the
      Realview platform.
      
      Unfortunately, in a kernel that also contains support for any ARMv7
      based machine, the same change makes it impossible to do DMA on ARMv7,
      which gets in the way of enabling multiplatform support on Realview.
      
      As confirmed by Catalin Marinas and Linus Walleij, the current
      code for Realview that we have in the kernel does not actually
      perform any DMA, and this is unlikely to change in the future.
      Therefore we can revert 42c4dafe without introducing regressions,
      but we must never start using DMA on this platform in the future.
      Signed-off-by: Arnd Bergmann <arnd@arndb.de>
      Cc: Russell King <linux@arm.linux.org.uk>
      Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
  6. 27 Oct 2015, 1 commit
  7. 03 Oct 2015, 1 commit
      ARM: remove user cmpxchg syscall · db695c05
      Committed by Russell King
      Mark Brand reports that a NEEDS_SYSCALL_FOR_CMPXCHG enabled kernel would
      open a security hole in the ghost syscall used to implement cmpxchg, as
      it fails to validate the user pointer.
      
      However, in order for this option to be enabled, you'd need to be
      building a pre-ARMv6 kernel with SMP support.  There is only one system
      known which fits that, which is an early ARM SMP FPGA implementation
      based on the ARM926T.
      
      In any case, the Kconfig does not allow SMP to be enabled for pre-ARMv6
      systems.
      
      Moreover, even if NEEDS_SYSCALL_FOR_CMPXCHG were to be enabled, the
      kernel would not build as __ARM_NR_cmpxchg64 is not defined.
      
      The simple answer is to remove the buggy code.
      Reported-by: Mark Brand <markbrand@google.com>
      Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
  8. 25 Jul 2015, 1 commit
      ARM: move heavy barrier support out of line · f8130906
      Committed by Russell King
      The existing memory barrier macro causes a significant amount of code
      to be inserted inline at every call site.  For example, in
      gpio_set_irq_type(), we have this for mb():
      
      c0344c08:       f57ff04e        dsb     st
      c0344c0c:       e59f8190        ldr     r8, [pc, #400]  ; c0344da4 <gpio_set_irq_type+0x230>
      c0344c10:       e3590004        cmp     r9, #4
      c0344c14:       e5983014        ldr     r3, [r8, #20]
      c0344c18:       0a000054        beq     c0344d70 <gpio_set_irq_type+0x1fc>
      c0344c1c:       e3530000        cmp     r3, #0
      c0344c20:       0a000004        beq     c0344c38 <gpio_set_irq_type+0xc4>
      c0344c24:       e50b2030        str     r2, [fp, #-48]  ; 0xffffffd0
      c0344c28:       e50bc034        str     ip, [fp, #-52]  ; 0xffffffcc
      c0344c2c:       e12fff33        blx     r3
      c0344c30:       e51bc034        ldr     ip, [fp, #-52]  ; 0xffffffcc
      c0344c34:       e51b2030        ldr     r2, [fp, #-48]  ; 0xffffffd0
      c0344c38:       e5963004        ldr     r3, [r6, #4]
      
      Moving the outer_cache_sync() call out of line reduces the impact of
      the barrier:
      
      c0344968:       f57ff04e        dsb     st
      c034496c:       e35a0004        cmp     sl, #4
      c0344970:       e50b2030        str     r2, [fp, #-48]  ; 0xffffffd0
      c0344974:       0a000044        beq     c0344a8c <gpio_set_irq_type+0x1b8>
      c0344978:       ebf363dd        bl      c001d8f4 <arm_heavy_mb>
      c034497c:       e5953004        ldr     r3, [r5, #4]
      
      This should reduce the cache footprint of this code.  Overall, this
      results in a reduction of around 20K in the kernel size:
      
          text    data      bss      dec     hex filename
      10773970  667392 10369656 21811018 14ccf4a ../build/imx6/vmlinux-old
      10754219  667392 10369656 21791267 14c8223 ../build/imx6/vmlinux-new
      
      Another advantage to this approach is that we can finally resolve the
      issue of SoCs which have their own memory barrier requirements within
      multiplatform kernels (such as OMAP.)  Here, the bus interconnects
      need additional handling to ensure that writes become visible in the
      correct order (eg, between dma_map() operations, writes to DMA
      coherent memory, and MMIO accesses.)
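
      In outline, the out-of-line form looks something like the sketch below
      (illustrative only; the exact kernel definitions differ in detail):

      	/* The heavy part of the barrier becomes one out-of-line call. */
      	struct outer_cache_fns {
      		void (*sync)(void);
      		/* ... other outer-cache operations ... */
      	};
      	extern struct outer_cache_fns outer_cache;

      	void arm_heavy_mb(void)
      	{
      		if (outer_cache.sync)
      			outer_cache.sync();
      	}

      	/* Each mb() call site keeps only a dsb plus one branch: */
      	#define mb()	do { __asm__ __volatile__("dsb st" ::: "memory"); \
      			     arm_heavy_mb(); } while (0)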
      Acked-by: Tony Lindgren <tony@atomide.com>
      Acked-by: Richard Woodruff <r-woodruff2@ti.com>
      Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
  9. 02 Jun 2015, 2 commits
  10. 28 May 2015, 1 commit
  11. 08 May 2015, 1 commit
  12. 18 Apr 2015, 1 commit
  13. 02 Apr 2015, 1 commit
      ARM: 8276/1: Make CPU_DCACHE_DISABLE depend on !SMP · e1e2f6e4
      Committed by Florian Fainelli
      Enabling CPU_DCACHE_DISABLE on an SMP-capable system will prevent the
      kernel from booting because of the following ldrex instruction in
      arch_spin_lock:
      
      (gdb) x/10i $pc
      => 0xc053cfa8 <_raw_spin_lock+4>:       ldrex   r3, [r0]
         0xc053cfac <_raw_spin_lock+8>:       add     r2, r3, #65536  ; 0x10000
      
      which is taken by the very first printk call:
      
          at /home/fainelli/work/linux/arch/arm/include/asm/spinlock.h:65
          fmt=0xc0637650 "\001\066Booting Linux on physical CPU 0x%x\n", args=<incomplete type>)
          at kernel/printk/printk.c:1525
          fmt=0xc05370f4 <printk+52> "\024\320\215\342\004\340\235\344\020\320\215\342\036\377/\341\017") at kernel/printk/printk.c:1688
      
      ldrex requires exclusive monitor(s) (local or global), which no longer
      work when the data cache is disabled in CP15, so the CPU will just hang
      there.
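
      For illustration, the kind of ldrex/strex sequence involved looks like
      this simplified sketch (not the exact spinlock source):

      	/* Both ldrex and strex rely on the exclusive monitor, which stops
      	 * functioning once the D-cache is disabled in CP15. */
      	static inline void sketch_spin_lock(unsigned int *lock)
      	{
      		unsigned int newval, tmp;

      		__asm__ __volatile__(
      	"1:	ldrex	%0, [%2]\n"		/* needs exclusive monitor */
      	"	add	%0, %0, %3\n"		/* take the next ticket */
      	"	strex	%1, %0, [%2]\n"		/* needs exclusive monitor */
      	"	teq	%1, #0\n"
      	"	bne	1b"			/* retry if the store failed */
      		: "=&r" (newval), "=&r" (tmp)
      		: "r" (lock), "I" (1 << 16)
      		: "cc", "memory");
      	}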
      Acked-by: Arnd Bergmann <arnd@arndb.de>
      Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
      Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
  14. 28 Mar 2015, 1 commit
  15. 18 Feb 2015, 1 commit
  16. 29 Jan 2015, 1 commit
  17. 14 Nov 2014, 1 commit
      ARM: 8198/1: make kuser helpers depend on MMU · 08b964ff
      Committed by Nathan Lynch
      The kuser helpers page is not set up on non-MMU systems, so it does
      not make sense to allow CONFIG_KUSER_HELPERS to be enabled when
      CONFIG_MMU=n.  Allowing it to be set on !MMU results in an oops in
      set_tls (used in execve and the arm_syscall trap handler):
      
      Unhandled exception: IPSR = 00000005 LR = fffffff1
      CPU: 0 PID: 1 Comm: swapper Not tainted 3.18.0-rc1-00041-ga30465a #216
      task: 8b838000 ti: 8b82a000 task.ti: 8b82a000
      PC is at flush_thread+0x32/0x40
      LR is at flush_thread+0x21/0x40
      pc : [<8f00157a>]    lr : [<8f001569>]    psr: 4100000b
      sp : 8b82be20  ip : 00000000  fp : 8b83c000
      r10: 00000001  r9 : 88018c84  r8 : 8bb85000
      r7 : 8b838000  r6 : 00000000  r5 : 8bb77400  r4 : 8b82a000
      r3 : ffff0ff0  r2 : 8b82a000  r1 : 00000000  r0 : 88020354
      xPSR: 4100000b
      CPU: 0 PID: 1 Comm: swapper Not tainted 3.18.0-rc1-00041-ga30465a #216
      [<8f002bc1>] (unwind_backtrace) from [<8f002033>] (show_stack+0xb/0xc)
      [<8f002033>] (show_stack) from [<8f00265b>] (__invalid_entry+0x4b/0x4c)
      
      As best I can tell, this issue existed for the set_tls ARM syscall
      before commit fbfb872f "ARM: 8148/1: flush TLS and thumbee
      register state during exec" consolidated the TLS manipulation code
      into the set_tls helper function, but now that we're using it to flush
      register state during execve, !MMU users encounter the oops at the
      first exec.
      
      Prevent CONFIG_MMU=n configurations from enabling
      CONFIG_KUSER_HELPERS.
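
      The fix itself is essentially a one-line Kconfig dependency, roughly
      (a sketch, not the verbatim patch):

      	config KUSER_HELPERS
      		bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
      		depends on MMU
      		default y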
      
      Fixes: fbfb872f (ARM: 8148/1: flush TLS and thumbee register state during exec)
      Signed-off-by: Nathan Lynch <nathan_lynch@mentor.com>
      Reported-by: Stefan Agner <stefan@agner.ch>
      Acked-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
      Cc: stable@vger.kernel.org
      Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
  18. 12 Nov 2014, 1 commit
      ARM: fix multiplatform allmodcompile · dc680b98
      Committed by Linus Walleij
      Commit 68f3b875
      "ARM: integrator: make the Integrator multiplatform"
      broke allmodconfig like this:
      
      >> arch/arm/include/asm/cmpxchg.h:114:2: error: #error
      "SMP is not supported on this platform"
      (etc)
      
      This is due to the fact that as we turned on multiplatform
      for the Integrator, this enabled a lot of non-applicable
      CPUs to be selected for its multiplatform images, due to
      a lot of "depends on ARCH_INTEGRATOR" restrictions in
      arch/arm/mm/Kconfig for the different ARM CPU types.
      
      Fix this by restricting the CPU selections to the respective
      multiplatform config, which now becomes a subset of the
      possible Integrator configurations, or alternatively the
      non-multiplatform config plus ARCH_INTEGRATOR, i.e.:
      
      if (!ARCH_MULTIPLATFORM || ARCH_MULTI_Vx) &&
         (ARCH_INTEGRATOR || ARCH_FOO ...)
      
      Since the Integrator has been converted to multiplatform,
      this will often take the short form:
      
      if (ARCH_MULTI_Vx && ARCH_INTEGRATOR)
      
      If no other non-multiplatform platforms are eligible.
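
      A concrete (hypothetical) entry under this scheme could read:

      	config CPU_ARM920T
      		bool "Support ARM920T processor" if ARCH_MULTI_V4T && ARCH_INTEGRATOR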
      Reported-by: Build bot for Mark Brown <broonie@kernel.org>
      Reported-by: Kbuild test robot <fengguang.wu@intel.com>
      Suggested-by: Russell King <linux@arm.linux.org.uk>
      Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
      Signed-off-by: Arnd Bergmann <arnd@arndb.de>
  19. 17 Oct 2014, 2 commits
  20. 18 Jul 2014, 2 commits
  21. 14 Jul 2014, 1 commit
  22. 19 Jun 2014, 1 commit
      ARM: l2c: fix dependencies on PL310 errata symbols · a641f3a6
      Committed by Russell King
      A number of configurations spit out warnings similar to:
      
      warning: (SOC_IMX6 && SOC_VF610 && ARCH_OMAP4) selects PL310_ERRATA_588369 which has unmet direct dependencies (CACHE_L2X0)
      warning: (SOC_IMX6 && SOC_VF610 && ARCH_OMAP4) selects PL310_ERRATA_727915 which has unmet direct dependencies (CACHE_L2X0)
      
      Clean up the dependencies here:
      * PL310 symbols should only be selected when CACHE_L2X0 is enabled.
      * Since the cache-l2x0 code detects PL310 presence at runtime, and we will
        eventually get rid of CACHE_PL310, surround these errata options with an
        if CACHE_L2X0 conditional rather than repeating the dependency against
        each.
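
      After the cleanup, the shape of the Kconfig is roughly as follows
      (a sketch; prompt strings and help texts elided):

      	if CACHE_L2X0

      	config PL310_ERRATA_588369
      		bool "PL310 errata: ..."

      	config PL310_ERRATA_727915
      		bool "PL310 errata: ..."

      	endif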
      Acked-by: Arnd Bergmann <arnd@arndb.de>
      Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
  23. 30 May 2014, 1 commit
  24. 23 Apr 2014, 1 commit
  25. 22 Mar 2014, 1 commit
      ARM: rpc: autoselect CPU_SA110 · fa04e209
      Committed by Arnd Bergmann
      ARCH_RPC no longer supports any CPU other than the StrongARM110, so we
      can make the option implicitly selected by the platform and stop
      offering the choice of building a kernel without CPU support.
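
      Concretely, the platform entry gains the select, along these lines
      (a sketch, not the verbatim patch):

      	config ARCH_RPC
      		...
      		select CPU_SA110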
      Signed-off-by: Arnd Bergmann <arnd@arndb.de>
      Cc: Russell King <linux@arm.linux.org.uk>
  26. 23 Feb 2014, 1 commit
  27. 10 Feb 2014, 1 commit
      ARM: 7954/1: mm: remove remaining domain support from ARMv6 · b6ccb980
      Committed by Will Deacon
      CPU_32v6 currently selects CPU_USE_DOMAINS if CPU_V6 and MMU. This is
      because ARM 1136 r0pX CPUs lack the v6k extensions, and therefore do
      not have hardware thread registers. The lack of these registers requires
      the kernel to update the vectors page at each context switch in order to
      write a new TLS pointer. This write must be done via the userspace
      mapping, since aliasing caches can lead to expensive flushing when using
      kmap. Finally, this requires the vectors page to be mapped r/w for
      kernel and r/o for user, which has implications for things like put_user
      which must trigger CoW appropriately when targeting user pages.
      
      The upshot of all this is that a v6/v7 kernel makes use of domains to
      segregate kernel and user memory accesses. This has the nasty
      side-effect of making device mappings executable, which has been
      observed to cause subtle bugs on recent cores (e.g. Cortex-A15
      performing a speculative instruction fetch from the GIC and acking an
      interrupt in the process).
      
      This patch solves this problem by removing the remaining domain support
      from ARMv6. A new memory type is added specifically for the vectors page
      which allows that page (and only that page) to be mapped as user r/o,
      kernel r/w. All other user r/o pages are mapped also as kernel r/o.
      Patch co-developed with Russell King.
      
      Cc: <stable@vger.kernel.org>
      Signed-off-by: Will Deacon <will.deacon@arm.com>
      Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
  28. 20 Oct 2013, 1 commit
      ARM: fix ARCH_IXP4xx usage of ARCH_SUPPORTS_BIG_ENDIAN · d10d2d48
      Committed by Ben Dooks
      The Kconfig for arch/arm/mach-ixp4xx has a local definition
      of ARCH_SUPPORTS_BIG_ENDIAN which could be used elsewhere.
      This means that if IXP4xx is selected and this symbol is
      selected elsewhere then a warning is produced.
      
      Clean the following warnings up by making the symbol be
      selected by the main ARCH_IXP4XX definition and giving it a
      common definition in arch/arm/mm/Kconfig:
      
      warning: (ARCH_xxx) selects ARCH_SUPPORTS_BIG_ENDIAN which has unmet direct dependencies (ARCH_IXP4XX)
      warning: (ARCH_xxx) selects ARCH_SUPPORTS_BIG_ENDIAN which has unmet direct dependencies (ARCH_IXP4XX)
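
      The resulting shape is roughly (a sketch, not the verbatim patch):

      	# arch/arm/mm/Kconfig: the one common definition
      	config ARCH_SUPPORTS_BIG_ENDIAN
      		bool

      	# platform Kconfig: the platform selects it
      	config ARCH_IXP4XX
      		...
      		select ARCH_SUPPORTS_BIG_ENDIAN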
      Signed-off-by: Ben Dooks <ben.dooks@codethink.co.uk>
  29. 20 Aug 2013, 1 commit
  30. 01 Aug 2013, 1 commit
  31. 08 Jun 2013, 1 commit
      ARM: select CPU_CP15_MMU/MPU appropriately · 66567618
      Committed by Jonathan Austin
      Currently CPU_V7 selects CPU_CP15_MMU, however in the case of a V7 CPU
      implementing the PMSA, such as the Cortex-R7, the CP15_MMU operations are
      not available. Selecting CPU_CP15_MPU is appropriate in this case.
      
      This patch makes CPU_CP15_MMU dependent on the use of the MMU, selecting
      CPU_CP15_MPU for v7 processors when !MMU is chosen.
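
      Expressed as Kconfig, the selection logic is roughly (a sketch, not the
      verbatim patch):

      	config CPU_V7
      		...
      		select CPU_CP15_MMU if MMU
      		select CPU_CP15_MPU if !MMU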
      Signed-off-by: Jonathan Austin <jonathan.austin@arm.com>
  32. 18 Apr 2013, 1 commit
  33. 26 Mar 2013, 1 commit
  34. 01 Feb 2013, 1 commit
  35. 11 Jan 2013, 1 commit
      ARM: virt: hide CONFIG_ARM_VIRT_EXT from user · 651134b0
      Committed by Will Deacon
      ARM_VIRT_EXT is a property of CPU_V7, but does not adversely affect
      other CPUs that can be built into the same kernel image (i.e. ARMv6+).
      
      This patch defaults ARM_VIRT_EXT to y if CPU_V7, allowing hypervisors
      such as KVM to make better use of the option and being able to rely
      on hyp-mode boot support.
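
      The resulting option is a non-user-visible symbol along these lines
      (a sketch, not the verbatim Kconfig):

      	config ARM_VIRT_EXT
      		bool
      		depends on MMU
      		default y if CPU_V7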
      Signed-off-by: Will Deacon <will.deacon@arm.com>
  36. 21 Nov 2012, 1 commit
  37. 14 Oct 2012, 1 commit
      ARM: config: sort select statements alphanumerically · b1b3f49c
      Committed by Russell King
      As suggested by Andrew Morton:
      
        This is a pet peeve of mine.  Any time there's a long list of items
        (header file inclusions, kconfig entries, array initialisers, etc) and
        someone wants to add a new item, they *always* go and stick it at the
        end of the list.
      
        Guys, don't do this.  Either put the new item into a randomly-chosen
        position or, probably better, alphanumerically sort the list.
      
      Let's sort all our select statements alphanumerically.  This commit was
      created by the following perl:
      
      while (<>) {
      	while (/\\\s*$/) {
      		$_ .= <>;
      	}
      	undef %selects if /^\s*config\s+/;
      	if (/^\s+select\s+(\w+).*/) {
      		if (defined($selects{$1})) {
      			if ($selects{$1} eq $_) {
      				print STDERR "Warning: removing duplicated $1 entry\n";
      			} else {
      				print STDERR "Error: $1 differently selected\n".
      					"\tOld: $selects{$1}\n".
      					"\tNew: $_\n";
      				exit 1;
      			}
      		}
      		$selects{$1} = $_;
      		next;
      	}
      	if (%selects and (/^\s*$/ or /^\s+help/ or /^\s+---help---/ or
      			  /^endif/ or /^endchoice/)) {
      		foreach $k (sort (keys %selects)) {
      			print "$selects{$k}";
      		}
      		undef %selects;
      	}
      	print;
      }
      if (%selects) {
      	foreach $k (sort (keys %selects)) {
      		print "$selects{$k}";
      	}
      }
      
      It found two duplicates:
      
      Warning: removing duplicated S5P_SETUP_MIPIPHY entry
      Warning: removing duplicated HARDIRQS_SW_RESEND entry
      
      and they are identical duplicates, hence the shrinkage in the diffstat
      of two lines.
      
      We have four testers reporting success of this change (Tony, Stephen,
      Linus and Sekhar.)
      Acked-by: Jason Cooper <jason@lakedaemon.net>
      Acked-by: Tony Lindgren <tony@atomide.com>
      Acked-by: Stephen Warren <swarren@nvidia.com>
      Acked-by: Linus Walleij <linus.walleij@linaro.org>
      Acked-by: Sekhar Nori <nsekhar@ti.com>
      Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>