1. 30 December 2011, 2 commits
• mm/mempolicy.c: refix mbind_range() vma issue · e26a5114
  KOSAKI Motohiro authored
Commit 8aacc9f5 ("mm/mempolicy.c: fix pgoff in mbind vma merge") is
slightly incorrect.

Why? Consider the following case.
      
      1. map 4 pages of a file at offset 0
      
         [0123]
      
      2. map 2 pages just after the first mapping of the same file but with
         page offset 2
      
         [0123][23]
      
3. mbind() 2 pages from the first mapping at offset 2.
   mbind_range() should treat the new vma as
      
         [0123][23]
           |23|
           mbind vma
      
         but it does
      
         [0123][23]
           |01|
           mbind vma
      
   Oops. It then performs the wrong vma merge and split ([01][0123] or similar).
      
      This patch fixes it.
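
The arithmetic can be illustrated in userspace.  The sketch below is not
the kernel diff; it only models the offset computation, assuming the fix
derives the merge pgoff from the clamped start of the split region
(vmstart) rather than from the raw mbind start:

#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 12;
	unsigned long start    = 0x10000; /* mbind range begins here */
	unsigned long vm_start = 0x12000; /* this vma begins 2 pages later */
	unsigned long vm_pgoff = 2;       /* file page offset of the vma */
	unsigned long vmstart  = vm_start > start ? vm_start : start;

	/* using the raw range start wraps around when start < vm_start */
	unsigned long bad  = vm_pgoff + ((start   - vm_start) >> page_shift);
	/* using the clamped start yields the real in-file page offset */
	unsigned long good = vm_pgoff + ((vmstart - vm_start) >> page_shift);

	printf("bad pgoff=%lu good pgoff=%lu\n", bad, good);
	return 0;
}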
      
      [testcase]
        test result - before the patch
      
      	case4: 126: test failed. expect '2,4', actual '2,2,2'
	case5: passed
      	case6: passed
      	case7: passed
      	case8: passed
      	case_n: 246: test failed. expect '4,2', actual '1,4'
      
      	------------[ cut here ]------------
      	kernel BUG at mm/filemap.c:135!
      	invalid opcode: 0000 [#4] SMP DEBUG_PAGEALLOC
      
	(snip long BUG_ON messages)
      
        test result - after the patch
      
      	case4: passed
	case5: passed
      	case6: passed
      	case7: passed
      	case8: passed
      	case_n: passed
      
        source:  mbind_vma_test.c
      ============================================================
       #include <numaif.h>
       #include <numa.h>
       #include <sys/mman.h>
       #include <stdio.h>
       #include <unistd.h>
       #include <stdlib.h>
       #include <string.h>
      
      static unsigned long pagesize;
      void* mmap_addr;
      struct bitmask *nmask;
      char buf[1024];
      FILE *file;
      char retbuf[10240] = "";
      int mapped_fd;
      
      char *rubysrc = "ruby -e '\
        pid = %d; \
        vstart = 0x%llx; \
        vend = 0x%llx; \
        s = `pmap -q #{pid}`; \
        rary = []; \
        s.each_line {|line|; \
          ary=line.split(\" \"); \
          addr = ary[0].to_i(16); \
          if(vstart <= addr && addr < vend) then \
            rary.push(ary[1].to_i()/4); \
          end; \
        }; \
        print rary.join(\",\"); \
      '";
      
      void init(void)
      {
      	void* addr;
      	char buf[128];
      
      	nmask = numa_allocate_nodemask();
      	numa_bitmask_setbit(nmask, 0);
      
      	pagesize = getpagesize();
      
      	sprintf(buf, "%s", "mbind_vma_XXXXXX");
      	mapped_fd = mkstemp(buf);
      	if (mapped_fd == -1)
      		perror("mkstemp "), exit(1);
      	unlink(buf);
      
      	if (lseek(mapped_fd, pagesize*8, SEEK_SET) < 0)
      		perror("lseek "), exit(1);
      	if (write(mapped_fd, "\0", 1) < 0)
      		perror("write "), exit(1);
      
      	addr = mmap(NULL, pagesize*8, PROT_NONE,
      		    MAP_SHARED, mapped_fd, 0);
      	if (addr == MAP_FAILED)
      		perror("mmap "), exit(1);
      
      	if (mprotect(addr+pagesize, pagesize*6, PROT_READ|PROT_WRITE) < 0)
      		perror("mprotect "), exit(1);
      
      	mmap_addr = addr + pagesize;
      
	/* populate the pages */
      	memset(mmap_addr, 0, pagesize*6);
      }
      
      void fin(void)
      {
      	void* addr = mmap_addr - pagesize;
      	munmap(addr, pagesize*8);
      
      	memset(buf, 0, sizeof(buf));
      	memset(retbuf, 0, sizeof(retbuf));
      }
      
      void mem_bind(int index, int len)
      {
      	int err;
      
      	err = mbind(mmap_addr+pagesize*index, pagesize*len,
      		    MPOL_BIND, nmask->maskp, nmask->size, 0);
      	if (err)
      		perror("mbind "), exit(err);
      }
      
      void mem_interleave(int index, int len)
      {
      	int err;
      
      	err = mbind(mmap_addr+pagesize*index, pagesize*len,
      		    MPOL_INTERLEAVE, nmask->maskp, nmask->size, 0);
      	if (err)
      		perror("mbind "), exit(err);
      }
      
      void mem_unbind(int index, int len)
      {
      	int err;
      
      	err = mbind(mmap_addr+pagesize*index, pagesize*len,
      		    MPOL_DEFAULT, NULL, 0, 0);
      	if (err)
      		perror("mbind "), exit(err);
      }
      
      void Assert(char *expected, char *value, char *name, int line)
      {
      	if (strcmp(expected, value) == 0) {
      		fprintf(stderr, "%s: passed\n", name);
      		return;
      	}
      	else {
      		fprintf(stderr, "%s: %d: test failed. expect '%s', actual '%s'\n",
      			name, line,
      			expected, value);
      //		exit(1);
      	}
      }
      
      /*
            AAAA
          PPPPPPNNNNNN
          might become
          PPNNNNNNNNNN
          case 4 below
      */
      void case4(void)
      {
      	init();
      	sprintf(buf, rubysrc, getpid(), mmap_addr, mmap_addr+pagesize*6);
      
      	mem_bind(0, 4);
      	mem_unbind(2, 2);
      
      	file = popen(buf, "r");
      	fread(retbuf, sizeof(retbuf), 1, file);
      	Assert("2,4", retbuf, "case4", __LINE__);
      
      	fin();
      }
      
      /*
             AAAA
       PPPPPPNNNNNN
       might become
       PPPPPPPPPPNN
       case 5 below
      */
      void case5(void)
      {
      	init();
      	sprintf(buf, rubysrc, getpid(), mmap_addr, mmap_addr+pagesize*6);
      
      	mem_bind(0, 2);
      	mem_bind(2, 2);
      
      	file = popen(buf, "r");
      	fread(retbuf, sizeof(retbuf), 1, file);
      	Assert("4,2", retbuf, "case5", __LINE__);
      
      	fin();
      }
      
      /*
      	    AAAA
      	PPPPNNNNXXXX
      	might become
      	PPPPPPPPPPPP 6
      */
      void case6(void)
      {
      	init();
      	sprintf(buf, rubysrc, getpid(), mmap_addr, mmap_addr+pagesize*6);
      
      	mem_bind(0, 2);
      	mem_bind(4, 2);
      	mem_bind(2, 2);
      
      	file = popen(buf, "r");
      	fread(retbuf, sizeof(retbuf), 1, file);
      	Assert("6", retbuf, "case6", __LINE__);
      
      	fin();
      }
      
      /*
          AAAA
      PPPPNNNNXXXX
      might become
      PPPPPPPPXXXX 7
      */
      void case7(void)
      {
      	init();
      	sprintf(buf, rubysrc, getpid(), mmap_addr, mmap_addr+pagesize*6);
      
      	mem_bind(0, 2);
      	mem_interleave(4, 2);
      	mem_bind(2, 2);
      
      	file = popen(buf, "r");
      	fread(retbuf, sizeof(retbuf), 1, file);
      	Assert("4,2", retbuf, "case7", __LINE__);
      
      	fin();
      }
      
      /*
          AAAA
      PPPPNNNNXXXX
      might become
      PPPPNNNNNNNN 8
      */
      void case8(void)
      {
      	init();
      	sprintf(buf, rubysrc, getpid(), mmap_addr, mmap_addr+pagesize*6);
      
      	mem_bind(0, 2);
      	mem_interleave(4, 2);
      	mem_interleave(2, 2);
      
      	file = popen(buf, "r");
      	fread(retbuf, sizeof(retbuf), 1, file);
      	Assert("2,4", retbuf, "case8", __LINE__);
      
      	fin();
      }
      
      void case_n(void)
      {
      	init();
      	sprintf(buf, rubysrc, getpid(), mmap_addr, mmap_addr+pagesize*6);
      
	/* make redundant mappings [0][1234][34][7] */
      	mmap(mmap_addr + pagesize*4, pagesize*2, PROT_READ|PROT_WRITE,
      	     MAP_FIXED|MAP_SHARED, mapped_fd, pagesize*3);
      
      	/* Expect to do nothing. */
      	mem_unbind(2, 2);
      
      	file = popen(buf, "r");
      	fread(retbuf, sizeof(retbuf), 1, file);
      	Assert("4,2", retbuf, "case_n", __LINE__);
      
      	fin();
      }
      
      int main(int argc, char** argv)
      {
      	case4();
      	case5();
      	case6();
      	case7();
      	case8();
      	case_n();
      
      	return 0;
      }
      =============================================================
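
The test presumably builds against libnuma, e.g. with something like
"gcc -o mbind_vma_test mbind_vma_test.c -lnuma" (the exact flags are a
guess), and it needs the ruby and pmap utilities at run time for the
pmap-parsing helper above.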
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
      Cc: Minchan Kim <minchan.kim@gmail.com>
      Cc: Caspar Zhang <caspar@casparzhang.com>
      Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
      Cc: Christoph Lameter <cl@linux.com>
      Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
      Cc: Mel Gorman <mel@csn.ul.ie>
      Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
      Cc: <stable@vger.kernel.org>		[3.1.x]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
      e26a5114
• gspca: Fix bulk mode cameras no longer working (regression fix) · 757e55c2
  Hans de Goede authored
      The new iso bandwidth calculation code accidentally has broken support
      for bulk mode cameras. This has broken the following drivers:
      finepix, jeilinj, ovfx2, ov534, ov534_9, se401, sq905, sq905c, sq930x,
      stv0680, vicam.
      
This patch fixes this.  The fix was tested with se401, sq905, sq905c,
stv0680 and vicam cameras.
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
      757e55c2
2. 27 December 2011, 5 commits
3. 26 December 2011, 6 commits
4. 25 December 2011, 4 commits
5. 24 December 2011, 9 commits
6. 23 December 2011, 14 commits
• netfilter: xt_connbytes: handle negation correctly · 0354b48f
  Florian Westphal authored
      "! --connbytes 23:42" should match if the packet/byte count is not in range.
      
As there is no explicit "invert match" toggle in the match structure,
      userspace swaps the from and to arguments
      (i.e., as if "--connbytes 42:23" were given).
      
      However, "what <= 23 && what >= 42" will always be false.
      
      Change things so we use "||" in case "from" is larger than "to".
      
      This change may look like it breaks backwards compatibility when "to" is 0.
      However, older iptables binaries will refuse "connbytes 42:0",
      and current releases treat it to mean "! --connbytes 0:42",
      so we should be fine.
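
A small userspace model of the resulting range test (illustrative only,
not the xt_connbytes source; the swapped from/to encoding follows the
description above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool connbytes_match(uint64_t what, uint64_t from, uint64_t to)
{
	if (to >= from)                 /* normal range */
		return what >= from && what <= to;
	/* inverted range: userspace passed from > to */
	return what < to || what > from;
}

int main(void)
{
	/* "! --connbytes 23:42" arrives as from=42, to=23 */
	printf("%d %d %d\n",
	       connbytes_match(10, 42, 23),  /* 1: 10 is outside 23..42 */
	       connbytes_match(30, 42, 23),  /* 0: 30 is inside 23..42 */
	       connbytes_match(50, 42, 23)); /* 1: 50 is outside 23..42 */
	return 0;
}
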
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
      0354b48f
• Btrfs: call d_instantiate after all ops are setup · 08c422c2
  Al Viro authored
      This closes races where btrfs is calling d_instantiate too soon during
      inode creation.  All of the callers of btrfs_add_nondir are updated to
instantiate after the inode is fully set up in memory.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
      08c422c2
• Btrfs: fix worker lock misuse in find_worker · 8d532b2a
  Chris Mason authored
      Dan Carpenter noticed that we were doing a double unlock on the worker
      lock, and sometimes picking a worker thread without the lock held.
      
      This fixes both errors.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
      8d532b2a
• net: relax rcvbuf limits · 0fd7bac6
  Eric Dumazet authored
      skb->truesize might be big even for a small packet.
      
It's even bigger after commit 87fb4b7b (net: more accurate skb
truesize) and with a big MTU.
      
      We should allow queueing at least one packet per receiver, even with a
      low RCVBUF setting.
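
The reasoning can be modelled in userspace (illustrative only; the helper
names and numbers are made up, and this is not the kernel change itself):

#include <stdbool.h>
#include <stdio.h>

static bool admit_strict(unsigned rmem_alloc, unsigned truesize, unsigned rcvbuf)
{
	return rmem_alloc + truesize < rcvbuf;  /* strict: counts the new skb */
}

static bool admit_relaxed(unsigned rmem_alloc, unsigned truesize, unsigned rcvbuf)
{
	(void)truesize;
	return rmem_alloc < rcvbuf;             /* relaxed: queue not yet full */
}

int main(void)
{
	unsigned rcvbuf = 2048, truesize = 4096; /* big truesize, small buffer */

	/* empty queue: the strict check drops the packet, the relaxed one
	 * still admits this first skb */
	printf("strict=%d relaxed=%d\n",
	       admit_strict(0, truesize, rcvbuf),
	       admit_relaxed(0, truesize, rcvbuf));
	return 0;
}
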
Reported-by: Michal Simek <monstr@monstr.eu>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
      0fd7bac6
• rps: fix insufficient bounds checking in store_rps_dev_flow_table_cnt() · a0a129f8
  Xi Wang authored
Setting a large rps_flow_cnt such as (1 << 30) on a 32-bit platform will
cause a kernel oops due to insufficient bounds checking.
      
      	if (count > 1<<30) {
      		/* Enforce a limit to prevent overflow */
      		return -EINVAL;
      	}
      	count = roundup_pow_of_two(count);
      	table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
      
      Note that the macro RPS_DEV_FLOW_TABLE_SIZE(count) is defined as:
      
      	... + (count * sizeof(struct rps_dev_flow))
      
      where sizeof(struct rps_dev_flow) is 8.  (1 << 30) * 8 will overflow
      32 bits.
      
      This patch replaces the magic number (1 << 30) with a symbolic bound.
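
The overflow itself is easy to reproduce in userspace (illustrative
arithmetic only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t count  = 1U << 30;        /* requested flow count */
	uint32_t entry  = 8;               /* sizeof(struct rps_dev_flow) */
	uint32_t size32 = count * entry;   /* 2^33 wraps to 0 in 32 bits */
	uint64_t size64 = (uint64_t)count * entry;

	printf("32-bit size = %u, real size = %llu\n",
	       size32, (unsigned long long)size64);
	return 0;
}
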
Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Xi Wang <xi.wang@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
      a0a129f8
• net: introduce DST_NOPEER dst flag · e688a604
  Eric Dumazet authored
      Chris Boot reported crashes occurring in ipv6_select_ident().
      
      [  461.457562] RIP: 0010:[<ffffffff812dde61>]  [<ffffffff812dde61>]
      ipv6_select_ident+0x31/0xa7
      
      [  461.578229] Call Trace:
      [  461.580742] <IRQ>
      [  461.582870]  [<ffffffff812efa7f>] ? udp6_ufo_fragment+0x124/0x1a2
      [  461.589054]  [<ffffffff812dbfe0>] ? ipv6_gso_segment+0xc0/0x155
      [  461.595140]  [<ffffffff812700c6>] ? skb_gso_segment+0x208/0x28b
      [  461.601198]  [<ffffffffa03f236b>] ? ipv6_confirm+0x146/0x15e
      [nf_conntrack_ipv6]
      [  461.608786]  [<ffffffff81291c4d>] ? nf_iterate+0x41/0x77
      [  461.614227]  [<ffffffff81271d64>] ? dev_hard_start_xmit+0x357/0x543
      [  461.620659]  [<ffffffff81291cf6>] ? nf_hook_slow+0x73/0x111
      [  461.626440]  [<ffffffffa0379745>] ? br_parse_ip_options+0x19a/0x19a
      [bridge]
      [  461.633581]  [<ffffffff812722ff>] ? dev_queue_xmit+0x3af/0x459
      [  461.639577]  [<ffffffffa03747d2>] ? br_dev_queue_push_xmit+0x72/0x76
      [bridge]
      [  461.646887]  [<ffffffffa03791e3>] ? br_nf_post_routing+0x17d/0x18f
      [bridge]
      [  461.653997]  [<ffffffff81291c4d>] ? nf_iterate+0x41/0x77
      [  461.659473]  [<ffffffffa0374760>] ? br_flood+0xfa/0xfa [bridge]
      [  461.665485]  [<ffffffff81291cf6>] ? nf_hook_slow+0x73/0x111
      [  461.671234]  [<ffffffffa0374760>] ? br_flood+0xfa/0xfa [bridge]
      [  461.677299]  [<ffffffffa0379215>] ?
      nf_bridge_update_protocol+0x20/0x20 [bridge]
      [  461.684891]  [<ffffffffa03bb0e5>] ? nf_ct_zone+0xa/0x17 [nf_conntrack]
      [  461.691520]  [<ffffffffa0374760>] ? br_flood+0xfa/0xfa [bridge]
      [  461.697572]  [<ffffffffa0374812>] ? NF_HOOK.constprop.8+0x3c/0x56
      [bridge]
      [  461.704616]  [<ffffffffa0379031>] ?
      nf_bridge_push_encap_header+0x1c/0x26 [bridge]
      [  461.712329]  [<ffffffffa037929f>] ? br_nf_forward_finish+0x8a/0x95
      [bridge]
      [  461.719490]  [<ffffffffa037900a>] ?
      nf_bridge_pull_encap_header+0x1c/0x27 [bridge]
      [  461.727223]  [<ffffffffa0379974>] ? br_nf_forward_ip+0x1c0/0x1d4 [bridge]
      [  461.734292]  [<ffffffff81291c4d>] ? nf_iterate+0x41/0x77
      [  461.739758]  [<ffffffffa03748cc>] ? __br_deliver+0xa0/0xa0 [bridge]
      [  461.746203]  [<ffffffff81291cf6>] ? nf_hook_slow+0x73/0x111
      [  461.751950]  [<ffffffffa03748cc>] ? __br_deliver+0xa0/0xa0 [bridge]
      [  461.758378]  [<ffffffffa037533a>] ? NF_HOOK.constprop.4+0x56/0x56
      [bridge]
      
This is caused by the bridge netfilter's special dst_entry
(fake_rtable), a shared entry to which attaching an inetpeer makes no
sense.

The problem has been present since commit 87c48fa3 (ipv6: make fragment
identifications less predictable).
      
Introduce a DST_NOPEER dst flag and make sure ipv6_select_ident() and
__ip_select_ident() fall back to the 'no peer attached' handling.
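
An illustrative userspace model of the intended fallback (the flag value,
struct layout and id scheme here are made up; only the DST_NOPEER check
mirrors the description above):

#include <stdio.h>

#define DST_NOPEER 0x0040     /* illustrative value, not the kernel's */

struct peer { unsigned int id; };
struct dst  { unsigned int flags; struct peer *peer; };

static unsigned int select_ident(struct dst *dst, unsigned int *fallback_id)
{
	/* a dst marked DST_NOPEER never gets an inetpeer attached */
	if (dst->peer && !(dst->flags & DST_NOPEER))
		return ++dst->peer->id;   /* per-peer identification */
	return ++(*fallback_id);          /* shared 'no peer attached' path */
}

int main(void)
{
	unsigned int fallback_id = 0;
	struct peer p = { 100 };
	struct dst normal      = { 0, &p };
	struct dst fake_rtable = { DST_NOPEER, 0 };  /* bridge's shared dst */

	printf("%u %u\n", select_ident(&normal, &fallback_id),
	       select_ident(&fake_rtable, &fallback_id));
	return 0;
}
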
Reported-by: Chris Boot <bootc@bootc.net>
Tested-by: Chris Boot <bootc@bootc.net>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
      e688a604
• mqprio: Avoid panic if no options are provided · 7838f2ce
  Thomas Graf authored
Userspace may not provide TCA_OPTIONS; in fact, tc currently does not
do so if no arguments are specified on the command line.  Return
EINVAL instead of panicking.
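
A trivial userspace model of the guard (illustrative only; the kernel
code checks a netlink attribute, which is elided here):

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct mq_opt { unsigned char num_tc; };

static int init_like_mqprio(const void *opt, size_t opt_len, struct mq_opt *out)
{
	if (!opt || opt_len < sizeof(*out))
		return -EINVAL;           /* no TCA_OPTIONS: refuse, don't deref */
	memcpy(out, opt, sizeof(*out));
	return 0;
}

int main(void)
{
	struct mq_opt q;
	printf("%d\n", init_like_mqprio(NULL, 0, &q));  /* -EINVAL, no crash */
	return 0;
}
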
Signed-off-by: Thomas Graf <tgraf@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
      7838f2ce
• bridge: provide a mtu() method for fake_dst_ops · a13861a2
  Eric Dumazet authored
      Commit 618f9bc7 (net: Move mtu handling down to the protocol
      depended handlers) forgot the bridge netfilter case, adding a NULL
      dereference in ip_fragment().
Reported-by: Chris Boot <bootc@bootc.net>
CC: Steffen Klassert <steffen.klassert@secunet.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Steffen Klassert <steffen.klassert@secunet.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
      a13861a2
• Merge branch 'for-linus' of git://neil.brown.name/md · ad1fca20
  Linus Torvalds authored
      * 'for-linus' of git://neil.brown.name/md:
        md/bitmap: It is OK to clear bits during recovery.
        md: don't give up looking for spares on first failure-to-add
        md/raid5: ensure correct assessment of drives during degraded reshape.
        md/linear: fix hot-add of devices to linear arrays.
      ad1fca20
• md/bitmap: It is OK to clear bits during recovery. · 961902c0
  NeilBrown authored
      commit d0a4bb49 introduced a
      regression which is annoying but fairly harmless.
      
When writing to an array that is undergoing recovery (a spare is being
integrated into the array), writes will set bits in the bitmap, but
they will not be cleared when the write completes.

For bits covering areas that have not been recovered yet this is not a
problem, as the recovery will clear them.  However, bits set in an
already-recovered region will stay set and never be cleared.
      This doesn't risk data integrity.  The only negatives are:
       - next time there is a crash, more resyncing than necessary will
         be done.
       - the bitmap doesn't look clean, which is confusing.
      
      While an array is recovering we don't want to update the
      'events_cleared' setting in the bitmap but we do still want to clear
      bits that have very recently been set - providing they were written to
      the recovering device.
      
So split those two needs, which previously both depended on 'success',
and always clear the bit if the write went to all devices.
Signed-off-by: NeilBrown <neilb@suse.de>
      961902c0
• md: don't give up looking for spares on first failure-to-add · 60fc1370
  NeilBrown authored
      Before performing a recovery we try to remove any spares that
      might not be working, then add any that might have become relevant.
      
      Currently we abort on the first spare that cannot be added.
      This is a false optimisation.
      It is conceivable that - depending on rules in the personality - a
      subsequent spare might be accepted.
      Also the loop does other things like count the available spares and
      reset the 'recovery_offset' value.
      
      If we abort early these might not happen properly.
      
      So remove the early abort.
      
In particular, if you have an array that is undergoing recovery and
which has extra spares, the recovery may not restart after a reboot,
because the count of 'spares' might end up as zero.
Reported-by: Anssi Hannula <anssi.hannula@iki.fi>
Signed-off-by: NeilBrown <neilb@suse.de>
      60fc1370
• md/raid5: ensure correct assessment of drives during degraded reshape. · 30d7a483
  NeilBrown authored
      While reshaping a degraded array (as when reshaping a RAID0 by first
      converting it to a degraded RAID4) we currently get confused about
      which devices are in_sync.  In most cases we get it right, but in the
      region that is being reshaped we need to treat non-failed devices as
      in-sync when we have the data but haven't actually written it out yet.
Reported-by: Adam Kwolek <adam.kwolek@intel.com>
Signed-off-by: NeilBrown <neilb@suse.de>
      30d7a483
• md/linear: fix hot-add of devices to linear arrays. · 09cd9270
  NeilBrown authored
commit d70ed2e4 broke hot-add to a linear array.

After that commit, metadata is not written to devices until they have
been fully integrated into the array, as determined by
saved_raid_disk.  That patch arranged to clear that field after a
recovery completed.
      
      However for linear arrays, there is no recovery - the integration is
      instantaneous.  So we need to explicitly clear the saved_raid_disk
      field.
Signed-off-by: NeilBrown <neilb@suse.de>
      09cd9270
• sparc64: Fix MSIQ HV call ordering in pci_sun4v_msiq_build_irq(). · 7cc85833
  David S. Miller authored
This silently worked for many years and stopped working on Niagara-T3
machines.

We need to set the MSIQ to VALID before we can set its state to IDLE.
      
      On Niagara-T3, setting the state to IDLE first was causing HV_EINVAL
      errors.  The hypervisor documentation says, rather ambiguously, that
      the MSIQ must be "initialized" before one can set the state.
      
      I previously understood this to mean merely that a successful setconf()
      operation has been performed on the MSIQ, which we have done at this
      point.  But it seems to also mean that it has been set VALID too.
Signed-off-by: David S. Miller <davem@davemloft.net>
      7cc85833