1. 01 April 2009, 17 commits
    • loop: add ioctl to resize a loop device · 53d66608
      Authored by J. R. Okajima
      Add the ability to 'resize' the loop device on the fly.
      
      One practical application is a loop file with an XFS filesystem, already
      mounted: you can easily enlarge the file (append some bytes) and then call
      ioctl(fd, LOOP_SET_CAPACITY, new); the loop driver will learn about the
      new size, and you can use xfs_growfs later on, which allows you to use the
      full capacity of the loop file without the need to unmount.
      
      Test app:
      
      #define _GNU_SOURCE	/* feature-test macros must precede any system header */

      #include <linux/fs.h>
      #include <linux/loop.h>
      #include <sys/ioctl.h>
      #include <sys/stat.h>
      #include <sys/types.h>
      #include <assert.h>
      #include <errno.h>
      #include <fcntl.h>
      #include <getopt.h>
      #include <stdio.h>
      #include <stdlib.h>
      #include <unistd.h>
      
      char *me;
      
      void usage(FILE *f)
      {
      	fprintf(f, "%s [options] loop_dev [backend_file]\n"
      		"-s, --set new_size_in_bytes\n"
      		"\twhen backend_file is given, "
      		"it will be expanded too while keeping the original contents\n",
      		me);
      }
      
      struct option opts[] = {
      	{
      		.name		= "set",
      		.has_arg	= 1,
      		.flag		= NULL,
      		.val		= 's'
      	},
      	{
      		.name		= "help",
      		.has_arg	= 0,
      		.flag		= NULL,
      		.val		= 'h'
      	},
      	{
      		/* getopt_long() requires a zero-filled terminating element */
      		.name		= NULL,
      		.has_arg	= 0,
      		.flag		= NULL,
      		.val		= 0
      	}
      };
      
      void err_size(char *name, __u64 old)
      {
      	fprintf(stderr, "size must be larger than current %s (%llu)\n",
      		name, old);
      }
      
      int main(int argc, char *argv[])
      {
      	int fd, err, c, i, bfd;
      	ssize_t ssz;
      	size_t sz;
      	__u64 old, new, append;
      	char a[BUFSIZ];
      	struct stat st;
      	FILE *out;
      	char *backend, *dev;
      
      	err = EINVAL;
      	out = stderr;
      	me = argv[0];
      	new = 0;
      	while ((c = getopt_long(argc, argv, "s:h", opts, &i)) != -1) {
      		switch (c) {
      		case 's':
      			errno = 0;
      			new = strtoull(optarg, NULL, 0);
      			if (errno) {
      				err = errno;
      				perror(optarg);
      				goto out;
      			}
      			break;
      
      		case 'h':
      			err = 0;
      			out = stdout;
      			goto err;
      
      		default:
      			/* getopt_long() has already printed a diagnostic */
      			goto err;
      		}
      	}
      
      	if (optind < argc)
      		dev = argv[optind++];
      	else
      		goto err;
      
      	fd = open(dev, O_RDONLY);
      	if (fd < 0) {
      		err = errno;
      		perror(dev);
      		goto out;
      	}
      
      	err = ioctl(fd, BLKGETSIZE64, &old);
      	if (err) {
      		err = errno;
      		perror("ioctl BLKGETSIZE64");
      		goto out;
      	}
      
      	if (!new) {
      		printf("%llu\n", old);
      		goto out;
      	}
      
      	if (new < old) {
      		err = EINVAL;
      		err_size(dev, old);
      		goto out;
      	}
      
      	if (optind < argc) {
      		backend = argv[optind++];
      		bfd = open(backend, O_WRONLY|O_APPEND);
      		if (bfd < 0) {
      			err = errno;
      			perror(backend);
      			goto out;
      		}
      		err = fstat(bfd, &st);
      		if (err) {
      			err = errno;
      			perror(backend);
      			goto out;
      		}
      		if (new < st.st_size) {
      			err = EINVAL;
      			err_size(backend, st.st_size);
      			goto out;
      		}
      		append = new - st.st_size;
      		memset(a, 0, sizeof(a));	/* pad the backing file with zeroes */
      		sz = sizeof(a);
      		while (append > 0) {
      			if (append < sz)
      				sz = append;
      			ssz = write(bfd, a, sz);
      			if (ssz != sz) {
      				err = errno;
      				perror(backend);
      				goto out;
      			}
      			append -= sz;
      		}
      		err = fsync(bfd);
      		if (err) {
      			err = errno;
      			perror(backend);
      			goto out;
      		}
      	}
      
      	err = ioctl(fd, LOOP_SET_CAPACITY, new);
      	if (err) {
      		err = errno;
      		perror("ioctl LOOP_SET_CAPACITY");
      	}
      	goto out;
      
       err:
      	usage(out);
       out:
      	return err;
      }
      Signed-off-by: J. R. Okajima <hooanon05@yahoo.co.jp>
      Signed-off-by: Tomas Matejicek <tomas@slax.org>
      Cc: <util-linux-ng@vger.kernel.org>
      Cc: Karel Zak <kzak@redhat.com>
      Cc: Jens Axboe <jens.axboe@oracle.com>
      Cc: Al Viro <viro@zeniv.linux.org.uk>
      Cc: Christoph Hellwig <hch@lst.de>
      Cc: Akinobu Mita <akinobu.mita@gmail.com>
      Cc: <linux-api@vger.kernel.org>
      Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
      Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    • pm: rework includes, remove arch ifdefs · a8af7898
      Authored by Magnus Damm
      Make the following header file changes:
      
       - remove arch ifdefs and asm/suspend.h from linux/suspend.h
       - add asm/suspend.h to disk.c (for arch_prepare_suspend())
       - add linux/io.h to swsusp.c (for ioremap())
       - x86 32/64 bit compile fixes
      Signed-off-by: Magnus Damm <damm@igel.co.jp>
      Cc: Paul Mundt <lethal@linux-sh.org>
      Acked-by: N"Rafael J. Wysocki" <rjw@sisk.pl>
      Signed-off-by: NAndrew Morton <akpm@linux-foundation.org>
      Signed-off-by: NLinus Torvalds <torvalds@linux-foundation.org>
    • shmem: writepage directly to swap · 9fab5619
      Authored by Hugh Dickins
      Synopsis: if shmem_writepage calls swap_writepage directly, most shmem
      swap loads benefit, and a catastrophic interaction between SLUB and some
      flash storage is avoided.
      
      shmem_writepage() has always been peculiar in making no attempt to write:
      it has just transferred a shmem page from file cache to swap cache, then
      let that page make its way around the LRU again before being written and
      freed.
      
      The idea was that people use tmpfs because they want those pages to stay
      in RAM; so although we give it an overflow to swap, we should resist
      writing too soon, giving those pages a second chance before they can be
      reclaimed.
      
      That was always questionable, and I've toyed with this patch for years;
      but never had a clear justification to depart from the original design.
      
      It became more questionable in 2.6.28, when the split LRU patches classed
      shmem and tmpfs pages as SwapBacked rather than as file_cache: that in
      itself gives them more resistance to reclaim than normal file pages.  I
      prepared this patch for 2.6.29, but the merge window arrived before I'd
      completed gathering statistics to justify sending it in.
      
      Then while comparing SLQB against SLUB, running SLUB on a laptop I'd
      habitually used with SLAB, I found SLUB to run my tmpfs kbuild swapping
      tests five times slower than SLAB or SLQB - other machines slower too, but
      nowhere near so bad.  Simpler "cp -a" swapping tests showed the same.
      
      slub_max_order=0 brings sanity to all, but heavy swapping is too far from
      normal to justify such a tuning.  The crucial factor on that laptop turns
      out to be that I'm using an SD card for swap.  What happens is this:
      
      By default, SLUB uses order-2 pages for shmem_inode_cache (and many other
      fs inodes), so creating tmpfs files under memory pressure brings lumpy
      reclaim into play.  One subpage of the order is chosen from the bottom of
      the LRU as usual, then the other three are picked out from their random
      positions on the LRUs.
      
      In a tmpfs load, many of these pages will be ones which already passed
      through shmem_writepage, so already have swap allocated.  And though their
      offsets on swap were probably allocated sequentially, now that the pages
      are picked off at random, their swap offsets are scattered.
      
      But the flash storage on the SD card is very sensitive to having its
      writes merged: once swap is written at scattered offsets, performance
      falls apart.  Rotating disk seeks increase too, but less disastrously.
      
      So: stop giving shmem/tmpfs pages a second pass around the LRU, write them
      out to swap as soon as their swap has been allocated.
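
      A hedged, much-simplified sketch of the new behaviour (illustrative only,
      not the real shmem_writepage(): locking and the swap-cache setup are
      omitted, and the helper name below is made up): once the page has been
      moved into the swap cache, hand it straight to swap_writepage() instead of
      unlocking it and letting it circulate the LRU again.

      	/* Illustrative fragment; the real code lives in mm/shmem.c. */
      	static int shmem_writepage_to_swap(struct page *page,
      					   struct writeback_control *wbc)
      	{
      		BUG_ON(!PageSwapCache(page));
      		/* old behaviour, roughly: unlock and leave the page on the LRU */
      		return swap_writepage(page, wbc);
      	}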
      
      It's surely possible to devise an artificial load which runs faster the
      old way, one whose sizing is such that the tmpfs pages on their second
      pass are the ones that are wanted again, and other pages not.
      
      But I've not yet found such a load: on all machines, under the loads I've
      tried, immediate swap_writepage speeds up shmem swapping: especially when
      using the SLUB allocator (and more effectively than slub_max_order=0), but
      also with the others; and it also reduces the variance between runs.  How
      much faster varies widely: a factor of five is rare, 5% is common.
      
      One load which might have suffered: imagine a swapping shmem load in a
      limited mem_cgroup on a machine with plenty of memory.  Before 2.6.29 the
      swapcache was not charged, and such a load would have run quickest with
      the shmem swapcache never written to swap.  But now swapcache is charged,
      so even this load benefits from shmem_writepage directly to swap.
      
      Apologies for the #ifndef CONFIG_SWAP swap_writepage() stub in swap.h:
      it's silly because that will never get called; but refactoring shmem.c
      sensibly according to CONFIG_SWAP will be a separate task.
      Signed-off-by: Hugh Dickins <hugh@veritas.com>
      Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
      Acked-by: Rik van Riel <riel@redhat.com>
      Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
      Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    • vmscan: fix it to take care of nodemask · 327c0e96
      Authored by KAMEZAWA Hiroyuki
      try_to_free_pages() is used for the direct reclaim of up to
      SWAP_CLUSTER_MAX pages when watermarks are low.  The caller to
      alloc_pages_nodemask() can specify a nodemask of nodes that are allowed to
      be used but this is not passed to try_to_free_pages().  This can lead to
      unnecessary reclaim of pages that are unusable by the caller and, in the
      worst case, to allocation failure, because progress is not being made where
      it is needed.
      
      This patch passes the nodemask used for alloc_pages_nodemask() to
      try_to_free_pages().
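
      A hedged sketch of the interface change (prototype approximated from the
      description above, not copied from the patch):

      	/*
      	 * Old (approximate):
      	 *   unsigned long try_to_free_pages(struct zonelist *zonelist,
      	 *                                   int order, gfp_t gfp_mask);
      	 *
      	 * New: the allocator's nodemask is passed down, so direct reclaim
      	 * only targets zones the caller is actually allowed to allocate from.
      	 */
      	unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
      					gfp_t gfp_mask, nodemask_t *nodemask);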
      Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
      Acked-by: Mel Gorman <mel@csn.ul.ie>
      Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
      Cc: Rik van Riel <riel@redhat.com>
      Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
      Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    • nommu: there is no mlock() for NOMMU, so don't provide the bits · 33925b25
      Authored by David Howells
      The mlock() facility does not exist for NOMMU since all mappings are
      effectively locked anyway, so we don't make the bits available when
      they're not useful.
      Signed-off-by: David Howells <dhowells@redhat.com>
      Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
      Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
      Cc: Greg Ungerer <gerg@snapgear.com>
      Cc: Johannes Weiner <hannes@cmpxchg.org>
      Cc: Rik van Riel <riel@redhat.com>
      Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
      Cc: Enrik Berkhan <Enrik.Berkhan@ge.com>
      Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
      Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    • mm: introduce debug_kmap_atomic · f4112de6
      Authored by Akinobu Mita
      x86 has debug_kmap_atomic_prot(), which is an error-checking function for
      kmap_atomic.  It is useful for the other architectures as well, although it
      needs CONFIG_TRACE_IRQFLAGS_SUPPORT.
      
      This patch exposes it to the other architectures.
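
      A hedged sketch of the intended usage pattern (the arch function name below
      is made up, and details of the checker may differ from the real patch):

      	/* Generic checker: warns when the km_type slot does not match the
      	 * current context (process vs. hard/soft interrupt). */
      	void debug_kmap_atomic(enum km_type type);

      	void *my_arch_kmap_atomic(struct page *page, enum km_type type)
      	{
      		pagefault_disable();
      		debug_kmap_atomic(type);	/* shared error checking */
      		/* arch-specific fixmap setup would go here */
      		return page_address(page);	/* placeholder, not the real mapping */
      	}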
      Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
      Cc: Thomas Gleixner <tglx@linutronix.de>
      Cc: Ingo Molnar <mingo@redhat.com>
      Cc: "H. Peter Anvin" <hpa@zytor.com>
      Cc: <linux-arch@vger.kernel.org>
      Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
      Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    • mm: page_mkwrite change prototype to match fault · c2ec175c
      Authored by Nick Piggin
      Change the page_mkwrite prototype to take a struct vm_fault, and return
      VM_FAULT_xxx flags.  There should be no functional change.
      
      This makes it possible to return much more detailed error information to
      the VM (and can also provide more information, e.g. the virtual_address, to
      the driver, which might be important in some special cases).
      
      This is required for a subsequent fix, and it will also make it easier to
      merge page_mkwrite() with fault() in the future.
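
      A hedged sketch of the prototype change in struct vm_operations_struct
      (member layout reconstructed from the description, not quoted from the
      patch):

      	/* Old (approximate):
      	 *   int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
      	 *
      	 * New: fault details arrive in a struct vm_fault (vmf->page,
      	 * vmf->virtual_address, ...) and the handler returns VM_FAULT_xxx
      	 * flags such as VM_FAULT_NOPAGE or VM_FAULT_SIGBUS. */
      	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);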
      Signed-off-by: Nick Piggin <npiggin@suse.de>
      Cc: Chris Mason <chris.mason@oracle.com>
      Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
      Cc: Miklos Szeredi <miklos@szeredi.hu>
      Cc: Steven Whitehouse <swhiteho@redhat.com>
      Cc: Mark Fasheh <mfasheh@suse.com>
      Cc: Joel Becker <joel.becker@oracle.com>
      Cc: Artem Bityutskiy <dedekind@infradead.org>
      Cc: Felix Blyakher <felixb@sgi.com>
      Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
      Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    • mm: enable hashdist by default on 64bit NUMA · c2fdf3a9
      Authored by Anton Blanchard
      On PowerPC we allocate large boot time hashes on node 0.  This leads to an
      imbalance in the free memory, for example on a 64GB box (4 x 16GB nodes):
      
      Free memory:
      Node 0: 97.03%
      Node 1: 98.54%
      Node 2: 98.42%
      Node 3: 98.53%
      
      If we switch to using vmalloc (like ia64 and x86-64) things are more
      balanced:
      
      Free memory:
      Node 0: 97.53%
      Node 1: 98.35%
      Node 2: 98.33%
      Node 3: 98.33%
      
      For many HPC applications we are limited by the free available memory on
      the smallest node, so even though the same amount of memory is used the
      better balancing helps.
      
      Since all 64bit NUMA capable architectures should have sufficient vmalloc
      space, it makes sense to enable it via CONFIG_64BIT.
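
      A hedged sketch of what the default selection might look like after this
      change (reconstructed from the description, not quoted from the patch):

      	/* Spread large boot-time hashes across nodes via vmalloc() on any
      	 * 64-bit NUMA kernel; 32-bit kernels keep the old default because
      	 * their vmalloc space is scarce. */
      	#if defined(CONFIG_NUMA) && defined(CONFIG_64BIT)
      	#define HASHDIST_DEFAULT 1
      	#else
      	#define HASHDIST_DEFAULT 0
      	#endif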
      Signed-off-by: Anton Blanchard <anton@samba.org>
      Acked-by: David S. Miller <davem@davemloft.net>
      Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
      Acked-by: Ralf Baechle <ralf@linux-mips.org>
      Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
      Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
      Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
      Cc: Richard Henderson <rth@twiddle.net>
      Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
      Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    • mm: fix proc_dointvec_userhz_jiffies "breakage" · 704503d8
      Authored by Alexey Dobriyan
      Addresses http://bugzilla.kernel.org/show_bug.cgi?id=9838
      
      On i386, HZ=1000, jiffies_to_clock_t() converts time in a somewhat strange
      way from the user's point of view:
      
      	# echo 500 >/proc/sys/vm/dirty_writeback_centisecs
      	# cat /proc/sys/vm/dirty_writeback_centisecs
      	499
      
      So, we have 5000 jiffies converted to only 499 clock ticks and reported
      back.

      TICK_NSEC = 999848
      ACTHZ = 256039

      (With HZ=1000, the 500 centiseconds written become 5000 jiffies; on the way
      back out each jiffy is counted as TICK_NSEC = 999848 ns, so 5000 jiffies
      amount to about 4.99924 seconds, which truncates to 499 ticks at
      USER_HZ = 100.)
      
      Keeping the in-kernel variable in the units passed from userspace would fix
      the issue, of course, but this probably won't be right for every sysctl.
      
      [akpm@linux-foundation.org: coding-style fixes]
      Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
      Cc: Peter Zijlstra <peterz@infradead.org>
      Cc: Nick Piggin <nickpiggin@yahoo.com.au>
      Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
      Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    • generic debug pagealloc · 6a11f75b
      Authored by Akinobu Mita
      CONFIG_DEBUG_PAGEALLOC is now supported by x86, powerpc, sparc64, and
      s390.  This patch implements it for the rest of the architectures by
      filling the pages with poison byte patterns after free_pages() and
      verifying the poison patterns before alloc_pages().
      
      This generic version cannot detect invalid page accesses immediately, but an
      invalid read may later cause a bad dereference through the poisoned memory,
      and an invalid write can be detected after a long delay, when the poison
      pattern is verified on the next allocation.
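
      A hedged sketch of the poison/verify idea (illustrative only; the real
      implementation also handles reporting, highmem, and partial corruption):

      	#define PAGE_POISON 0xaa	/* assumed poison byte pattern */

      	/* After free_pages(): fill the freed page with the poison pattern. */
      	static void poison_page_data(void *kaddr)
      	{
      		memset(kaddr, PAGE_POISON, PAGE_SIZE);
      	}

      	/* Before alloc_pages() hands the page out again: any byte that no
      	 * longer matches means something wrote to a supposedly free page. */
      	static bool page_data_is_clean(const unsigned char *kaddr)
      	{
      		size_t i;

      		for (i = 0; i < PAGE_SIZE; i++)
      			if (kaddr[i] != PAGE_POISON)
      				return false;
      		return true;
      	}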
      Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
      Cc: <linux-arch@vger.kernel.org>
      Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
      Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    • memdup_user(): introduce · 610a77e0
      Authored by Li Zefan
      I noticed there are many places doing a copy_from_user() right after a
      kmalloc():

              dst = kmalloc(len, GFP_KERNEL);
              if (!dst)
                      return -ENOMEM;
              if (copy_from_user(dst, src, len)) {
                      kfree(dst);
                      return -EFAULT;
              }
      
      memdup_user() is a wrapper of the above code.  With this new function, we
      don't have to write 'len' twice, which can lead to typos/mistakes.  It
      also produces smaller code and kernel text.
      
      A quick grep shows 250+ places where memdup_user() *may* be used.  I'll
      prepare a patchset to do this conversion.
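
      A hedged sketch of what such a helper looks like (simplified; the real one
      may use a different allocator under the hood):

      	void *memdup_user(const void __user *src, size_t len)
      	{
      		void *p;

      		p = kmalloc(len, GFP_KERNEL);
      		if (!p)
      			return ERR_PTR(-ENOMEM);

      		if (copy_from_user(p, src, len)) {
      			kfree(p);
      			return ERR_PTR(-EFAULT);
      		}

      		return p;
      	}

      Callers then check the result with IS_ERR()/PTR_ERR() instead of
      open-coding the two-step allocate-and-copy pattern.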
      Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
      Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
      Cc: Americo Wang <xiyou.wangcong@gmail.com>
      Cc: Alexey Dobriyan <adobriyan@gmail.com>
      Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
      Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    • mm: remove pagevec_swap_free() · d1d74871
      Authored by KOSAKI Motohiro
      pagevec_swap_free() is now unused.
      Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
      Cc: Johannes Weiner <hannes@cmpxchg.org>
      Cc: Rik van Riel <riel@redhat.com>
      Acked-by: Hugh Dickins <hugh@veritas.com>
      Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
      Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    • vfs: add/use account_page_dirtied() · e3a7cca1
      Authored by Edward Shishkin
      Add a helper function, account_page_dirtied(), and use it from two
      callsites.  reiser4 adds a function which introduces a third callsite.
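
      A hedged sketch of the helper's shape (argument order assumed, not quoted
      from the patch):

      	/* Bump the dirty accounting (zone and backing-dev statistics, per-task
      	 * dirty state) for a page that has just been dirtied in @mapping;
      	 * callers such as __set_page_dirty_nobuffers() use this instead of
      	 * open-coding the bookkeeping. */
      	void account_page_dirtied(struct page *page, struct address_space *mapping);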
      
      Signed-off-by: Edward Shishkin <edward.shishkin@gmail.com>
      Cc: Nick Piggin <nickpiggin@yahoo.com.au>
      Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
      Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    • mm: introduce for_each_populated_zone() macro · ee99c71c
      Authored by KOSAKI Motohiro
      Impact: cleanup
      
      In almost all cases, for_each_zone() is used together with populated_zone(),
      because most callers do not need information about memoryless nodes.
      Therefore, for_each_populated_zone() helps simplify the code.
      
      This patch has no functional change.
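
      A hedged sketch of how such a macro can be built on top of for_each_zone()
      (illustrative only; the dangling-else trick keeps it usable as an ordinary
      statement):

      	#define for_each_populated_zone(zone)			\
      		for_each_zone(zone)				\
      			if (!populated_zone(zone))		\
      				; /* skip memoryless zones */	\
      			else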
      
      [akpm@linux-foundation.org: small cleanup]
      Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
      Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
      Cc: Mel Gorman <mel@csn.ul.ie>
      Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
      Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
      Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    • get_mm_hiwater_xxx: trivial, s/define/inline/ · 9de1581e
      Authored by Oleg Nesterov
      Andrew pointed out that the get_mm_hiwater_xxx() macros evaluate their "mm"
      argument twice or three times, so turn them into inline functions.
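
      A hedged before/after sketch of the kind of change involved (reconstructed,
      not quoted from the patch):

      	/* Before: a macro like this evaluates "mm" twice, which is wasteful
      	 * and surprising if the argument ever has side effects.
      	 *
      	 *   #define get_mm_hiwater_rss(mm) \
      	 *	max((mm)->hiwater_rss, get_mm_rss(mm))
      	 */

      	/* After: a static inline evaluates its argument exactly once. */
      	static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
      	{
      		return max(mm->hiwater_rss, get_mm_rss(mm));
      	}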
      Signed-off-by: Oleg Nesterov <oleg@redhat.com>
      Cc: Hugh Dickins <hugh@veritas.com>
      Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
      Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
      Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    • proc tty: remove struct tty_operations::read_proc · 0f043a81
      Authored by Alexey Dobriyan
      struct tty_operations::proc_fops took its place and there is one less
      create_proc_read_entry() user now!
      Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
      Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
      Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
      Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    • proc tty: add struct tty_operations::proc_fops · ae149b6b
      Authored by Alexey Dobriyan
      Used for the gradual switch of TTY drivers away from ->read_proc, which in
      turn helps move the whole tree off ->read_proc.
      
      As a side effect, fix a possible race condition where ->data was initialized
      only after the PDE had been hooked into the proc tree.
      
      ->proc_fops takes precedence over ->read_proc.
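
      A hedged illustration of how a driver would use the new member (the foo_*
      names below are made up):

      	/* hypothetical seq_file open helper provided by the driver */
      	static int foo_tty_proc_open(struct inode *inode, struct file *file);

      	static const struct file_operations foo_tty_proc_fops = {
      		.owner		= THIS_MODULE,
      		.open		= foo_tty_proc_open,
      		.read		= seq_read,
      		.llseek		= seq_lseek,
      		.release	= single_release,
      	};

      	static const struct tty_operations foo_tty_ops = {
      		/* ... the driver's usual tty callbacks ... */
      		.proc_fops	= &foo_tty_proc_fops,	/* preferred over .read_proc */
      	};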
      Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
      Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
      Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
      Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
  2. 31 March 2009, 23 commits