Commit 4c9a44ae authored by Linus Torvalds

Merge branch 'akpm' (Andrew's patch-bomb)

Merge the rest of Andrew's patches for -rc1:
 "A bunch of fixes and misc missed-out-on things.

  That'll do for -rc1.  I still have a batch of IPC patches which still
  have a possible bug report which I'm chasing down."

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (25 commits)
  keys: use keyring_alloc() to create module signing keyring
  keys: fix unreachable code
  sendfile: allows bypassing of notifier events
  SGI-XP: handle non-fatal traps
  fat: fix incorrect function comment
  Documentation: ABI: remove testing/sysfs-devices-node
  proc: fix inconsistent lock state
  linux/kernel.h: fix DIV_ROUND_CLOSEST with unsigned divisors
  memcg: don't register hotcpu notifier from ->css_alloc()
  checkpatch: warn on uapi #includes that #include <uapi/...
  revert "rtc: recycle id when unloading a rtc driver"
  mm: clean up transparent hugepage sysfs error messages
  hfsplus: add error message for the case of failure of sync fs in delayed_sync_fs() method
  hfsplus: rework processing of hfs_btree_write() returned error
  hfsplus: rework processing errors in hfsplus_free_extents()
  hfsplus: avoid crash on failed block map free
  kcmp: include linux/ptrace.h
  drivers/rtc/rtc-imxdi.c: must include <linux/spinlock.h>
  mm: cma: WARN if freed memory is still in use
  exec: do not leave bprm->interp on stack
  ...
What:		/sys/devices/system/node/nodeX/compact
Date:		February 2010
Contact:	Mel Gorman <mel@csn.ul.ie>
Description:
		When this file is written to, all memory within that node
		will be compacted. When it completes, memory will be freed
		into blocks which have as many contiguous pages as possible
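
For illustration (not part of this patch set), a minimal userspace sketch of how
this ABI is typically exercised; the node number and the value written are
arbitrary, since any write triggers compaction:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Any write to the file compacts all memory on that node. */
		int fd = open("/sys/devices/system/node/node0/compact", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "1", 1) < 0)
			perror("write");
		close(fd);
		return 0;
	}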
@@ -446,12 +446,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			possible to determine what the correct size should be.
 			This option provides an override for these situations.
 
-	capability.disable=
-			[SECURITY] Disable capabilities. This would normally
-			be used only if an alternative security model is to be
-			configured. Potentially dangerous and should only be
-			used if you are entirely sure of the consequences.
-
 	ccw_timeout_log	[S390]
 			See Documentation/s390/CommonIO for details.
......
@@ -16,6 +16,7 @@
  */
 
 static char dmi_empty_string[] = " ";
+static u16 __initdata dmi_ver;
 /*
  * Catch too early calls to dmi_check_system():
  */
@@ -118,12 +119,12 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
 	return 0;
 }
 
-static int __init dmi_checksum(const u8 *buf)
+static int __init dmi_checksum(const u8 *buf, u8 len)
 {
 	u8 sum = 0;
 	int a;
 
-	for (a = 0; a < 15; a++)
+	for (a = 0; a < len; a++)
 		sum += buf[a];
 
 	return sum == 0;
@@ -161,8 +162,10 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int index)
 		return;
 
 	for (i = 0; i < 16 && (is_ff || is_00); i++) {
-		if(d[i] != 0x00) is_ff = 0;
-		if(d[i] != 0xFF) is_00 = 0;
+		if (d[i] != 0x00)
+			is_00 = 0;
+		if (d[i] != 0xFF)
+			is_ff = 0;
 	}
 
 	if (is_ff || is_00)
@@ -172,7 +175,15 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, int index)
 	if (!s)
 		return;
 
-	sprintf(s, "%pUB", d);
+	/*
+	 * As of version 2.6 of the SMBIOS specification, the first 3 fields of
+	 * the UUID are supposed to be little-endian encoded. The specification
+	 * says that this is the defacto standard.
+	 */
+	if (dmi_ver >= 0x0206)
+		sprintf(s, "%pUL", d);
+	else
+		sprintf(s, "%pUB", d);
 
 	dmi_ident[slot] = s;
 }
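
As an aside, the %pUB/%pUL distinction above only changes how the first three
UUID fields are byte-ordered when printed. A standalone C sketch that
hand-emulates the two formats for the first eight bytes (illustrative values,
not kernel code):

	#include <stdio.h>
	#include <stdint.h>

	/* Emulate %pUB: bytes printed in buffer order (big-endian view). */
	static void print_uuid_be(const uint8_t *d)
	{
		printf("%02X%02X%02X%02X-%02X%02X-%02X%02X\n",
		       d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7]);
	}

	/* Emulate %pUL: time_low, time_mid and time_hi are stored
	 * little-endian per SMBIOS >= 2.6, so their bytes are swapped. */
	static void print_uuid_le(const uint8_t *d)
	{
		printf("%02X%02X%02X%02X-%02X%02X-%02X%02X\n",
		       d[3], d[2], d[1], d[0], d[5], d[4], d[7], d[6]);
	}

	int main(void)
	{
		const uint8_t d[16] = { 0x12, 0x34, 0x56, 0x78,
					0x9A, 0xBC, 0xDE, 0xF0 };

		print_uuid_be(d);	/* prints 12345678-9ABC-DEF0 */
		print_uuid_le(d);	/* prints 78563412-BC9A-F0DE */
		return 0;
	}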
@@ -404,29 +415,57 @@ static int __init dmi_present(const char __iomem *p)
 	u8 buf[15];
 
 	memcpy_fromio(buf, p, 15);
-	if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) {
+	if (dmi_checksum(buf, 15)) {
 		dmi_num = (buf[13] << 8) | buf[12];
 		dmi_len = (buf[7] << 8) | buf[6];
 		dmi_base = (buf[11] << 24) | (buf[10] << 16) |
 			(buf[9] << 8) | buf[8];
 
-		/*
-		 * DMI version 0.0 means that the real version is taken from
-		 * the SMBIOS version, which we don't know at this point.
-		 */
-		if (buf[14] != 0)
-			printk(KERN_INFO "DMI %d.%d present.\n",
-				buf[14] >> 4, buf[14] & 0xF);
-		else
-			printk(KERN_INFO "DMI present.\n");
 		if (dmi_walk_early(dmi_decode) == 0) {
+			if (dmi_ver)
+				pr_info("SMBIOS %d.%d present.\n",
+					dmi_ver >> 8, dmi_ver & 0xFF);
+			else {
+				dmi_ver = (buf[14] & 0xF0) << 4 |
+					  (buf[14] & 0x0F);
+				pr_info("Legacy DMI %d.%d present.\n",
+					dmi_ver >> 8, dmi_ver & 0xFF);
+			}
 			dmi_dump_ids();
 			return 0;
 		}
 	}
+	dmi_ver = 0;
 	return 1;
 }
+static int __init smbios_present(const char __iomem *p)
+{
+	u8 buf[32];
+	int offset = 0;
+
+	memcpy_fromio(buf, p, 32);
+	if ((buf[5] < 32) && dmi_checksum(buf, buf[5])) {
+		dmi_ver = (buf[6] << 8) + buf[7];
+		/* Some BIOS report weird SMBIOS version, fix that up */
+		switch (dmi_ver) {
+		case 0x021F:
+		case 0x0221:
+			pr_debug("SMBIOS version fixup(2.%d->2.%d)\n",
+				 dmi_ver & 0xFF, 3);
+			dmi_ver = 0x0203;
+			break;
+		case 0x0233:
+			pr_debug("SMBIOS version fixup(2.%d->2.%d)\n", 51, 6);
+			dmi_ver = 0x0206;
+			break;
+		}
+		offset = 16;
+	}
+	return dmi_present(buf + offset);
+}
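
The pr_debug() format above prints the raw minor byte in decimal, so a
hypothetical firmware reporting 0x021F logs as a fixup from "2.31" to "2.3".
A trivial standalone check (values taken from the switch above):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t reported = 0x021F;	/* BIOS claims SMBIOS "2.31" */
		uint16_t fixed = 0x0203;	/* treated as SMBIOS 2.3 */

		printf("SMBIOS version fixup(2.%d->2.%d)\n",
		       reported & 0xFF, fixed & 0xFF);
		return 0;
	}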
void __init dmi_scan_machine(void)
{
char __iomem *p, *q;
@@ -444,7 +483,7 @@ void __init dmi_scan_machine(void)
 		if (p == NULL)
 			goto error;
 
-		rc = dmi_present(p + 0x10); /* offset of _DMI_ string */
+		rc = smbios_present(p);
 		dmi_iounmap(p, 32);
 		if (!rc) {
 			dmi_available = 1;
@@ -462,7 +501,12 @@ void __init dmi_scan_machine(void)
 			goto error;
 
 		for (q = p; q < p + 0x10000; q += 16) {
-			rc = dmi_present(q);
+			if (memcmp(q, "_SM_", 4) == 0 && q - p <= 0xFFE0)
+				rc = smbios_present(q);
+			else if (memcmp(q, "_DMI_", 5) == 0)
+				rc = dmi_present(q);
+			else
+				continue;
 			if (!rc) {
 				dmi_available = 1;
 				dmi_iounmap(p, 0x10000);
......
@@ -53,6 +53,10 @@
 #include <linux/kthread.h>
 #include "xpc.h"
 
+#ifdef CONFIG_X86_64
+#include <asm/traps.h>
+#endif
+
/* define two XPC debug device structures to be used with dev_dbg() et al */
struct device_driver xpc_dbg_name = {
@@ -1079,6 +1083,9 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
 	return NOTIFY_DONE;
 }
 
+/* Used to only allow one cpu to complete disconnect */
+static unsigned int xpc_die_disconnecting;
+
 /*
  * Notify other partitions to deactivate from us by first disengaging from all
  * references to our memory.
@@ -1092,6 +1099,9 @@ xpc_die_deactivate(void)
 	long keep_waiting;
 	long wait_to_print;
 
+	if (cmpxchg(&xpc_die_disconnecting, 0, 1))
+		return;
+
 	/* keep xpc_hb_checker thread from doing anything (just in case) */
 	xpc_exiting = 1;
@@ -1159,7 +1169,7 @@ xpc_die_deactivate(void)
  * about the lack of a heartbeat.
  */
 static int
-xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
+xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
 {
 #ifdef CONFIG_IA64		/* !!! temporary kludge */
 	switch (event) {
@@ -1191,7 +1201,27 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
 		break;
 	}
 #else
-	xpc_die_deactivate();
+	struct die_args *die_args = _die_args;
+
+	switch (event) {
+	case DIE_TRAP:
+		if (die_args->trapnr == X86_TRAP_DF)
+			xpc_die_deactivate();
+
+		if (((die_args->trapnr == X86_TRAP_MF) ||
+		     (die_args->trapnr == X86_TRAP_XF)) &&
+		    !user_mode_vm(die_args->regs))
+			xpc_die_deactivate();
+
+		break;
+	case DIE_INT3:
+	case DIE_DEBUG:
+		break;
+	case DIE_OOPS:
+	case DIE_GPF:
+	default:
+		xpc_die_deactivate();
+	}
 #endif
 
 	return NOTIFY_DONE;
......
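
The xpc_die_disconnecting guard is a one-shot pattern: only the first CPU to
move the flag from 0 to 1 proceeds with the disconnect. A userspace analogue
using GCC atomic builtins (the kernel's cmpxchg() itself is arch-specific),
for illustration only:

	#include <stdio.h>

	static unsigned int die_disconnecting;

	/* Returns 1 only for the single caller that wins the 0 -> 1 race. */
	static int try_become_disconnector(void)
	{
		return __sync_val_compare_and_swap(&die_disconnecting, 0, 1) == 0;
	}

	int main(void)
	{
		printf("first call wins: %d\n", try_become_disconnector());
		printf("second call wins: %d\n", try_become_disconnector());
		return 0;
	}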
@@ -244,7 +244,6 @@ void rtc_device_unregister(struct rtc_device *rtc)
 		rtc_proc_del_device(rtc);
 		device_unregister(&rtc->dev);
 		rtc->ops = NULL;
-		ida_simple_remove(&rtc_ida, rtc->id);
 		mutex_unlock(&rtc->ops_lock);
 		put_device(&rtc->dev);
 	}
......
@@ -36,6 +36,7 @@
 #include <linux/platform_device.h>
 #include <linux/rtc.h>
 #include <linux/sched.h>
+#include <linux/spinlock.h>
 #include <linux/workqueue.h>
 #include <linux/of.h>
......
@@ -172,7 +172,10 @@ static int load_misc_binary(struct linux_binprm *bprm)
 		goto _error;
 	bprm->argc ++;
 
-	bprm->interp = iname;	/* for binfmt_script */
+	/* Update interp in case binfmt_script needs it. */
+	retval = bprm_change_interp(iname, bprm);
+	if (retval < 0)
+		goto _error;
 
 	interp_file = open_exec (iname);
 	retval = PTR_ERR (interp_file);
......
@@ -80,7 +80,9 @@ static int load_script(struct linux_binprm *bprm)
 	retval = copy_strings_kernel(1, &i_name, bprm);
 	if (retval) return retval;
 	bprm->argc++;
-	bprm->interp = interp;
+	retval = bprm_change_interp(interp, bprm);
+	if (retval < 0)
+		return retval;
 
 	/*
 	 * OK, now restart the process with the interpreter's dentry.
......
@@ -1175,9 +1175,24 @@ void free_bprm(struct linux_binprm *bprm)
 		mutex_unlock(&current->signal->cred_guard_mutex);
 		abort_creds(bprm->cred);
 	}
+	/* If a binfmt changed the interp, free it. */
+	if (bprm->interp != bprm->filename)
+		kfree(bprm->interp);
 	kfree(bprm);
 }
 
+int bprm_change_interp(char *interp, struct linux_binprm *bprm)
+{
+	/* If a binfmt changed the interp, free it first. */
+	if (bprm->interp != bprm->filename)
+		kfree(bprm->interp);
+	bprm->interp = kstrdup(interp, GFP_KERNEL);
+	if (!bprm->interp)
+		return -ENOMEM;
+	return 0;
+}
+EXPORT_SYMBOL(bprm_change_interp);
+
 /*
  * install the new credentials for this executable
  */
......
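
bprm_change_interp() fixes the "bprm->interp on stack" bug by giving the field
clear ownership: the previous heap copy (if any) is freed and the new string
is duplicated, so no caller stack buffer can dangle. A hypothetical userspace
analogue of the same pattern:

	#include <errno.h>
	#include <stdlib.h>
	#include <string.h>

	struct binprm {
		char *filename;	/* not owned by us */
		char *interp;	/* owned iff it no longer points at filename */
	};

	/* Hypothetical analogue of bprm_change_interp(). */
	static int change_interp(struct binprm *b, const char *interp)
	{
		/* drop the previous heap copy, if any */
		if (b->interp != b->filename)
			free(b->interp);
		/* keep a heap copy so no stack buffer can dangle */
		b->interp = strdup(interp);
		return b->interp ? 0 : -ENOMEM;
	}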
@@ -461,8 +461,7 @@ static int fat_parse_short(struct super_block *sb,
 }
 
 /*
- * Return values: negative -> error, 0 -> not found, positive -> found,
- * value is the total amount of slots, including the shortname entry.
+ * Return values: negative -> error/not found, 0 -> found.
  */
 int fat_search_long(struct inode *inode, const unsigned char *name,
 		    int name_len, struct fat_slot_info *sinfo)
@@ -1255,7 +1254,7 @@ int fat_add_entries(struct inode *dir, void *slots, int nr_slots,
 	sinfo->nr_slots = nr_slots;
 
-	/* First stage: search free direcotry entries */
+	/* First stage: search free directory entries */
 	free_slots = nr_bhs = 0;
 	bh = prev = NULL;
 	pos = 0;
......
@@ -1344,7 +1344,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
 	sbi->dir_entries = get_unaligned_le16(&b->dir_entries);
 	if (sbi->dir_entries & (sbi->dir_per_block - 1)) {
 		if (!silent)
-			fat_msg(sb, KERN_ERR, "bogus directroy-entries per block"
+			fat_msg(sb, KERN_ERR, "bogus directory-entries per block"
 				" (%u)", sbi->dir_entries);
 		brelse(bh);
 		goto out_invalid;
......
@@ -135,6 +135,10 @@ int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster)
 		}
 		if (ret < 0)
 			return ret;
+		/*
+		 * FIXME:Although we can add this cache, fat_cache_add() is
+		 * assuming to be called after linear search with fat_cache_id.
+		 */
 //		fat_cache_add(inode, new_fclus, new_dclus);
 	} else {
 		MSDOS_I(inode)->i_start = new_dclus;
......
@@ -176,12 +176,14 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
 	dprint(DBG_BITMAP, "block_free: %u,%u\n", offset, count);
 
 	/* are all of the bits in range? */
 	if ((offset + count) > sbi->total_blocks)
-		return -2;
+		return -ENOENT;
 
 	mutex_lock(&sbi->alloc_mutex);
 	mapping = sbi->alloc_file->i_mapping;
 	pnr = offset / PAGE_CACHE_BITS;
 	page = read_mapping_page(mapping, pnr, NULL);
+	if (IS_ERR(page))
+		goto kaboom;
 	pptr = kmap(page);
 	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
 	end = pptr + PAGE_CACHE_BITS / 32;
@@ -214,6 +216,8 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
 		set_page_dirty(page);
 		kunmap(page);
 		page = read_mapping_page(mapping, ++pnr, NULL);
+		if (IS_ERR(page))
+			goto kaboom;
 		pptr = kmap(page);
 		curr = pptr;
 		end = pptr + PAGE_CACHE_BITS / 32;
@@ -232,4 +236,11 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
 	mutex_unlock(&sbi->alloc_mutex);
 
 	return 0;
+
+kaboom:
+	printk(KERN_CRIT "hfsplus: unable to mark blocks free: error %ld\n",
+	       PTR_ERR(page));
+	mutex_unlock(&sbi->alloc_mutex);
+
+	return -EIO;
 }
@@ -159,7 +159,7 @@ void hfs_btree_close(struct hfs_btree *tree)
 	kfree(tree);
 }
 
-void hfs_btree_write(struct hfs_btree *tree)
+int hfs_btree_write(struct hfs_btree *tree)
 {
 	struct hfs_btree_header_rec *head;
 	struct hfs_bnode *node;
@@ -168,7 +168,7 @@ void hfs_btree_write(struct hfs_btree *tree)
 	node = hfs_bnode_find(tree, 0);
 	if (IS_ERR(node))
 		/* panic? */
-		return;
+		return -EIO;
 
 	/* Load the header */
 	page = node->page[0];
 	head = (struct hfs_btree_header_rec *)(kmap(page) +
@@ -186,6 +186,7 @@ void hfs_btree_write(struct hfs_btree *tree)
 	kunmap(page);
 	set_page_dirty(page);
 	hfs_bnode_put(node);
+	return 0;
 }
static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
......
@@ -329,6 +329,7 @@ static int hfsplus_free_extents(struct super_block *sb,
 {
 	u32 count, start;
 	int i;
+	int err = 0;
 
 	hfsplus_dump_extent(extent);
 	for (i = 0; i < 8; extent++, i++) {
@@ -345,18 +346,33 @@ static int hfsplus_free_extents(struct super_block *sb,
 	for (;;) {
 		start = be32_to_cpu(extent->start_block);
 		if (count <= block_nr) {
-			hfsplus_block_free(sb, start, count);
+			err = hfsplus_block_free(sb, start, count);
+			if (err) {
+				printk(KERN_ERR "hfs: can't free extent\n");
+				dprint(DBG_EXTENT, " start: %u count: %u\n",
+					start, count);
+			}
 			extent->block_count = 0;
 			extent->start_block = 0;
 			block_nr -= count;
 		} else {
 			count -= block_nr;
-			hfsplus_block_free(sb, start + count, block_nr);
+			err = hfsplus_block_free(sb, start + count, block_nr);
+			if (err) {
+				printk(KERN_ERR "hfs: can't free extent\n");
+				dprint(DBG_EXTENT, " start: %u count: %u\n",
+					start, count);
+			}
 			extent->block_count = cpu_to_be32(count);
 			block_nr = 0;
 		}
-		if (!block_nr || !i)
-			return 0;
+		if (!block_nr || !i) {
+			/*
+			 * Try to free all extents and
+			 * return only last error
+			 */
+			return err;
+		}
 		i--;
 		extent--;
 		count = be32_to_cpu(extent->block_count);
......
@@ -335,7 +335,7 @@ int hfsplus_block_free(struct super_block *, u32, u32);
 /* btree.c */
 struct hfs_btree *hfs_btree_open(struct super_block *, u32);
 void hfs_btree_close(struct hfs_btree *);
-void hfs_btree_write(struct hfs_btree *);
+int hfs_btree_write(struct hfs_btree *);
 struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *);
 void hfs_bmap_free(struct hfs_bnode *);
......
@@ -127,8 +127,14 @@ static int hfsplus_system_write_inode(struct inode *inode)
 		hfsplus_mark_mdb_dirty(inode->i_sb);
 	}
 	hfsplus_inode_write_fork(inode, fork);
-	if (tree)
-		hfs_btree_write(tree);
+	if (tree) {
+		int err = hfs_btree_write(tree);
+		if (err) {
+			printk(KERN_ERR "hfs: b-tree write err: %d, ino %lu\n",
+			       err, inode->i_ino);
+			return err;
+		}
+	}
 	return 0;
 }
@@ -226,6 +232,7 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
 static void delayed_sync_fs(struct work_struct *work)
 {
+	int err;
 	struct hfsplus_sb_info *sbi;
 
 	sbi = container_of(work, struct hfsplus_sb_info, sync_work.work);
@@ -234,7 +241,9 @@ static void delayed_sync_fs(struct work_struct *work)
 	sbi->work_queued = 0;
 	spin_unlock(&sbi->work_lock);
 
-	hfsplus_sync_fs(sbi->alloc_file->i_sb, 1);
+	err = hfsplus_sync_fs(sbi->alloc_file->i_sb, 1);
+	if (err)
+		printk(KERN_ERR "hfs: delayed sync fs err %d\n", err);
 }
void hfsplus_mark_mdb_dirty(struct super_block *sb)
......
@@ -352,18 +352,18 @@ int proc_alloc_inum(unsigned int *inum)
 	if (!ida_pre_get(&proc_inum_ida, GFP_KERNEL))
 		return -ENOMEM;
 
-	spin_lock(&proc_inum_lock);
+	spin_lock_bh(&proc_inum_lock);
 	error = ida_get_new(&proc_inum_ida, &i);
-	spin_unlock(&proc_inum_lock);
+	spin_unlock_bh(&proc_inum_lock);
 	if (error == -EAGAIN)
 		goto retry;
 	else if (error)
 		return error;
 
 	if (i > UINT_MAX - PROC_DYNAMIC_FIRST) {
-		spin_lock(&proc_inum_lock);
+		spin_lock_bh(&proc_inum_lock);
 		ida_remove(&proc_inum_ida, i);
-		spin_unlock(&proc_inum_lock);
+		spin_unlock_bh(&proc_inum_lock);
 		return -ENOSPC;
 	}
 	*inum = PROC_DYNAMIC_FIRST + i;
@@ -372,9 +372,9 @@ int proc_alloc_inum(unsigned int *inum)
 
 void proc_free_inum(unsigned int inum)
 {
-	spin_lock(&proc_inum_lock);
+	spin_lock_bh(&proc_inum_lock);
 	ida_remove(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST);
-	spin_unlock(&proc_inum_lock);
+	spin_unlock_bh(&proc_inum_lock);
 }
 
 static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
......
@@ -935,6 +935,8 @@ ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, size_t count,
 	if (retval > 0) {
 		add_rchar(current, retval);
 		add_wchar(current, retval);
+		fsnotify_access(in.file);
+		fsnotify_modify(out.file);
 	}
 
 	inc_syscr(current);
......
@@ -112,6 +112,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
 			   unsigned long stack_top,
 			   int executable_stack);
 extern int bprm_mm_init(struct linux_binprm *bprm);
+extern int bprm_change_interp(char *interp, struct linux_binprm *bprm);
 extern int copy_strings_kernel(int argc, const char *const *argv,
 			       struct linux_binprm *bprm);
 extern int prepare_bprm_creds(struct linux_binprm *bprm);
......
@@ -77,13 +77,15 @@
 /*
  * Divide positive or negative dividend by positive divisor and round
- * to closest integer. Result is undefined for negative divisors.
+ * to closest integer. Result is undefined for negative divisors and
+ * for negative dividends if the divisor variable type is unsigned.
  */
 #define DIV_ROUND_CLOSEST(x, divisor)(			\
 {							\
 	typeof(x) __x = x;				\
 	typeof(divisor) __d = divisor;			\
-	(((typeof(x))-1) > 0 || (__x) > 0) ?		\
+	(((typeof(x))-1) > 0 ||				\
+	 ((typeof(divisor))-1) > 0 || (__x) > 0) ?	\
 		(((__x) + ((__d) / 2)) / (__d)) :	\
 		(((__x) - ((__d) / 2)) / (__d));	\
 }							\
......
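
A userspace reproduction of the case the extra divisor-type check fixes, with
both macro versions copied from the hunk above (requires GCC/Clang typeof and
statement expressions; DIV_ROUND_CLOSEST(0, 2U) should be 0, but the old macro
picks the subtracting branch and the unsigned arithmetic wraps):

	#include <stdio.h>

	/* Old macro: with an unsigned divisor and a non-positive dividend
	 * the subtraction branch is chosen and (0 - 1u) wraps around. */
	#define DIV_ROUND_CLOSEST_OLD(x, divisor)(	\
	{						\
		typeof(x) __x = x;			\
		typeof(divisor) __d = divisor;		\
		(((typeof(x))-1) > 0 || (__x) > 0) ?	\
			(((__x) + ((__d) / 2)) / (__d)) : \
			(((__x) - ((__d) / 2)) / (__d)); \
	}						\
	)

	/* New macro: an unsigned divisor forces the addition branch. */
	#define DIV_ROUND_CLOSEST_NEW(x, divisor)(	\
	{						\
		typeof(x) __x = x;			\
		typeof(divisor) __d = divisor;		\
		(((typeof(x))-1) > 0 ||			\
		 ((typeof(divisor))-1) > 0 || (__x) > 0) ? \
			(((__x) + ((__d) / 2)) / (__d)) : \
			(((__x) - ((__d) / 2)) / (__d)); \
	}						\
	)

	int main(void)
	{
		/* 0 / 2 should round to 0; the old macro yields 2147483647. */
		printf("old: %u\n", DIV_ROUND_CLOSEST_OLD(0, 2U));
		printf("new: %u\n", DIV_ROUND_CLOSEST_NEW(0, 2U));
		return 0;
	}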
@@ -4,6 +4,7 @@
 #include <linux/string.h>
 #include <linux/random.h>
 #include <linux/module.h>
+#include <linux/ptrace.h>
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/cache.h>
......
@@ -34,18 +34,15 @@ static __init int module_verify_init(void)
 {
 	pr_notice("Initialise module verification\n");
 
-	modsign_keyring = key_alloc(&key_type_keyring, ".module_sign",
-				    KUIDT_INIT(0), KGIDT_INIT(0),
-				    current_cred(),
-				    (KEY_POS_ALL & ~KEY_POS_SETATTR) |
-				    KEY_USR_VIEW | KEY_USR_READ,
-				    KEY_ALLOC_NOT_IN_QUOTA);
+	modsign_keyring = keyring_alloc(".module_sign",
+					KUIDT_INIT(0), KGIDT_INIT(0),
+					current_cred(),
+					((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+					 KEY_USR_VIEW | KEY_USR_READ),
+					KEY_ALLOC_NOT_IN_QUOTA, NULL);
 	if (IS_ERR(modsign_keyring))
 		panic("Can't allocate module signing keyring\n");
 
-	if (key_instantiate_and_link(modsign_keyring, NULL, 0, NULL, NULL) < 0)
-		panic("Can't instantiate module signing keyring\n");
-
 	return 0;
 }
......
@@ -17,6 +17,21 @@
 #include <linux/balloon_compaction.h>
 #include "internal.h"
 
+#ifdef CONFIG_COMPACTION
+static inline void count_compact_event(enum vm_event_item item)
+{
+	count_vm_event(item);
+}
+
+static inline void count_compact_events(enum vm_event_item item, long delta)
+{
+	count_vm_events(item, delta);
+}
+#else
+#define count_compact_event(item) do { } while (0)
+#define count_compact_events(item, delta) do { } while (0)
+#endif
+
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
#define CREATE_TRACE_POINTS
@@ -303,10 +318,9 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 	if (blockpfn == end_pfn)
 		update_pageblock_skip(cc, valid_page, total_isolated, false);
 
-	count_vm_events(COMPACTFREE_SCANNED, nr_scanned);
+	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
 	if (total_isolated)
-		count_vm_events(COMPACTISOLATED, total_isolated);
+		count_compact_events(COMPACTISOLATED, total_isolated);
 
 	return total_isolated;
 }
@@ -613,9 +627,9 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
 
-	count_vm_events(COMPACTMIGRATE_SCANNED, nr_scanned);
+	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
 	if (nr_isolated)
-		count_vm_events(COMPACTISOLATED, nr_isolated);
+		count_compact_events(COMPACTISOLATED, nr_isolated);
 
 	return low_pfn;
 }
@@ -1110,7 +1124,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 	if (!order || !may_enter_fs || !may_perform_io)
 		return rc;
 
-	count_vm_event(COMPACTSTALL);
+	count_compact_event(COMPACTSTALL);
 
 #ifdef CONFIG_CMA
 	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
......
@@ -574,19 +574,19 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
 
 	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
 	if (unlikely(!*hugepage_kobj)) {
-		printk(KERN_ERR "hugepage: failed kobject create\n");
+		printk(KERN_ERR "hugepage: failed to create transparent hugepage kobject\n");
 		return -ENOMEM;
 	}
 
 	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
 	if (err) {
-		printk(KERN_ERR "hugepage: failed register hugeage group\n");
+		printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
 		goto delete_obj;
 	}
 
 	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
 	if (err) {
-		printk(KERN_ERR "hugepage: failed register hugeage group\n");
+		printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
 		goto remove_hp_group;
 	}
......
@@ -6090,7 +6090,6 @@ mem_cgroup_css_alloc(struct cgroup *cont)
 				&per_cpu(memcg_stock, cpu);
 			INIT_WORK(&stock->work, drain_local_stock);
 		}
-		hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
 	} else {
 		parent = mem_cgroup_from_cont(cont->parent);
 		memcg->use_hierarchy = parent->use_hierarchy;
@@ -6756,6 +6755,19 @@ struct cgroup_subsys mem_cgroup_subsys = {
 	.use_id = 1,
 };
 
+/*
+ * The rest of init is performed during ->css_alloc() for root css which
+ * happens before initcalls. hotcpu_notifier() can't be done together as
+ * it would introduce circular locking by adding cgroup_lock -> cpu hotplug
+ * dependency. Do it from a subsys_initcall().
+ */
+static int __init mem_cgroup_init(void)
+{
+	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
+	return 0;
+}
+subsys_initcall(mem_cgroup_init);
+
 #ifdef CONFIG_MEMCG_SWAP
 static int __init enable_swap_account(char *s)
 {
......
@@ -200,6 +200,18 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 		x += zone_page_state(z, NR_FREE_PAGES) +
 		     zone_reclaimable_pages(z) - z->dirty_balance_reserve;
 	}
+	/*
+	 * Unreclaimable memory (kernel memory or anonymous memory
+	 * without swap) can bring down the dirtyable pages below
+	 * the zone's dirty balance reserve and the above calculation
+	 * will underflow. However we still want to add in nodes
+	 * which are below threshold (negative values) to get a more
+	 * accurate calculation but make sure that the total never
+	 * underflows.
+	 */
+	if ((long)x < 0)
+		x = 0;
+
 	/*
 	 * Make sure that the number of highmem pages is never larger
 	 * than the number of the total dirtyable memory. This can only
@@ -222,8 +234,8 @@ static unsigned long global_dirtyable_memory(void)
 {
 	unsigned long x;
 
-	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() -
-	    dirty_balance_reserve;
+	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+	x -= min(x, dirty_balance_reserve);
 
 	if (!vm_highmem_is_dirtyable)
 		x -= highmem_dirtyable_memory(x);
@@ -290,9 +302,12 @@ static unsigned long zone_dirtyable_memory(struct zone *zone)
 	 * highmem zone can hold its share of dirty pages, so we don't
 	 * care about vm_highmem_is_dirtyable here.
 	 */
-	return zone_page_state(zone, NR_FREE_PAGES) +
-	       zone_reclaimable_pages(zone) -
-	       zone->dirty_balance_reserve;
+	unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
+		zone_reclaimable_pages(zone);
+
+	/* don't allow this to underflow */
+	nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+	return nr_pages;
 }
 
 /**
......
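
Both page-writeback hunks above replace an open-coded subtraction with a
clamp, because the counters are unsigned and free plus reclaimable pages can
legitimately be smaller than the reserve. A minimal illustration with made-up
numbers:

	#include <stdio.h>

	static unsigned long clamped_sub(unsigned long x, unsigned long reserve)
	{
		/* mirrors "x -= min(x, dirty_balance_reserve)" */
		return x - (x < reserve ? x : reserve);
	}

	int main(void)
	{
		unsigned long free_and_reclaimable = 100, reserve = 150;

		/* naive subtraction wraps around to a huge number */
		printf("naive:   %lu\n", free_and_reclaimable - reserve);
		/* the clamped version bottoms out at zero */
		printf("clamped: %lu\n", clamped_sub(free_and_reclaimable, reserve));
		return 0;
	}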
@@ -5978,8 +5978,15 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
 void free_contig_range(unsigned long pfn, unsigned nr_pages)
 {
-	for (; nr_pages--; ++pfn)
-		__free_page(pfn_to_page(pfn));
+	unsigned int count = 0;
+
+	for (; nr_pages--; pfn++) {
+		struct page *page = pfn_to_page(pfn);
+
+		count += page_count(page) != 1;
+		__free_page(page);
+	}
+	WARN(count != 0, "%d pages are still in use!\n", count);
 }
 #endif
......
@@ -2226,8 +2226,11 @@ sub process {
 			my $path = $1;
 			if ($path =~ m{//}) {
 				ERROR("MALFORMED_INCLUDE",
-				      "malformed #include filename\n" .
-				      $herecurr);
+				      "malformed #include filename\n" . $herecurr);
+			}
+			if ($path =~ "^uapi/" && $realfile =~ m@\binclude/uapi/@) {
+				ERROR("UAPI_INCLUDE",
+				      "No #include in ...include/uapi/... should use a uapi/ path prefix\n" . $herecurr);
 			}
 		}
......
@@ -367,8 +367,6 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
 
 		switch (PTR_ERR(key_ref)) {
 		case -EAGAIN: /* no key */
-			if (ret)
-				break;
 		case -ENOKEY: /* negative key */
 			ret = key_ref;
 			break;
......