提交 9f30931a 编写于 作者: Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge fixes from Andrew Morton:
 "9 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  ocfs2/dlm: unlock lockres spinlock before dlm_lockres_put
  fault-inject: fix inverted interval/probability values in printk
  lib/Kconfig.debug: disable -Wframe-larger-than warnings with KASAN=y
  mm: make sendfile(2) killable
  thp: use is_zero_pfn() only after pte_present() check
  mailmap: update Javier Martinez Canillas' email
  MAINTAINERS: add Sergey as zsmalloc reviewer
  mm: cma: fix incorrect type conversion for size during dma allocation
  kmod: don't run async usermode helper as a child of kworker thread
...@@ -59,6 +59,7 @@ James Bottomley <jejb@mulgrave.(none)> ...@@ -59,6 +59,7 @@ James Bottomley <jejb@mulgrave.(none)>
James Bottomley <jejb@titanic.il.steeleye.com> James Bottomley <jejb@titanic.il.steeleye.com>
James E Wilson <wilson@specifix.com> James E Wilson <wilson@specifix.com>
James Ketrenos <jketreno@io.(none)> James Ketrenos <jketreno@io.(none)>
<javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
Jean Tourrilhes <jt@hpl.hp.com> Jean Tourrilhes <jt@hpl.hp.com>
Jeff Garzik <jgarzik@pretzel.yyz.us> Jeff Garzik <jgarzik@pretzel.yyz.us>
Jens Axboe <axboe@suse.de> Jens Axboe <axboe@suse.de>
......
...@@ -11675,6 +11675,7 @@ F: drivers/tty/serial/zs.* ...@@ -11675,6 +11675,7 @@ F: drivers/tty/serial/zs.*
ZSMALLOC COMPRESSED SLAB MEMORY ALLOCATOR ZSMALLOC COMPRESSED SLAB MEMORY ALLOCATOR
M: Minchan Kim <minchan@kernel.org> M: Minchan Kim <minchan@kernel.org>
M: Nitin Gupta <ngupta@vflare.org> M: Nitin Gupta <ngupta@vflare.org>
R: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
L: linux-mm@kvack.org L: linux-mm@kvack.org
S: Maintained S: Maintained
F: mm/zsmalloc.c F: mm/zsmalloc.c
......
...@@ -187,7 +187,7 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, ...@@ -187,7 +187,7 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
* global one. Requires architecture specific dev_get_cma_area() helper * global one. Requires architecture specific dev_get_cma_area() helper
* function. * function.
*/ */
struct page *dma_alloc_from_contiguous(struct device *dev, int count, struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
unsigned int align) unsigned int align)
{ {
if (align > CONFIG_CMA_ALIGNMENT) if (align > CONFIG_CMA_ALIGNMENT)
......
...@@ -1658,12 +1658,13 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data, ...@@ -1658,12 +1658,13 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
if (ret < 0) { if (ret < 0) {
mlog(ML_ERROR, "failed to dispatch assert master work\n"); mlog(ML_ERROR, "failed to dispatch assert master work\n");
response = DLM_MASTER_RESP_ERROR; response = DLM_MASTER_RESP_ERROR;
spin_unlock(&res->spinlock);
dlm_lockres_put(res); dlm_lockres_put(res);
} else { } else {
dispatched = 1; dispatched = 1;
__dlm_lockres_grab_inflight_worker(dlm, res); __dlm_lockres_grab_inflight_worker(dlm, res);
spin_unlock(&res->spinlock);
} }
spin_unlock(&res->spinlock);
} else { } else {
if (res) if (res)
dlm_lockres_put(res); dlm_lockres_put(res);
......
...@@ -1723,8 +1723,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data, ...@@ -1723,8 +1723,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
} else { } else {
dispatched = 1; dispatched = 1;
__dlm_lockres_grab_inflight_worker(dlm, res); __dlm_lockres_grab_inflight_worker(dlm, res);
spin_unlock(&res->spinlock);
} }
spin_unlock(&res->spinlock);
} else { } else {
/* put.. incase we are not the master */ /* put.. incase we are not the master */
spin_unlock(&res->spinlock); spin_unlock(&res->spinlock);
......
...@@ -26,6 +26,6 @@ extern int __init cma_declare_contiguous(phys_addr_t base, ...@@ -26,6 +26,6 @@ extern int __init cma_declare_contiguous(phys_addr_t base,
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit, unsigned int order_per_bit,
struct cma **res_cma); struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align); extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
#endif #endif
...@@ -111,7 +111,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size, ...@@ -111,7 +111,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
return ret; return ret;
} }
struct page *dma_alloc_from_contiguous(struct device *dev, int count, struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
unsigned int order); unsigned int order);
bool dma_release_from_contiguous(struct device *dev, struct page *pages, bool dma_release_from_contiguous(struct device *dev, struct page *pages,
int count); int count);
...@@ -144,7 +144,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size, ...@@ -144,7 +144,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size,
} }
static inline static inline
struct page *dma_alloc_from_contiguous(struct device *dev, int count, struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
unsigned int order) unsigned int order)
{ {
return NULL; return NULL;
......
...@@ -327,9 +327,13 @@ static void call_usermodehelper_exec_work(struct work_struct *work) ...@@ -327,9 +327,13 @@ static void call_usermodehelper_exec_work(struct work_struct *work)
call_usermodehelper_exec_sync(sub_info); call_usermodehelper_exec_sync(sub_info);
} else { } else {
pid_t pid; pid_t pid;
/*
* Use CLONE_PARENT to reparent it to kthreadd; we do not
* want to pollute current->children, and we need a parent
* that always ignores SIGCHLD to ensure auto-reaping.
*/
pid = kernel_thread(call_usermodehelper_exec_async, sub_info, pid = kernel_thread(call_usermodehelper_exec_async, sub_info,
SIGCHLD); CLONE_PARENT | SIGCHLD);
if (pid < 0) { if (pid < 0) {
sub_info->retval = pid; sub_info->retval = pid;
umh_complete(sub_info); umh_complete(sub_info);
......
...@@ -197,6 +197,7 @@ config ENABLE_MUST_CHECK ...@@ -197,6 +197,7 @@ config ENABLE_MUST_CHECK
config FRAME_WARN config FRAME_WARN
int "Warn for stack frames larger than (needs gcc 4.4)" int "Warn for stack frames larger than (needs gcc 4.4)"
range 0 8192 range 0 8192
default 0 if KASAN
default 1024 if !64BIT default 1024 if !64BIT
default 2048 if 64BIT default 2048 if 64BIT
help help
......
...@@ -44,7 +44,7 @@ static void fail_dump(struct fault_attr *attr) ...@@ -44,7 +44,7 @@ static void fail_dump(struct fault_attr *attr)
printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n" printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
"name %pd, interval %lu, probability %lu, " "name %pd, interval %lu, probability %lu, "
"space %d, times %d\n", attr->dname, "space %d, times %d\n", attr->dname,
attr->probability, attr->interval, attr->interval, attr->probability,
atomic_read(&attr->space), atomic_read(&attr->space),
atomic_read(&attr->times)); atomic_read(&attr->times));
if (attr->verbose > 1) if (attr->verbose > 1)
......
...@@ -361,7 +361,7 @@ int __init cma_declare_contiguous(phys_addr_t base, ...@@ -361,7 +361,7 @@ int __init cma_declare_contiguous(phys_addr_t base,
* This function allocates part of contiguous memory on specific * This function allocates part of contiguous memory on specific
* contiguous memory area. * contiguous memory area.
*/ */
struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align) struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
{ {
unsigned long mask, offset, pfn, start = 0; unsigned long mask, offset, pfn, start = 0;
unsigned long bitmap_maxno, bitmap_no, bitmap_count; unsigned long bitmap_maxno, bitmap_no, bitmap_count;
...@@ -371,7 +371,7 @@ struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align) ...@@ -371,7 +371,7 @@ struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align)
if (!cma || !cma->count) if (!cma || !cma->count)
return NULL; return NULL;
pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma, pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
count, align); count, align);
if (!count) if (!count)
......
...@@ -2488,6 +2488,11 @@ ssize_t generic_perform_write(struct file *file, ...@@ -2488,6 +2488,11 @@ ssize_t generic_perform_write(struct file *file,
break; break;
} }
if (fatal_signal_pending(current)) {
status = -EINTR;
break;
}
status = a_ops->write_begin(file, mapping, pos, bytes, flags, status = a_ops->write_begin(file, mapping, pos, bytes, flags,
&page, &fsdata); &page, &fsdata);
if (unlikely(status < 0)) if (unlikely(status < 0))
...@@ -2525,10 +2530,6 @@ ssize_t generic_perform_write(struct file *file, ...@@ -2525,10 +2530,6 @@ ssize_t generic_perform_write(struct file *file,
written += copied; written += copied;
balance_dirty_pages_ratelimited(mapping); balance_dirty_pages_ratelimited(mapping);
if (fatal_signal_pending(current)) {
status = -EINTR;
break;
}
} while (iov_iter_count(i)); } while (iov_iter_count(i));
return written ? written : status; return written ? written : status;
......
...@@ -2206,7 +2206,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, ...@@ -2206,7 +2206,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
for (_pte = pte; _pte < pte+HPAGE_PMD_NR; for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
_pte++, address += PAGE_SIZE) { _pte++, address += PAGE_SIZE) {
pte_t pteval = *_pte; pte_t pteval = *_pte;
if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { if (pte_none(pteval) || (pte_present(pteval) &&
is_zero_pfn(pte_pfn(pteval)))) {
if (!userfaultfd_armed(vma) && if (!userfaultfd_armed(vma) &&
++none_or_zero <= khugepaged_max_ptes_none) ++none_or_zero <= khugepaged_max_ptes_none)
continue; continue;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册