diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ed5072c64daac1a40ceaec919e73bb7dfb93f0ad..c5aa439933649675d1c9d1c33367fbd4ca2cfa4a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -13,6 +13,7 @@
 #include
 #include
 #include
+#include <linux/compiler.h>
 #include
 #include
 #include
@@ -3521,7 +3522,7 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
 #else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */
 
 /* Can be overriden by architectures */
-__attribute__((weak)) struct page *
+struct page * __weak
 follow_huge_pud(struct mm_struct *mm, unsigned long address,
 	pud_t *pud, int write)
 {
diff --git a/mm/nommu.c b/mm/nommu.c
index 5d3f3524bbdc83b7ed2eceaf6ce064c3917bba51..e68deff6d44762a5888430f5c3a36da0866717a4 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include <linux/compiler.h>
 #include
 #include
 #include
@@ -460,7 +461,7 @@ EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 /*
  * Implement a stub for vmalloc_sync_all() if the architecture chose not to
  * have one.
  */
-void __attribute__((weak)) vmalloc_sync_all(void)
+void __weak vmalloc_sync_all(void)
 {
 }
diff --git a/mm/sparse.c b/mm/sparse.c
index 38cad8fd739734573ce90a3c9b1c251116f01193..d1b48b691ac8c20040a262337cc7e0cbf566420f 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -5,10 +5,12 @@
 #include
 #include
 #include
+#include <linux/compiler.h>
 #include
 #include
 #include
 #include
+
 #include "internal.h"
 #include
 #include
@@ -461,7 +463,7 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 }
 #endif
 
-void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
+void __weak __meminit vmemmap_populate_print_last(void)
 {
 }
diff --git a/mm/util.c b/mm/util.c
index a24aa22f2473690c1e2fa95514f778c6c0c616a7..d7813e6d4cc7c042c44a01692f0993a2b6bad04a 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -1,6 +1,7 @@
 #include
 #include
 #include
+#include <linux/compiler.h>
 #include
 #include
 #include
@@ -307,7 +308,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
  * If the architecture not support this function, simply return with no
  * page pinned
  */
-int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
+int __weak __get_user_pages_fast(unsigned long start,
 				int nr_pages, int write, struct page **pages)
 {
 	return 0;
@@ -338,7 +339,7 @@ EXPORT_SYMBOL_GPL(__get_user_pages_fast);
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
-int __attribute__((weak)) get_user_pages_fast(unsigned long start,
+int __weak get_user_pages_fast(unsigned long start,
 				int nr_pages, int write, struct page **pages)
 {
 	struct mm_struct *mm = current->mm;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0fdf96803c5b59623792a24e57015fb0e25098bb..a7b522f4851d3869e1ff502cde866e07494532df 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -27,7 +27,9 @@
 #include
 #include
 #include
+#include <linux/compiler.h>
 #include
+
 #include
 #include
 #include
@@ -2181,7 +2183,7 @@ EXPORT_SYMBOL(remap_vmalloc_range);
 /*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
-void __attribute__((weak)) vmalloc_sync_all(void)
+void __weak vmalloc_sync_all(void)
 {
 }