Commit 094eaabb, authored by Peng Wu, committed by Yang Yingliang

proc: Count reliable memory usage of reliable tasks

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

----------------------------------------------

Count reliable memory allocated by reliable user tasks.

The policy for counting reliable memory usage is based on the RSS statistics:
anywhere the mm counters are updated, reliable pages need to be counted too.
A page that passes the page_reliable() check updates the reliable page
counter via reliable_page_counter().

The reliable page counter needs to be updated wherever any of the following
counter updates is added (see the condensed sketch after this list):
- add_mm_counter
- dec_mm_counter
- inc_mm_counter_fast
- dec_mm_counter_fast
- rss[mm_counter(page)]
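
A condensed sketch of the resulting pattern, assembled from the hunks below;
reliable_page_counter(), mm->reliable_nr_page and the global
reliable_user_used_nr_page counter are all introduced by this patch, and the
call site shown is illustrative only:

	/* page_reliable() is true for pages in non-movable (mirrored) zones. */
	static inline void reliable_page_counter(struct page *page,
						 struct mm_struct *mm, int val)
	{
		if (page_reliable(page)) {
			atomic_long_add(val, &mm->reliable_nr_page);       /* per task    */
			atomic_long_add(val, &reliable_user_used_nr_page); /* system wide */
		}
	}

	/* Example call site: keep the reliable counter in step with RSS. */
	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
	reliable_page_counter(page, vma->vm_mm, 1);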
Signed-off-by: Peng Wu <wupeng58@huawei.com>
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Parent 8eb421e3
@@ -181,6 +181,7 @@ read the file /proc/PID/status:
  VmPTE:              20 kb
  VmSwap:              0 kB
  HugetlbPages:        0 kB
+ Reliable:         1608 KB
  CoreDumping:         0
  Threads:             1
  SigQ:          0/28578
@@ -254,6 +255,7 @@ Table 1-2: Contents of the status files (as of 4.8)
  VmSwap                      amount of swap used by anonymous private data
                              (shmem swap usage is not included)
  HugetlbPages                size of hugetlb memory portions
+ Reliable                    size of reliable memory used
  CoreDumping                 process's memory is currently being dumped
                              (killing the process may lead to a corrupted core)
  Threads                     number of threads
......
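
As a usage note (not part of the patch): a minimal user-space sketch that
reads the new field from /proc/self/status; it assumes a kernel with this
feature enabled, otherwise the Reliable: line is simply absent and nothing is
printed.

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[256];
		FILE *fp = fopen("/proc/self/status", "r");

		if (!fp)
			return 1;

		/* Print only the Reliable: field added by this patch, if present. */
		while (fgets(line, sizeof(line), fp))
			if (strncmp(line, "Reliable:", 9) == 0)
				fputs(line, stdout);

		fclose(fp);
		return 0;
	}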
@@ -77,6 +77,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
 	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
 	seq_puts(m, " kB\n");
 	hugetlb_report_usage(m, mm);
+	reliable_report_usage(m, mm);
 }
 #undef SEQ_PUT_DEC
......
@@ -14,11 +14,14 @@
 extern struct static_key_false mem_reliable;
 extern bool reliable_enabled;
+extern atomic_long_t reliable_user_used_nr_page;
 extern void add_reliable_mem_size(long sz);
 extern void mem_reliable_init(bool has_unmirrored_mem,
 			      unsigned long *zone_movable_pfn);
 extern void reliable_report_meminfo(struct seq_file *m);
+extern bool page_reliable(struct page *page);
+extern void reliable_report_usage(struct seq_file *m, struct mm_struct *mm);
 static inline bool mem_reliable_is_enabled(void)
 {
@@ -47,6 +50,15 @@ static inline bool skip_none_movable_zone(gfp_t gfp, struct zoneref *z)
 	return false;
 }
+static inline void reliable_page_counter(struct page *page,
+					 struct mm_struct *mm, int val)
+{
+	if (page_reliable(page)) {
+		atomic_long_add(val, &mm->reliable_nr_page);
+		atomic_long_add(val, &reliable_user_used_nr_page);
+	}
+}
 #else
 #define reliable_enabled 0
@@ -60,6 +72,11 @@ static inline bool skip_none_movable_zone(gfp_t gfp, struct zoneref *z)
 	return false;
 }
 static inline void reliable_report_meminfo(struct seq_file *m) {}
+static inline bool page_reliable(struct page *page) { return false; }
+static inline void reliable_page_counter(struct page *page,
+					 struct mm_struct *mm, int val) {}
+static inline void reliable_report_usage(struct seq_file *m,
+					 struct mm_struct *mm) {}
 #endif
......
@@ -191,7 +191,9 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 	if (!PageAnon(old_page)) {
 		dec_mm_counter(mm, mm_counter_file(old_page));
+		reliable_page_counter(old_page, mm, -1);
 		inc_mm_counter(mm, MM_ANONPAGES);
+		reliable_page_counter(new_page, mm, 1);
 	}
 	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
......
@@ -673,6 +673,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
 		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
 		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+		reliable_page_counter(page, vma->vm_mm, HPAGE_PMD_NR);
 		mm_inc_nr_ptes(vma->vm_mm);
 		spin_unlock(vmf->ptl);
 		count_vm_event(THP_FAULT_ALLOC);
@@ -1080,6 +1081,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	get_page(src_page);
 	page_dup_rmap(src_page, true);
 	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+	reliable_page_counter(src_page, dst_mm, HPAGE_PMD_NR);
 	mm_inc_nr_ptes(dst_mm);
 	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
@@ -1468,6 +1470,8 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
 		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
 		if (!page) {
 			add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+			reliable_page_counter(new_page, vma->vm_mm,
+					      HPAGE_PMD_NR);
 		} else {
 			VM_BUG_ON_PAGE(!PageHead(page), page);
 			page_remove_rmap(page, true);
@@ -1850,10 +1854,12 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		if (PageAnon(page)) {
 			zap_deposited_table(tlb->mm, pmd);
 			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
+			reliable_page_counter(page, tlb->mm, -HPAGE_PMD_NR);
 		} else {
 			if (arch_needs_pgtable_deposit())
 				zap_deposited_table(tlb->mm, pmd);
 			add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
+			reliable_page_counter(page, tlb->mm, -HPAGE_PMD_NR);
 		}
 		spin_unlock(ptl);
@@ -2209,6 +2215,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 			put_page(page);
 		}
 		add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
+		reliable_page_counter(page, mm, -HPAGE_PMD_NR);
 		return;
 	}
@@ -3170,6 +3177,7 @@ vm_fault_t do_anon_huge_page_remap(struct vm_area_struct *vma, unsigned long add
 	pgtable_trans_huge_deposit(vma->vm_mm, pmd, pgtable);
 	set_pmd_at(vma->vm_mm, address, pmd, entry);
 	add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+	reliable_page_counter(page, vma->vm_mm, HPAGE_PMD_NR);
 	mm_inc_nr_ptes(vma->vm_mm);
 	spin_unlock(ptl);
 	count_vm_event(THP_FAULT_ALLOC);
......
@@ -648,6 +648,7 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
 			clear_user_highpage(page, address);
 			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
+			reliable_page_counter(page, vma->vm_mm, 1);
 			if (is_zero_pfn(pte_pfn(pteval))) {
 				/*
 				 * ptl mostly unnecessary.
......
@@ -1184,6 +1184,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 		 * when tearing down the mm.
 		 */
 		dec_mm_counter(mm, MM_ANONPAGES);
+		reliable_page_counter(page, mm, -1);
 	}
 	flush_cache_page(vma, addr, pte_pfn(*ptep));
......
@@ -12,14 +12,19 @@
 DEFINE_STATIC_KEY_FALSE(mem_reliable);
 bool reliable_enabled;
 static atomic_long_t total_reliable_mem;
+atomic_long_t reliable_user_used_nr_page;
 void add_reliable_mem_size(long sz)
 {
 	atomic_long_add(sz, &total_reliable_mem);
 }
+bool page_reliable(struct page *page)
+{
+	return mem_reliable_is_enabled() && page_zonenum(page) < ZONE_MOVABLE;
+}
 static int reliable_mem_notifier(struct notifier_block *nb,
 				 unsigned long action, void *arg)
 {
@@ -105,3 +110,11 @@ void reliable_report_meminfo(struct seq_file *m)
 			   used_reliable_mem_sz() >> 10);
 	}
 }
+void reliable_report_usage(struct seq_file *m, struct mm_struct *mm)
+{
+	if (mem_reliable_is_enabled()) {
+		seq_printf(m, "Reliable:\t%8lu kB\n",
+			   atomic_long_read(&mm->reliable_nr_page));
+	}
+}
@@ -740,6 +740,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 			rss[mm_counter(page)]++;
+			reliable_page_counter(page, dst_mm, 1);
 			if (is_write_migration_entry(entry) &&
 			    is_cow_mapping(vm_flags)) {
 				/*
@@ -766,6 +767,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 			 */
 			get_page(page);
 			rss[mm_counter(page)]++;
+			reliable_page_counter(page, dst_mm, 1);
 			page_dup_rmap(page, false);
 			/*
@@ -807,6 +809,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		get_page(page);
 		page_dup_rmap(page, false);
 		rss[mm_counter(page)]++;
+		reliable_page_counter(page, dst_mm, 1);
 	} else if (pte_devmap(pte)) {
 		page = pte_page(pte);
@@ -819,6 +822,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 			get_page(page);
 			page_dup_rmap(page, false);
 			rss[mm_counter(page)]++;
+			reliable_page_counter(page, dst_mm, 1);
 		}
 	}
@@ -1102,6 +1106,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 				mark_page_accessed(page);
 			}
 			rss[mm_counter(page)]--;
+			reliable_page_counter(page, mm, -1);
 			page_remove_rmap(page, false);
 			if (unlikely(page_mapcount(page) < 0))
 				print_bad_pte(vma, addr, ptent, page);
@@ -1130,6 +1135,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
 			rss[mm_counter(page)]--;
+			reliable_page_counter(page, mm, -1);
 			page_remove_rmap(page, false);
 			put_page(page);
 			continue;
@@ -1147,6 +1153,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			page = migration_entry_to_page(entry);
 			rss[mm_counter(page)]--;
+			reliable_page_counter(page, mm, -1);
 		}
 		if (unlikely(!free_swap_and_cache(entry)))
 			print_bad_pte(vma, addr, ptent, NULL);
@@ -1490,6 +1497,7 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 	/* Ok, finally just insert the thing.. */
 	get_page(page);
 	inc_mm_counter_fast(mm, mm_counter_file(page));
+	reliable_page_counter(page, mm, 1);
 	page_add_file_rmap(page, false);
 	set_pte_at(mm, addr, pte, mk_pte(page, prot));
@@ -2489,10 +2497,13 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 			if (!PageAnon(old_page)) {
 				dec_mm_counter_fast(mm,
 						mm_counter_file(old_page));
+				reliable_page_counter(old_page, mm, -1);
 				inc_mm_counter_fast(mm, MM_ANONPAGES);
+				reliable_page_counter(new_page, mm, 1);
 			}
 		} else {
 			inc_mm_counter_fast(mm, MM_ANONPAGES);
+			reliable_page_counter(new_page, mm, 1);
 		}
 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
 		entry = mk_pte(new_page, vma->vm_page_prot);
@@ -3051,6 +3062,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	 */
 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
+	reliable_page_counter(page, vma->vm_mm, 1);
 	dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
 	pte = mk_pte(page, vma->vm_page_prot);
 	if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
@@ -3216,6 +3228,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	}
 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
+	reliable_page_counter(page, vma->vm_mm, 1);
 	page_add_new_anon_rmap(page, vma, vmf->address, false);
 	mem_cgroup_commit_charge(page, memcg, false, false);
 	lru_cache_add_active_or_unevictable(page, vma);
@@ -3416,6 +3429,7 @@ static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 	add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
+	reliable_page_counter(page, vma->vm_mm, HPAGE_PMD_NR);
 	page_add_file_rmap(page, true);
 	/*
 	 * deposit and withdraw with pmd lock held
@@ -3489,6 +3503,7 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 	if (write)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 	/* copy-on-write page */
+	reliable_page_counter(page, vma->vm_mm, 1);
 	if (write && !(vma->vm_flags & VM_SHARED)) {
 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 		page_add_new_anon_rmap(page, vma, vmf->address, false);
@@ -4910,6 +4925,7 @@ vm_fault_t do_anon_page_remap(struct vm_area_struct *vma, unsigned long address,
 	if (ret)
 		goto release;
 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
+	reliable_page_counter(page, vma->vm_mm, 1);
 	page_add_new_anon_rmap(page, vma, address, false);
 	mem_cgroup_commit_charge(page, memcg, false, false);
 	lru_cache_add_active_or_unevictable(page, vma);
......
@@ -2714,6 +2714,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 	}
 	inc_mm_counter(mm, MM_ANONPAGES);
+	reliable_page_counter(page, mm, 1);
 	page_add_new_anon_rmap(page, vma, addr, false);
 	mem_cgroup_commit_charge(page, memcg, false, false);
 	if (!is_zone_device_page(page))
......
@@ -1548,6 +1548,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 						   vma_mmu_pagesize(vma));
 			} else {
 				dec_mm_counter(mm, mm_counter(page));
+				reliable_page_counter(page, mm, -1);
 				set_pte_at(mm, address, pvmw.pte, pteval);
 			}
@@ -1563,6 +1564,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			 * copied pages.
 			 */
 			dec_mm_counter(mm, mm_counter(page));
+			reliable_page_counter(page, mm, -1);
 			/* We have to invalidate as we cleared the pte */
 			mmu_notifier_invalidate_range(mm, address,
 						      address + PAGE_SIZE);
@@ -1617,6 +1619,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 				mmu_notifier_invalidate_range(mm,
 					address, address + PAGE_SIZE);
 				dec_mm_counter(mm, MM_ANONPAGES);
+				reliable_page_counter(page, mm, -1);
 				goto discard;
 			}
@@ -1650,6 +1653,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 				spin_unlock(&mmlist_lock);
 			}
 			dec_mm_counter(mm, MM_ANONPAGES);
+			reliable_page_counter(page, mm, -1);
 			inc_mm_counter(mm, MM_SWAPENTS);
 			swp_pte = swp_entry_to_pte(entry);
 			if (pte_soft_dirty(pteval))
@@ -1670,6 +1674,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			 * See Documentation/vm/mmu_notifier.rst
 			 */
 			dec_mm_counter(mm, mm_counter_file(page));
+			reliable_page_counter(page, mm, -1);
 		}
 discard:
 		/*
......
@@ -2473,6 +2473,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 	spin_unlock_irq(&info->lock);
 	inc_mm_counter(dst_mm, mm_counter_file(page));
+	reliable_page_counter(page, dst_mm, 1);
 	page_add_file_rmap(page, false);
 	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
......
@@ -1869,6 +1869,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
 	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
+	reliable_page_counter(page, vma->vm_mm, 1);
 	get_page(page);
 	set_pte_at(vma->vm_mm, addr, pte,
 		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
......
@@ -116,6 +116,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 #endif
 	inc_mm_counter(dst_mm, MM_ANONPAGES);
+	reliable_page_counter(page, dst_mm, 1);
 	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
 	mem_cgroup_commit_charge(page, memcg, false, false);
 	lru_cache_add_active_or_unevictable(page, dst_vma);
......