/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Huawei Ascend Share Pool Memory
 *
 * Copyright (C) 2020 Huawei Limited
 * Author: Tang Yizhou <tangyizhou@huawei.com>
 *         Zefan Li <lizefan@huawei.com>
 *         Wu Peng <wupeng58@huawei.com>
 *         Ding Tianhong <dingtianhong@huawei.com>
 *         Zhou Guanghui <zhouguanghui1@huawei.com>
 *         Li Ming <limingming.li@huawei.com>
 *
 * This code is based on the hisilicon ascend platform.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) "share pool: " fmt

#include <linux/share_pool.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/mm_types.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/printk.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/atomic.h>
#include <linux/lockdep.h>
#include <linux/kernel.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/rmap.h>
#include <linux/preempt.h>
#include <linux/swapops.h>
#include <linux/mmzone.h>
#include <linux/timekeeping.h>
#include <linux/time64.h>
#include <linux/pagewalk.h>

#define spg_valid(spg)		((spg)->is_alive == true)

/* Use the spa va address as the mmap offset. This works because spa_file
 * is set up with a 64-bit address space, so the va is fully covered.
 */
#define addr_offset(spa)	((spa)->va_start)

#define byte2kb(size)		((size) >> 10)
#define byte2mb(size)		((size) >> 20)
#define page2kb(page_num)	((page_num) << (PAGE_SHIFT - 10))

#define MAX_GROUP_FOR_SYSTEM	50000
#define MAX_GROUP_FOR_TASK	3000
#define MAX_PROC_PER_GROUP	1024

#define GROUP_NONE		0

#define SEC2US(sec)		((sec) * 1000000)
#define NS2US(ns)		((ns) / 1000)

#define PF_DOMAIN_CORE		0x10000000	/* AOS CORE processes in sched.h */

static int system_group_count;

/* idr of all sp_groups */
static DEFINE_IDR(sp_group_idr);
/* rw semaphore for sp_group_idr and mm->sp_group_master */
static DECLARE_RWSEM(sp_group_sem);

static BLOCKING_NOTIFIER_HEAD(sp_notifier_chain);

static DEFINE_IDA(sp_group_id_ida);

/*** Statistical and maintenance tools ***/

/* list of all sp_group_masters */
static LIST_HEAD(master_list);
/* mutex to protect insert/delete ops from master_list */
static DEFINE_MUTEX(master_list_lock);

/* list of all spm-dvpp */
static LIST_HEAD(spm_dvpp_list);
/* mutex to protect insert/delete ops from spm_dvpp_list */
static DEFINE_MUTEX(spm_list_lock);

/* for kthread buff_module_guard_work */
static struct sp_proc_stat kthread_stat;

#ifndef __GENKSYMS__
struct sp_spg_stat {
	int spg_id;
	/* record the number of hugepage allocation failures */
	atomic_t hugepage_failures;
	/* number of sp_area */
	atomic_t	 spa_num;
	/* total size of all sp_area from sp_alloc and k2u */
	atomic64_t	 size;
	/* total size of all sp_area from sp_alloc 0-order page */
	atomic64_t	 alloc_nsize;
	/* total size of all sp_area from sp_alloc hugepage */
	atomic64_t	 alloc_hsize;
	/* total size of all sp_area from ap_alloc */
	atomic64_t	 alloc_size;
	/* total size of all sp_area from sp_k2u */
	atomic64_t	 k2u_size;
};

/* per process memory usage statistics indexed by tgid */
struct sp_proc_stat {
	int tgid;
	struct mm_struct *mm;
	char comm[TASK_COMM_LEN];
	/*
	 * alloc amount minus free amount, may be negative when freed by
	 * another task in the same sp group.
	 */
	atomic64_t alloc_size;
	atomic64_t alloc_nsize;
	atomic64_t alloc_hsize;
	atomic64_t k2u_size;
};

/* per process/sp-group memory usage statistics */
struct spg_proc_stat {
	int tgid;
	int spg_id;  /* 0 for non-group data, such as k2u_task */
	/*
	 * alloc amount minus free amount, may be negative when freed by
	 * another task in the same sp group.
	 */
	atomic64_t alloc_size;
	atomic64_t alloc_nsize;
	atomic64_t alloc_hsize;
	atomic64_t k2u_size;
};

enum sp_mapping_type {
	SP_MAPPING_START,
	SP_MAPPING_DVPP		= SP_MAPPING_START,
	SP_MAPPING_NORMAL,
	SP_MAPPING_RO,
	SP_MAPPING_END,
};

/*
 * address space management
 */
struct sp_mapping {
	unsigned long type;
	atomic_t user;
	unsigned long start[MAX_DEVID];
	unsigned long end[MAX_DEVID];
	struct rb_root area_root;

	struct rb_node *free_area_cache;
	unsigned long cached_hole_size;
	unsigned long cached_vstart;

	/* list head for all groups attached to this mapping, dvpp mapping only */
	struct list_head group_head;
	struct list_head spm_node;
};

/* Processes in the same sp_group can share memory.
 * Memory layout for share pool:
 *
 * |-------------------- 8T -------------------|---|------ 8T ------------|
 * |		Device 0	   |  Device 1 |...|                      |
 * |----------------------------------------------------------------------|
 * |------------- 16G -------------|    16G    |   |                      |
 * | DVPP GROUP0   | DVPP GROUP1   | ... | ... |...|  sp normal memory    |
 * |     sp        |    sp         |     |     |   |                      |
 * |----------------------------------------------------------------------|
 *
 * The host SVM feature reserves 8T of virtual memory via mmap, and due to
 * the restriction of DVPP, when SVM and the share pool both allocate
 * memory for DVPP, the memory has to be in the same 32G range.
 *
 * The share pool reserves 16T of memory, with 8T for normal use and 8T for
 * DVPP. Within the 8T DVPP memory, SVM calls sp_config_dvpp_range() to
 * tell us which 16G memory range is reserved for the share pool.
 *
 * In some scenarios where there is no host SVM feature, the share pool
 * uses the default 8G memory setting for DVPP.
 */
struct sp_group {
	int		 id;
	unsigned long	 flag;
	struct file	 *file;
	struct file	 *file_hugetlb;
	/* number of processes in this group */
	int		 proc_num;
	/* list head of processes (sp_group_node, each represents a process) */
	struct list_head procs;
	/* list head of sp_area. It is protected by the spinlock sp_area_lock */
	struct list_head spa_list;
	/* group statistics */
	struct sp_spg_stat instat;
	/* is_alive == false means it's being destroyed */
	bool		 is_alive;
	atomic_t	 use_count;
	/* protect the group internal elements, except spa_list */
	struct rw_semaphore	rw_lock;
	/* list node for dvpp mapping */
	struct list_head	mnode;
	struct sp_mapping       *mapping[SP_MAPPING_END];
};

/* a per-process(per mm) struct which manages a sp_group_node list */
struct sp_group_master {
	/*
	 * number of sp groups the process belongs to,
	 * a.k.a. the number of sp_group_nodes in node_list
	 */
	unsigned int count;
	/* list head of sp_group_node */
	struct list_head node_list;
	struct mm_struct *mm;
	/*
	 * Used to allocate the share pool memory that is private to the
	 * current process, e.g. sp_alloc of non-shared memory or k2u_task.
	 */
	struct sp_group *local;
	struct sp_proc_stat instat;
	struct list_head list_node;
};

/*
 * each instance represents an sp group the process belongs to
 * sp_group_master    : sp_group_node   = 1 : N
 * sp_group_node->spg : sp_group        = 1 : 1
 * sp_group_node      : sp_group->procs = N : 1
 */
struct sp_group_node {
	/* list node in sp_group->procs */
	struct list_head proc_node;
	/* list node in sp_group_master->node_list */
	struct list_head group_node;
	struct sp_group_master *master;
	struct sp_group *spg;
	unsigned long prot;
	struct spg_proc_stat instat;
};
#endif

/* The caller should hold mmap_sem to protect master (TBD) */
static void sp_init_group_master_stat(int tgid, struct mm_struct *mm,
		struct sp_proc_stat *stat)
{
	atomic64_set(&stat->alloc_nsize, 0);
	atomic64_set(&stat->alloc_hsize, 0);
	atomic64_set(&stat->k2u_size, 0);
	stat->mm = mm;
	stat->tgid = tgid;
	get_task_comm(stat->comm, current);
}

static unsigned long sp_mapping_type(struct sp_mapping *spm)
{
	return spm->type;
}

static void sp_mapping_set_type(struct sp_mapping *spm, unsigned long type)
{
	spm->type = type;
}

static struct sp_mapping *sp_mapping_normal;
static struct sp_mapping *sp_mapping_ro;

static void sp_mapping_add_to_list(struct sp_mapping *spm)
{
	mutex_lock(&spm_list_lock);
	if (sp_mapping_type(spm) == SP_MAPPING_DVPP)
		list_add_tail(&spm->spm_node, &spm_dvpp_list);
	mutex_unlock(&spm_list_lock);
}

static void sp_mapping_remove_from_list(struct sp_mapping *spm)
{
	mutex_lock(&spm_list_lock);
	if (sp_mapping_type(spm) == SP_MAPPING_DVPP)
		list_del(&spm->spm_node);
	mutex_unlock(&spm_list_lock);
}

static void sp_mapping_range_init(struct sp_mapping *spm)
{
	int i;

	for (i = 0; i < MAX_DEVID; i++) {
		switch (sp_mapping_type(spm)) {
		case SP_MAPPING_RO:
			spm->start[i] = MMAP_SHARE_POOL_RO_START;
			spm->end[i]   = MMAP_SHARE_POOL_RO_END;
			break;
		case SP_MAPPING_NORMAL:
			spm->start[i] = MMAP_SHARE_POOL_NORMAL_START;
			spm->end[i]   = MMAP_SHARE_POOL_NORMAL_END;
			break;
		case SP_MAPPING_DVPP:
			spm->start[i] = MMAP_SHARE_POOL_DVPP_START + i * MMAP_SHARE_POOL_16G_SIZE;
			spm->end[i]   = spm->start[i] + MMAP_SHARE_POOL_16G_SIZE;
			break;
		default:
			pr_err("Invalid sp_mapping type [%lu]\n", sp_mapping_type(spm));
			break;
		}
	}
}
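
/*
 * Worked example for the range init above (illustrative, not from the
 * original source; assumes MMAP_SHARE_POOL_16G_SIZE is 16G): for a DVPP
 * mapping, device 0 covers [MMAP_SHARE_POOL_DVPP_START, START + 16G) and
 * device 1 covers [START + 16G, START + 32G), so each device owns a
 * disjoint 16G window, matching the layout diagram above struct sp_group.
 */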

static struct sp_mapping *sp_mapping_create(unsigned long type)
{
	struct sp_mapping *spm;

	spm = kzalloc(sizeof(struct sp_mapping), GFP_KERNEL);
	if (!spm)
		return ERR_PTR(-ENOMEM);

	sp_mapping_set_type(spm, type);
	sp_mapping_range_init(spm);
	atomic_set(&spm->user, 0);
	spm->area_root = RB_ROOT;
	INIT_LIST_HEAD(&spm->group_head);
	sp_mapping_add_to_list(spm);

	return spm;
}

static void sp_mapping_destroy(struct sp_mapping *spm)
{
	sp_mapping_remove_from_list(spm);
	kfree(spm);
}

static void sp_mapping_attach(struct sp_group *spg, struct sp_mapping *spm)
{
	unsigned long type = sp_mapping_type(spm);
	atomic_inc(&spm->user);

	spg->mapping[type] = spm;
	if (type == SP_MAPPING_DVPP)
		list_add_tail(&spg->mnode, &spm->group_head);
}

static void sp_mapping_detach(struct sp_group *spg, struct sp_mapping *spm)
{
	unsigned long type;

	if (!spm)
		return;

	type = sp_mapping_type(spm);
	if (type == SP_MAPPING_DVPP)
		list_del(&spg->mnode);
	if (atomic_dec_and_test(&spm->user))
		sp_mapping_destroy(spm);

	spg->mapping[type] = NULL;
}

/* merge old mapping to new, and the old mapping would be destroyed */
static void sp_mapping_merge(struct sp_mapping *new, struct sp_mapping *old)
{
	struct sp_group *spg, *tmp;

	if (new == old)
		return;

	list_for_each_entry_safe(spg, tmp, &old->group_head, mnode) {
		list_move_tail(&spg->mnode, &new->group_head);
		spg->mapping[SP_MAPPING_DVPP] = new;
	}

	atomic_add(atomic_read(&old->user), &new->user);
	sp_mapping_destroy(old);
}

static bool is_mapping_empty(struct sp_mapping *spm)
{
	return RB_EMPTY_ROOT(&spm->area_root);
}

static bool can_mappings_merge(struct sp_mapping *m1, struct sp_mapping *m2)
{
	int i;

	for (i = 0; i < MAX_DEVID; i++)
		if (m1->start[i] != m2->start[i] || m1->end[i] != m2->end[i])
			return false;

	return true;
}

/*
 * 1. The mapping of the local group is set when the group is created.
 * 2. This is used to set up the mapping for groups created during add_task.
 * 3. The normal mapping exists for all groups.
 * 4. The dvpp mappings for the new group and the local group can merge
 *    _iff_ at least one of the mappings is empty.
 * The caller must hold sp_group_sem.
 * NOTE: undo the merging when a later step fails.
 */
static int sp_mapping_group_setup(struct mm_struct *mm, struct sp_group *spg)
{
	struct sp_mapping *local_dvpp_mapping, *spg_dvpp_mapping;

	local_dvpp_mapping = mm->sp_group_master->local->mapping[SP_MAPPING_DVPP];
	spg_dvpp_mapping = spg->mapping[SP_MAPPING_DVPP];

	if (!list_empty(&spg->procs) && !(spg->flag & SPG_FLAG_NON_DVPP)) {
		/*
		 * Don't return an error when the mappings' address range conflict.
		 * As long as the mapping is unused, we can drop the empty mapping.
		 * This may change the address range for the task or group implicitly,
		 * give a warn for it.
		 */
		bool is_conflict = !can_mappings_merge(local_dvpp_mapping, spg_dvpp_mapping);

		if (is_mapping_empty(local_dvpp_mapping)) {
			sp_mapping_merge(spg_dvpp_mapping, local_dvpp_mapping);
			if (is_conflict)
				pr_warn_ratelimited("task address space conflict, spg_id=%d\n", spg->id);
		} else if (is_mapping_empty(spg_dvpp_mapping)) {
			sp_mapping_merge(local_dvpp_mapping, spg_dvpp_mapping);
			if (is_conflict)
				pr_warn_ratelimited("group address space conflict, spg_id=%d\n", spg->id);
		} else {
			pr_info_ratelimited("Duplicate address space, id=%d\n", spg->id);
			return -EINVAL;
		}
	} else {
		if (!(spg->flag & SPG_FLAG_NON_DVPP))
			/* the mapping of local group is always set */
			sp_mapping_attach(spg, local_dvpp_mapping);
		if (!spg->mapping[SP_MAPPING_NORMAL])
			sp_mapping_attach(spg, sp_mapping_normal);
		if (!spg->mapping[SP_MAPPING_RO])
			sp_mapping_attach(spg, sp_mapping_ro);
	}

	return 0;
}
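
/*
 * Illustrative scenario for the merge rules above (not part of the
 * original source): a task T whose local DVPP mapping is still empty
 * joins a group G that has already allocated DVPP memory. T's empty
 * local mapping is merged into G's mapping, so T and G share one DVPP
 * address space afterwards. If both mappings already contain sp_areas,
 * neither can be dropped and the join fails with -EINVAL.
 */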

static struct sp_mapping *sp_mapping_find(struct sp_group *spg,
						 unsigned long addr)
{
	if (addr >= MMAP_SHARE_POOL_NORMAL_START && addr < MMAP_SHARE_POOL_NORMAL_END)
		return spg->mapping[SP_MAPPING_NORMAL];

	if (addr >= MMAP_SHARE_POOL_RO_START && addr < MMAP_SHARE_POOL_RO_END)
		return spg->mapping[SP_MAPPING_RO];

	return spg->mapping[SP_MAPPING_DVPP];
}

static struct sp_group *create_spg(int spg_id, unsigned long flag);
static void free_new_spg_id(bool new, int spg_id);
static void free_sp_group_locked(struct sp_group *spg);
static int local_group_add_task(struct mm_struct *mm, struct sp_group *spg);
static int init_local_group(struct mm_struct *mm)
{
	int spg_id, ret;
	struct sp_group *spg;
	struct sp_mapping *spm;
	struct sp_group_master *master = mm->sp_group_master;

	spg_id = ida_alloc_range(&sp_group_id_ida, SPG_ID_LOCAL_MIN,
				 SPG_ID_LOCAL_MAX, GFP_ATOMIC);
	if (spg_id < 0) {
		pr_err_ratelimited("generate local group id failed %d\n", spg_id);
		return spg_id;
	}

	spg = create_spg(spg_id, 0);
	if (IS_ERR(spg)) {
		ret = PTR_ERR(spg);
		goto free_spg_id;
	}

	master->local = spg;
	spm = sp_mapping_create(SP_MAPPING_DVPP);
	if (IS_ERR(spm)) {
		ret = PTR_ERR(spm);
		goto free_spg;
	}
	sp_mapping_attach(master->local, spm);
	sp_mapping_attach(master->local, sp_mapping_normal);
	sp_mapping_attach(master->local, sp_mapping_ro);

	ret = local_group_add_task(mm, spg);
	if (ret < 0)
		/* The spm would be released while destroying the spg */
		goto free_spg;

	return 0;

free_spg:
	free_sp_group_locked(spg);
	master->local = NULL;
free_spg_id:
	free_new_spg_id(true, spg_id);

	return ret;
}

/* The caller must hold sp_group_sem */
static int sp_init_group_master_locked(struct task_struct *tsk, struct mm_struct *mm)
{
	int ret;
	struct sp_group_master *master;

	if (mm->sp_group_master)
		return 0;

	master = kmalloc(sizeof(struct sp_group_master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	INIT_LIST_HEAD(&master->node_list);
	master->count = 0;
	master->mm = mm;
	sp_init_group_master_stat(tsk->tgid, mm, &master->instat);
	mm->sp_group_master = master;

	mutex_lock(&master_list_lock);
	list_add_tail(&master->list_node, &master_list);
	mutex_unlock(&master_list_lock);

	ret = init_local_group(mm);
	if (ret)
		goto free_master;

	return 0;

free_master:
	mutex_lock(&master_list_lock);
	list_del(&master->list_node);
	mutex_unlock(&master_list_lock);
	mm->sp_group_master = NULL;
	kfree(master);

	return ret;
}

static inline bool is_local_group(int spg_id)
{
	return spg_id >= SPG_ID_LOCAL_MIN && spg_id <= SPG_ID_LOCAL_MAX;
}

static struct sp_group *sp_get_local_group(struct task_struct *tsk, struct mm_struct *mm)
{
	int ret;
	struct sp_group_master *master;

	down_read(&sp_group_sem);
	master = mm->sp_group_master;
	if (master && master->local) {
		atomic_inc(&master->local->use_count);
		up_read(&sp_group_sem);
		return master->local;
	}
	up_read(&sp_group_sem);

	down_write(&sp_group_sem);
	ret = sp_init_group_master_locked(tsk, mm);
	if (ret) {
		up_write(&sp_group_sem);
		return ERR_PTR(ret);
	}
	master = mm->sp_group_master;
	atomic_inc(&master->local->use_count);
	up_write(&sp_group_sem);

	return master->local;
}

static void update_spg_stat_alloc(unsigned long size, bool inc,
	bool huge, struct sp_spg_stat *stat)
{
	if (inc) {
		atomic_inc(&stat->spa_num);
		atomic64_add(size, &stat->size);
		atomic64_add(size, &stat->alloc_size);
		if (huge)
			atomic64_add(size, &stat->alloc_hsize);
		else
			atomic64_add(size, &stat->alloc_nsize);
	} else {
		atomic_dec(&stat->spa_num);
		atomic64_sub(size, &stat->size);
		atomic64_sub(size, &stat->alloc_size);
		if (huge)
			atomic64_sub(size, &stat->alloc_hsize);
		else
			atomic64_sub(size, &stat->alloc_nsize);
	}
}

static void update_spg_stat_k2u(unsigned long size, bool inc,
	struct sp_spg_stat *stat)
{
	if (inc) {
		atomic_inc(&stat->spa_num);
		atomic64_add(size, &stat->size);
		atomic64_add(size, &stat->k2u_size);
	} else {
		atomic_dec(&stat->spa_num);
		atomic64_sub(size, &stat->size);
		atomic64_sub(size, &stat->k2u_size);
	}
}

static void update_mem_usage_alloc(unsigned long size, bool inc,
		bool is_hugepage, struct sp_group_node *spg_node)
{
	struct sp_proc_stat *proc_stat = &spg_node->master->instat;

	if (inc) {
		if (is_hugepage) {
			atomic64_add(size, &spg_node->instat.alloc_hsize);
			atomic64_add(size, &proc_stat->alloc_hsize);
			return;
		}
		atomic64_add(size, &spg_node->instat.alloc_nsize);
		atomic64_add(size, &proc_stat->alloc_nsize);
		return;
	}

	if (is_hugepage) {
		atomic64_sub(size, &spg_node->instat.alloc_hsize);
		atomic64_sub(size, &proc_stat->alloc_hsize);
		return;
	}
	atomic64_sub(size, &spg_node->instat.alloc_nsize);
	atomic64_sub(size, &proc_stat->alloc_nsize);
	return;
}

static void update_mem_usage_k2u(unsigned long size, bool inc,
		struct sp_group_node *spg_node)
{
	struct sp_proc_stat *proc_stat = &spg_node->master->instat;

	if (inc) {
		atomic64_add(size, &spg_node->instat.k2u_size);
		atomic64_add(size, &proc_stat->k2u_size);
	} else {
		atomic64_sub(size, &spg_node->instat.k2u_size);
		atomic64_sub(size, &proc_stat->k2u_size);
	}
}

static void sp_init_spg_proc_stat(struct spg_proc_stat *stat, int spg_id)
{
	stat->tgid = current->tgid;
	stat->spg_id = spg_id;
	atomic64_set(&stat->alloc_nsize, 0);
	atomic64_set(&stat->alloc_hsize, 0);
	atomic64_set(&stat->k2u_size, 0);
}

static void sp_init_group_stat(struct sp_spg_stat *stat)
{
	atomic_set(&stat->hugepage_failures, 0);
	atomic_set(&stat->spa_num, 0);
	atomic64_set(&stat->size, 0);
	atomic64_set(&stat->alloc_nsize, 0);
	atomic64_set(&stat->alloc_hsize, 0);
	atomic64_set(&stat->alloc_size, 0);
	atomic64_set(&stat->k2u_size, 0);
}

/* statistics of all sp area, protected by sp_area_lock */
struct sp_spa_stat {
	unsigned int total_num;
	unsigned int alloc_num;
	unsigned int k2u_task_num;
	unsigned int k2u_spg_num;
	unsigned long total_size;
	unsigned long alloc_size;
	unsigned long k2u_task_size;
	unsigned long k2u_spg_size;
	unsigned long dvpp_size;
	unsigned long dvpp_va_size;
};

static struct sp_spa_stat spa_stat;

/* statistics of all sp group born from sp_alloc and k2u(spg) */
struct sp_overall_stat {
	atomic_t spa_total_num;
	atomic64_t spa_total_size;
};

static struct sp_overall_stat sp_overall_stat;

/*** Global share pool VA allocator ***/

enum spa_type {
	SPA_TYPE_ALLOC = 1,
	/* NOTE: reorganize after the statistical structure is reconstructed. */
	SPA_TYPE_ALLOC_PRIVATE = SPA_TYPE_ALLOC,
	SPA_TYPE_K2TASK,
	SPA_TYPE_K2SPG,
};

/*
 * We bump the reference when each mmap succeeds, and it will be dropped
 * when the vma is about to be released, so the sp_area object is
 * automatically freed when all tasks in the sp group have exited.
 */
struct sp_area {
	unsigned long va_start;
	unsigned long va_end;		/* va_end always align to hugepage */
	unsigned long real_size;	/* real size with alignment */
	unsigned long region_vstart;	/* belong to normal region or DVPP region */
	unsigned long flags;
	bool is_hugepage;
	bool is_dead;
	atomic_t use_count;		/* How many vmas use this VA region */
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head link;		/* link to the spg->head */
	struct sp_group *spg;
	enum spa_type type;		/* where spa born from */
	struct mm_struct *mm;		/* owner of k2u(task) */
	unsigned long kva;		/* shared kva */
	pid_t applier;			/* the original applier process */
	int node_id;			/* memory node */
	int device_id;
};
static DEFINE_SPINLOCK(sp_area_lock);

static unsigned long spa_size(struct sp_area *spa)
{
	return spa->real_size;
}

static struct file *spa_file(struct sp_area *spa)
{
	if (spa->is_hugepage)
		return spa->spg->file_hugetlb;
	else
		return spa->spg->file;
}

/* the caller should hold sp_area_lock */
static void spa_inc_usage(struct sp_area *spa)
{
	enum spa_type type = spa->type;
	unsigned long size = spa->real_size;
	bool is_dvpp = spa->flags & SP_DVPP;
	bool is_huge = spa->is_hugepage;

	switch (type) {
	case SPA_TYPE_ALLOC:
		spa_stat.alloc_num += 1;
		spa_stat.alloc_size += size;
		update_spg_stat_alloc(size, true, is_huge, &spa->spg->instat);
		break;
	case SPA_TYPE_K2TASK:
		spa_stat.k2u_task_num += 1;
		spa_stat.k2u_task_size += size;
		update_spg_stat_k2u(size, true, &spa->spg->instat);
		break;
	case SPA_TYPE_K2SPG:
		spa_stat.k2u_spg_num += 1;
		spa_stat.k2u_spg_size += size;
		update_spg_stat_k2u(size, true, &spa->spg->instat);
		break;
	default:
		WARN(1, "invalid spa type");
	}

	if (is_dvpp) {
		spa_stat.dvpp_size += size;
		spa_stat.dvpp_va_size += ALIGN(size, PMD_SIZE);
	}

	/*
	 * all the calculations won't overflow due to system limitation and
	 * parameter checking in sp_alloc_area()
	 */
	spa_stat.total_num += 1;
	spa_stat.total_size += size;

	if (!is_local_group(spa->spg->id)) {
		atomic_inc(&sp_overall_stat.spa_total_num);
		atomic64_add(size, &sp_overall_stat.spa_total_size);
	}
}

/* the caller should hold sp_area_lock */
static void spa_dec_usage(struct sp_area *spa)
{
	enum spa_type type = spa->type;
	unsigned long size = spa->real_size;
	bool is_dvpp = spa->flags & SP_DVPP;
	bool is_huge = spa->is_hugepage;

	switch (type) {
	case SPA_TYPE_ALLOC:
		spa_stat.alloc_num -= 1;
		spa_stat.alloc_size -= size;
		update_spg_stat_alloc(size, false, is_huge, &spa->spg->instat);
		break;
	case SPA_TYPE_K2TASK:
		spa_stat.k2u_task_num -= 1;
		spa_stat.k2u_task_size -= size;
		update_spg_stat_k2u(size, false, &spa->spg->instat);
		break;
	case SPA_TYPE_K2SPG:
		spa_stat.k2u_spg_num -= 1;
		spa_stat.k2u_spg_size -= size;
		update_spg_stat_k2u(size, false, &spa->spg->instat);
		break;
	default:
		WARN(1, "invalid spa type");
	}

	if (is_dvpp) {
		spa_stat.dvpp_size -= size;
		spa_stat.dvpp_va_size -= ALIGN(size, PMD_SIZE);
	}

	spa_stat.total_num -= 1;
	spa_stat.total_size -= size;

	if (!is_local_group(spa->spg->id)) {
		atomic_dec(&sp_overall_stat.spa_total_num);
		atomic64_sub(spa->real_size, &sp_overall_stat.spa_total_size);
	}
}

static void update_mem_usage(unsigned long size, bool inc, bool is_hugepage,
	struct sp_group_node *spg_node, enum spa_type type)
{
	switch (type) {
	case SPA_TYPE_ALLOC:
		update_mem_usage_alloc(size, inc, is_hugepage, spg_node);
		break;
	case SPA_TYPE_K2TASK:
	case SPA_TYPE_K2SPG:
		update_mem_usage_k2u(size, inc, spg_node);
		break;
	default:
		WARN(1, "invalid stat type\n");
	}
}

struct sp_group_node *find_spg_node_by_spg(struct mm_struct *mm,
		struct sp_group *spg)
{
	struct sp_group_node *spg_node;

	list_for_each_entry(spg_node, &mm->sp_group_master->node_list, group_node) {
		if (spg_node->spg == spg)
			return spg_node;
	}
	return NULL;
}

static void sp_update_process_stat(struct task_struct *tsk, bool inc,
	struct sp_area *spa)
{
	struct sp_group_node *spg_node;
	unsigned long size = spa->real_size;
	enum spa_type type = spa->type;

	spg_node = find_spg_node_by_spg(tsk->mm, spa->spg);
	update_mem_usage(size, inc, spa->is_hugepage, spg_node, type);
}

static inline void check_interrupt_context(void)
{
	if (unlikely(in_interrupt()))
		panic("function can't be used in interrupt context\n");
}

static inline bool check_aoscore_process(struct task_struct *tsk)
{
	if (tsk->flags & PF_DOMAIN_CORE)
		return true;
	else
		return false;
}

static unsigned long sp_mmap(struct mm_struct *mm, struct file *file,
			     struct sp_area *spa, unsigned long *populate,
			     unsigned long prot, struct vm_area_struct **pvma);
static void sp_munmap(struct mm_struct *mm, unsigned long addr, unsigned long size);

#define K2U_NORMAL	0
#define K2U_COREDUMP	1

struct sp_k2u_context {
	unsigned long kva;
	unsigned long kva_aligned;
	unsigned long size;
	unsigned long size_aligned;
	unsigned long sp_flags;
	int state;
	int spg_id;
	bool to_task;
};

static unsigned long sp_remap_kva_to_vma(unsigned long kva, struct sp_area *spa,
				struct mm_struct *mm, unsigned long prot, struct sp_k2u_context *kc);

static void free_sp_group_id(int spg_id)
{
	/* ida operation is protected by an internal spin_lock */
	if ((spg_id >= SPG_ID_AUTO_MIN && spg_id <= SPG_ID_AUTO_MAX) ||
	    (spg_id >= SPG_ID_LOCAL_MIN && spg_id <= SPG_ID_LOCAL_MAX))
		ida_free(&sp_group_id_ida, spg_id);
}

static void free_new_spg_id(bool new, int spg_id)
{
	if (new)
		free_sp_group_id(spg_id);
}

static void free_sp_group_locked(struct sp_group *spg)
{
	int type;

	fput(spg->file);
	fput(spg->file_hugetlb);
	idr_remove(&sp_group_idr, spg->id);
	free_sp_group_id((unsigned int)spg->id);

	for (type = SP_MAPPING_START; type < SP_MAPPING_END; type++)
		sp_mapping_detach(spg, spg->mapping[type]);

	if (!is_local_group(spg->id))
		system_group_count--;

	kfree(spg);
	WARN(system_group_count < 0, "unexpected group count\n");
}

static void free_sp_group(struct sp_group *spg)
{
	down_write(&sp_group_sem);
	free_sp_group_locked(spg);
	up_write(&sp_group_sem);
}

static void sp_group_drop_locked(struct sp_group *spg)
{
	lockdep_assert_held_write(&sp_group_sem);

	if (atomic_dec_and_test(&spg->use_count))
		free_sp_group_locked(spg);
}

static void sp_group_drop(struct sp_group *spg)
{
	if (atomic_dec_and_test(&spg->use_count))
		free_sp_group(spg);
}

/* use with put_task_struct(task) */
static int get_task(int tgid, struct task_struct **task)
{
	struct task_struct *tsk;
	struct pid *p;

	rcu_read_lock();
	p = find_pid_ns(tgid, &init_pid_ns);
	tsk = pid_task(p, PIDTYPE_TGID);
	if (!tsk || (tsk->flags & PF_EXITING)) {
		rcu_read_unlock();
		return -ESRCH;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	*task = tsk;
	return 0;
}

/*
 * the caller must:
 * 1. hold spg->rw_lock
 * 2. ensure no concurrency problem for mm_struct
 */
static bool is_process_in_group(struct sp_group *spg,
						 struct mm_struct *mm)
{
	struct sp_group_node *spg_node;

	list_for_each_entry(spg_node, &spg->procs, proc_node)
		if (spg_node->master->mm == mm)
			return true;

	return false;
}

/* user must call sp_group_drop() after use */
static struct sp_group *__sp_find_spg_locked(int tgid, int spg_id)
{
	struct sp_group *spg = NULL;
	struct task_struct *tsk = NULL;
	int ret = 0;

	if (spg_id == SPG_ID_DEFAULT) {
		ret = get_task(tgid, &tsk);
		if (ret)
			return NULL;

		task_lock(tsk);
		if (tsk->mm == NULL)
			spg = NULL;
		else if (tsk->mm->sp_group_master)
			spg = tsk->mm->sp_group_master->local;
		task_unlock(tsk);

		put_task_struct(tsk);
	} else {
		spg = idr_find(&sp_group_idr, spg_id);
	}

	if (!spg || !atomic_inc_not_zero(&spg->use_count))
		return NULL;

	return spg;
}

static struct sp_group *__sp_find_spg(int tgid, int spg_id)
{
	struct sp_group *spg;

	down_read(&sp_group_sem);
	spg = __sp_find_spg_locked(tgid, spg_id);
	up_read(&sp_group_sem);
	return spg;
}

/**
 * mg_sp_group_id_by_pid() - Get the sp_group ID array of a process.
 * @tgid: tgid of target process.
 * @spg_ids: points to an array to save the group IDs the process belongs to
 * @num: on input, the size of the spg_ids array; on output, the number of
 *       groups the process belongs to
 *
 * Return:
 * 0		- success, the group IDs are stored in spg_ids.
 * -ENODEV	- target process doesn't belong to any sp_group.
 * -EINVAL	- spg_ids or num is NULL.
 * -E2BIG	- the num of groups process belongs to is larger than *num
 */
int mg_sp_group_id_by_pid(int tgid, int *spg_ids, int *num)
{
	int ret = 0, real_count;
	struct sp_group_node *node;
	struct sp_group_master *master = NULL;
	struct task_struct *tsk;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	if (!spg_ids || !num || *num <= 0)
		return -EINVAL;

	ret = get_task(tgid, &tsk);
	if (ret)
		return ret;

	down_read(&sp_group_sem);
	task_lock(tsk);
	if (tsk->mm)
		master = tsk->mm->sp_group_master;
	task_unlock(tsk);

	if (!master) {
		ret = -ENODEV;
		goto out_up_read;
	}

	/*
	 * There is a local group for each process, used for pass-through
	 * allocation. The local group is an internal implementation detail
	 * for convenience and is not meant to be visible to the user.
	 */
	real_count = master->count - 1;
	if (real_count <= 0) {
		ret = -ENODEV;
		goto out_up_read;
	}
	if ((unsigned int)*num < real_count) {
		ret = -E2BIG;
		goto out_up_read;
	}
	*num = real_count;

	list_for_each_entry(node, &master->node_list, group_node) {
		if (is_local_group(node->spg->id))
			continue;
		*(spg_ids++) = node->spg->id;
	}

out_up_read:
	up_read(&sp_group_sem);
	put_task_struct(tsk);
	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_group_id_by_pid);
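
/*
 * Usage sketch for mg_sp_group_id_by_pid() (illustrative only and kept out
 * of the build; the buffer size is an arbitrary assumption). On success,
 * ids[] holds the group IDs and num is updated to their actual count.
 */
#if 0
static void example_list_groups(int tgid)
{
	int ids[16];
	int num = ARRAY_SIZE(ids);
	int i, ret;

	ret = mg_sp_group_id_by_pid(tgid, ids, &num);
	if (ret)	/* e.g. -ENODEV: no group; -E2BIG: buffer too small */
		return;

	for (i = 0; i < num; i++)
		pr_info("tgid %d is in spg %d\n", tgid, ids[i]);
}
#endif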

static bool is_online_node_id(int node_id)
{
	return node_id >= 0 && node_id < MAX_NUMNODES && node_online(node_id);
}

static struct sp_group *create_spg(int spg_id, unsigned long flag)
{
	int ret;
	struct sp_group *spg;
	char name[20];
	struct user_struct *user = NULL;
	int hsize_log = MAP_HUGE_2MB >> MAP_HUGE_SHIFT;

	if (unlikely(system_group_count + 1 == MAX_GROUP_FOR_SYSTEM &&
		     !is_local_group(spg_id))) {
		pr_err_ratelimited("reach system max group num\n");
		return ERR_PTR(-ENOSPC);
	}

	spg = kzalloc(sizeof(*spg), GFP_KERNEL);
	if (spg == NULL)
		return ERR_PTR(-ENOMEM);

	ret = idr_alloc(&sp_group_idr, spg, spg_id, spg_id + 1, GFP_KERNEL);
	if (ret < 0) {
		pr_err_ratelimited("group %d idr alloc failed %d\n",
				   spg_id, ret);
		goto out_kfree;
	}

	spg->id = spg_id;
	spg->flag = flag;
	spg->is_alive = true;
	spg->proc_num = 0;
	atomic_set(&spg->use_count, 1);
	INIT_LIST_HEAD(&spg->procs);
	INIT_LIST_HEAD(&spg->spa_list);
	INIT_LIST_HEAD(&spg->mnode);
	init_rwsem(&spg->rw_lock);
	sp_init_group_stat(&spg->instat);

	sprintf(name, "sp_group_%d", spg_id);
	spg->file = shmem_kernel_file_setup(name, MAX_LFS_FILESIZE,
					    VM_NORESERVE);
	if (IS_ERR(spg->file)) {
		pr_err("spg file setup failed %ld\n", PTR_ERR(spg->file));
		ret = PTR_ERR(spg->file);
		goto out_idr;
	}

	spg->file_hugetlb = hugetlb_file_setup(name, MAX_LFS_FILESIZE,
					       VM_NORESERVE, &user, HUGETLB_ANONHUGE_INODE, hsize_log);
	if (IS_ERR(spg->file_hugetlb)) {
		pr_err("spg file_hugetlb setup failed %ld\n",
		       PTR_ERR(spg->file_hugetlb));
		ret = PTR_ERR(spg->file_hugetlb);
		goto out_fput;
	}

	if (!is_local_group(spg_id))
		system_group_count++;
	return spg;

out_fput:
	fput(spg->file);
out_idr:
	idr_remove(&sp_group_idr, spg_id);
out_kfree:
	kfree(spg);
	return ERR_PTR(ret);
}

/* the caller must hold sp_group_sem */
static struct sp_group *find_or_alloc_sp_group(int spg_id, unsigned long flag)
{
	struct sp_group *spg;

	spg = __sp_find_spg_locked(current->tgid, spg_id);

	if (!spg) {
		spg = create_spg(spg_id, flag);
	} else {
		down_read(&spg->rw_lock);
		if (!spg_valid(spg)) {
			up_read(&spg->rw_lock);
			sp_group_drop_locked(spg);
			return ERR_PTR(-ENODEV);
		}
		up_read(&spg->rw_lock);
		/* spg->use_count has increased due to __sp_find_spg() */
	}

	return spg;
}

static void __sp_area_drop_locked(struct sp_area *spa);

/* The caller must down_write(&mm->mmap_lock) */
static void sp_munmap_task_areas(struct mm_struct *mm, struct sp_group *spg, struct list_head *stop)
{
	struct sp_area *spa, *prev = NULL;
	int err;

	spin_lock(&sp_area_lock);
	list_for_each_entry(spa, &spg->spa_list, link) {
		if (&spa->link == stop)
			break;

		__sp_area_drop_locked(prev);
		prev = spa;

		atomic_inc(&spa->use_count);
		spin_unlock(&sp_area_lock);

		err = do_munmap(mm, spa->va_start, spa_size(spa), NULL);
		if (err) {
			/* we are not supposed to fail */
			pr_err("failed to unmap VA %pK when munmap task areas\n",
			       (void *)spa->va_start);
		}

		spin_lock(&sp_area_lock);
	}
	__sp_area_drop_locked(prev);

	spin_unlock(&sp_area_lock);
}

/* the caller must hold sp_group_sem */
static int mm_add_group_init(struct task_struct *tsk, struct mm_struct *mm,
			     struct sp_group *spg)
{
	int ret;
	struct sp_group_master *master;

	if (!mm->sp_group_master) {
		ret = sp_init_group_master_locked(tsk, mm);
		if (ret)
			return ret;
	} else {
		if (is_process_in_group(spg, mm)) {
			pr_err_ratelimited("task already in target group, id=%d\n", spg->id);
			return -EEXIST;
		}

		master = mm->sp_group_master;
		if (master->count == MAX_GROUP_FOR_TASK) {
			pr_err("task reaches max group num\n");
			return -ENOSPC;
		}
	}

	return 0;
}

/* the caller must hold sp_group_sem */
static struct sp_group_node *create_spg_node(struct mm_struct *mm,
	unsigned long prot, struct sp_group *spg)
{
	struct sp_group_master *master = mm->sp_group_master;
	struct sp_group_node *spg_node;

	spg_node = kzalloc(sizeof(struct sp_group_node), GFP_KERNEL);
	if (spg_node == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&spg_node->group_node);
	INIT_LIST_HEAD(&spg_node->proc_node);
	spg_node->spg = spg;
	spg_node->master = master;
	spg_node->prot = prot;
	sp_init_spg_proc_stat(&spg_node->instat, spg->id);

	list_add_tail(&spg_node->group_node, &master->node_list);
	master->count++;

	return spg_node;
}

/* the caller must down_write(&spg->rw_lock) */
static int insert_spg_node(struct sp_group *spg, struct sp_group_node *node)
{
	if (spg->proc_num + 1 == MAX_PROC_PER_GROUP) {
		pr_err_ratelimited("add group: group reaches max process num\n");
		return -ENOSPC;
	}

	spg->proc_num++;
	list_add_tail(&node->proc_node, &spg->procs);

	return 0;
}

/* the caller must down_write(&spg->rw_lock) */
static void delete_spg_node(struct sp_group *spg, struct sp_group_node *node)
{
	list_del(&node->proc_node);
	spg->proc_num--;
}

/* the caller must hold sp_group_sem */
static void free_spg_node(struct mm_struct *mm, struct sp_group *spg,
	struct sp_group_node *spg_node)
{
	struct sp_group_master *master = mm->sp_group_master;

	list_del(&spg_node->group_node);
	master->count--;

	kfree(spg_node);
}

static int local_group_add_task(struct mm_struct *mm, struct sp_group *spg)
{
	struct sp_group_node *node;

	node = create_spg_node(mm, PROT_READ | PROT_WRITE, spg);
	if (IS_ERR(node))
		return PTR_ERR(node);

	insert_spg_node(spg, node);
	mmget(mm);

	return 0;
}

/**
 * mg_sp_group_add_task() - Add a process to a share group (sp_group).
 * @tgid: the tgid of the task to be added.
 * @prot: the prot of task for this spg.
 * @spg_id: the ID of the sp_group.
 *
 * A process can't be added to more than one sp_group in single-group mode,
 * but it can be in multiple-group mode.
 *
 * Return: A positive group number for success, -errno on failure.
 *
 * The manually specified ID is between [SPG_ID_MIN, SPG_ID_MAX].
 * The automatically allocated ID is between [SPG_ID_AUTO_MIN, SPG_ID_AUTO_MAX].
 * When negative, the return value is -errno.
 */
int mg_sp_group_add_task(int tgid, unsigned long prot, int spg_id)
{
	unsigned long flag = 0;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct sp_group *spg;
	struct sp_group_node *node = NULL;
	int ret = 0;
	bool id_newly_generated = false;
	struct sp_area *spa, *prev = NULL;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	/* only allow READ, READ | WRITE */
	if (!((prot == PROT_READ)
	      || (prot == (PROT_READ | PROT_WRITE)))) {
		pr_err_ratelimited("prot is invalid 0x%lx\n", prot);
		return -EINVAL;
	}

	if (spg_id < SPG_ID_MIN || spg_id > SPG_ID_AUTO) {
		pr_err_ratelimited("add group failed, invalid group id %d\n", spg_id);
		return -EINVAL;
	}

	if (spg_id >= SPG_ID_AUTO_MIN && spg_id <= SPG_ID_AUTO_MAX) {
		spg = __sp_find_spg(tgid, spg_id);

		if (!spg) {
			pr_err_ratelimited("spg %d hasn't been created\n", spg_id);
			return -EINVAL;
		}

		down_read(&spg->rw_lock);
		if (!spg_valid(spg)) {
			up_read(&spg->rw_lock);
			pr_err_ratelimited("add group failed, group id %d is dead\n", spg_id);
			sp_group_drop(spg);
			return -EINVAL;
		}
		up_read(&spg->rw_lock);

		sp_group_drop(spg);
	}

	if (spg_id == SPG_ID_AUTO) {
		spg_id = ida_alloc_range(&sp_group_id_ida, SPG_ID_AUTO_MIN,
					 SPG_ID_AUTO_MAX, GFP_ATOMIC);
		if (spg_id < 0) {
			pr_err_ratelimited("add group failed, auto generate group id failed\n");
			return spg_id;
		}
		id_newly_generated = true;
	}

	down_write(&sp_group_sem);

	ret = get_task(tgid, &tsk);
	if (ret) {
		up_write(&sp_group_sem);
		free_new_spg_id(id_newly_generated, spg_id);
		goto out;
	}

	if (check_aoscore_process(tsk)) {
		up_write(&sp_group_sem);
		ret = -EACCES;
		free_new_spg_id(id_newly_generated, spg_id);
		goto out_put_task;
	}

	/*
	 * group_leader: current thread may be exiting in a multithread process
	 *
	 * DESIGN IDEA
	 * We increase mm->mm_users deliberately to ensure it's decreased in
	 * share pool under only 2 circumstances, which will simplify the overall
	 * design as mm won't be freed unexpectedly.
	 *
	 * The corresponding refcount decrements are as follows:
	 * 1. the error handling branch of THIS function.
	 * 2. In sp_group_exit(). It's called only when process is exiting.
	 */
	mm = get_task_mm(tsk->group_leader);
	if (!mm) {
		up_write(&sp_group_sem);
		ret = -ESRCH;
		free_new_spg_id(id_newly_generated, spg_id);
		goto out_put_task;
	}

	spg = find_or_alloc_sp_group(spg_id, flag);
	if (IS_ERR(spg)) {
		up_write(&sp_group_sem);
		ret = PTR_ERR(spg);
		free_new_spg_id(id_newly_generated, spg_id);
		goto out_put_mm;
	}

	down_write(&spg->rw_lock);
	ret = mm_add_group_init(tsk, mm, spg);
	if (ret) {
		up_write(&spg->rw_lock);
		goto out_drop_group;
	}

	ret = sp_mapping_group_setup(mm, spg);
	if (ret) {
		up_write(&spg->rw_lock);
		goto out_drop_group;
	}

	node = create_spg_node(mm, prot, spg);
	if (unlikely(IS_ERR(node))) {
		up_write(&spg->rw_lock);
		ret = PTR_ERR(node);
		goto out_drop_group;
	}

	ret = insert_spg_node(spg, node);
	if (unlikely(ret)) {
		up_write(&spg->rw_lock);
		goto out_drop_spg_node;
	}

	/*
	 * create mappings of existing shared memory segments into this
	 * new process' page table.
	 */
	spin_lock(&sp_area_lock);

	list_for_each_entry(spa, &spg->spa_list, link) {
		unsigned long populate = 0;
		struct file *file = spa_file(spa);
		unsigned long addr;
		unsigned long prot_spa = prot;

		if ((spa->flags & (SP_PROT_RO | SP_PROT_FOCUS)) == (SP_PROT_RO | SP_PROT_FOCUS))
			prot_spa &= ~PROT_WRITE;

		__sp_area_drop_locked(prev);
		prev = spa;

		atomic_inc(&spa->use_count);

		if (spa->is_dead == true)
			continue;

		spin_unlock(&sp_area_lock);

		if (spa->type == SPA_TYPE_K2SPG && spa->kva) {
			addr = sp_remap_kva_to_vma(spa->kva, spa, mm, prot_spa, NULL);
			if (IS_ERR_VALUE(addr))
				pr_warn("add group remap k2u failed %ld\n", addr);

			spin_lock(&sp_area_lock);
			continue;
		}

		down_write(&mm->mmap_lock);
		if (unlikely(mm->core_state)) {
			sp_munmap_task_areas(mm, spg, &spa->link);
			up_write(&mm->mmap_lock);
			ret = -EBUSY;
			pr_err("add group: encountered coredump, abort\n");
			spin_lock(&sp_area_lock);
			break;
		}

		addr = sp_mmap(mm, file, spa, &populate, prot_spa, NULL);
		if (IS_ERR_VALUE(addr)) {
			sp_munmap_task_areas(mm, spg, &spa->link);
			up_write(&mm->mmap_lock);
			ret = addr;
			pr_err("add group: sp mmap failed %d\n", ret);
			spin_lock(&sp_area_lock);
			break;
		}
		up_write(&mm->mmap_lock);

		if (populate) {
			ret = do_mm_populate(mm, spa->va_start, populate, 0);
			if (ret) {
				if (unlikely(fatal_signal_pending(current)))
					pr_warn_ratelimited("add group failed, current thread is killed\n");
				else
					pr_warn_ratelimited("add group failed, mm populate failed (potential no enough memory when -12): %d, spa type is %d\n",
					ret, spa->type);
				down_write(&mm->mmap_lock);
				sp_munmap_task_areas(mm, spg, spa->link.next);
				up_write(&mm->mmap_lock);
				spin_lock(&sp_area_lock);
				break;
			}
		}

		spin_lock(&sp_area_lock);
	}
	__sp_area_drop_locked(prev);
	spin_unlock(&sp_area_lock);

	if (unlikely(ret))
		delete_spg_node(spg, node);
	up_write(&spg->rw_lock);

out_drop_spg_node:
	if (unlikely(ret))
		free_spg_node(mm, spg, node);
	/*
	 * to simplify design, we don't release the resource of
	 * group_master and proc_stat, they will be freed when
	 * process is exiting.
	 */
out_drop_group:
	if (unlikely(ret)) {
		up_write(&sp_group_sem);
		sp_group_drop(spg);
	} else
		up_write(&sp_group_sem);
out_put_mm:
	/* No need to put the mm if the sp group adds this mm successfully */
	if (unlikely(ret))
		mmput(mm);
out_put_task:
	put_task_struct(tsk);
out:
	return ret == 0 ? spg_id : ret;
}
EXPORT_SYMBOL_GPL(mg_sp_group_add_task);
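
/*
 * Usage sketch for mg_sp_group_add_task() (illustrative only and kept out
 * of the build): join an automatically allocated group with read-write
 * permission. On success the return value is the group ID that was
 * actually joined; on failure it is -errno.
 */
#if 0
static int example_join_auto_group(int tgid)
{
	int spg_id;

	spg_id = mg_sp_group_add_task(tgid, PROT_READ | PROT_WRITE,
				      SPG_ID_AUTO);
	if (spg_id < 0)
		return spg_id;	/* e.g. -EINVAL, -ENOSPC or -EBUSY */

	/* spg_id now lies in [SPG_ID_AUTO_MIN, SPG_ID_AUTO_MAX] */
	return spg_id;
}
#endif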

/**
 * mg_sp_group_del_task() - delete a process from a sp group.
 * @tgid: the tgid of the task to be deleted
 * @spg_id: sharepool group id
 *
 * the group's spa list must be empty, or deletion will fail.
 *
 * Return:
 * * if success, return 0.
 * * -EINVAL, spg_id invalid, spa_list not empty, or spg dead
 * * -ESRCH, the task group of tgid is not in group / process dead
 */
int mg_sp_group_del_task(int tgid, int spg_id)
{
	int ret = 0;
	struct sp_group *spg;
	struct sp_group_node *spg_node;
	struct task_struct *tsk = NULL;
	struct mm_struct *mm = NULL;
	bool is_alive = true;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	if (spg_id < SPG_ID_MIN || spg_id > SPG_ID_AUTO) {
		pr_err_ratelimited("del from group failed, invalid group id %d\n", spg_id);
		return -EINVAL;
	}

	spg = __sp_find_spg(tgid, spg_id);
	if (!spg) {
		pr_err_ratelimited("spg not found or get task failed.");
		return -EINVAL;
	}
	down_write(&sp_group_sem);

	if (!spg_valid(spg)) {
		up_write(&sp_group_sem);
		pr_err_ratelimited("spg dead.");
		ret = -EINVAL;
		goto out;
	}

	if (!list_empty(&spg->spa_list)) {
		up_write(&sp_group_sem);
		pr_err_ratelimited("spa is not empty");
		ret = -EINVAL;
		goto out;
	}

	ret = get_task(tgid, &tsk);
	if (ret) {
		up_write(&sp_group_sem);
		pr_err_ratelimited("task is not found");
		goto out;
	}
	mm = get_task_mm(tsk->group_leader);
	if (!mm) {
		up_write(&sp_group_sem);
		pr_err_ratelimited("mm is not found");
		ret = -ESRCH;
		goto out_put_task;
	}

	spg_node = find_spg_node_by_spg(mm, spg);
	if (!spg_node) {
		up_write(&sp_group_sem);
		pr_err_ratelimited("process not in group");
		ret = -ESRCH;
		goto out_put_mm;
	}

	down_write(&spg->rw_lock);
	if (list_is_singular(&spg->procs))
		is_alive = spg->is_alive = false;
	spg->proc_num--;
	list_del(&spg_node->proc_node);
	sp_group_drop(spg);
	up_write(&spg->rw_lock);
	if (!is_alive)
		blocking_notifier_call_chain(&sp_notifier_chain, 0, spg);

	list_del(&spg_node->group_node);
	mm->sp_group_master->count--;
	kfree(spg_node);
	atomic_dec(&mm->mm_users);

	up_write(&sp_group_sem);

out_put_mm:
	mmput(mm);
out_put_task:
	put_task_struct(tsk);
out:
	sp_group_drop(spg); /* if spg dead, freed here */
	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_group_del_task);
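
/*
 * Usage sketch for mg_sp_group_del_task() (illustrative only and kept out
 * of the build): leave a group joined via mg_sp_group_add_task(). The call
 * only succeeds once the group's spa_list is empty, i.e. after all shared
 * memory of the group has been freed.
 */
#if 0
static int example_leave_group(int tgid, int spg_id)
{
	return mg_sp_group_del_task(tgid, spg_id);	/* 0 on success */
}
#endif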

int mg_sp_id_of_current(void)
{
	int ret, spg_id;
	struct sp_group_master *master;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	if ((current->flags & PF_KTHREAD) || !current->mm)
		return -EINVAL;

	down_read(&sp_group_sem);
	master = current->mm->sp_group_master;
	if (master) {
		spg_id = master->local->id;
		up_read(&sp_group_sem);
		return spg_id;
	}
	up_read(&sp_group_sem);

	down_write(&sp_group_sem);
	ret = sp_init_group_master_locked(current, current->mm);
	if (ret) {
		up_write(&sp_group_sem);
		return ret;
	}
	master = current->mm->sp_group_master;
	spg_id = master->local->id;
	up_write(&sp_group_sem);

	return spg_id;
}
EXPORT_SYMBOL_GPL(mg_sp_id_of_current);
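
/*
 * Usage sketch (illustrative only and kept out of the build):
 * mg_sp_id_of_current() lazily creates the caller's local group on first
 * use, so it may be called before any explicit group operation.
 */
#if 0
static void example_local_group_id(void)
{
	int spg_id = mg_sp_id_of_current();

	if (spg_id >= 0)
		pr_info("local spg id is %d\n", spg_id);
}
#endif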

/* the caller must hold sp_area_lock */
static void insert_sp_area(struct sp_mapping *spm, struct sp_area *spa)
{
	struct rb_node **p = &spm->area_root.rb_node;
	struct rb_node *parent = NULL;

	while (*p) {
		struct sp_area *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct sp_area, rb_node);
		if (spa->va_start < tmp->va_end)
			p = &(*p)->rb_left;
		else if (spa->va_end > tmp->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&spa->rb_node, parent, p);
	rb_insert_color(&spa->rb_node, &spm->area_root);
}

/**
 * sp_alloc_area() - Allocate a region of VA from the share pool.
 * @size: the size of VA to allocate.
 * @flags: how to allocate the memory.
 * @spg: the share group that the memory is allocated to.
 * @type: the type of the region.
 * @applier: the tgid of the task which allocates the region.
 *
 * Return: a valid pointer for success, NULL on failure.
 */
static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags,
				     struct sp_group *spg, enum spa_type type,
				     pid_t applier)
{
	struct sp_area *spa, *first, *err;
	struct rb_node *n;
	unsigned long vstart;
	unsigned long vend;
	unsigned long addr;
	unsigned long size_align = ALIGN(size, PMD_SIZE); /* va aligned to 2M */
	int device_id, node_id;
	struct sp_mapping *mapping;

	device_id = sp_flags_device_id(flags);
	node_id = flags & SP_SPEC_NODE_ID ? sp_flags_node_id(flags) : device_id;

	if (!is_online_node_id(node_id)) {
		pr_err_ratelimited("invalid numa node id %d\n", node_id);
		return ERR_PTR(-EINVAL);
	}

	if (flags & SP_PROT_FOCUS) {
		if ((flags & (SP_DVPP | SP_PROT_RO)) != SP_PROT_RO) {
			pr_err("invalid sp_flags [%lx]\n", flags);
			return ERR_PTR(-EINVAL);
		}
		mapping = spg->mapping[SP_MAPPING_RO];
	} else if (flags & SP_DVPP) {
		mapping = spg->mapping[SP_MAPPING_DVPP];
	} else {
		mapping = spg->mapping[SP_MAPPING_NORMAL];
	}

	if (!mapping) {
		pr_err_ratelimited("non DVPP spg, id %d\n", spg->id);
		return ERR_PTR(-EINVAL);
	}

	vstart = mapping->start[device_id];
	vend = mapping->end[device_id];
	spa = __kmalloc_node(sizeof(struct sp_area), GFP_KERNEL, node_id);
	if (unlikely(!spa))
		return ERR_PTR(-ENOMEM);

	spin_lock(&sp_area_lock);

	/*
	 * Invalidate cache if we have more permissive parameters.
	 * cached_hole_size notes the largest hole noticed _below_
	 * the sp_area cached in free_area_cache: if size fits
	 * into that hole, we want to scan from vstart to reuse
	 * the hole instead of allocating above free_area_cache.
	 * Note that sp_free_area may update free_area_cache
	 * without updating cached_hole_size.
	 */
	if (!mapping->free_area_cache || size_align < mapping->cached_hole_size ||
	    vstart != mapping->cached_vstart) {
		mapping->cached_hole_size = 0;
		mapping->free_area_cache = NULL;
	}

	/* record if we encounter less permissive parameters */
	mapping->cached_vstart = vstart;

	/* find starting point for our search */
	if (mapping->free_area_cache) {
		first = rb_entry(mapping->free_area_cache, struct sp_area, rb_node);
		addr = first->va_end;
		if (addr + size_align < addr) {
			err = ERR_PTR(-EOVERFLOW);
			goto error;
		}
	} else {
		addr = vstart;
		if (addr + size_align < addr) {
			err = ERR_PTR(-EOVERFLOW);
			goto error;
		}

		n = mapping->area_root.rb_node;
		first = NULL;

		while (n) {
			struct sp_area *tmp;

			tmp = rb_entry(n, struct sp_area, rb_node);
			if (tmp->va_end >= addr) {
				first = tmp;
				if (tmp->va_start <= addr)
					break;
				n = n->rb_left;
			} else
				n = n->rb_right;
		}

		if (!first)
			goto found;
	}

	/* from the starting point, traverse areas until a suitable hole is found */
	while (addr + size_align > first->va_start && addr + size_align <= vend) {
		if (addr + mapping->cached_hole_size < first->va_start)
			mapping->cached_hole_size = first->va_start - addr;
		addr = first->va_end;
		if (addr + size_align < addr) {
			err = ERR_PTR(-EOVERFLOW);
			goto error;
		}

		n = rb_next(&first->rb_node);
		if (n)
			first = rb_entry(n, struct sp_area, rb_node);
		else
			goto found;
	}

found:
	if (addr + size_align > vend) {
		err = ERR_PTR(-EOVERFLOW);
		goto error;
	}

	spa->va_start = addr;
	spa->va_end = addr + size_align;
	spa->real_size = size;
	spa->region_vstart = vstart;
	spa->flags = flags;
	spa->is_hugepage = (flags & SP_HUGEPAGE);
	spa->is_dead = false;
	spa->spg = spg;
	atomic_set(&spa->use_count, 1);
	spa->type = type;
	spa->mm = NULL;
	spa->kva = 0;   /* NULL pointer */
	spa->applier = applier;
	spa->node_id = node_id;
	spa->device_id = device_id;

	spa_inc_usage(spa);
	insert_sp_area(mapping, spa);
	mapping->free_area_cache = &spa->rb_node;
	list_add_tail(&spa->link, &spg->spa_list);

	spin_unlock(&sp_area_lock);

	return spa;

error:
	spin_unlock(&sp_area_lock);
	kfree(spa);
	return err;
}

/* the caller should hold sp_area_lock */
static struct sp_area *find_sp_area_locked(struct sp_group *spg,
		unsigned long addr)
{
	struct sp_mapping *spm = sp_mapping_find(spg, addr);
	struct rb_node *n = spm->area_root.rb_node;

	while (n) {
		struct sp_area *spa;

		spa = rb_entry(n, struct sp_area, rb_node);
		if (addr < spa->va_start) {
			n = n->rb_left;
		} else if (addr > spa->va_start) {
			n = n->rb_right;
		} else {
			return spa;
		}
	}

	return NULL;
}

static struct sp_area *get_sp_area(struct sp_group *spg, unsigned long addr)
{
	struct sp_area *n;

	spin_lock(&sp_area_lock);
	n = find_sp_area_locked(spg, addr);
	if (n)
		atomic_inc(&n->use_count);
	spin_unlock(&sp_area_lock);
	return n;
}

static bool vmalloc_area_clr_flag(unsigned long kva, unsigned long flags)
{
	struct vm_struct *area;

	area = find_vm_area((void *)kva);
	if (area) {
		area->flags &= ~flags;
		return true;
	}

	return false;
}

/*
 * Return the VA region starting at addr back to the share pool
 */
static void sp_free_area(struct sp_area *spa)
{
	unsigned long addr = spa->va_start;
	struct sp_mapping *spm;

	lockdep_assert_held(&sp_area_lock);

	spm = sp_mapping_find(spa->spg, addr);
	if (spm->free_area_cache) {
		struct sp_area *cache;

		cache = rb_entry(spm->free_area_cache, struct sp_area, rb_node);
		if (spa->va_start <= cache->va_start) {
			spm->free_area_cache = rb_prev(&spa->rb_node);
			/*
			 * the new cache node may be changed to another region,
			 * i.e. from DVPP region to normal region
			 */
			if (spm->free_area_cache) {
				cache = rb_entry(spm->free_area_cache,
						 struct sp_area, rb_node);
				spm->cached_vstart = cache->region_vstart;
			}
			/*
			 * We don't try to update cached_hole_size,
			 * but it won't go very wrong.
			 */
		}
	}

	if (spa->kva && !vmalloc_area_clr_flag(spa->kva, VM_SHAREPOOL))
		pr_debug("clear spa->kva %ld is not valid\n", spa->kva);

	spa_dec_usage(spa);
	list_del(&spa->link);

	rb_erase(&spa->rb_node, &spm->area_root);
	RB_CLEAR_NODE(&spa->rb_node);
	kfree(spa);
}

static void __sp_area_drop_locked(struct sp_area *spa)
{
	/*
	 * Considering a situation where task A and B are in the same spg.
	 * A is exiting and calling remove_vma(). Before A calls this func,
	 * B calls sp_free() to free the same spa. So spa maybe NULL when A
	 * calls this func later.
	 */
	if (!spa)
		return;

	if (atomic_dec_and_test(&spa->use_count))
		sp_free_area(spa);
}

static void __sp_area_drop(struct sp_area *spa)
{
	spin_lock(&sp_area_lock);
	__sp_area_drop_locked(spa);
	spin_unlock(&sp_area_lock);
}

void sp_area_drop(struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARE_POOL))
		return;

	/*
	 * Considering a situation where task A and B are in the same spg.
	 * A is exiting and calling remove_vma() -> ... -> sp_area_drop().
	 * Concurrently, B is calling sp_free() to free the same spa.
	 * find_sp_area_locked() and __sp_area_drop_locked() should be
	 * an atomic operation.
	 */
	spin_lock(&sp_area_lock);
	__sp_area_drop_locked(vma->vm_private_data);
	spin_unlock(&sp_area_lock);
}

/*
 * The function calls of do_munmap() won't change any non-atomic member
 * of struct sp_group. Please review the following chain:
 * do_munmap -> remove_vma_list -> remove_vma -> sp_area_drop ->
 * __sp_area_drop_locked -> sp_free_area
 */
static void sp_munmap(struct mm_struct *mm, unsigned long addr,
			   unsigned long size)
{
	int err;

	down_write(&mm->mmap_lock);
	if (unlikely(mm->core_state)) {
		up_write(&mm->mmap_lock);
		pr_info("munmap: encoutered coredump\n");
		return;
	}

	err = do_munmap(mm, addr, size, NULL);
	/* we are not supposed to fail */
	if (err)
		pr_err("failed to unmap VA %pK when sp munmap\n", (void *)addr);

	up_write(&mm->mmap_lock);
}

static void __sp_free(struct sp_group *spg, unsigned long addr,
		      unsigned long size, struct mm_struct *stop)
{
	struct mm_struct *mm;
	struct sp_group_node *spg_node = NULL;

	list_for_each_entry(spg_node, &spg->procs, proc_node) {
		mm = spg_node->master->mm;
		if (mm == stop)
			break;
		sp_munmap(mm, addr, size);
	}
}

/* Free the memory of the backing shmem or hugetlbfs */
static void sp_fallocate(struct sp_area *spa)
{
	int ret;
	unsigned long mode = FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE;
	unsigned long offset = addr_offset(spa);

	ret = vfs_fallocate(spa_file(spa), mode, offset, spa_size(spa));
	if (ret)
		WARN(1, "sp fallocate failed %d\n", ret);
}

static void sp_free_unmap_fallocate(struct sp_area *spa)
{
	down_read(&spa->spg->rw_lock);
	__sp_free(spa->spg, spa->va_start, spa_size(spa), NULL);
	sp_fallocate(spa);
	up_read(&spa->spg->rw_lock);
}

static int sp_check_caller_permission(struct sp_group *spg, struct mm_struct *mm)
{
	int ret = 0;

	down_read(&spg->rw_lock);
	if (!is_process_in_group(spg, mm))
		ret = -EPERM;
	up_read(&spg->rw_lock);

	return ret;
}

#define FREE_CONT	1
#define FREE_END	2

struct sp_free_context {
	unsigned long addr;
	struct sp_area *spa;
	int state;
	int spg_id;
};

/* on success, the caller must later call __sp_area_drop(spa) */
static int sp_free_get_spa(struct sp_free_context *fc)
{
	int ret = 0;
	unsigned long addr = fc->addr;
	struct sp_area *spa;
	struct sp_group *spg;

	spg = __sp_find_spg(current->tgid, fc->spg_id);
	if (!spg) {
		pr_debug("sp free get group failed %d\n", fc->spg_id);
		return -EINVAL;
	}

	fc->state = FREE_CONT;

	spa = get_sp_area(spg, addr);
	sp_group_drop(spg);
	if (!spa) {
		pr_debug("sp free invalid input addr %lx\n", addr);
		return -EINVAL;
	}

	if (spa->type != SPA_TYPE_ALLOC) {
		ret = -EINVAL;
		pr_debug("sp free failed, %lx is not sp alloc addr\n", addr);
		goto drop_spa;
	}
	fc->spa = spa;

	if (!current->mm)
		goto check_spa;
	ret = sp_check_caller_permission(spa->spg, current->mm);
	if (ret < 0)
		goto drop_spa;

check_spa:
	if (is_local_group(spa->spg->id) && (current->tgid != spa->applier)) {
		ret = -EPERM;
		goto drop_spa;
	}
	down_write(&spa->spg->rw_lock);
	if (!spg_valid(spa->spg)) {
		fc->state = FREE_END;
		up_write(&spa->spg->rw_lock);
		/* we must return success(0) in this situation */
		goto drop_spa;
	}
	/* the life cycle of spa has a direct relation with sp group */
	if (unlikely(spa->is_dead)) {
		up_write(&spa->spg->rw_lock);
		pr_err_ratelimited("unexpected double sp free\n");
		dump_stack();
		ret = -EINVAL;
		goto drop_spa;
	}
	spa->is_dead = true;
	up_write(&spa->spg->rw_lock);

	return 0;

drop_spa:
	__sp_area_drop(spa);
	return ret;
}

/**
 * mg_sp_free() - Free the memory allocated by mg_sp_alloc().
 * @addr: the starting VA of the memory.
 * @id: Address space identifier, which is used to distinguish the addr.
 *
 * Return:
 * * 0		- success.
 * * -EINVAL	- the memory can't be found or was not allocated by share pool.
 * * -EPERM	- the caller has no permission to free the memory.
 */
int mg_sp_free(unsigned long addr, int id)
{
	int ret = 0;
	struct sp_free_context fc = {
		.addr = addr,
		.spg_id = id,
	};

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	if (current->flags & PF_KTHREAD)
		return -EINVAL;

	ret = sp_free_get_spa(&fc);
	if (ret || fc.state == FREE_END)
		goto out;

	sp_free_unmap_fallocate(fc.spa);

	if (current->mm == NULL)
		atomic64_sub(fc.spa->real_size, &kthread_stat.alloc_size);
	else
		sp_update_process_stat(current, false, fc.spa);

	__sp_area_drop(fc.spa);  /* match get_sp_area in sp_free_get_spa */

out:
	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_free);

/* wrapper of __do_mmap() and the caller must hold down_write(&mm->mmap_lock). */
static unsigned long sp_mmap(struct mm_struct *mm, struct file *file,
			     struct sp_area *spa, unsigned long *populate,
			     unsigned long prot, struct vm_area_struct **pvma)
{
	unsigned long addr = spa->va_start;
	unsigned long size = spa_size(spa);
	unsigned long flags = MAP_FIXED | MAP_SHARED | MAP_POPULATE |
			      MAP_SHARE_POOL;
	unsigned long vm_flags = VM_NORESERVE | VM_SHARE_POOL | VM_DONTCOPY;
	unsigned long pgoff = addr_offset(spa) >> PAGE_SHIFT;
	struct vm_area_struct *vma;

	atomic_inc(&spa->use_count);
	addr = __do_mmap_mm(mm, file, addr, size, prot, flags, vm_flags, pgoff,
			 populate, NULL);
	if (IS_ERR_VALUE(addr)) {
		atomic_dec(&spa->use_count);
		pr_err("do_mmap fails %ld\n", addr);
	} else {
		BUG_ON(addr != spa->va_start);
		vma = find_vma(mm, addr);
		vma->vm_private_data = spa;
		if (pvma)
			*pvma = vma;
	}

	return addr;
}

#define ALLOC_NORMAL	1
#define ALLOC_RETRY	2
#define ALLOC_NOMEM	3
#define ALLOC_COREDUMP	4

struct sp_alloc_context {
	struct sp_group *spg;
	struct file *file;
	unsigned long size;
	unsigned long size_aligned;
	unsigned long sp_flags;
	unsigned long populate;
	int state;
	bool need_fallocate;
	bool have_mbind;
	enum spa_type type;
};

static int sp_alloc_prepare(unsigned long size, unsigned long sp_flags,
	int spg_id, struct sp_alloc_context *ac)
{
	struct sp_group *spg;

	check_interrupt_context();

	if (current->flags & PF_KTHREAD) {
		pr_err_ratelimited("allocation failed, task is kthread\n");
		return -EINVAL;
	}

	if (unlikely(!size || (size >> PAGE_SHIFT) > totalram_pages())) {
		pr_err_ratelimited("allocation failed, invalid size %lu\n", size);
		return -EINVAL;
	}

	if (spg_id != SPG_ID_DEFAULT && (spg_id < SPG_ID_MIN || spg_id >= SPG_ID_AUTO)) {
		pr_err_ratelimited("allocation failed, invalid group id %d\n", spg_id);
		return -EINVAL;
	}

	if (sp_flags & (~SP_FLAG_MASK)) {
		pr_err_ratelimited("allocation failed, invalid flag %lx\n", sp_flags);
		return -EINVAL;
	}

	if (sp_flags & SP_HUGEPAGE_ONLY)
		sp_flags |= SP_HUGEPAGE;

	if (spg_id != SPG_ID_DEFAULT) {
		spg = __sp_find_spg(current->tgid, spg_id);
		if (!spg) {
			pr_err_ratelimited("allocation failed, can't find group\n");
			return -ENODEV;
		}

		/* up_read will be at the end of sp_alloc */
		down_read(&spg->rw_lock);
		if (!spg_valid(spg)) {
			up_read(&spg->rw_lock);
			sp_group_drop(spg);
			pr_err_ratelimited("allocation failed, spg is dead\n");
			return -ENODEV;
		}

		if (!is_process_in_group(spg, current->mm)) {
			up_read(&spg->rw_lock);
			sp_group_drop(spg);
			pr_err_ratelimited("allocation failed, task not in group\n");
			return -ENODEV;
		}
		ac->type = SPA_TYPE_ALLOC;
	} else {  /* allocation pass-through scenario */
		spg = sp_get_local_group(current, current->mm);
		if (IS_ERR(spg))
			return PTR_ERR(spg);
		down_read(&spg->rw_lock);
		ac->type = SPA_TYPE_ALLOC_PRIVATE;
	}

	if (sp_flags & SP_HUGEPAGE) {
		ac->file = spg->file_hugetlb;
		ac->size_aligned = ALIGN(size, PMD_SIZE);
	} else {
		ac->file = spg->file;
		ac->size_aligned = ALIGN(size, PAGE_SIZE);
	}

	ac->spg = spg;
	ac->size = size;
	ac->sp_flags = sp_flags;
	ac->state = ALLOC_NORMAL;
	ac->need_fallocate = false;
	ac->have_mbind = false;
	return 0;
}

static void sp_alloc_unmap(struct mm_struct *mm, struct sp_area *spa,
	struct sp_group_node *spg_node)
{
	__sp_free(spa->spg, spa->va_start, spa->real_size, mm);
}

static int sp_alloc_mmap(struct mm_struct *mm, struct sp_area *spa,
	struct sp_group_node *spg_node, struct sp_alloc_context *ac)
{
	int ret = 0;
	unsigned long mmap_addr;
	/* pass through default permission */
	unsigned long prot = PROT_READ | PROT_WRITE;
	unsigned long populate = 0;
	struct vm_area_struct *vma;

	down_write(&mm->mmap_lock);
	if (unlikely(mm->core_state)) {
		up_write(&mm->mmap_lock);
		ac->state = ALLOC_COREDUMP;
		pr_info("allocation encountered coredump\n");
		return -EFAULT;
	}

	if (spg_node)
		prot = spg_node->prot;

	if (ac->sp_flags & SP_PROT_RO)
		prot = PROT_READ;

	/* when success, mmap_addr == spa->va_start */
	mmap_addr = sp_mmap(mm, spa_file(spa), spa, &populate, prot, &vma);
	if (IS_ERR_VALUE(mmap_addr)) {
		up_write(&mm->mmap_lock);
		sp_alloc_unmap(mm, spa, spg_node);
		pr_err("sp mmap in allocation failed %ld\n", mmap_addr);
		return PTR_ERR((void *)mmap_addr);
	}

	if (unlikely(populate == 0)) {
		up_write(&mm->mmap_lock);
		pr_err("allocation sp mmap populate failed\n");
		ret = -EFAULT;
		goto unmap;
	}
	ac->populate = populate;

	if (ac->sp_flags & SP_PROT_RO)
		vma->vm_flags &= ~VM_MAYWRITE;

	/* clean PTE_RDONLY flags or trigger SMMU event */
	if (prot & PROT_WRITE)
		vma->vm_page_prot = __pgprot(((~PTE_RDONLY) & vma->vm_page_prot.pgprot) | PTE_DIRTY);
	up_write(&mm->mmap_lock);

	return ret;

unmap:
	sp_alloc_unmap(list_next_entry(spg_node, proc_node)->master->mm, spa, spg_node);
	return ret;
}

static void sp_alloc_fallback(struct sp_area *spa, struct sp_alloc_context *ac)
{
	if (ac->file == ac->spg->file) {
		ac->state = ALLOC_NOMEM;
		return;
	}

	atomic_inc(&ac->spg->instat.hugepage_failures);
	if (!(ac->sp_flags & SP_HUGEPAGE_ONLY)) {
		ac->file = ac->spg->file;
		ac->size_aligned = ALIGN(ac->size, PAGE_SIZE);
		ac->sp_flags &= ~SP_HUGEPAGE;
		ac->state = ALLOC_RETRY;
		__sp_area_drop(spa);
		return;
	}
	ac->state = ALLOC_NOMEM;
}

static int sp_alloc_populate(struct mm_struct *mm, struct sp_area *spa,
			     struct sp_alloc_context *ac)
{
	/*
	 * We are not ignoring errors, so if we fail to allocate
	 * physical memory we just return failure, so we won't encounter
	 * page fault later on, and more importantly sp_make_share_u2k()
	 * depends on this feature (and MAP_LOCKED) to work correctly.
	 */
	return do_mm_populate(mm, spa->va_start, ac->populate, 0);
}

static long sp_mbind(struct mm_struct *mm, unsigned long start, unsigned long len,
		unsigned long node)
{
	nodemask_t nmask;

	nodes_clear(nmask);
	node_set(node, nmask);
	return __do_mbind(start, len, MPOL_BIND, MPOL_F_STATIC_NODES,
			&nmask, MPOL_MF_STRICT, mm);
}

static int __sp_alloc_mmap_populate(struct mm_struct *mm, struct sp_area *spa,
	struct sp_group_node *spg_node, struct sp_alloc_context *ac)
{
	int ret;

	ret = sp_alloc_mmap(mm, spa, spg_node, ac);
	if (ret < 0)
		return ret;

	if (!ac->have_mbind) {
		ret = sp_mbind(mm, spa->va_start, spa->real_size, spa->node_id);
		if (ret < 0) {
			pr_err("cannot bind the memory range to specified node:%d, err:%d\n",
				spa->node_id, ret);
			goto err;
		}
		ac->have_mbind = true;
	}

	ret = sp_alloc_populate(mm, spa, ac);
	if (ret) {
err:
		if (unlikely(fatal_signal_pending(current)))
			pr_warn_ratelimited("allocation failed, current thread is killed\n");
		else
			pr_warn_ratelimited("allocation failed due to mm populate failed(potential no enough memory when -12): %d\n",
2500
					ret);
2501
	} else {
2502
		ac->need_fallocate = true;
2503
	}
W
Wang Wensheng 已提交
2504 2505 2506 2507 2508 2509
	return ret;
}

static int sp_alloc_mmap_populate(struct sp_area *spa,
				  struct sp_alloc_context *ac)
{
	int ret = -EINVAL;
	int mmap_ret = 0;
	struct mm_struct *mm, *end_mm = NULL;
	struct sp_group_node *spg_node;

	/* create mapping for each process in the group */
	list_for_each_entry(spg_node, &spa->spg->procs, proc_node) {
		mm = spg_node->master->mm;
		mmap_ret = __sp_alloc_mmap_populate(mm, spa, spg_node, ac);
		if (mmap_ret) {
			if (ac->state != ALLOC_COREDUMP)
				goto unmap;
			ac->state = ALLOC_NORMAL;
			continue;
		}
		ret = mmap_ret;
	}

	return ret;

unmap:
	/* use the next mm in proc list as end mark */
	if (!list_is_last(&spg_node->proc_node, &spa->spg->procs))
		end_mm = list_next_entry(spg_node, proc_node)->master->mm;
	sp_alloc_unmap(end_mm, spa, spg_node);

	/* only fallocate spa if physical memory had been allocated */
	if (ac->need_fallocate) {
		sp_fallocate(spa);
		ac->need_fallocate = false;
	}

	/* if hugepage allocation fails, fall back to normal pages and
	 * try again (only if SP_HUGEPAGE_ONLY is not flagged)
	 */
	sp_alloc_fallback(spa, ac);

	return mmap_ret;
}

/* spa may be an error pointer, so introduce variable spg */
static void sp_alloc_finish(int result, struct sp_area *spa,
		struct sp_alloc_context *ac)
{
	struct sp_group *spg = ac->spg;

	/* match sp_alloc_prepare */
	up_read(&spg->rw_lock);

	if (!result)
		sp_update_process_stat(current, true, spa);

	/* this will free spa if mmap failed */
	if (spa && !IS_ERR(spa))
		__sp_area_drop(spa);

	sp_group_drop(spg);
}

/**
 * mg_sp_alloc() - Allocate shared memory for all the processes in a sp_group.
 * @size: the size of memory to allocate.
 * @sp_flags: how to allocate the memory.
 * @spg_id: the share group that the memory is allocated to.
 *
 * Use pass-through allocation if spg_id == SPG_ID_DEFAULT in multi-group mode.
 *
 * Return:
 * * on success, return the starting address of the shared memory.
 * * on failure, return an ERR_PTR-encoded -errno.
 */
void *mg_sp_alloc(unsigned long size, unsigned long sp_flags, int spg_id)
{
	struct sp_area *spa = NULL;
	int ret = 0;
	struct sp_alloc_context ac;

	if (!sp_is_enabled())
		return ERR_PTR(-EOPNOTSUPP);

	ret = sp_alloc_prepare(size, sp_flags, spg_id, &ac);
	if (ret)
		return ERR_PTR(ret);

try_again:
	spa = sp_alloc_area(ac.size_aligned, ac.sp_flags, ac.spg,
			    ac.type, current->tgid);
	if (IS_ERR(spa)) {
		pr_err_ratelimited("alloc spa failed in allocation (likely out of virtual memory when -75): %ld\n",
			PTR_ERR(spa));
		ret = PTR_ERR(spa);
		goto out;
	}

	ret = sp_alloc_mmap_populate(spa, &ac);
	if (ret && ac.state == ALLOC_RETRY) {
		/*
		 * The mempolicy for shared memory is attached to the backing file,
		 * which differs between normal pages and huge pages. So we must
		 * set the mbind policy again when we retry using normal pages.
		 */
		ac.have_mbind = false;
		goto try_again;
	}

out:
	sp_alloc_finish(ret, spa, &ac);
	if (ret)
		return ERR_PTR(ret);
	else
		return (void *)(spa->va_start);
}
EXPORT_SYMBOL_GPL(mg_sp_alloc);
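
/*
 * Usage sketch for the allocator above (hypothetical driver code, not part
 * of this file). It assumes the calling process has already joined share
 * group spg_id (e.g. via mg_sp_group_add_task()) and pairs mg_sp_alloc()
 * with mg_sp_free():
 *
 *	void *buf = mg_sp_alloc(SZ_2M, SP_HUGEPAGE, spg_id);
 *
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	// ... the buffer is now mapped in every process of the group ...
 *	mg_sp_free((unsigned long)buf, spg_id);
 */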

/**
 * is_vmap_hugepage() - Check if a kernel address belongs to vmalloc family.
 * @addr: the kernel space address to be checked.
 *
 * Return:
 * * >0		- a vmalloc hugepage addr.
 * * =0		- a normal vmalloc addr.
 * * -errno	- failure.
 */
static int is_vmap_hugepage(unsigned long addr)
{
	struct vm_struct *area;

	if (unlikely(!addr)) {
		pr_err_ratelimited("null vmap addr pointer\n");
		return -EINVAL;
	}

	area = find_vm_area((void *)addr);
	if (unlikely(!area)) {
		pr_debug("can't find vm area(%lx)\n", addr);
		return -EINVAL;
	}

	if (area->flags & VM_HUGE_PAGES)
		return 1;
	else
		return 0;
}

static unsigned long __sp_remap_get_pfn(unsigned long kva)
{
	unsigned long pfn = -EINVAL;

	/* sp_make_share_k2u only supports vmalloc addresses */
	if (is_vmalloc_addr((void *)kva))
		pfn = vmalloc_to_pfn((void *)kva);

	return pfn;
}

/* when called by k2u-to-group, the caller must hold spg->rw_lock */
static unsigned long sp_remap_kva_to_vma(unsigned long kva, struct sp_area *spa,
					 struct mm_struct *mm, unsigned long prot, struct sp_k2u_context *kc)
{
	struct vm_area_struct *vma;
	unsigned long ret_addr;
	unsigned long populate = 0;
	int ret = 0;
	unsigned long addr, buf, offset;

	down_write(&mm->mmap_lock);
	if (unlikely(mm->core_state)) {
		pr_err("k2u mmap: encountered coredump, abort\n");
		ret_addr = -EBUSY;
		if (kc)
			kc->state = K2U_COREDUMP;
		goto put_mm;
	}

	if (kc && (kc->sp_flags & SP_PROT_RO))
		prot = PROT_READ;

	ret_addr = sp_mmap(mm, spa_file(spa), spa, &populate, prot, &vma);
	if (IS_ERR_VALUE(ret_addr)) {
		pr_debug("k2u mmap failed %lx\n", ret_addr);
		goto put_mm;
	}

	if (prot & PROT_WRITE)
		vma->vm_page_prot = __pgprot(((~PTE_RDONLY) & vma->vm_page_prot.pgprot) | PTE_DIRTY);

	if (kc && (kc->sp_flags & SP_PROT_RO))
		vma->vm_flags &= ~VM_MAYWRITE;

	if (is_vm_hugetlb_page(vma)) {
		ret = remap_vmalloc_hugepage_range(vma, (void *)kva, 0);
		if (ret) {
			do_munmap(mm, ret_addr, spa_size(spa), NULL);
			pr_debug("remap vmalloc hugepage failed, ret %d, kva is %lx\n",
				 ret, (unsigned long)kva);
			ret_addr = ret;
			goto put_mm;
		}
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	} else {
		buf = ret_addr;
		addr = kva;
		offset = 0;
		do {
			ret = remap_pfn_range(vma, buf, __sp_remap_get_pfn(addr), PAGE_SIZE,
					__pgprot(vma->vm_page_prot.pgprot));
			if (ret) {
				do_munmap(mm, ret_addr, spa_size(spa), NULL);
				pr_err("remap_pfn_range failed %d\n", ret);
				ret_addr = ret;
				goto put_mm;
			}
			offset += PAGE_SIZE;
			buf += PAGE_SIZE;
			addr += PAGE_SIZE;
		} while (offset < spa_size(spa));
	}

put_mm:
	up_write(&mm->mmap_lock);

	return ret_addr;
}

/**
 * sp_make_share_kva_to_task() - Share kernel memory to current task.
 * @kva: the VA of shared kernel memory
 * @size: the size of area to share, should be aligned properly
 * @sp_flags: the flags for the operation
 *
 * Return:
 * * on success, return the shared user address to start at.
 * * on failure, return an ERR_PTR-encoded -errno.
 */
static void *sp_make_share_kva_to_task(unsigned long kva, unsigned long size, unsigned long sp_flags)
{
	int ret;
	void *uva;
	struct sp_area *spa;
	struct sp_group_node *spg_node;
	unsigned long prot = PROT_READ | PROT_WRITE;
	struct sp_k2u_context kc;
	struct sp_group *spg;

	down_write(&sp_group_sem);
	ret = sp_init_group_master_locked(current, current->mm);
	if (ret) {
		up_write(&sp_group_sem);
		pr_err_ratelimited("k2u_task init local mapping failed %d\n", ret);
		return ERR_PTR(ret);
	}

	spg = current->mm->sp_group_master->local;
	up_write(&sp_group_sem);

	spa = sp_alloc_area(size, sp_flags, spg, SPA_TYPE_K2TASK, current->tgid);
	if (IS_ERR(spa)) {
		pr_err_ratelimited("alloc spa failed in k2u_task (likely out of virtual memory when -75): %ld\n",
				PTR_ERR(spa));
		return spa;
	}

	spa->kva = kva;
	kc.sp_flags = sp_flags;
	uva = (void *)sp_remap_kva_to_vma(kva, spa, current->mm, prot, &kc);
	if (IS_ERR(uva))
		pr_err("remap k2u to task failed %ld\n", PTR_ERR(uva));
	else {
		spg_node = find_spg_node_by_spg(current->mm, spa->spg);
		update_mem_usage(size, true, spa->is_hugepage, spg_node, SPA_TYPE_K2TASK);
		spa->mm = current->mm;
	}
	__sp_area_drop(spa);

	return uva;
}

/**
 * Share kernel memory to a spg, the current process must be in that group
 * @kva: the VA of shared kernel memory
 * @size: the size of area to share, should be aligned properly
 * @sp_flags: the flags for the operation
 * @spg: the sp group to be shared with
 *
 * Return: the shared user address to start at
 */
static void *sp_make_share_kva_to_spg(unsigned long kva, unsigned long size,
				      unsigned long sp_flags, struct sp_group *spg)
{
	struct sp_area *spa;
	struct mm_struct *mm;
	struct sp_group_node *spg_node;
	void *uva = ERR_PTR(-ENODEV);
	struct sp_k2u_context kc;
	unsigned long ret_addr = -ENODEV;

	down_read(&spg->rw_lock);
	spa = sp_alloc_area(size, sp_flags, spg, SPA_TYPE_K2SPG, current->tgid);
	if (IS_ERR(spa)) {
		up_read(&spg->rw_lock);
		pr_err_ratelimited("alloc spa failed in k2u_spg (potential no enough virtual memory when -75): %ld\n",
				PTR_ERR(spa));
		return spa;
	}

	spa->kva = kva;
	kc.sp_flags = sp_flags;
	list_for_each_entry(spg_node, &spg->procs, proc_node) {
		mm = spg_node->master->mm;
		kc.state = K2U_NORMAL;
		ret_addr = sp_remap_kva_to_vma(kva, spa, mm, spg_node->prot, &kc);
		if (IS_ERR_VALUE(ret_addr)) {
			if (kc.state == K2U_COREDUMP)
				continue;
			uva = (void *)ret_addr;
			pr_err("remap k2u to spg failed %ld\n", PTR_ERR(uva));
			__sp_free(spg, spa->va_start, spa_size(spa), mm);
			goto out;
		}
		uva = (void *)ret_addr;
	}

out:
	up_read(&spg->rw_lock);
	if (!IS_ERR(uva))
		sp_update_process_stat(current, true, spa);
	__sp_area_drop(spa);

	return uva;
}

static bool vmalloc_area_set_flag(unsigned long kva, unsigned long flags)
{
	struct vm_struct *area;

	area = find_vm_area((void *)kva);
	if (area) {
		area->flags |= flags;
		return true;
	}

	return false;
}

static int sp_k2u_prepare(unsigned long kva, unsigned long size,
	unsigned long sp_flags, int spg_id, struct sp_k2u_context *kc)
{
	int is_hugepage;
	unsigned int page_size = PAGE_SIZE;
	unsigned long kva_aligned, size_aligned;

	if (!size) {
		pr_err_ratelimited("k2u input size is 0.\n");
		return -EINVAL;
	}

	if (sp_flags & ~SP_FLAG_MASK) {
		pr_err_ratelimited("k2u sp_flags %lx error\n", sp_flags);
		return -EINVAL;
	}
	sp_flags &= ~SP_HUGEPAGE;

	if (!current->mm) {
		pr_err_ratelimited("k2u: kthread is not allowed\n");
		return -EPERM;
	}

	is_hugepage = is_vmap_hugepage(kva);
	if (is_hugepage > 0) {
		sp_flags |= SP_HUGEPAGE;
		page_size = PMD_SIZE;
	} else if (is_hugepage == 0) {
		/* do nothing */
	} else {
		pr_err_ratelimited("k2u kva is not vmalloc address\n");
		return is_hugepage;
	}

	/* aligned down kva is convenient for caller to start with any valid kva */
	kva_aligned = ALIGN_DOWN(kva, page_size);
	size_aligned = ALIGN(kva + size, page_size) - kva_aligned;

	if (!vmalloc_area_set_flag(kva_aligned, VM_SHAREPOOL)) {
		pr_debug("k2u_task kva %lx is not valid\n", kva_aligned);
		return -EINVAL;
	}

	kc->kva = kva;
	kc->kva_aligned = kva_aligned;
	kc->size = size;
	kc->size_aligned = size_aligned;
	kc->sp_flags = sp_flags;
	kc->spg_id = spg_id;
	if (spg_id == SPG_ID_DEFAULT || spg_id == SPG_ID_NONE)
		kc->to_task = true;
	else
		kc->to_task = false;

	return 0;
}

static void *sp_k2u_finish(void *uva, struct sp_k2u_context *kc)
{
	if (IS_ERR(uva))
		vmalloc_area_clr_flag(kc->kva_aligned, VM_SHAREPOOL);
	else
		uva = uva + (kc->kva - kc->kva_aligned);

	return uva;
}

/**
 * mg_sp_make_share_k2u() - Share kernel memory to current process or an sp_group.
 * @kva: the VA of shared kernel memory.
 * @size: the size of shared kernel memory.
 * @sp_flags: how to allocate the memory. We only support SP_DVPP.
 * @tgid:  the tgid of the specified process (Not currently in use).
 * @spg_id: the share group that the memory is shared to.
 *
 * Share kernel memory to current task if spg_id == SPG_ID_NONE
 * or SPG_ID_DEFAULT in multi-group mode.
 *
 * Return:
 * * on success, return the shared user address to start at.
 * * on failure, return an ERR_PTR-encoded -errno.
 */
void *mg_sp_make_share_k2u(unsigned long kva, unsigned long size,
			unsigned long sp_flags, int tgid, int spg_id)
{
	void *uva;
	int ret;
	struct sp_k2u_context kc;

	if (!sp_is_enabled())
		return ERR_PTR(-EOPNOTSUPP);

	check_interrupt_context();

	ret = sp_k2u_prepare(kva, size, sp_flags, spg_id, &kc);
	if (ret)
		return ERR_PTR(ret);

	if (kc.to_task) {
		uva = sp_make_share_kva_to_task(kc.kva_aligned, kc.size_aligned, kc.sp_flags);
	} else {
		struct sp_group *spg;

		spg = __sp_find_spg(current->tgid, kc.spg_id);
		if (spg) {
			ret = sp_check_caller_permission(spg, current->mm);
			if (ret < 0) {
				sp_group_drop(spg);
				uva = ERR_PTR(ret);
				goto out;
			}
			uva = sp_make_share_kva_to_spg(kc.kva_aligned, kc.size_aligned, kc.sp_flags, spg);
			sp_group_drop(spg);
		} else {
			uva = ERR_PTR(-ENODEV);
		}
	}

out:
	return sp_k2u_finish(uva, &kc);
}
EXPORT_SYMBOL_GPL(mg_sp_make_share_k2u);
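
/*
 * Usage sketch for k2u (hypothetical caller, not part of this file): share a
 * vmalloc'ed buffer with the current task. SPG_ID_NONE selects the to-task
 * path in sp_k2u_prepare(); the mapping is torn down again with
 * mg_sp_unshare() (see the sketch after that function below).
 *
 *	void *kva = vmalloc(SZ_1M);
 *	void *uva;
 *
 *	if (!kva)
 *		return -ENOMEM;
 *	uva = mg_sp_make_share_k2u((unsigned long)kva, SZ_1M, 0, 0, SPG_ID_NONE);
 *	if (IS_ERR(uva))
 *		return PTR_ERR(uva);
 *	// ... hand uva to userspace ...
 */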

static int sp_pmd_entry(pmd_t *pmd, unsigned long addr,
			unsigned long next, struct mm_walk *walk)
{
	struct page *page;
	struct sp_walk_data *sp_walk_data = walk->private;

	/*
	 * There exists a scenario in DVPP where the page table entry is a huge
	 * page but its vma doesn't record it, similar to THP.
	 * So we cannot tell whether it is a hugepage mapping until we access
	 * the pmd here. If mixed sizes of pages appear, just return an error.
	 */
	if (pmd_huge(*pmd)) {
		if (!sp_walk_data->is_page_type_set) {
			sp_walk_data->is_page_type_set = true;
			sp_walk_data->is_hugepage = true;
		} else if (!sp_walk_data->is_hugepage) {
			return -EFAULT;
		}

		/* To skip pte level walk */
		walk->action = ACTION_CONTINUE;

		page = pmd_page(*pmd);
		get_page(page);
		sp_walk_data->pages[sp_walk_data->page_count++] = page;

		return 0;
	}

	if (!sp_walk_data->is_page_type_set) {
		sp_walk_data->is_page_type_set = true;
		sp_walk_data->is_hugepage = false;
	} else if (sp_walk_data->is_hugepage)
		return -EFAULT;

	sp_walk_data->pmd = pmd;

	return 0;
}

static int sp_pte_entry(pte_t *pte, unsigned long addr,
			unsigned long next, struct mm_walk *walk)
{
	struct page *page;
	struct sp_walk_data *sp_walk_data = walk->private;
	pmd_t *pmd = sp_walk_data->pmd;

retry:
	if (unlikely(!pte_present(*pte))) {
		swp_entry_t entry;

		if (pte_none(*pte))
			goto no_page;
		entry = pte_to_swp_entry(*pte);
		if (!is_migration_entry(entry))
			goto no_page;
		migration_entry_wait(walk->mm, pmd, addr);
		goto retry;
	}

	page = pte_page(*pte);
	get_page(page);
	sp_walk_data->pages[sp_walk_data->page_count++] = page;
	return 0;

no_page:
	pr_debug("the page of addr %lx unexpectedly not in RAM\n",
		 (unsigned long)addr);
	return -EFAULT;
}

static int sp_test_walk(unsigned long addr, unsigned long next,
			struct mm_walk *walk)
{
	/*
	 * FIXME: The devmm driver uses remap_pfn_range() but actually there
	 * are associated struct pages, so they should use vm_map_pages() or
	 * similar APIs. Before the driver has been converted to correct APIs
	 * we use this test_walk() callback so we can treat VM_PFNMAP VMAs as
	 * normal VMAs.
	 */
	return 0;
}

static int sp_pte_hole(unsigned long start, unsigned long end,
		       int depth, struct mm_walk *walk)
{
	pr_debug("hole [%lx, %lx) appeared unexpectedly\n", (unsigned long)start, (unsigned long)end);
	return -EFAULT;
}

static int sp_hugetlb_entry(pte_t *ptep, unsigned long hmask,
			    unsigned long addr, unsigned long next,
			    struct mm_walk *walk)
{
	pte_t pte = huge_ptep_get(ptep);
	struct page *page = pte_page(pte);
	struct sp_walk_data *sp_walk_data;

	if (unlikely(!pte_present(pte))) {
		pr_debug("the page of addr %lx unexpectedly not in RAM\n", (unsigned long)addr);
		return -EFAULT;
	}

	sp_walk_data = walk->private;
	get_page(page);
	sp_walk_data->pages[sp_walk_data->page_count++] = page;
	return 0;
}

/*
 * __sp_walk_page_range() - Walk page table with caller specific callbacks.
 * @uva: the start VA of user memory.
 * @size: the size of user memory.
 * @mm: mm struct of the target task.
 * @sp_walk_data: a structure of a page pointer array.
 *
 * the caller must hold mm->mmap_lock
 *
 * Notes for parameter alignment:
 * When size == 0, let it be page_size, so that at least one page is walked.
 *
 * When size > 0, for convenience, usually the parameters of uva and
 * size are not page aligned. There are four different alignment scenarios and
 * we must handle all of them correctly.
 *
 * The basic idea is to align down uva and align up size so all the pages
 * in range [uva, uva + size) are walked. However, there are special cases.
 *
 * Considering a 2M-hugepage addr scenario. Assuming the caller wants to
 * traverse range [1001M, 1004.5M), so uva and size is 1001M and 3.5M
 * accordingly. The aligned-down uva is 1000M and the aligned-up size is 4M.
 * The traverse range will be [1000M, 1004M). Obviously, the final page for
 * [1004M, 1004.5M) is not covered.
 *
 * To fix this problem, we need to walk an additional page, size should be
 * ALIGN(uva+size) - uva_aligned
 */
static int __sp_walk_page_range(unsigned long uva, unsigned long size,
	struct mm_struct *mm, struct sp_walk_data *sp_walk_data)
{
	int ret = 0;
	struct vm_area_struct *vma;
	unsigned long page_nr;
	struct page **pages = NULL;
	bool is_hugepage = false;
	unsigned long uva_aligned;
	unsigned long size_aligned;
	unsigned int page_size = PAGE_SIZE;
	struct mm_walk_ops sp_walk = {};

	/*
	 * Here we also support non share pool memory in this interface
	 * because the caller can't distinguish whether a uva is from the
	 * share pool or not. It is not the best idea to do so, but currently
	 * it simplifies overall design.
	 *
	 * In this situation, the correctness of the parameters is mainly
	 * guaranteed by the caller.
	 */
	vma = find_vma(mm, uva);
	if (!vma) {
		pr_debug("u2k input uva %lx is invalid\n", (unsigned long)uva);
		return -EINVAL;
	}
	if (is_vm_hugetlb_page(vma))
		is_hugepage = true;

	sp_walk.pte_hole = sp_pte_hole;
	sp_walk.test_walk = sp_test_walk;
	if (is_hugepage) {
		sp_walk_data->is_hugepage = true;
		sp_walk.hugetlb_entry = sp_hugetlb_entry;
		page_size = PMD_SIZE;
	} else {
		sp_walk_data->is_hugepage = false;
		sp_walk.pte_entry = sp_pte_entry;
		sp_walk.pmd_entry = sp_pmd_entry;
	}

	sp_walk_data->is_page_type_set = false;
	sp_walk_data->page_count = 0;
	sp_walk_data->page_size = page_size;
	uva_aligned = ALIGN_DOWN(uva, page_size);
	sp_walk_data->uva_aligned = uva_aligned;
	if (size == 0)
		size_aligned = page_size;
	else
		/* special alignment handling */
		size_aligned = ALIGN(uva + size, page_size) - uva_aligned;

	if (uva_aligned + size_aligned < uva_aligned) {
		pr_err_ratelimited("overflow happened in walk page range\n");
		return -EINVAL;
	}

	page_nr = size_aligned / page_size;
	pages = kvmalloc(page_nr * sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		pr_err_ratelimited("alloc page array failed in walk page range\n");
		return -ENOMEM;
	}
	sp_walk_data->pages = pages;

	ret = walk_page_range(mm, uva_aligned, uva_aligned + size_aligned,
			      &sp_walk, sp_walk_data);
	if (ret) {
		while (sp_walk_data->page_count--)
			put_page(pages[sp_walk_data->page_count]);
		kvfree(pages);
		sp_walk_data->pages = NULL;
	}

	if (sp_walk_data->is_hugepage)
		sp_walk_data->uva_aligned = ALIGN_DOWN(uva, PMD_SIZE);

	return ret;
}

static void __sp_walk_page_free(struct sp_walk_data *data)
{
	int i = 0;
	struct page *page;

	while (i < data->page_count) {
		page = data->pages[i++];
		put_page(page);
	}

	kvfree(data->pages);
	/* prevent repeated release */
	data->page_count = 0;
	data->pages = NULL;
}

/**
 * mg_sp_make_share_u2k() - Share user memory of a specified process to kernel.
 * @uva: the VA of shared user memory
 * @size: the size of shared user memory
 * @tgid: the tgid of the specified process (Not currently in use)
 *
 * Return:
 * * on success, return the starting kernel address of the shared memory.
 * * on failure, return an ERR_PTR-encoded -errno.
 */
void *mg_sp_make_share_u2k(unsigned long uva, unsigned long size, int tgid)
{
	int ret = 0;
	struct mm_struct *mm = current->mm;
	void *p = ERR_PTR(-ESRCH);
	struct sp_walk_data sp_walk_data;
	struct vm_struct *area;

	if (!sp_is_enabled())
		return ERR_PTR(-EOPNOTSUPP);

	check_interrupt_context();

	if (mm == NULL) {
		pr_err("u2k: kthread is not allowed\n");
		return ERR_PTR(-EPERM);
	}

	down_write(&mm->mmap_lock);
	if (unlikely(mm->core_state)) {
		up_write(&mm->mmap_lock);
		pr_err("u2k: encountered coredump, abort\n");
		return p;
	}

	ret = __sp_walk_page_range(uva, size, mm, &sp_walk_data);
	if (ret) {
		pr_err_ratelimited("walk page range failed %d\n", ret);
		up_write(&mm->mmap_lock);
		return ERR_PTR(ret);
	}

	if (sp_walk_data.is_hugepage)
		p = vmap_hugepage(sp_walk_data.pages, sp_walk_data.page_count,
				  VM_MAP, PAGE_KERNEL);
	else
		p = vmap(sp_walk_data.pages, sp_walk_data.page_count, VM_MAP,
			 PAGE_KERNEL);
	up_write(&mm->mmap_lock);

	if (!p) {
		pr_err("vmap(huge) in u2k failed\n");
		__sp_walk_page_free(&sp_walk_data);
		return ERR_PTR(-ENOMEM);
	}

	p = p + (uva - sp_walk_data.uva_aligned);

	/*
	 * kva p may be used later in k2u. Since p comes from uva originally,
	 * it's reasonable to add flag VM_USERMAP so that p can be remapped
	 * into userspace again.
	 */
	area = find_vm_area(p);
	area->flags |= VM_USERMAP;

	kvfree(sp_walk_data.pages);
	return p;
}
EXPORT_SYMBOL_GPL(mg_sp_make_share_u2k);
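
/*
 * Usage sketch for u2k (hypothetical caller, not part of this file): map a
 * user buffer of the current process into the kernel and read it. The
 * mapping is dropped later through mg_sp_unshare() on the returned kernel
 * address (see the sketch after that function below).
 *
 *	void *kva = mg_sp_make_share_u2k(uva, len, 0);
 *
 *	if (IS_ERR(kva))
 *		return PTR_ERR(kva);
 *	memcpy(out, kva, len);
 */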

/*
 * Input parameters uva, tgid and spg_id are now useless. spg_id will be useful
 * when supporting a process in multiple sp groups.
 *
 * Procedure of unshare uva must be compatible with:
 *
 * 1. DVPP channel destroy procedure:
 * do_exit() -> exit_mm() (mm no longer in spg and current->mm == NULL) ->
 * exit_task_work() -> task_work_run() -> __fput() -> ... -> vdec_close() ->
 * sp_unshare(uva, SPG_ID_DEFAULT)
 *
 * 2. Process A once was the target of k2u(to group), then it exits.
 * Guard worker kthread tries to free this uva and it must succeed, otherwise
 * spa of this uva leaks.
 *
 * This also means we must trust DVPP channel destroy and guard worker code.
 */
static int sp_unshare_uva(unsigned long uva, unsigned long size, int group_id)
{
	int ret = 0;
	struct mm_struct *mm;
	struct sp_area *spa;
	unsigned long uva_aligned;
	unsigned long size_aligned;
	unsigned int page_size;
	struct sp_group *spg;

	spg = __sp_find_spg(current->tgid, group_id);
	if (!spg) {
		pr_debug("sp unshare find group failed %d\n", group_id);
		return -EINVAL;
	}

	/*
	 * At first we guess it's a hugepage addr; we can tolerate at most
	 * PMD_SIZE or PAGE_SIZE of misalignment, matching what k2u used.
	 */
	spa = get_sp_area(spg, ALIGN_DOWN(uva, PMD_SIZE));
	if (!spa) {
		spa = get_sp_area(spg, ALIGN_DOWN(uva, PAGE_SIZE));
		if (!spa) {
			ret = -EINVAL;
			pr_debug("invalid input uva %lx in unshare uva\n", (unsigned long)uva);
			goto out;
		}
	}

	if (spa->type != SPA_TYPE_K2TASK && spa->type != SPA_TYPE_K2SPG) {
		pr_err_ratelimited("unshare wrong type spa\n");
		ret = -EINVAL;
		goto out_drop_area;
	}
	/*
	 * 1. overflow actually won't happen due to an spa must be valid.
	 * 2. we must unshare [spa->va_start, spa->va_start + spa->real_size) completely
	 *    because an spa is one-to-one correspondence with an vma.
	 *    Thus input parameter size is not necessarily needed.
	 */
	page_size = (spa->is_hugepage ? PMD_SIZE : PAGE_SIZE);
	uva_aligned = spa->va_start;
	size_aligned = spa->real_size;

	if (size_aligned < ALIGN(size, page_size)) {
		ret = -EINVAL;
		pr_err_ratelimited("unshare uva failed, invalid parameter size %lu\n", size);
		goto out_drop_area;
	}

	if (spa->type == SPA_TYPE_K2TASK) {
		if (spa->applier != current->tgid) {
			pr_err_ratelimited("unshare uva(to task) no permission\n");
			ret = -EPERM;
			goto out_drop_area;
		}

		/*
		 * current thread may be exiting in a multithread process
		 *
		 * 1. never need a kthread to make unshare when process has exited
		 * 2. in dvpp channel destroy procedure, exit_mm() has been called
		 *    and don't need to make unshare
		 */
		mm = get_task_mm(current->group_leader);
		if (!mm) {
			pr_info_ratelimited("no need to unshare uva(to task), target process mm is exiting\n");
			goto out_clr_flag;
		}

		down_write(&mm->mmap_lock);
		if (unlikely(mm->core_state)) {
			ret = 0;
			up_write(&mm->mmap_lock);
			mmput(mm);
			goto out_drop_area;
		}

		ret = do_munmap(mm, uva_aligned, size_aligned, NULL);
		up_write(&mm->mmap_lock);
		mmput(mm);
		/* we are not supposed to fail */
		if (ret)
			pr_err("failed to unmap VA %pK when munmap in unshare uva\n",
			       (void *)uva_aligned);
		sp_update_process_stat(current, false, spa);

	} else if (spa->type == SPA_TYPE_K2SPG) {
		down_read(&spa->spg->rw_lock);
		/* always allow kthread and dvpp channel destroy procedure */
		if (current->mm) {
			if (!is_process_in_group(spa->spg, current->mm)) {
				up_read(&spa->spg->rw_lock);
				pr_err_ratelimited("unshare uva(to group) failed, caller process doesn't belong to target group\n");
				ret = -EPERM;
				goto out_drop_area;
			}
		}
		up_read(&spa->spg->rw_lock);

		down_write(&spa->spg->rw_lock);
		if (!spg_valid(spa->spg)) {
			up_write(&spa->spg->rw_lock);
			pr_info_ratelimited("share pool: no need to unshare uva(to group), sp group of spa is dead\n");
			goto out_clr_flag;
		}
		/* the life cycle of spa has a direct relation with sp group */
		if (unlikely(spa->is_dead)) {
			up_write(&spa->spg->rw_lock);
			pr_err_ratelimited("unexpected double sp unshare\n");
			dump_stack();
			ret = -EINVAL;
			goto out_drop_area;
		}
		spa->is_dead = true;
		up_write(&spa->spg->rw_lock);

		down_read(&spa->spg->rw_lock);
		__sp_free(spa->spg, uva_aligned, size_aligned, NULL);
		up_read(&spa->spg->rw_lock);

		if (current->mm == NULL)
			atomic64_sub(spa->real_size, &kthread_stat.k2u_size);
		else
			sp_update_process_stat(current, false, spa);
	} else {
		WARN(1, "unshare uva invalid spa type");
	}

out_clr_flag:
	if (!vmalloc_area_clr_flag(spa->kva, VM_SHAREPOOL))
		pr_debug("clear spa->kva %ld is not valid\n", spa->kva);
	spa->kva = 0;

out_drop_area:
	__sp_area_drop(spa);
out:
	sp_group_drop(spg);
	return ret;
}

/* No possible concurrent protection, take care when use */
static int sp_unshare_kva(unsigned long kva, unsigned long size)
{
	unsigned long addr, kva_aligned;
	struct page *page;
	unsigned long size_aligned;
	unsigned long step;
	bool is_hugepage = true;
	int ret;

	ret = is_vmap_hugepage(kva);
	if (ret > 0) {
		kva_aligned = ALIGN_DOWN(kva, PMD_SIZE);
		size_aligned = ALIGN(kva + size, PMD_SIZE) - kva_aligned;
		step = PMD_SIZE;
	} else if (ret == 0) {
		kva_aligned = ALIGN_DOWN(kva, PAGE_SIZE);
		size_aligned = ALIGN(kva + size, PAGE_SIZE) - kva_aligned;
		step = PAGE_SIZE;
		is_hugepage = false;
	} else {
		pr_err_ratelimited("check vmap hugepage failed %d\n", ret);
		return -EINVAL;
	}

	if (kva_aligned + size_aligned < kva_aligned) {
		pr_err_ratelimited("overflow happened in unshare kva\n");
		return -EINVAL;
	}

	for (addr = kva_aligned; addr < (kva_aligned + size_aligned); addr += step) {
		page = vmalloc_to_page((void *)addr);
		if (page)
			put_page(page);
		else
			WARN(1, "vmalloc %pK to page/hugepage failed\n",
			       (void *)addr);
	}

	vunmap((void *)kva_aligned);

	return 0;
}

/**
 * mg_sp_unshare() - Unshare the kernel or user memory which was shared by
 *                calling sp_make_share_{k2u,u2k}().
 * @va: the specified virtual address of memory
 * @size: the size of unshared memory
 * @spg_id: the ID of the share group that the address belongs to
 *
 * Use spg_id of current thread if spg_id == SPG_ID_DEFAULT.
 *
 * Return: 0 for success, -errno on failure.
 */
int mg_sp_unshare(unsigned long va, unsigned long size, int spg_id)
{
	int ret = 0;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	if (current->flags & PF_KTHREAD)
		return -EINVAL;

	if (va < TASK_SIZE) {
		/* user address */
		ret = sp_unshare_uva(va, size, spg_id);
	} else if (va >= PAGE_OFFSET) {
		/* kernel address */
		ret = sp_unshare_kva(va, size);
	} else {
		/* regard user and kernel address ranges as bad address */
		pr_debug("unshare addr %lx is not a user or kernel addr\n", (unsigned long)va);
		ret = -EFAULT;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_unshare);
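
/*
 * Usage sketch (hypothetical caller, not part of this file): both directions
 * of unshare, assuming uva/kva come from earlier matching
 * mg_sp_make_share_k2u()/mg_sp_make_share_u2k() calls of size len.
 *
 *	// user address from k2u; SPG_ID_DEFAULT means the current task's group
 *	mg_sp_unshare((unsigned long)uva, len, SPG_ID_DEFAULT);
 *
 *	// kernel address from u2k; spg_id is ignored on this path
 *	mg_sp_unshare((unsigned long)kva, len, SPG_ID_NONE);
 */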

/**
 * mg_sp_walk_page_range() - Walk page table with caller specific callbacks.
 * @uva: the start VA of user memory.
 * @size: the size of user memory.
 * @tsk: task struct of the target task.
 * @sp_walk_data: a structure of a page pointer array.
 *
 * Return: 0 for success, -errno on failure.
 *
 * When return 0, sp_walk_data describing [uva, uva+size) can be used.
 * When return -errno, information in sp_walk_data is useless.
 */
int mg_sp_walk_page_range(unsigned long uva, unsigned long size,
	struct task_struct *tsk, struct sp_walk_data *sp_walk_data)
{
	struct mm_struct *mm;
	int ret = 0;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	if (unlikely(!sp_walk_data)) {
		pr_err_ratelimited("null pointer when walk page range\n");
		return -EINVAL;
	}
	if (!tsk || (tsk->flags & PF_EXITING))
		return -ESRCH;

	get_task_struct(tsk);
	mm = get_task_mm(tsk);
	if (!mm) {
		put_task_struct(tsk);
		return -ESRCH;
	}

	down_write(&mm->mmap_lock);
	if (likely(!mm->core_state)) {
		ret = __sp_walk_page_range(uva, size, mm, sp_walk_data);
	} else {
		pr_err("walk page range: encountered coredump\n");
		ret = -ESRCH;
	}
	up_write(&mm->mmap_lock);

	mmput(mm);
	put_task_struct(tsk);

	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_walk_page_range);

/**
3581
 * mg_sp_walk_page_free() - Free the sp_walk_data structure.
3582 3583
 * @sp_walk_data: a structure of a page pointer array to be freed.
 */
3584
void mg_sp_walk_page_free(struct sp_walk_data *sp_walk_data)
3585
{
3586 3587 3588
	if (!sp_is_enabled())
		return;

3589 3590 3591 3592 3593 3594
	check_interrupt_context();

	if (!sp_walk_data)
		return;

	__sp_walk_page_free(sp_walk_data);
3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609
}
EXPORT_SYMBOL_GPL(mg_sp_walk_page_free);
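
/*
 * Usage sketch for the walk pair above (hypothetical caller, not part of
 * this file): pin the pages backing [uva, uva + len) of task tsk, inspect
 * them, then release everything through mg_sp_walk_page_free().
 *
 *	struct sp_walk_data wd = { 0 };
 *	int i, ret;
 *
 *	ret = mg_sp_walk_page_range(uva, len, tsk, &wd);
 *	if (ret)
 *		return ret;
 *	for (i = 0; i < wd.page_count; i++)
 *		pr_info("page %d pfn %lx\n", i, page_to_pfn(wd.pages[i]));
 *	mg_sp_walk_page_free(&wd);
 */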

int sp_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&sp_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(sp_register_notifier);

int sp_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&sp_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(sp_unregister_notifier);

static bool is_sp_dynamic_dvpp_addr(unsigned long addr);
/**
 * mg_sp_config_dvpp_range() - User can config the share pool start address
 *                          of each Da-vinci device.
 * @start: the value of share pool start
 * @size: the value of share pool
 * @device_id: the num of Da-vinci device
 * @tgid: the tgid of device process
 *
 * Return true for success.
 * Return false if a parameter is invalid or the range has been set up.
 * This function has no concurrency problem.
 */
bool mg_sp_config_dvpp_range(size_t start, size_t size, int device_id, int tgid)
{
	int ret;
	bool err = false;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct sp_group *spg;
	struct sp_mapping *spm;
	unsigned long default_start;

	if (!sp_is_enabled())
		return false;

	/* NOTE: check the start address */
	if (tgid < 0 || size <= 0 || size > MMAP_SHARE_POOL_16G_SIZE ||
	    device_id < 0 || device_id >= MAX_DEVID || !is_online_node_id(device_id)
		|| !is_sp_dynamic_dvpp_addr(start) || !is_sp_dynamic_dvpp_addr(start + size - 1))
		return false;

	ret = get_task(tgid, &tsk);
	if (ret)
		return false;

	mm = get_task_mm(tsk->group_leader);
	if (!mm)
		goto put_task;

	spg = sp_get_local_group(tsk, mm);
	if (IS_ERR(spg))
		goto put_mm;

	spm = spg->mapping[SP_MAPPING_DVPP];
	default_start = MMAP_SHARE_POOL_DVPP_START + device_id * MMAP_SHARE_POOL_16G_SIZE;
	/* The dvpp range of each group can be configured only once */
	if (spm->start[device_id] != default_start)
		goto put_spg;

	spm->start[device_id] = start;
	spm->end[device_id] = start + size;

	err = true;

put_spg:
	sp_group_drop(spg);
put_mm:
	mmput(mm);
put_task:
	put_task_struct(tsk);

	return err;
}
EXPORT_SYMBOL_GPL(mg_sp_config_dvpp_range);
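
/*
 * Usage sketch (hypothetical values, not part of this file): carve a 4G DVPP
 * range for device 0 on behalf of process tgid. The whole range must lie in
 * one device region of the dynamic DVPP window, as checked by
 * is_sp_dynamic_dvpp_addr(), and device 0 must be an online NUMA node.
 *
 *	size_t start = MMAP_SHARE_POOL_DYNAMIC_DVPP_BASE + MMAP_SHARE_POOL_16G_SIZE;
 *
 *	if (!mg_sp_config_dvpp_range(start, SZ_4G, 0, tgid))
 *		pr_warn("dvpp range rejected or already configured\n");
 */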

static bool is_sp_reserve_addr(unsigned long addr)
{
	return addr >= MMAP_SHARE_POOL_START && addr < MMAP_SHARE_POOL_END;
}

/*
 *	| 16G host | 16G device | ... |     |
 *	^
 *	|
 *	MMAP_SHARE_POOL_DVPP_BASE + 16G * 64
 *	We only check the device regions.
 */
static bool is_sp_dynamic_dvpp_addr(unsigned long addr)
{
	if (addr < MMAP_SHARE_POOL_DYNAMIC_DVPP_BASE || addr >= MMAP_SHARE_POOL_DYNAMIC_DVPP_END)
		return false;

	return (addr - MMAP_SHARE_POOL_DYNAMIC_DVPP_BASE) & MMAP_SHARE_POOL_16G_SIZE;
}
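
/*
 * Worked example for the check above (layout as in the diagram): with BASE
 * the start of the dynamic DVPP window, BASE + 8G lies in a host region, so
 * the 16G bit of (addr - BASE) is clear and the check fails; BASE + 24G lies
 * in the first device region, the bit is set and the check passes.
 */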

/**
 * mg_is_sharepool_addr() - Check if a user memory address belongs to share pool.
 * @addr: the userspace address to be checked.
 *
 * Return true if addr belongs to share pool, or false otherwise.
 */
bool mg_is_sharepool_addr(unsigned long addr)
{
	return sp_is_enabled() &&
		(is_sp_reserve_addr(addr) || is_sp_dynamic_dvpp_addr(addr));
}
EXPORT_SYMBOL_GPL(mg_is_sharepool_addr);
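
/*
 * Example (illustrative only): other kernel paths can use this predicate
 * to fence off the share pool range, mirroring sp_check_mmap_addr() below:
 *
 *	if (mg_is_sharepool_addr(addr) && !(flags & MAP_SHARE_POOL))
 *		return -EINVAL;	(hypothetical caller policy)
 */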

int sp_node_id(struct vm_area_struct *vma)
{
	struct sp_area *spa;
	int node_id = numa_node_id();

	if (!sp_is_enabled())
		return node_id;

	if (vma && (vma->vm_flags & VM_SHARE_POOL) && vma->vm_private_data) {
		spa = vma->vm_private_data;
		node_id = spa->node_id;
	}

	return node_id;
}

/*** Statistical and maintenance functions ***/

static void get_mm_rss_info(struct mm_struct *mm, unsigned long *anon,
	unsigned long *file, unsigned long *shmem, unsigned long *total_rss)
{
	*anon = get_mm_counter(mm, MM_ANONPAGES);
	*file = get_mm_counter(mm, MM_FILEPAGES);
	*shmem = get_mm_counter(mm, MM_SHMEMPAGES);
	*total_rss = *anon + *file + *shmem;
}

static long get_proc_k2u(struct sp_proc_stat *stat)
{
	return byte2kb(atomic64_read(&stat->k2u_size));
}

static long get_proc_alloc(struct sp_proc_stat *stat)
{
	return byte2kb(atomic64_read(&stat->alloc_nsize) +
			atomic64_read(&stat->alloc_hsize));
}

static void get_process_sp_res(struct sp_group_master *master,
		long *sp_res_out, long *sp_res_nsize_out)
{
	struct sp_group *spg;
	struct sp_group_node *spg_node;

	*sp_res_out = 0;
	*sp_res_nsize_out = 0;

	list_for_each_entry(spg_node, &master->node_list, group_node) {
		spg = spg_node->spg;
		*sp_res_out += byte2kb(atomic64_read(&spg->instat.alloc_nsize));
		*sp_res_out += byte2kb(atomic64_read(&spg->instat.alloc_hsize));
		*sp_res_nsize_out += byte2kb(atomic64_read(&spg->instat.alloc_nsize));
	}
}

static long get_sp_res_by_spg_proc(struct sp_group_node *spg_node)
{
	return byte2kb(atomic64_read(&spg_node->spg->instat.alloc_nsize) +
			atomic64_read(&spg_node->spg->instat.alloc_hsize));
}

/*
 * RSS statistics have a maximum deviation of 64 pages (256KB).
 * See check_sync_rss_stat().
 */
static void get_process_non_sp_res(unsigned long total_rss, unsigned long shmem,
	long sp_res_nsize, long *non_sp_res_out, long *non_sp_shm_out)
{
	long non_sp_res, non_sp_shm;

	non_sp_res = page2kb(total_rss) - sp_res_nsize;
	non_sp_res = non_sp_res < 0 ? 0 : non_sp_res;
	non_sp_shm = page2kb(shmem) - sp_res_nsize;
	non_sp_shm = non_sp_shm < 0 ? 0 : non_sp_shm;

	*non_sp_res_out = non_sp_res;
	*non_sp_shm_out = non_sp_shm;
}
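
/*
 * Worked example (hypothetical numbers, assuming 4K pages): with
 * total_rss = 1000 pages and sp_res_nsize = 1600 KB, page2kb(1000) =
 * 4000 KB, so non_sp_res = 4000 - 1600 = 2400 KB. With shmem = 300
 * pages, page2kb(300) = 1200 KB, and 1200 - 1600 is negative, so
 * non_sp_shm is clamped to 0.
 */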

static long get_spg_proc_alloc(struct sp_group_node *spg_node)
{
	return byte2kb(atomic64_read(&spg_node->instat.alloc_nsize) +
				atomic64_read(&spg_node->instat.alloc_hsize));
}

static long get_spg_proc_k2u(struct sp_group_node *spg_node)
{
	return byte2kb(atomic64_read(&spg_node->instat.k2u_size));
}

static void print_process_prot(struct seq_file *seq, unsigned long prot)
{
	if (prot == PROT_READ)
		seq_puts(seq, "R");
	else if (prot == (PROT_READ | PROT_WRITE))
		seq_puts(seq, "RW");
	else
		seq_puts(seq, "-");
}

int proc_sp_group_state(struct seq_file *m, struct pid_namespace *ns,
			struct pid *pid, struct task_struct *task)
{
	struct mm_struct *mm;
	struct sp_group_master *master;
	struct sp_proc_stat *proc_stat;
	struct sp_group_node *spg_node;
	unsigned long anon, file, shmem, total_rss;
	long sp_res, sp_res_nsize, non_sp_res, non_sp_shm;

	if (!sp_is_enabled())
		return 0;

	mm = get_task_mm(task);
	if (!mm)
		return 0;

	down_read(&sp_group_sem);
	down_read(&mm->mmap_lock);
	master = mm->sp_group_master;
	if (!master)
		goto out;

	get_mm_rss_info(mm, &anon, &file, &shmem, &total_rss);
	proc_stat = &master->instat;
	get_process_sp_res(master, &sp_res, &sp_res_nsize);
	get_process_non_sp_res(total_rss, shmem, sp_res_nsize,
			       &non_sp_res, &non_sp_shm);

	seq_puts(m, "Share Pool Aggregate Data of This Process\n\n");
	seq_printf(m, "%-8s %-16s %-9s %-9s %-9s %-10s %-10s %-8s\n",
		   "PID", "COMM", "SP_ALLOC", "SP_K2U", "SP_RES", "Non-SP_RES",
		   "Non-SP_Shm", "VIRT");
	seq_printf(m, "%-8d %-16s %-9ld %-9ld %-9ld %-10ld %-10ld %-8ld\n",
		   proc_stat->tgid, proc_stat->comm,
		   get_proc_alloc(proc_stat),
		   get_proc_k2u(proc_stat),
		   sp_res, non_sp_res, non_sp_shm,
		   page2kb(mm->total_vm));

	seq_puts(m, "\n\nProcess in Each SP Group\n\n");
	seq_printf(m, "%-8s %-9s %-9s %-9s %-4s\n",
			"Group_ID", "SP_ALLOC", "SP_K2U", "SP_RES", "PROT");

	list_for_each_entry(spg_node, &master->node_list, group_node) {
		seq_printf(m, "%-8d %-9ld %-9ld %-9ld ",
				spg_node->spg->id,
				get_spg_proc_alloc(spg_node),
				get_spg_proc_k2u(spg_node),
				get_sp_res_by_spg_proc(spg_node));
		print_process_prot(m, spg_node->prot);
		seq_putc(m, '\n');
	}

out:
	up_read(&mm->mmap_lock);
	up_read(&sp_group_sem);
	mmput(mm);
	return 0;
}

static void spa_stat_of_mapping_show(struct seq_file *seq, struct sp_mapping *spm)
{
	struct rb_node *node;
	struct sp_area *spa, *prev = NULL;

	spin_lock(&sp_area_lock);
	for (node = rb_first(&spm->area_root); node; node = rb_next(node)) {
		__sp_area_drop_locked(prev);

		spa = rb_entry(node, struct sp_area, rb_node);
		prev = spa;
		atomic_inc(&spa->use_count);
		spin_unlock(&sp_area_lock);

		if (spg_valid(spa->spg))  /* k2u to group */
			seq_printf(seq, "%-10d ", spa->spg->id);
		else  /* spg is dead */
			seq_printf(seq, "%-10s ", "Dead");

		seq_printf(seq, "%2s%-14lx %2s%-14lx %-10ld ",
			   "0x", spa->va_start,
			   "0x", spa->va_end,
			   byte2kb(spa->real_size));

		switch (spa->type) {
		case SPA_TYPE_ALLOC:
			seq_printf(seq, "%-7s ", "ALLOC");
			break;
		case SPA_TYPE_K2TASK:
			seq_printf(seq, "%-7s ", "TASK");
			break;
		case SPA_TYPE_K2SPG:
			seq_printf(seq, "%-7s ", "SPG");
			break;
		default:
			/* usually impossible, perhaps a developer's mistake */
			break;
		}

		if (spa->is_hugepage)
			seq_printf(seq, "%-5s ", "Y");
		else
			seq_printf(seq, "%-5s ", "N");

		seq_printf(seq, "%-8d ",  spa->applier);
		seq_printf(seq, "%-8d\n", atomic_read(&spa->use_count));

		spin_lock(&sp_area_lock);
	}
	__sp_area_drop_locked(prev);
	spin_unlock(&sp_area_lock);
}

static void spa_ro_stat_show(struct seq_file *seq)
{
	spa_stat_of_mapping_show(seq, sp_mapping_ro);
}

static void spa_normal_stat_show(struct seq_file *seq)
{
	spa_stat_of_mapping_show(seq, sp_mapping_normal);
}

static void spa_dvpp_stat_show(struct seq_file *seq)
{
	struct sp_mapping *spm;

	mutex_lock(&spm_list_lock);
	list_for_each_entry(spm, &spm_dvpp_list, spm_node)
		spa_stat_of_mapping_show(seq, spm);
	mutex_unlock(&spm_list_lock);
}

void spa_overview_show(struct seq_file *seq)
{
	unsigned int total_num, alloc_num, k2u_task_num, k2u_spg_num;
	unsigned long total_size, alloc_size, k2u_task_size, k2u_spg_size;
	unsigned long dvpp_size, dvpp_va_size;

	if (!sp_is_enabled())
		return;

	spin_lock(&sp_area_lock);
	total_num     = spa_stat.total_num;
	alloc_num     = spa_stat.alloc_num;
	k2u_task_num  = spa_stat.k2u_task_num;
	k2u_spg_num   = spa_stat.k2u_spg_num;
	total_size    = spa_stat.total_size;
	alloc_size    = spa_stat.alloc_size;
	k2u_task_size = spa_stat.k2u_task_size;
	k2u_spg_size  = spa_stat.k2u_spg_size;
	dvpp_size     = spa_stat.dvpp_size;
	dvpp_va_size  = spa_stat.dvpp_va_size;
	spin_unlock(&sp_area_lock);

	if (seq != NULL) {
		seq_printf(seq, "Spa total num %u.\n", total_num);
		seq_printf(seq, "Spa alloc num %u, k2u(task) num %u, k2u(spg) num %u.\n",
			   alloc_num, k2u_task_num, k2u_spg_num);
		seq_printf(seq, "Spa total size:     %13lu KB\n", byte2kb(total_size));
		seq_printf(seq, "Spa alloc size:     %13lu KB\n", byte2kb(alloc_size));
		seq_printf(seq, "Spa k2u(task) size: %13lu KB\n", byte2kb(k2u_task_size));
		seq_printf(seq, "Spa k2u(spg) size:  %13lu KB\n", byte2kb(k2u_spg_size));
		seq_printf(seq, "Spa dvpp size:      %13lu KB\n", byte2kb(dvpp_size));
		seq_printf(seq, "Spa dvpp va size:   %13lu MB\n", byte2mb(dvpp_va_size));
		seq_puts(seq, "\n");
	} else {
		pr_info("Spa total num %u.\n", total_num);
		pr_info("Spa alloc num %u, k2u(task) num %u, k2u(spg) num %u.\n",
			alloc_num, k2u_task_num, k2u_spg_num);
		pr_info("Spa total size:     %13lu KB\n", byte2kb(total_size));
		pr_info("Spa alloc size:     %13lu KB\n", byte2kb(alloc_size));
		pr_info("Spa k2u(task) size: %13lu KB\n", byte2kb(k2u_task_size));
		pr_info("Spa k2u(spg) size:  %13lu KB\n", byte2kb(k2u_spg_size));
		pr_info("Spa dvpp size:      %13lu KB\n", byte2kb(dvpp_size));
		pr_info("Spa dvpp va size:   %13lu MB\n", byte2mb(dvpp_va_size));
		pr_info("\n");
	}
}

static int spg_info_show(int id, void *p, void *data)
{
	struct sp_group *spg = p;
	struct seq_file *seq = data;

	if (id >= SPG_ID_LOCAL_MIN && id <= SPG_ID_LOCAL_MAX)
		return 0;

	if (seq != NULL) {
		seq_printf(seq, "Group %6d ", id);

		down_read(&spg->rw_lock);
		seq_printf(seq, "size: %lld KB, spa num: %d, total alloc: %lld KB, normal alloc: %lld KB, huge alloc: %lld KB\n",
				byte2kb(atomic64_read(&spg->instat.size)),
				atomic_read(&spg->instat.spa_num),
				byte2kb(atomic64_read(&spg->instat.alloc_size)),
				byte2kb(atomic64_read(&spg->instat.alloc_nsize)),
				byte2kb(atomic64_read(&spg->instat.alloc_hsize)));
		up_read(&spg->rw_lock);
	} else {
		pr_info("Group %6d ", id);

		down_read(&spg->rw_lock);
		pr_info("size: %lld KB, spa num: %d, total alloc: %lld KB, normal alloc: %lld KB, huge alloc: %lld KB\n",
				byte2kb(atomic64_read(&spg->instat.size)),
				atomic_read(&spg->instat.spa_num),
				byte2kb(atomic64_read(&spg->instat.alloc_size)),
				byte2kb(atomic64_read(&spg->instat.alloc_nsize)),
				byte2kb(atomic64_read(&spg->instat.alloc_hsize)));
		up_read(&spg->rw_lock);
	}

	return 0;
}

void spg_overview_show(struct seq_file *seq)
{
	if (!sp_is_enabled())
		return;

	if (seq != NULL) {
		seq_printf(seq, "Share pool total size: %lld KB, spa total num: %d.\n",
				byte2kb(atomic64_read(&sp_overall_stat.spa_total_size)),
				atomic_read(&sp_overall_stat.spa_total_num));
	} else {
		pr_info("Share pool total size: %lld KB, spa total num: %d.\n",
				byte2kb(atomic64_read(&sp_overall_stat.spa_total_size)),
				atomic_read(&sp_overall_stat.spa_total_num));
	}

	down_read(&sp_group_sem);
	idr_for_each(&sp_group_idr, spg_info_show, seq);
	up_read(&sp_group_sem);

	if (seq != NULL)
		seq_puts(seq, "\n");
	else
		pr_info("\n");
}

static int spa_stat_show(struct seq_file *seq, void *offset)
{
	spg_overview_show(seq);
	spa_overview_show(seq);
	/* print the file header */
	seq_printf(seq, "%-10s %-16s %-16s %-10s %-7s %-5s %-8s %-8s\n",
			"Group ID", "va_start", "va_end", "Size(KB)", "Type", "Huge", "PID", "Ref");
	spa_ro_stat_show(seq);
	spa_normal_stat_show(seq);
	spa_dvpp_stat_show(seq);
	return 0;
}

static int proc_usage_by_group(int id, void *p, void *data)
{
	struct sp_group *spg = p;
	struct seq_file *seq = data;
	struct sp_group_node *spg_node;
	struct mm_struct *mm;
	struct sp_group_master *master;
	int tgid;
	unsigned long anon, file, shmem, total_rss;

	down_read(&spg->rw_lock);
	list_for_each_entry(spg_node, &spg->procs, proc_node) {
		master = spg_node->master;
		mm = master->mm;
		tgid = master->instat.tgid;

		get_mm_rss_info(mm, &anon, &file, &shmem, &total_rss);

		seq_printf(seq, "%-8d ", tgid);
		seq_printf(seq, "%-8d ", id);
		seq_printf(seq, "%-9ld %-9ld %-9ld %-8ld %-7ld %-7ld ",
				get_spg_proc_alloc(spg_node),
				get_spg_proc_k2u(spg_node),
				get_sp_res_by_spg_proc(spg_node),
				page2kb(mm->total_vm), page2kb(total_rss),
				page2kb(shmem));
		print_process_prot(seq, spg_node->prot);
		seq_putc(seq, '\n');
	}
	up_read(&spg->rw_lock);
	cond_resched();

	return 0;
}

static int proc_group_usage_show(struct seq_file *seq, void *offset)
{
	spg_overview_show(seq);
	spa_overview_show(seq);

	/* print the file header */
	seq_printf(seq, "%-8s %-8s %-9s %-9s %-9s %-8s %-7s %-7s %-4s\n",
			"PID", "Group_ID", "SP_ALLOC", "SP_K2U", "SP_RES",
			"VIRT", "RES", "Shm", "PROT");
	/* print kthread buff_module_guard_work */
	seq_printf(seq, "%-8s %-8s %-9lld %-9lld\n",
			"guard", "-",
			byte2kb(atomic64_read(&kthread_stat.alloc_size)),
			byte2kb(atomic64_read(&kthread_stat.k2u_size)));

	down_read(&sp_group_sem);
	idr_for_each(&sp_group_idr, proc_usage_by_group, seq);
	up_read(&sp_group_sem);

	return 0;
}

static int proc_usage_show(struct seq_file *seq, void *offset)
{
	struct sp_group_master *master = NULL;
	unsigned long anon, file, shmem, total_rss;
	long sp_res, sp_res_nsize, non_sp_res, non_sp_shm;
	struct sp_proc_stat *proc_stat;

	seq_printf(seq, "%-8s %-16s %-9s %-9s %-9s %-10s %-10s %-8s\n",
			"PID", "COMM", "SP_ALLOC", "SP_K2U", "SP_RES", "Non-SP_RES",
			"Non-SP_Shm", "VIRT");

	down_read(&sp_group_sem);
	mutex_lock(&master_list_lock);
	list_for_each_entry(master, &master_list, list_node) {
		proc_stat = &master->instat;
		get_mm_rss_info(master->mm, &anon, &file, &shmem, &total_rss);
		get_process_sp_res(master, &sp_res, &sp_res_nsize);
		get_process_non_sp_res(total_rss, shmem, sp_res_nsize,
				&non_sp_res, &non_sp_shm);
		seq_printf(seq, "%-8d %-16s %-9ld %-9ld %-9ld %-10ld %-10ld %-8ld\n",
				proc_stat->tgid, proc_stat->comm,
				get_proc_alloc(proc_stat),
				get_proc_k2u(proc_stat),
				sp_res, non_sp_res, non_sp_shm,
				page2kb(master->mm->total_vm));
	}
	mutex_unlock(&master_list_lock);
	up_read(&sp_group_sem);

	return 0;
}

static void __init proc_sharepool_init(void)
{
	if (!proc_mkdir("sharepool", NULL))
		return;

	proc_create_single_data("sharepool/spa_stat", 0400, NULL, spa_stat_show, NULL);
	proc_create_single_data("sharepool/proc_stat", 0400, NULL, proc_group_usage_show, NULL);
	proc_create_single_data("sharepool/proc_overview", 0400, NULL, proc_usage_show, NULL);
}
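
/*
 * Example (illustrative only): once these entries are registered, the
 * statistics can be read from userspace, e.g.:
 *
 *	cat /proc/sharepool/proc_overview
 *	PID      COMM             SP_ALLOC  SP_K2U    SP_RES    ...
 */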

/*** End of statistical and maintenance functions ***/

bool sp_check_addr(unsigned long addr)
{
	return sp_is_enabled() && mg_is_sharepool_addr(addr) &&
	       !check_aoscore_process(current);
}

bool sp_check_mmap_addr(unsigned long addr, unsigned long flags)
{
	return sp_is_enabled() && mg_is_sharepool_addr(addr) &&
	       !check_aoscore_process(current) && !(flags & MAP_SHARE_POOL);
}

vm_fault_t sharepool_no_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			struct address_space *mapping, pgoff_t idx,
			unsigned long address, pte_t *ptep, unsigned int flags)
{
	struct hstate *h = hstate_vma(vma);
	vm_fault_t ret = VM_FAULT_SIGBUS;
	unsigned long size;
	struct page *page;
	pte_t new_pte;
	spinlock_t *ptl;
	unsigned long haddr = address & huge_page_mask(h);
	bool new_page = false;
	int err;
	int node_id;
	struct sp_area *spa;

	spa = vma->vm_private_data;
	if (!spa) {
		pr_err("share pool: vma is invalid, not from sp mmap\n");
		return ret;
	}
	node_id = spa->node_id;

retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		if (idx >= size)
			goto out;

		page = alloc_huge_page(vma, haddr, 0);
		if (IS_ERR(page)) {
			page = hugetlb_alloc_hugepage(node_id,
					HUGETLB_ALLOC_BUDDY | HUGETLB_ALLOC_NORECLAIM);
			if (!page)
				page = ERR_PTR(-ENOMEM);
		}
		if (IS_ERR(page)) {
			ptl = huge_pte_lock(h, mm, ptep);
			if (!huge_pte_none(huge_ptep_get(ptep))) {
				ret = 0;
				spin_unlock(ptl);
				goto out;
			}
			spin_unlock(ptl);
			ret = vmf_error(PTR_ERR(page));
			goto out;
		}
		__SetPageUptodate(page);
		new_page = true;

		/* sharepool pages are all shared */
		err = huge_add_to_page_cache(page, mapping, idx);
		if (err) {
			put_page(page);
			if (err == -EEXIST)
				goto retry;
			goto out;
		}
	}

	ptl = huge_pte_lock(h, mm, ptep);
	size = i_size_read(mapping->host) >> huge_page_shift(h);
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!huge_pte_none(huge_ptep_get(ptep)))
		goto backout;

	page_dup_rmap(page, true);
	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, haddr, ptep, new_pte);

	hugetlb_count_add(pages_per_huge_page(h), mm);

	spin_unlock(ptl);

	if (new_page)
		SetPagePrivate(&page[1]);

	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(ptl);
	unlock_page(page);
	put_page(page);
	goto out;
}

/*
 * The caller must ensure that this function is called
 * when the last thread in the thread group exits.
 */
int sp_group_exit(void)
{
	struct mm_struct *mm;
	struct sp_group *spg;
	struct sp_group_master *master;
	struct sp_group_node *spg_node, *tmp;
	bool is_alive = true;

	if (!sp_is_enabled())
		return 0;

	if (current->flags & PF_KTHREAD)
		return 0;

	mm = current->mm;
	down_write(&sp_group_sem);

	master = mm->sp_group_master;
	if (!master) {
		up_write(&sp_group_sem);
		return 0;
	}

	list_for_each_entry_safe(spg_node, tmp, &master->node_list, group_node) {
		spg = spg_node->spg;

		down_write(&spg->rw_lock);
		/* a dead group should NOT be reactive again */
		if (spg_valid(spg) && list_is_singular(&spg->procs))
			is_alive = spg->is_alive = false;
		spg->proc_num--;
		list_del(&spg_node->proc_node);
		up_write(&spg->rw_lock);

		if (!is_alive)
			blocking_notifier_call_chain(&sp_notifier_chain, 0,
						     spg);
	}

	/* match with get_task_mm() in sp_group_add_task() */
	if (atomic_sub_and_test(master->count, &mm->mm_users)) {
		up_write(&sp_group_sem);
		WARN(1, "Invalid user counting\n");
		return 1;
	}

	up_write(&sp_group_sem);
	return 0;
}

void sp_group_post_exit(struct mm_struct *mm)
{
	struct sp_proc_stat *stat;
	long alloc_size, k2u_size;
	/* lockless visit */
	struct sp_group_master *master = mm->sp_group_master;
	struct sp_group_node *spg_node, *tmp;
	struct sp_group *spg;

	if (!sp_is_enabled() || !master)
		return;

	/*
	 * There are two basic scenarios when a process in the share pool is
	 * exiting but its share pool memory usage is not 0.
	 * 1. Process A called sp_alloc(), but it terminates without calling
	 *    sp_free(). Then its share pool memory usage is a positive number.
	 * 2. Process A never called sp_alloc(), and process B in the same spg
	 *    called sp_alloc() to get an addr u. Then A gets u somehow and
	 *    called sp_free(u). Now A's share pool memory usage is a negative
	 *    number. Notice B's memory usage will be a positive number.
	 *
	 * We print an info message in both of these scenarios.
	 *
	 * A process not in an sp group doesn't need to print because there
	 * won't be any memory which is not freed.
	 */
	stat = &master->instat;
	alloc_size = atomic64_read(&stat->alloc_nsize) + atomic64_read(&stat->alloc_hsize);
	k2u_size = atomic64_read(&stat->k2u_size);

	if (alloc_size != 0 || k2u_size != 0)
		pr_info("process %s(%d) exits. It applied %ld aligned KB, k2u shared %ld aligned KB\n",
			stat->comm, stat->tgid,
			byte2kb(alloc_size), byte2kb(k2u_size));

	down_write(&sp_group_sem);
	list_for_each_entry_safe(spg_node, tmp, &master->node_list, group_node) {
		spg = spg_node->spg;
		/* match with refcount inc in sp_group_add_task */
		if (atomic_dec_and_test(&spg->use_count))
			free_sp_group_locked(spg);
		list_del(&spg_node->group_node);
		kfree(spg_node);
	}
	up_write(&sp_group_sem);

	mutex_lock(&master_list_lock);
	list_del(&master->list_node);
	mutex_unlock(&master_list_lock);

	kfree(master);
}

DEFINE_STATIC_KEY_FALSE(share_pool_enabled_key);

static int __init enable_share_pool(char *s)
{
	static_branch_enable(&share_pool_enabled_key);
	pr_info("Ascend enable share pool features via bootargs\n");

	return 1;
}
__setup("enable_ascend_share_pool", enable_share_pool);
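
/*
 * Example: the share pool stays disabled unless the kernel is booted with
 * this parameter appended to the boot command line:
 *
 *	enable_ascend_share_pool
 */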

static int __init share_pool_init(void)
{
	if (!sp_is_enabled())
		return 0;

	sp_mapping_normal = sp_mapping_create(SP_MAPPING_NORMAL);
	if (IS_ERR(sp_mapping_normal))
		goto fail;
	atomic_inc(&sp_mapping_normal->user);

	sp_mapping_ro = sp_mapping_create(SP_MAPPING_RO);
	if (IS_ERR(sp_mapping_ro))
		goto free_normal;
	atomic_inc(&sp_mapping_ro->user);

	proc_sharepool_init();

	return 0;

free_normal:
	kfree(sp_mapping_normal);
fail:
	pr_err("Ascend share pool initialization failed\n");
	static_branch_disable(&share_pool_enabled_key);
	return 1;
}
late_initcall(share_pool_init);