/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Huawei Ascend Share Pool Memory
 *
 * Copyright (C) 2020 Huawei Limited
 * Author: Tang Yizhou <tangyizhou@huawei.com>
 *         Zefan Li <lizefan@huawei.com>
 *         Wu Peng <wupeng58@huawei.com>
 *         Ding Tianhong <dingtgianhong@huawei.com>
 *         Zhou Guanghui <zhouguanghui1@huawei.com>
 *         Li Ming <limingming.li@huawei.com>
 *
 * This code is based on the hisilicon ascend platform.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) "share pool: " fmt

#include <linux/share_pool.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/mm_types.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/printk.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/atomic.h>
#include <linux/lockdep.h>
#include <linux/kernel.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/rmap.h>
#include <linux/preempt.h>
#include <linux/swapops.h>
#include <linux/mmzone.h>
#include <linux/timekeeping.h>
#include <linux/time64.h>
#include <linux/pagewalk.h>

#define spg_valid(spg)		((spg)->is_alive == true)

/* Use the spa va address as the mmap offset. This works because spa_file
 * is set up with a 64-bit address space, so the va is well covered.
 */
#define addr_offset(spa)	((spa)->va_start)

#define byte2kb(size)		((size) >> 10)
#define byte2mb(size)		((size) >> 20)
#define page2kb(page_num)	((page_num) << (PAGE_SHIFT - 10))

#define MAX_GROUP_FOR_SYSTEM	50000
#define MAX_GROUP_FOR_TASK	3000
#define MAX_PROC_PER_GROUP	1024

#define GROUP_NONE		0

#define SEC2US(sec)		((sec) * 1000000)
#define NS2US(ns)		((ns) / 1000)

#define PF_DOMAIN_CORE		0x10000000	/* AOS CORE processes in sched.h */

static int system_group_count;

/* idr of all sp_groups */
static DEFINE_IDR(sp_group_idr);
/* rw semaphore for sp_group_idr and mm->sp_group_master */
static DECLARE_RWSEM(sp_group_sem);

static BLOCKING_NOTIFIER_HEAD(sp_notifier_chain);

static DEFINE_IDA(sp_group_id_ida);

/*** Statistical and maintenance tools ***/

/* list of all sp_group_masters */
static LIST_HEAD(master_list);
/* mutex to protect insert/delete ops from master_list */
static DEFINE_MUTEX(master_list_lock);

/* list of all spm-dvpp */
static LIST_HEAD(spm_dvpp_list);
/* mutex to protect insert/delete ops on spm_dvpp_list */
static DEFINE_MUTEX(spm_list_lock);

/* for kthread buff_module_guard_work */
static struct sp_proc_stat kthread_stat;

#define SEQ_printf(m, x...)			\
do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_info(x);			\
} while (0)

#ifndef __GENKSYMS__
struct sp_spg_stat {
	int spg_id;
	/* record the number of hugepage allocation failures */
	atomic_t hugepage_failures;
	/* number of sp_area */
	atomic_t	 spa_num;
	/* total size of all sp_area from sp_alloc and k2u */
	atomic64_t	 size;
	/* total size of all sp_area from sp_alloc 0-order page */
	atomic64_t	 alloc_nsize;
	/* total size of all sp_area from sp_alloc hugepage */
	atomic64_t	 alloc_hsize;
	/* total size of all sp_area from sp_alloc */
	atomic64_t	 alloc_size;
	/* total size of all sp_area from sp_k2u */
	atomic64_t	 k2u_size;
};

/* per process memory usage statistics indexed by tgid */
struct sp_proc_stat {
	int tgid;
	struct mm_struct *mm;
	char comm[TASK_COMM_LEN];
	/*
	 * alloc amount minus free amount, may be negative when freed by
	 * another task in the same sp group.
	 */
	atomic64_t alloc_size;
	atomic64_t alloc_nsize;
	atomic64_t alloc_hsize;
	atomic64_t k2u_size;
};

/* per process/sp-group memory usage statistics */
struct spg_proc_stat {
	int tgid;
	int spg_id;  /* 0 for non-group data, such as k2u_task */
	/*
	 * alloc amount minus free amount, may be negative when freed by
	 * another task in the same sp group.
	 */
	atomic64_t alloc_size;
	atomic64_t alloc_nsize;
	atomic64_t alloc_hsize;
	atomic64_t k2u_size;
};

enum sp_mapping_type {
	SP_MAPPING_START,
	SP_MAPPING_DVPP		= SP_MAPPING_START,
	SP_MAPPING_NORMAL,
	SP_MAPPING_RO,
	SP_MAPPING_END,
};

/*
 * address space management
 */
struct sp_mapping {
	unsigned long type;
	atomic_t user;
	unsigned long start[MAX_DEVID];
	unsigned long end[MAX_DEVID];
	struct rb_root area_root;

	struct rb_node *free_area_cache;
	unsigned long cached_hole_size;
	unsigned long cached_vstart;

	/* list head for all groups attached to this mapping, dvpp mapping only */
	struct list_head group_head;
	struct list_head spm_node;
};

/* Processes in the same sp_group can share memory.
 * Memory layout for share pool:
 *
 * |-------------------- 8T -------------------|---|------ 8T ------------|
 * |		Device 0	   |  Device 1 |...|                      |
 * |----------------------------------------------------------------------|
 * |------------- 16G -------------|    16G    |   |                      |
 * | DVPP GROUP0   | DVPP GROUP1   | ... | ... |...|  sp normal memory    |
 * |     sp        |    sp         |     |     |   |                      |
 * |----------------------------------------------------------------------|
 *
 * The host SVM feature reserves 8T of virtual memory via mmap, and due to the
 * restriction of DVPP, when both SVM and share pool allocate memory for
 * DVPP, the memory has to be in the same 32G range.
 *
 * Share pool reserves 16T of memory, with 8T for normal use and 8T for DVPP.
 * Within this 8T DVPP memory, SVM will call sp_config_dvpp_range() to
 * tell us which 16G memory range is reserved for share pool.
 *
 * In some scenarios where there is no host SVM feature, share pool uses
 * the default 8G memory setting for DVPP.
 */
struct sp_group {
	int		 id;
	unsigned long	 flag;
	struct file	 *file;
	struct file	 *file_hugetlb;
	/* number of process in this group */
	int		 proc_num;
	/* list head of processes (sp_group_node, each represents a process) */
	struct list_head procs;
	/* list head of sp_area. it is protected by spin_lock sp_area_lock */
	struct list_head spa_list;
	/* group statistics */
	struct sp_spg_stat instat;
	/* is_alive == false means it's being destroyed */
	bool		 is_alive;
	atomic_t	 use_count;
	/* protect the group internal elements, except spa_list */
	struct rw_semaphore	rw_lock;
	/* list node for dvpp mapping */
	struct list_head	mnode;
	struct sp_mapping       *mapping[SP_MAPPING_END];
};

/* a per-process(per mm) struct which manages a sp_group_node list */
struct sp_group_master {
	/*
	 * number of sp groups the process belongs to,
	 * a.k.a the number of sp_node in node_list
	 */
	unsigned int count;
	/* list head of sp_node */
	struct list_head node_list;
	struct mm_struct *mm;
	/*
	 * Used to apply for share pool memory for the current process itself,
	 * e.g. sp_alloc of non-shared memory or k2task.
	 */
	struct sp_group *local;
	struct sp_proc_stat instat;
	struct list_head list_node;
};

/*
 * each instance represents an sp group the process belongs to
 * sp_group_master    : sp_group_node   = 1 : N
 * sp_group_node->spg : sp_group        = 1 : 1
 * sp_group_node      : sp_group->procs = N : 1
 */
struct sp_group_node {
	/* list node in sp_group->procs */
	struct list_head proc_node;
	/* list node in sp_group_master->node_list */
	struct list_head group_node;
	struct sp_group_master *master;
	struct sp_group *spg;
	unsigned long prot;
	struct spg_proc_stat instat;
};
#endif

static inline void sp_add_group_master(struct sp_group_master *master)
{
	mutex_lock(&master_list_lock);
	list_add_tail(&master->list_node, &master_list);
	mutex_unlock(&master_list_lock);
}

static inline void sp_del_group_master(struct sp_group_master *master)
{
	mutex_lock(&master_list_lock);
	list_del(&master->list_node);
	mutex_unlock(&master_list_lock);
}

/* The caller should hold mmap_sem to protect master (TBD) */
static void sp_init_group_master_stat(int tgid, struct mm_struct *mm,
		struct sp_proc_stat *stat)
{
	atomic64_set(&stat->alloc_nsize, 0);
	atomic64_set(&stat->alloc_hsize, 0);
	atomic64_set(&stat->k2u_size, 0);
	stat->mm = mm;
	stat->tgid = tgid;
	get_task_comm(stat->comm, current);
}

static unsigned long sp_mapping_type(struct sp_mapping *spm)
{
	return spm->type;
}

static void sp_mapping_set_type(struct sp_mapping *spm, unsigned long type)
{
	spm->type = type;
}

static struct sp_mapping *sp_mapping_normal;
static struct sp_mapping *sp_mapping_ro;

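/* Only DVPP mappings are tracked on the global spm_dvpp_list. */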
static void sp_mapping_add_to_list(struct sp_mapping *spm)
{
	mutex_lock(&spm_list_lock);
	if (sp_mapping_type(spm) == SP_MAPPING_DVPP)
		list_add_tail(&spm->spm_node, &spm_dvpp_list);
	mutex_unlock(&spm_list_lock);
}

static void sp_mapping_remove_from_list(struct sp_mapping *spm)
{
	mutex_lock(&spm_list_lock);
	if (sp_mapping_type(spm) == SP_MAPPING_DVPP)
		list_del(&spm->spm_node);
	mutex_unlock(&spm_list_lock);
}

static void sp_mapping_range_init(struct sp_mapping *spm)
{
	int i;

	for (i = 0; i < MAX_DEVID; i++) {
		switch (sp_mapping_type(spm)) {
		case SP_MAPPING_RO:
			spm->start[i] = MMAP_SHARE_POOL_RO_START;
			spm->end[i]   = MMAP_SHARE_POOL_RO_END;
			break;
		case SP_MAPPING_NORMAL:
			spm->start[i] = MMAP_SHARE_POOL_NORMAL_START;
			spm->end[i]   = MMAP_SHARE_POOL_NORMAL_END;
			break;
		case SP_MAPPING_DVPP:
			spm->start[i] = MMAP_SHARE_POOL_DVPP_START + i * MMAP_SHARE_POOL_16G_SIZE;
			spm->end[i]   = spm->start[i] + MMAP_SHARE_POOL_16G_SIZE;
			break;
		default:
			pr_err("Invalid sp_mapping type [%lu]\n", sp_mapping_type(spm));
			break;
		}
	}
}

static struct sp_mapping *sp_mapping_create(unsigned long type)
{
	struct sp_mapping *spm;

	spm = kzalloc(sizeof(struct sp_mapping), GFP_KERNEL);
	if (!spm)
		return ERR_PTR(-ENOMEM);

	sp_mapping_set_type(spm, type);
	sp_mapping_range_init(spm);
	atomic_set(&spm->user, 0);
	spm->area_root = RB_ROOT;
	INIT_LIST_HEAD(&spm->group_head);
	sp_mapping_add_to_list(spm);

	return spm;
}

static void sp_mapping_destroy(struct sp_mapping *spm)
{
	sp_mapping_remove_from_list(spm);
	kfree(spm);
}

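/*
 * Bind @spm to @spg: bump the mapping's user count and, for DVPP mappings,
 * link the group onto the mapping's group_head list.
 */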
static void sp_mapping_attach(struct sp_group *spg, struct sp_mapping *spm)
{
	unsigned long type = sp_mapping_type(spm);
	atomic_inc(&spm->user);

	spg->mapping[type] = spm;
	if (type == SP_MAPPING_DVPP)
		list_add_tail(&spg->mnode, &spm->group_head);
}

static void sp_mapping_detach(struct sp_group *spg, struct sp_mapping *spm)
{
	unsigned long type;

	if (!spm)
		return;

	type = sp_mapping_type(spm);
	if (type == SP_MAPPING_DVPP)
		list_del(&spg->mnode);
	if (atomic_dec_and_test(&spm->user))
		sp_mapping_destroy(spm);

	spg->mapping[type] = NULL;
}

/* merge old mapping to new, and the old mapping would be destroyed */
static void sp_mapping_merge(struct sp_mapping *new, struct sp_mapping *old)
{
	struct sp_group *spg, *tmp;

	if (new == old)
		return;

	list_for_each_entry_safe(spg, tmp, &old->group_head, mnode) {
		list_move_tail(&spg->mnode, &new->group_head);
		spg->mapping[SP_MAPPING_DVPP] = new;
	}

	atomic_add(atomic_read(&old->user), &new->user);
	sp_mapping_destroy(old);
}

static bool is_mapping_empty(struct sp_mapping *spm)
{
	return RB_EMPTY_ROOT(&spm->area_root);
}

static bool can_mappings_merge(struct sp_mapping *m1, struct sp_mapping *m2)
{
	int i;

	for (i = 0; i < MAX_DEVID; i++)
		if (m1->start[i] != m2->start[i] || m1->end[i] != m2->end[i])
			return false;

	return true;
}

/*
 * 1. The mappings of the local group are set on creation.
 * 2. This is used to set up the mappings for groups created during add_task.
 * 3. The normal mapping exists for all groups.
 * 4. The dvpp mappings of the new group and the local group can merge _iff_ at
 *    least one of the mappings is empty.
 * the caller must hold sp_group_sem
 * NOTE: undo the merging when a later step fails.
 */
static int sp_mapping_group_setup(struct mm_struct *mm, struct sp_group *spg)
{
	struct sp_mapping *local_dvpp_mapping, *spg_dvpp_mapping;

	local_dvpp_mapping = mm->sp_group_master->local->mapping[SP_MAPPING_DVPP];
	spg_dvpp_mapping = spg->mapping[SP_MAPPING_DVPP];

	if (!list_empty(&spg->procs) && !(spg->flag & SPG_FLAG_NON_DVPP)) {
		/*
		 * Don't return an error when the mappings' address ranges conflict.
		 * As long as one of the mappings is unused, we can drop the empty one.
		 * This may implicitly change the address range for the task or group,
		 * so give a warning for it.
		 */
		bool is_conflict = !can_mappings_merge(local_dvpp_mapping, spg_dvpp_mapping);

		if (is_mapping_empty(local_dvpp_mapping)) {
			sp_mapping_merge(spg_dvpp_mapping, local_dvpp_mapping);
			if (is_conflict)
				pr_warn_ratelimited("task address space conflict, spg_id=%d\n", spg->id);
		} else if (is_mapping_empty(spg_dvpp_mapping)) {
			sp_mapping_merge(local_dvpp_mapping, spg_dvpp_mapping);
			if (is_conflict)
				pr_warn_ratelimited("group address space conflict, spg_id=%d\n", spg->id);
		} else {
			pr_info_ratelimited("Duplicate address space, id=%d\n", spg->id);
			return -EINVAL;
		}
	} else {
		if (!(spg->flag & SPG_FLAG_NON_DVPP))
			/* the mapping of local group is always set */
			sp_mapping_attach(spg, local_dvpp_mapping);
		if (!spg->mapping[SP_MAPPING_NORMAL])
			sp_mapping_attach(spg, sp_mapping_normal);
		if (!spg->mapping[SP_MAPPING_RO])
			sp_mapping_attach(spg, sp_mapping_ro);
	}

	return 0;
}

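/*
 * Pick the mapping of @spg that covers @addr: the normal, read-only or
 * DVPP region.
 */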
static struct sp_mapping *sp_mapping_find(struct sp_group *spg,
						 unsigned long addr)
{
	if (addr >= MMAP_SHARE_POOL_NORMAL_START && addr < MMAP_SHARE_POOL_NORMAL_END)
		return spg->mapping[SP_MAPPING_NORMAL];

	if (addr >= MMAP_SHARE_POOL_RO_START && addr < MMAP_SHARE_POOL_RO_END)
		return spg->mapping[SP_MAPPING_RO];

	return spg->mapping[SP_MAPPING_DVPP];
}

static struct sp_group *create_spg(int spg_id, unsigned long flag);
static void free_new_spg_id(bool new, int spg_id);
static void free_sp_group_locked(struct sp_group *spg);
static int local_group_add_task(struct mm_struct *mm, struct sp_group *spg);
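/*
 * Create the per-process local group: allocate a local group id, create the
 * group, attach a private DVPP mapping plus the shared normal and read-only
 * mappings, and add the task to the new group.
 */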
static int init_local_group(struct mm_struct *mm)
{
	int spg_id, ret;
	struct sp_group *spg;
	struct sp_mapping *spm;
	struct sp_group_master *master = mm->sp_group_master;

	spg_id = ida_alloc_range(&sp_group_id_ida, SPG_ID_LOCAL_MIN,
				 SPG_ID_LOCAL_MAX, GFP_ATOMIC);
	if (spg_id < 0) {
		pr_err_ratelimited("generate local group id failed %d\n", spg_id);
		return spg_id;
	}

	spg = create_spg(spg_id, 0);
	if (IS_ERR(spg)) {
		free_new_spg_id(true, spg_id);
		return PTR_ERR(spg);
	}

	master->local = spg;
	spm = sp_mapping_create(SP_MAPPING_DVPP);
	if (IS_ERR(spm)) {
		ret = PTR_ERR(spm);
		goto free_spg;
	}
	sp_mapping_attach(master->local, spm);
	sp_mapping_attach(master->local, sp_mapping_normal);
	sp_mapping_attach(master->local, sp_mapping_ro);

	ret = local_group_add_task(mm, spg);
	if (ret < 0)
		/* The spm would be released while destroying the spg */
		goto free_spg;

	return 0;

free_spg:
	/* spg_id is freed in free_sp_group_locked */
	free_sp_group_locked(spg);
	master->local = NULL;
	return ret;
}

/* The caller must hold sp_group_sem */
static int sp_init_group_master_locked(struct task_struct *tsk, struct mm_struct *mm)
{
	int ret;
	struct sp_group_master *master;

	if (mm->sp_group_master)
		return 0;

	master = kmalloc(sizeof(struct sp_group_master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	INIT_LIST_HEAD(&master->node_list);
	master->count = 0;
	master->mm = mm;
	sp_init_group_master_stat(tsk->tgid, mm, &master->instat);
	mm->sp_group_master = master;
	sp_add_group_master(master);

	ret = init_local_group(mm);
	if (ret)
		goto free_master;

	return 0;

free_master:
	sp_del_group_master(master);
	mm->sp_group_master = NULL;
	kfree(master);

	return ret;
}

static inline bool is_local_group(int spg_id)
{
	return spg_id >= SPG_ID_LOCAL_MIN && spg_id <= SPG_ID_LOCAL_MAX;
}

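/*
 * Return the local group of @mm, creating the group master (and the local
 * group) on first use. The group's use_count is raised for the caller.
 */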
static struct sp_group *sp_get_local_group(struct task_struct *tsk, struct mm_struct *mm)
{
	int ret;
	struct sp_group_master *master;

	down_read(&sp_group_sem);
	master = mm->sp_group_master;
	if (master && master->local) {
		atomic_inc(&master->local->use_count);
		up_read(&sp_group_sem);
		return master->local;
	}
	up_read(&sp_group_sem);

	down_write(&sp_group_sem);
	ret = sp_init_group_master_locked(tsk, mm);
	if (ret) {
		up_write(&sp_group_sem);
		return ERR_PTR(ret);
	}
	master = mm->sp_group_master;
	atomic_inc(&master->local->use_count);
	up_write(&sp_group_sem);

	return master->local;
}

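/* Per-group statistics: account an sp_area being added (inc) or removed. */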
static void update_spg_stat_alloc(unsigned long size, bool inc,
	bool huge, struct sp_spg_stat *stat)
{
	if (inc) {
		atomic_inc(&stat->spa_num);
		atomic64_add(size, &stat->size);
		atomic64_add(size, &stat->alloc_size);
		if (huge)
			atomic64_add(size, &stat->alloc_hsize);
		else
			atomic64_add(size, &stat->alloc_nsize);
	} else {
		atomic_dec(&stat->spa_num);
		atomic64_sub(size, &stat->size);
		atomic64_sub(size, &stat->alloc_size);
		if (huge)
			atomic64_sub(size, &stat->alloc_hsize);
		else
			atomic64_sub(size, &stat->alloc_nsize);
	}
}

static void update_spg_stat_k2u(unsigned long size, bool inc,
	struct sp_spg_stat *stat)
{
	if (inc) {
		atomic_inc(&stat->spa_num);
		atomic64_add(size, &stat->size);
		atomic64_add(size, &stat->k2u_size);
	} else {
		atomic_dec(&stat->spa_num);
		atomic64_sub(size, &stat->size);
		atomic64_sub(size, &stat->k2u_size);
	}
}

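/*
 * Per-process statistics: mirror an allocation change into both the
 * spg_node counters and the owning master's counters.
 */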
static void update_mem_usage_alloc(unsigned long size, bool inc,
		bool is_hugepage, struct sp_group_node *spg_node)
{
	struct sp_proc_stat *proc_stat = &spg_node->master->instat;

	if (inc) {
		if (is_hugepage) {
			atomic64_add(size, &spg_node->instat.alloc_hsize);
			atomic64_add(size, &proc_stat->alloc_hsize);
			return;
		}
		atomic64_add(size, &spg_node->instat.alloc_nsize);
		atomic64_add(size, &proc_stat->alloc_nsize);
		return;
	}

	if (is_hugepage) {
		atomic64_sub(size, &spg_node->instat.alloc_hsize);
		atomic64_sub(size, &proc_stat->alloc_hsize);
		return;
	}
	atomic64_sub(size, &spg_node->instat.alloc_nsize);
	atomic64_sub(size, &proc_stat->alloc_nsize);
	return;
}

static void update_mem_usage_k2u(unsigned long size, bool inc,
		struct sp_group_node *spg_node)
{
	struct sp_proc_stat *proc_stat = &spg_node->master->instat;

	if (inc) {
		atomic64_add(size, &spg_node->instat.k2u_size);
		atomic64_add(size, &proc_stat->k2u_size);
	} else {
		atomic64_sub(size, &spg_node->instat.k2u_size);
		atomic64_sub(size, &proc_stat->k2u_size);
	}
}

static void sp_init_spg_proc_stat(struct spg_proc_stat *stat, int spg_id)
{
	stat->tgid = current->tgid;
	stat->spg_id = spg_id;
	atomic64_set(&stat->alloc_nsize, 0);
	atomic64_set(&stat->alloc_hsize, 0);
	atomic64_set(&stat->k2u_size, 0);
}

static void sp_init_group_stat(struct sp_spg_stat *stat)
{
	atomic_set(&stat->hugepage_failures, 0);
	atomic_set(&stat->spa_num, 0);
	atomic64_set(&stat->size, 0);
	atomic64_set(&stat->alloc_nsize, 0);
	atomic64_set(&stat->alloc_hsize, 0);
	atomic64_set(&stat->alloc_size, 0);
	atomic64_set(&stat->k2u_size, 0);
}

/* statistics of all sp area, protected by sp_area_lock */
struct sp_spa_stat {
	unsigned int total_num;
	unsigned int alloc_num;
	unsigned int k2u_task_num;
	unsigned int k2u_spg_num;
	unsigned long total_size;
	unsigned long alloc_size;
	unsigned long k2u_task_size;
	unsigned long k2u_spg_size;
	unsigned long dvpp_size;
	unsigned long dvpp_va_size;
};

static struct sp_spa_stat spa_stat;

/* statistics of all sp group born from sp_alloc and k2u(spg) */
struct sp_overall_stat {
	atomic_t spa_total_num;
	atomic64_t spa_total_size;
};

static struct sp_overall_stat sp_overall_stat;

/*** Global share pool VA allocator ***/

enum spa_type {
	SPA_TYPE_ALLOC = 1,
	/* NOTE: reorganize after the statistical structure is reconstructed. */
	SPA_TYPE_ALLOC_PRIVATE = SPA_TYPE_ALLOC,
	SPA_TYPE_K2TASK,
	SPA_TYPE_K2SPG,
};

/*
 * We bump the reference when each mmap succeeds, and it will be dropped
 * when the vma is about to be released, so the sp_area object will be
 * automatically freed when all tasks in the sp group have exited.
 */
struct sp_area {
	unsigned long va_start;
	unsigned long va_end;		/* va_end always align to hugepage */
	unsigned long real_size;	/* real size with alignment */
	unsigned long region_vstart;	/* belong to normal region or DVPP region */
	unsigned long flags;
	bool is_hugepage;
	bool is_dead;
	atomic_t use_count;		/* How many vmas use this VA region */
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head link;		/* link to the spg->head */
	struct sp_group *spg;
	enum spa_type type;		/* where spa born from */
	struct mm_struct *mm;		/* owner of k2u(task) */
	unsigned long kva;		/* shared kva */
	pid_t applier;			/* the original applier process */
	int node_id;			/* memory node */
	int device_id;
};
static DEFINE_SPINLOCK(sp_area_lock);

static unsigned long spa_size(struct sp_area *spa)
{
	return spa->real_size;
}

static struct file *spa_file(struct sp_area *spa)
{
	if (spa->is_hugepage)
		return spa->spg->file_hugetlb;
	else
		return spa->spg->file;
}

/* the caller should hold sp_area_lock */
static void spa_inc_usage(struct sp_area *spa)
{
	enum spa_type type = spa->type;
	unsigned long size = spa->real_size;
	bool is_dvpp = spa->flags & SP_DVPP;
	bool is_huge = spa->is_hugepage;

	switch (type) {
	case SPA_TYPE_ALLOC:
		spa_stat.alloc_num += 1;
		spa_stat.alloc_size += size;
		update_spg_stat_alloc(size, true, is_huge, &spa->spg->instat);
		break;
	case SPA_TYPE_K2TASK:
		spa_stat.k2u_task_num += 1;
		spa_stat.k2u_task_size += size;
		update_spg_stat_k2u(size, true, &spa->spg->instat);
		break;
	case SPA_TYPE_K2SPG:
		spa_stat.k2u_spg_num += 1;
		spa_stat.k2u_spg_size += size;
		update_spg_stat_k2u(size, true, &spa->spg->instat);
		break;
	default:
		WARN(1, "invalid spa type");
	}

	if (is_dvpp) {
		spa_stat.dvpp_size += size;
		spa_stat.dvpp_va_size += ALIGN(size, PMD_SIZE);
	}

	/*
	 * all the calculations won't overflow due to system limitation and
	 * parameter checking in sp_alloc_area()
	 */
	spa_stat.total_num += 1;
	spa_stat.total_size += size;

	if (!is_local_group(spa->spg->id)) {
		atomic_inc(&sp_overall_stat.spa_total_num);
		atomic64_add(size, &sp_overall_stat.spa_total_size);
	}
}

/* the caller should hold sp_area_lock */
static void spa_dec_usage(struct sp_area *spa)
{
	enum spa_type type = spa->type;
	unsigned long size = spa->real_size;
	bool is_dvpp = spa->flags & SP_DVPP;
	bool is_huge = spa->is_hugepage;

	switch (type) {
	case SPA_TYPE_ALLOC:
		spa_stat.alloc_num -= 1;
		spa_stat.alloc_size -= size;
		update_spg_stat_alloc(size, false, is_huge, &spa->spg->instat);
		break;
	case SPA_TYPE_K2TASK:
		spa_stat.k2u_task_num -= 1;
		spa_stat.k2u_task_size -= size;
		update_spg_stat_k2u(size, false, &spa->spg->instat);
		break;
	case SPA_TYPE_K2SPG:
		spa_stat.k2u_spg_num -= 1;
		spa_stat.k2u_spg_size -= size;
		update_spg_stat_k2u(size, false, &spa->spg->instat);
		break;
	default:
		WARN(1, "invalid spa type");
	}

	if (is_dvpp) {
		spa_stat.dvpp_size -= size;
		spa_stat.dvpp_va_size -= ALIGN(size, PMD_SIZE);
	}

	spa_stat.total_num -= 1;
	spa_stat.total_size -= size;

	if (!is_local_group(spa->spg->id)) {
		atomic_dec(&sp_overall_stat.spa_total_num);
		atomic64_sub(spa->real_size, &sp_overall_stat.spa_total_size);
	}
}

static void update_mem_usage(unsigned long size, bool inc, bool is_hugepage,
	struct sp_group_node *spg_node, enum spa_type type)
{
	switch (type) {
	case SPA_TYPE_ALLOC:
		update_mem_usage_alloc(size, inc, is_hugepage, spg_node);
		break;
	case SPA_TYPE_K2TASK:
	case SPA_TYPE_K2SPG:
		update_mem_usage_k2u(size, inc, spg_node);
		break;
	default:
		WARN(1, "invalid stat type\n");
	}
}

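/* Find the sp_group_node linking @mm to @spg, or NULL if @mm is not in @spg. */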
struct sp_group_node *find_spg_node_by_spg(struct mm_struct *mm,
		struct sp_group *spg)
{
	struct sp_group_node *spg_node;

	list_for_each_entry(spg_node, &mm->sp_group_master->node_list, group_node) {
		if (spg_node->spg == spg)
			return spg_node;
	}
	return NULL;
}

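/* Charge or uncharge @spa against the per-process and per-group statistics. */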
static void sp_update_process_stat(struct task_struct *tsk, bool inc,
	struct sp_area *spa)
{
	struct sp_group_node *spg_node;
	unsigned long size = spa->real_size;
	enum spa_type type = spa->type;

	spg_node = find_spg_node_by_spg(tsk->mm, spa->spg);
	update_mem_usage(size, inc, spa->is_hugepage, spg_node, type);
}

static inline void check_interrupt_context(void)
{
	if (unlikely(in_interrupt()))
		panic("function can't be used in interrupt context\n");
}

static inline bool check_aoscore_process(struct task_struct *tsk)
{
	if (tsk->flags & PF_DOMAIN_CORE)
		return true;
	else
		return false;
}

static unsigned long sp_mmap(struct mm_struct *mm, struct file *file,
			     struct sp_area *spa, unsigned long *populate,
			     unsigned long prot, struct vm_area_struct **pvma);
static void sp_munmap(struct mm_struct *mm, unsigned long addr, unsigned long size);

#define K2U_NORMAL	0
#define K2U_COREDUMP	1

struct sp_k2u_context {
	unsigned long kva;
	unsigned long kva_aligned;
	unsigned long size;
	unsigned long size_aligned;
	unsigned long sp_flags;
	int state;
	enum spa_type type;
};

static unsigned long sp_remap_kva_to_vma(struct sp_area *spa, struct mm_struct *mm,
					unsigned long prot, struct sp_k2u_context *kc);

static void free_sp_group_id(int spg_id)
{
	/* ida operation is protected by an internal spin_lock */
	if ((spg_id >= SPG_ID_AUTO_MIN && spg_id <= SPG_ID_AUTO_MAX) ||
	    (spg_id >= SPG_ID_LOCAL_MIN && spg_id <= SPG_ID_LOCAL_MAX))
		ida_free(&sp_group_id_ida, spg_id);
}

static void free_new_spg_id(bool new, int spg_id)
{
	if (new)
		free_sp_group_id(spg_id);
}

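/*
 * Final teardown of a group: drop the backing files, remove the group from
 * the idr, detach all mappings and free it. The caller must hold sp_group_sem.
 */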
static void free_sp_group_locked(struct sp_group *spg)
{
	int type;

	fput(spg->file);
	fput(spg->file_hugetlb);
	idr_remove(&sp_group_idr, spg->id);
	free_sp_group_id((unsigned int)spg->id);

	for (type = SP_MAPPING_START; type < SP_MAPPING_END; type++)
		sp_mapping_detach(spg, spg->mapping[type]);

	if (!is_local_group(spg->id))
		system_group_count--;

	kfree(spg);
	WARN(system_group_count < 0, "unexpected group count\n");
}

static void free_sp_group(struct sp_group *spg)
{
	down_write(&sp_group_sem);
	free_sp_group_locked(spg);
	up_write(&sp_group_sem);
}

static void sp_group_drop_locked(struct sp_group *spg)
{
	lockdep_assert_held_write(&sp_group_sem);

	if (atomic_dec_and_test(&spg->use_count))
		free_sp_group_locked(spg);
}

static void sp_group_drop(struct sp_group *spg)
{
	if (atomic_dec_and_test(&spg->use_count))
		free_sp_group(spg);
}

/* use with put_task_struct(task) */
static int get_task(int tgid, struct task_struct **task)
{
	struct task_struct *tsk;
	struct pid *p;

	rcu_read_lock();
	p = find_pid_ns(tgid, &init_pid_ns);
	tsk = pid_task(p, PIDTYPE_TGID);
	if (!tsk || (tsk->flags & PF_EXITING)) {
		rcu_read_unlock();
		return -ESRCH;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	*task = tsk;
	return 0;
}

/*
 * the caller must:
 * 1. hold spg->rw_lock
 * 2. ensure no concurrency problem for mm_struct
 */
static bool is_process_in_group(struct sp_group *spg,
						 struct mm_struct *mm)
{
	struct sp_group_node *spg_node;

	list_for_each_entry(spg_node, &spg->procs, proc_node)
		if (spg_node->master->mm == mm)
			return true;

	return false;
}

/* user must call sp_group_drop() after use */
static struct sp_group *__sp_find_spg_locked(int tgid, int spg_id)
{
	struct sp_group *spg = NULL;
	struct task_struct *tsk = NULL;
	int ret = 0;

	if (spg_id == SPG_ID_DEFAULT) {
		ret = get_task(tgid, &tsk);
		if (ret)
			return NULL;

		task_lock(tsk);
		if (tsk->mm == NULL)
			spg = NULL;
		else if (tsk->mm->sp_group_master)
			spg = tsk->mm->sp_group_master->local;
		task_unlock(tsk);

		put_task_struct(tsk);
	} else {
		spg = idr_find(&sp_group_idr, spg_id);
	}

	if (!spg || !atomic_inc_not_zero(&spg->use_count))
		return NULL;

	return spg;
}

static struct sp_group *__sp_find_spg(int tgid, int spg_id)
{
	struct sp_group *spg;

	down_read(&sp_group_sem);
	spg = __sp_find_spg_locked(tgid, spg_id);
	up_read(&sp_group_sem);
	return spg;
}

/**
 * mg_sp_group_id_by_pid() - Get the sp_group ID array of a process.
 * @tgid: tgid of target process.
 * @spg_ids: point to an array to save the group ids the process belongs to
 * @num: input the spg_ids array size; output the spg number of the process
 *
 * Return:
 * >0		- the sp_group ID.
 * -ENODEV	- target process doesn't belong to any sp_group.
 * -EINVAL	- spg_ids or num is NULL.
 * -E2BIG	- the num of groups process belongs to is larger than *num
 */
int mg_sp_group_id_by_pid(int tgid, int *spg_ids, int *num)
{
	int ret = 0, real_count;
	struct sp_group_node *node;
	struct sp_group_master *master = NULL;
	struct task_struct *tsk;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	if (!spg_ids || !num || *num <= 0)
		return -EINVAL;

	ret = get_task(tgid, &tsk);
	if (ret)
		return ret;

	down_read(&sp_group_sem);
	task_lock(tsk);
	if (tsk->mm)
		master = tsk->mm->sp_group_master;
	task_unlock(tsk);

	if (!master) {
		ret = -ENODEV;
		goto out_up_read;
	}

	/*
	 * There is a local group for each process, which is used for
	 * passthrough allocation. The local group is an internal
	 * implementation detail for convenience and is not meant to be
	 * visible to the user.
	 */
	real_count = master->count - 1;
	if (real_count <= 0) {
		ret = -ENODEV;
		goto out_up_read;
	}
	if ((unsigned int)*num < real_count) {
		ret = -E2BIG;
		goto out_up_read;
	}
	*num = real_count;

	list_for_each_entry(node, &master->node_list, group_node) {
		if (is_local_group(node->spg->id))
			continue;
		*(spg_ids++) = node->spg->id;
	}

out_up_read:
	up_read(&sp_group_sem);
	put_task_struct(tsk);
	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_group_id_by_pid);

static bool is_online_node_id(int node_id)
{
	return node_id >= 0 && node_id < MAX_NUMNODES && node_online(node_id);
}

static void sp_group_init(struct sp_group *spg, int spg_id, unsigned long flag)
{
	spg->id = spg_id;
	spg->flag = flag;
	spg->is_alive = true;
	spg->proc_num = 0;
	atomic_set(&spg->use_count, 1);
	INIT_LIST_HEAD(&spg->procs);
	INIT_LIST_HEAD(&spg->spa_list);
	INIT_LIST_HEAD(&spg->mnode);
	init_rwsem(&spg->rw_lock);
	sp_init_group_stat(&spg->instat);
}

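/*
 * Allocate and initialise a new group with its backing shmem and hugetlb
 * files and register it in sp_group_idr under @spg_id.
 * The caller must hold sp_group_sem.
 */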
static struct sp_group *create_spg(int spg_id, unsigned long flag)
{
	int ret;
	struct sp_group *spg;
	char name[DNAME_INLINE_LEN];
	struct user_struct *user = NULL;
	int hsize_log = MAP_HUGE_2MB >> MAP_HUGE_SHIFT;

	if (unlikely(system_group_count + 1 == MAX_GROUP_FOR_SYSTEM &&
		     !is_local_group(spg_id))) {
		pr_err("reach system max group num\n");
		return ERR_PTR(-ENOSPC);
	}

	spg = kzalloc(sizeof(*spg), GFP_KERNEL);
	if (spg == NULL)
		return ERR_PTR(-ENOMEM);

	sprintf(name, "sp_group_%d", spg_id);
	spg->file = shmem_kernel_file_setup(name, MAX_LFS_FILESIZE, VM_NORESERVE);
	if (IS_ERR(spg->file)) {
		pr_err("spg file setup failed %ld\n", PTR_ERR(spg->file));
		ret = PTR_ERR(spg->file);
		goto out_kfree;
	}

	sprintf(name, "sp_group_%d_huge", spg_id);
	spg->file_hugetlb = hugetlb_file_setup(name, MAX_LFS_FILESIZE,
				VM_NORESERVE, &user, HUGETLB_ANONHUGE_INODE, hsize_log);
	if (IS_ERR(spg->file_hugetlb)) {
		pr_err("spg file_hugetlb setup failed %ld\n", PTR_ERR(spg->file_hugetlb));
		ret = PTR_ERR(spg->file_hugetlb);
		goto out_fput;
	}

	sp_group_init(spg, spg_id, flag);

	ret = idr_alloc(&sp_group_idr, spg, spg_id, spg_id + 1, GFP_KERNEL);
	if (ret < 0) {
		pr_err("group %d idr alloc failed %d\n", spg_id, ret);
		goto out_fput_huge;
	}

	if (!is_local_group(spg_id))
		system_group_count++;

	return spg;

out_fput_huge:
	fput(spg->file_hugetlb);
out_fput:
	fput(spg->file);
out_kfree:
	kfree(spg);
	return ERR_PTR(ret);
}

/* the caller must hold sp_group_sem */
static struct sp_group *find_or_alloc_sp_group(int spg_id, unsigned long flag)
{
	struct sp_group *spg;

	spg = __sp_find_spg_locked(current->tgid, spg_id);

	if (!spg) {
		spg = create_spg(spg_id, flag);
	} else {
		down_read(&spg->rw_lock);
		if (!spg_valid(spg)) {
			up_read(&spg->rw_lock);
			sp_group_drop_locked(spg);
			return ERR_PTR(-ENODEV);
		}
		up_read(&spg->rw_lock);
		/* spg->use_count has increased due to __sp_find_spg() */
	}

	return spg;
}

static void __sp_area_drop_locked(struct sp_area *spa);

/* The caller must down_write(&mm->mmap_lock) */
static void sp_munmap_task_areas(struct mm_struct *mm, struct sp_group *spg, struct list_head *stop)
{
	struct sp_area *spa, *prev = NULL;
	int err;


	spin_lock(&sp_area_lock);
	list_for_each_entry(spa, &spg->spa_list, link) {
		if (&spa->link == stop)
			break;

		__sp_area_drop_locked(prev);
		prev = spa;

		atomic_inc(&spa->use_count);
		spin_unlock(&sp_area_lock);

		err = do_munmap(mm, spa->va_start, spa_size(spa), NULL);
		if (err) {
			/* we are not supposed to fail */
			pr_err("failed to unmap VA %pK when munmap task areas\n",
			       (void *)spa->va_start);
		}

		spin_lock(&sp_area_lock);
	}
	__sp_area_drop_locked(prev);

	spin_unlock(&sp_area_lock);
}

/* the caller must hold sp_group_sem */
static int mm_add_group_init(struct task_struct *tsk, struct mm_struct *mm,
			     struct sp_group *spg)
{
	int ret;
	struct sp_group_master *master;

	if (!mm->sp_group_master) {
		ret = sp_init_group_master_locked(tsk, mm);
		if (ret)
			return ret;
	} else {
		if (is_process_in_group(spg, mm)) {
			pr_err_ratelimited("task already in target group, id=%d\n", spg->id);
			return -EEXIST;
		}

		master = mm->sp_group_master;
		if (master->count == MAX_GROUP_FOR_TASK) {
			pr_err("task reaches max group num\n");
			return -ENOSPC;
		}
	}

	return 0;
}

/* the caller must hold sp_group_sem */
static struct sp_group_node *create_spg_node(struct mm_struct *mm,
	unsigned long prot, struct sp_group *spg)
{
	struct sp_group_master *master = mm->sp_group_master;
	struct sp_group_node *spg_node;

	spg_node = kzalloc(sizeof(struct sp_group_node), GFP_KERNEL);
	if (spg_node == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&spg_node->group_node);
	INIT_LIST_HEAD(&spg_node->proc_node);
	spg_node->spg = spg;
	spg_node->master = master;
	spg_node->prot = prot;
	sp_init_spg_proc_stat(&spg_node->instat, spg->id);

	list_add_tail(&spg_node->group_node, &master->node_list);
	master->count++;

	return spg_node;
}

/* the caller must down_write(&spg->rw_lock) */
static int insert_spg_node(struct sp_group *spg, struct sp_group_node *node)
{
	if (spg->proc_num + 1 == MAX_PROC_PER_GROUP) {
		pr_err_ratelimited("add group: group reaches max process num\n");
		return -ENOSPC;
	}

	spg->proc_num++;
	list_add_tail(&node->proc_node, &spg->procs);

	return 0;
}

/* the caller must down_write(&spg->rw_lock) */
static void delete_spg_node(struct sp_group *spg, struct sp_group_node *node)
{
	list_del(&node->proc_node);
	spg->proc_num--;
}

/* the caller must hold sp_group_sem */
static void free_spg_node(struct mm_struct *mm, struct sp_group *spg,
	struct sp_group_node *spg_node)
{
	struct sp_group_master *master = mm->sp_group_master;

	list_del(&spg_node->group_node);
	master->count--;

	kfree(spg_node);
}

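/*
 * Add @mm to its local group with PROT_READ | PROT_WRITE and pin the mm
 * with an extra mm_users reference.
 */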
static int local_group_add_task(struct mm_struct *mm, struct sp_group *spg)
{
	struct sp_group_node *node;

	node = create_spg_node(mm, PROT_READ | PROT_WRITE, spg);
	if (IS_ERR(node))
		return PTR_ERR(node);

	insert_spg_node(spg, node);
	mmget(mm);

	return 0;
}

/**
 * mg_sp_group_add_task() - Add a process to a share group (sp_group).
 * @tgid: the tgid of the task to be added.
 * @prot: the prot of task for this spg.
 * @spg_id: the ID of the sp_group.
 * @flag: to give some special message.
 *
 * A process can't be added to more than one sp_group in single-group mode,
 * but can be in multiple-group mode.
 *
 * Return: A positive group number for success, -errno on failure.
 *
 * The manually specified ID is between [SPG_ID_MIN, SPG_ID_MAX].
 * The automatically allocated ID is between [SPG_ID_AUTO_MIN, SPG_ID_AUTO_MAX].
 * When negative, the return value is -errno.
 */
int mg_sp_group_add_task(int tgid, unsigned long prot, int spg_id)
{
	unsigned long flag = 0;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct sp_group *spg;
	struct sp_group_node *node = NULL;
	int ret = 0;
	bool id_newly_generated = false;
	struct sp_area *spa, *prev = NULL;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	/* only allow READ, READ | WRITE */
	if (!((prot == PROT_READ)
	      || (prot == (PROT_READ | PROT_WRITE)))) {
		pr_err_ratelimited("prot is invalid 0x%lx\n", prot);
		return -EINVAL;
	}

	if (spg_id < SPG_ID_MIN || spg_id > SPG_ID_AUTO) {
		pr_err_ratelimited("add group failed, invalid group id %d\n", spg_id);
		return -EINVAL;
	}

	if (spg_id >= SPG_ID_AUTO_MIN && spg_id <= SPG_ID_AUTO_MAX) {
		spg = __sp_find_spg(tgid, spg_id);

		if (!spg) {
			pr_err_ratelimited("spg %d hasn't been created\n", spg_id);
			return -EINVAL;
		}

		down_read(&spg->rw_lock);
		if (!spg_valid(spg)) {
			up_read(&spg->rw_lock);
			pr_err_ratelimited("add group failed, group id %d is dead\n", spg_id);
			sp_group_drop(spg);
			return -EINVAL;
		}
		up_read(&spg->rw_lock);

		sp_group_drop(spg);
	}

	if (spg_id == SPG_ID_AUTO) {
		spg_id = ida_alloc_range(&sp_group_id_ida, SPG_ID_AUTO_MIN,
					 SPG_ID_AUTO_MAX, GFP_ATOMIC);
		if (spg_id < 0) {
			pr_err_ratelimited("add group failed, auto generate group id failed\n");
			return spg_id;
		}
		id_newly_generated = true;
	}

	down_write(&sp_group_sem);

	ret = get_task(tgid, &tsk);
	if (ret) {
		up_write(&sp_group_sem);
		free_new_spg_id(id_newly_generated, spg_id);
		goto out;
	}

	if (check_aoscore_process(tsk)) {
		up_write(&sp_group_sem);
		ret = -EACCES;
		free_new_spg_id(id_newly_generated, spg_id);
		goto out_put_task;
	}

	/*
	 * group_leader: current thread may be exiting in a multithread process
	 *
	 * DESIGN IDEA
	 * We increase mm->mm_users deliberately to ensure it's decreased in
	 * share pool under only 2 circumstances, which simplifies the overall
	 * design as the mm won't be freed unexpectedly.
	 *
	 * The corresponding refcount decrements are as follows:
	 * 1. the error handling branch of THIS function.
	 * 2. In sp_group_exit(). It's called only when process is exiting.
	 */
	mm = get_task_mm(tsk->group_leader);
	if (!mm) {
		up_write(&sp_group_sem);
		ret = -ESRCH;
		free_new_spg_id(id_newly_generated, spg_id);
		goto out_put_task;
	}

	spg = find_or_alloc_sp_group(spg_id, flag);
	if (IS_ERR(spg)) {
		up_write(&sp_group_sem);
		ret = PTR_ERR(spg);
		free_new_spg_id(id_newly_generated, spg_id);
		goto out_put_mm;
	}

	down_write(&spg->rw_lock);
	ret = mm_add_group_init(tsk, mm, spg);
	if (ret) {
		up_write(&spg->rw_lock);
		goto out_drop_group;
	}

	ret = sp_mapping_group_setup(mm, spg);
	if (ret) {
		up_write(&spg->rw_lock);
		goto out_drop_group;
	}

	node = create_spg_node(mm, prot, spg);
	if (unlikely(IS_ERR(node))) {
		up_write(&spg->rw_lock);
		ret = PTR_ERR(node);
		goto out_drop_group;
	}

	ret = insert_spg_node(spg, node);
	if (unlikely(ret)) {
		up_write(&spg->rw_lock);
		goto out_drop_spg_node;
	}

	/*
	 * create mappings of existing shared memory segments into this
	 * new process' page table.
	 */
	spin_lock(&sp_area_lock);

	list_for_each_entry(spa, &spg->spa_list, link) {
		unsigned long populate = 0;
		struct file *file = spa_file(spa);
		unsigned long addr;
		unsigned long prot_spa = prot;

		if ((spa->flags & (SP_PROT_RO | SP_PROT_FOCUS)) == (SP_PROT_RO | SP_PROT_FOCUS))
			prot_spa &= ~PROT_WRITE;

		__sp_area_drop_locked(prev);
		prev = spa;

		atomic_inc(&spa->use_count);

		if (spa->is_dead == true)
			continue;

		spin_unlock(&sp_area_lock);

		if (spa->type == SPA_TYPE_K2SPG && spa->kva) {
			addr = sp_remap_kva_to_vma(spa, mm, prot_spa, NULL);
			if (IS_ERR_VALUE(addr))
				pr_warn("add group remap k2u failed %ld\n", addr);

			spin_lock(&sp_area_lock);
			continue;
		}

		down_write(&mm->mmap_lock);
		if (unlikely(mm->core_state)) {
			sp_munmap_task_areas(mm, spg, &spa->link);
			up_write(&mm->mmap_lock);
			ret = -EBUSY;
			pr_err("add group: encountered coredump, abort\n");
			spin_lock(&sp_area_lock);
			break;
		}

		addr = sp_mmap(mm, file, spa, &populate, prot_spa, NULL);
		if (IS_ERR_VALUE(addr)) {
			sp_munmap_task_areas(mm, spg, &spa->link);
			up_write(&mm->mmap_lock);
			ret = addr;
			pr_err("add group: sp mmap failed %d\n", ret);
			spin_lock(&sp_area_lock);
			break;
		}
		up_write(&mm->mmap_lock);

		if (populate) {
			ret = do_mm_populate(mm, spa->va_start, populate, 0);
			if (ret) {
				if (unlikely(fatal_signal_pending(current)))
					pr_warn_ratelimited("add group failed, current thread is killed\n");
				else
					pr_warn_ratelimited("add group failed, mm populate failed (potential no enough memory when -12): %d, spa type is %d\n",
					ret, spa->type);
				down_write(&mm->mmap_lock);
				sp_munmap_task_areas(mm, spg, spa->link.next);
				up_write(&mm->mmap_lock);
				spin_lock(&sp_area_lock);
				break;
			}
		}

		spin_lock(&sp_area_lock);
	}
	__sp_area_drop_locked(prev);
	spin_unlock(&sp_area_lock);

	if (unlikely(ret))
		delete_spg_node(spg, node);
	up_write(&spg->rw_lock);

out_drop_spg_node:
	if (unlikely(ret))
		free_spg_node(mm, spg, node);
	/*
	 * to simplify design, we don't release the resource of
	 * group_master and proc_stat, they will be freed when
	 * process is exiting.
	 */
out_drop_group:
	if (unlikely(ret)) {
		up_write(&sp_group_sem);
		sp_group_drop(spg);
	} else
		up_write(&sp_group_sem);
out_put_mm:
	/* No need to put the mm if the sp group adds this mm successfully */
	if (unlikely(ret))
		mmput(mm);
out_put_task:
	put_task_struct(tsk);
out:
	return ret == 0 ? spg_id : ret;
}
EXPORT_SYMBOL_GPL(mg_sp_group_add_task);
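
/*
 * Example usage (illustrative only): add the current process to an
 * automatically allocated group and remove it again when the shared
 * memory is no longer needed.
 *
 *	int spg_id = mg_sp_group_add_task(current->tgid,
 *					  PROT_READ | PROT_WRITE, SPG_ID_AUTO);
 *	if (spg_id < 0)
 *		return spg_id;
 *	...
 *	mg_sp_group_del_task(current->tgid, spg_id);
 */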

/**
 * mg_sp_group_del_task() - delete a process from a sp group.
 * @tgid: the tgid of the task to be deleted
 * @spg_id: sharepool group id
 *
 * the group's spa list must be empty, or deletion will fail.
 *
 * Return:
 * * if success, return 0.
 * * -EINVAL, spg_id invalid or spa_list not empty or spg dead
 * * -ESRCH, the task group of tgid is not in group / process dead
 */
int mg_sp_group_del_task(int tgid, int spg_id)
{
	int ret = 0;
	struct sp_group *spg;
	struct sp_group_node *spg_node;
	struct task_struct *tsk = NULL;
	struct mm_struct *mm = NULL;
	bool is_alive = true;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	if (spg_id < SPG_ID_MIN || spg_id > SPG_ID_AUTO) {
		pr_err_ratelimited("del from group failed, invalid group id %d\n", spg_id);
		return -EINVAL;
	}

	spg = __sp_find_spg(tgid, spg_id);
	if (!spg) {
		pr_err_ratelimited("spg not found or get task failed.");
		return -EINVAL;
	}
	down_write(&sp_group_sem);

	if (!spg_valid(spg)) {
		up_write(&sp_group_sem);
		pr_err_ratelimited("spg dead.");
		ret = -EINVAL;
		goto out;
	}

	if (!list_empty(&spg->spa_list)) {
		up_write(&sp_group_sem);
		pr_err_ratelimited("spa is not empty");
		ret = -EINVAL;
		goto out;
	}

	ret = get_task(tgid, &tsk);
	if (ret) {
		up_write(&sp_group_sem);
		pr_err_ratelimited("task is not found");
		goto out;
	}
	mm = get_task_mm(tsk->group_leader);
	if (!mm) {
		up_write(&sp_group_sem);
		pr_err_ratelimited("mm is not found");
		ret = -ESRCH;
		goto out_put_task;
	}

	if (!mm->sp_group_master) {
		up_write(&sp_group_sem);
		pr_err("task(%d) is not in any group(%d)\n", tgid, spg_id);
		ret = -EINVAL;
		goto out_put_mm;
	}

	spg_node = find_spg_node_by_spg(mm, spg);
	if (!spg_node) {
		up_write(&sp_group_sem);
		pr_err_ratelimited("process not in group");
		ret = -ESRCH;
		goto out_put_mm;
	}

	down_write(&spg->rw_lock);
	if (list_is_singular(&spg->procs))
		is_alive = spg->is_alive = false;
	spg->proc_num--;
	list_del(&spg_node->proc_node);
	sp_group_drop(spg);
	up_write(&spg->rw_lock);
	if (!is_alive)
		blocking_notifier_call_chain(&sp_notifier_chain, 0, spg);

	list_del(&spg_node->group_node);
	mm->sp_group_master->count--;
	kfree(spg_node);
	atomic_dec(&mm->mm_users);

	up_write(&sp_group_sem);

out_put_mm:
	mmput(mm);
out_put_task:
	put_task_struct(tsk);
out:
	sp_group_drop(spg); /* if spg dead, freed here */
	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_group_del_task);

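/**
 * mg_sp_id_of_current() - Get the local sp_group ID of the current process.
 *
 * Return: the local group ID on success, -errno on failure.
 */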
int mg_sp_id_of_current(void)
{
	int ret, spg_id;
	struct sp_group_master *master;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	if ((current->flags & PF_KTHREAD) || !current->mm)
		return -EINVAL;

	down_read(&sp_group_sem);
	master = current->mm->sp_group_master;
	if (master) {
		spg_id = master->local->id;
		up_read(&sp_group_sem);
		return spg_id;
	}
	up_read(&sp_group_sem);

	down_write(&sp_group_sem);
	ret = sp_init_group_master_locked(current, current->mm);
	if (ret) {
		up_write(&sp_group_sem);
		return ret;
	}
	master = current->mm->sp_group_master;
	spg_id = master->local->id;
	up_write(&sp_group_sem);

	return spg_id;
}
EXPORT_SYMBOL_GPL(mg_sp_id_of_current);

/* the caller must hold sp_area_lock */
static void insert_sp_area(struct sp_mapping *spm, struct sp_area *spa)
{
	struct rb_node **p = &spm->area_root.rb_node;
	struct rb_node *parent = NULL;

	while (*p) {
		struct sp_area *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct sp_area, rb_node);
		if (spa->va_start < tmp->va_end)
			p = &(*p)->rb_left;
		else if (spa->va_end > tmp->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&spa->rb_node, parent, p);
	rb_insert_color(&spa->rb_node, &spm->area_root);
}

/**
 * sp_alloc_area() - Allocate a region of VA from the share pool.
 * @size: the size of VA to allocate.
 * @flags: how to allocate the memory.
 * @spg: the share group that the memory is allocated to.
 * @type: the type of the region.
 * @applier: the tgid of the task which allocates the region.
 *
 * Return: a valid sp_area pointer on success, ERR_PTR(-errno) on failure.
 */
static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags,
				     struct sp_group *spg, enum spa_type type,
				     pid_t applier)
{
	struct sp_area *spa, *first, *err;
	struct rb_node *n;
	unsigned long vstart;
	unsigned long vend;
	unsigned long addr;
	unsigned long size_align = ALIGN(size, PMD_SIZE); /* va aligned to 2M */
	int device_id, node_id;
	struct sp_mapping *mapping;

	device_id = sp_flags_device_id(flags);
	node_id = flags & SP_SPEC_NODE_ID ? sp_flags_node_id(flags) : device_id;

	if (!is_online_node_id(node_id)) {
		pr_err_ratelimited("invalid numa node id %d\n", node_id);
		return ERR_PTR(-EINVAL);
	}

	if (flags & SP_PROT_FOCUS) {
		if ((flags & (SP_DVPP | SP_PROT_RO)) != SP_PROT_RO) {
			pr_err("invalid sp_flags [%lx]\n", flags);
			return ERR_PTR(-EINVAL);
		}
		mapping = spg->mapping[SP_MAPPING_RO];
	} else if (flags & SP_DVPP) {
		mapping = spg->mapping[SP_MAPPING_DVPP];
	} else {
		mapping = spg->mapping[SP_MAPPING_NORMAL];
	}

	if (!mapping) {
		pr_err_ratelimited("non DVPP spg, id %d\n", spg->id);
		return ERR_PTR(-EINVAL);
	}

	vstart = mapping->start[device_id];
	vend = mapping->end[device_id];
	spa = __kmalloc_node(sizeof(struct sp_area), GFP_KERNEL, node_id);
	if (unlikely(!spa))
		return ERR_PTR(-ENOMEM);

	spin_lock(&sp_area_lock);

	/*
	 * Invalidate cache if we have more permissive parameters.
	 * cached_hole_size notes the largest hole noticed _below_
	 * the sp_area cached in free_area_cache: if size fits
	 * into that hole, we want to scan from vstart to reuse
	 * the hole instead of allocating above free_area_cache.
	 * Note that sp_free_area may update free_area_cache
	 * without updating cached_hole_size.
	 */
	if (!mapping->free_area_cache || size_align < mapping->cached_hole_size ||
	    vstart != mapping->cached_vstart) {
		mapping->cached_hole_size = 0;
		mapping->free_area_cache = NULL;
	}

	/* record if we encounter less permissive parameters */
	mapping->cached_vstart = vstart;

	/* find starting point for our search */
	if (mapping->free_area_cache) {
		first = rb_entry(mapping->free_area_cache, struct sp_area, rb_node);
		addr = first->va_end;
		if (addr + size_align < addr) {
			err = ERR_PTR(-EOVERFLOW);
			goto error;
		}
	} else {
		addr = vstart;
		if (addr + size_align < addr) {
			err = ERR_PTR(-EOVERFLOW);
			goto error;
		}

		n = mapping->area_root.rb_node;
		first = NULL;

		while (n) {
			struct sp_area *tmp;

			tmp = rb_entry(n, struct sp_area, rb_node);
			if (tmp->va_end >= addr) {
				first = tmp;
				if (tmp->va_start <= addr)
					break;
				n = n->rb_left;
			} else
				n = n->rb_right;
		}

		if (!first)
			goto found;
	}

	/* from the starting point, traverse areas until a suitable hole is found */
	while (addr + size_align > first->va_start && addr + size_align <= vend) {
		if (addr + mapping->cached_hole_size < first->va_start)
			mapping->cached_hole_size = first->va_start - addr;
		addr = first->va_end;
		if (addr + size_align < addr) {
			err = ERR_PTR(-EOVERFLOW);
			goto error;
		}

		n = rb_next(&first->rb_node);
		if (n)
			first = rb_entry(n, struct sp_area, rb_node);
		else
			goto found;
	}

found:
	if (addr + size_align > vend) {
		err = ERR_PTR(-EOVERFLOW);
		goto error;
	}

	spa->va_start = addr;
	spa->va_end = addr + size_align;
	spa->real_size = size;
	spa->region_vstart = vstart;
	spa->flags = flags;
	spa->is_hugepage = (flags & SP_HUGEPAGE);
	spa->is_dead = false;
	spa->spg = spg;
	atomic_set(&spa->use_count, 1);
	spa->type = type;
	spa->mm = NULL;
	spa->kva = 0;   /* NULL pointer */
	spa->applier = applier;
	spa->node_id = node_id;
	spa->device_id = device_id;

	spa_inc_usage(spa);
	insert_sp_area(mapping, spa);
	mapping->free_area_cache = &spa->rb_node;
	list_add_tail(&spa->link, &spg->spa_list);

	spin_unlock(&sp_area_lock);

	return spa;

error:
	spin_unlock(&sp_area_lock);
	kfree(spa);
	return err;
}

/* the caller should hold sp_area_lock */
static struct sp_area *find_sp_area_locked(struct sp_group *spg,
		unsigned long addr)
{
	struct sp_mapping *spm = sp_mapping_find(spg, addr);
	struct rb_node *n = spm->area_root.rb_node;
	while (n) {
		struct sp_area *spa;

		spa = rb_entry(n, struct sp_area, rb_node);
		if (addr < spa->va_start) {
			n = n->rb_left;
		} else if (addr > spa->va_start) {
			n = n->rb_right;
		} else {
			return spa;
		}
	}

	return NULL;
}

static struct sp_area *get_sp_area(struct sp_group *spg, unsigned long addr)
{
	struct sp_area *n;

	spin_lock(&sp_area_lock);
	n = find_sp_area_locked(spg, addr);
	if (n)
		atomic_inc(&n->use_count);
	spin_unlock(&sp_area_lock);
	return n;
}

static bool vmalloc_area_clr_flag(unsigned long kva, unsigned long flags)
{
	struct vm_struct *area;

	area = find_vm_area((void *)kva);
	if (area) {
		area->flags &= ~flags;
		return true;
	}

	return false;
}

/*
 * Free the VA region starting from addr to the share pool
 */
static void sp_free_area(struct sp_area *spa)
{
	unsigned long addr = spa->va_start;
	struct sp_mapping *spm;

	lockdep_assert_held(&sp_area_lock);

	spm = sp_mapping_find(spa->spg, addr);
	if (spm->free_area_cache) {
		struct sp_area *cache;

		cache = rb_entry(spm->free_area_cache, struct sp_area, rb_node);
		if (spa->va_start <= cache->va_start) {
			spm->free_area_cache = rb_prev(&spa->rb_node);
			/*
			 * the new cache node may be changed to another region,
			 * i.e. from DVPP region to normal region
			 */
			if (spm->free_area_cache) {
				cache = rb_entry(spm->free_area_cache,
						 struct sp_area, rb_node);
				spm->cached_vstart = cache->region_vstart;
			}
			/*
			 * We don't try to update cached_hole_size,
			 * but it won't go very wrong.
			 */
		}
	}

	if (spa->kva && !vmalloc_area_clr_flag(spa->kva, VM_SHAREPOOL))
		pr_debug("clear spa->kva %ld is not valid\n", spa->kva);

	spa_dec_usage(spa);
	list_del(&spa->link);

	rb_erase(&spa->rb_node, &spm->area_root);
	RB_CLEAR_NODE(&spa->rb_node);
	kfree(spa);
}

static void __sp_area_drop_locked(struct sp_area *spa)
{
	/*
	 * Considering a situation where task A and B are in the same spg.
	 * A is exiting and calling remove_vma(). Before A calls this func,
	 * B calls sp_free() to free the same spa. So spa may be NULL when A
	 * calls this func later.
	 */
	if (!spa)
		return;

	if (atomic_dec_and_test(&spa->use_count))
		sp_free_area(spa);
}

static void __sp_area_drop(struct sp_area *spa)
{
	spin_lock(&sp_area_lock);
	__sp_area_drop_locked(spa);
	spin_unlock(&sp_area_lock);
}

void sp_area_drop(struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARE_POOL))
		return;

	/*
	 * Considering a situation where task A and B are in the same spg.
	 * A is exiting and calling remove_vma() -> ... -> sp_area_drop().
	 * Concurrently, B is calling sp_free() to free the same spa.
	 * find_sp_area_locked() and __sp_area_drop_locked() should be
	 * an atomic operation.
	 */
	spin_lock(&sp_area_lock);
	__sp_area_drop_locked(vma->vm_private_data);
	spin_unlock(&sp_area_lock);
}

/*
 * The function calls of do_munmap() won't change any non-atomic member
 * of struct sp_group. Please review the following chain:
 * do_munmap -> remove_vma_list -> remove_vma -> sp_area_drop ->
 * __sp_area_drop_locked -> sp_free_area
 */
static void sp_munmap(struct mm_struct *mm, unsigned long addr,
			   unsigned long size)
{
	int err;

	down_write(&mm->mmap_lock);
	if (unlikely(mm->core_state)) {
		up_write(&mm->mmap_lock);
		pr_info("munmap: encountered coredump\n");
		return;
	}

	err = do_munmap(mm, addr, size, NULL);
	/* we are not supposed to fail */
	if (err)
		pr_err("failed to unmap VA %pK when sp munmap\n", (void *)addr);

	up_write(&mm->mmap_lock);
}

static void __sp_free(struct sp_group *spg, unsigned long addr,
		      unsigned long size, struct mm_struct *stop)
{
	struct mm_struct *mm;
	struct sp_group_node *spg_node = NULL;

	list_for_each_entry(spg_node, &spg->procs, proc_node) {
		mm = spg_node->master->mm;
		if (mm == stop)
			break;
		sp_munmap(mm, addr, size);
	}
}

/* Free the memory of the backing shmem or hugetlbfs */
static void sp_fallocate(struct sp_area *spa)
{
	int ret;
	unsigned long mode = FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE;
	unsigned long offset = addr_offset(spa);

	ret = vfs_fallocate(spa_file(spa), mode, offset, spa_size(spa));
	if (ret)
		WARN(1, "sp fallocate failed %d\n", ret);
}

static void sp_free_unmap_fallocate(struct sp_area *spa)
{
	down_read(&spa->spg->rw_lock);
	__sp_free(spa->spg, spa->va_start, spa_size(spa), NULL);
	sp_fallocate(spa);
	up_read(&spa->spg->rw_lock);
}

static int sp_check_caller_permission(struct sp_group *spg, struct mm_struct *mm)
{
	int ret = 0;

	down_read(&spg->rw_lock);
	if (!is_process_in_group(spg, mm))
		ret = -EPERM;
	up_read(&spg->rw_lock);

	return ret;
}

#define FREE_CONT	1
#define FREE_END	2

struct sp_free_context {
	unsigned long addr;
	struct sp_area *spa;
	int state;
	int spg_id;
};

/* when success, __sp_area_drop(spa) should be used */
static int sp_free_get_spa(struct sp_free_context *fc)
{
	int ret = 0;
	unsigned long addr = fc->addr;
	struct sp_area *spa;
	struct sp_group *spg;

	spg = __sp_find_spg(current->tgid, fc->spg_id);
	if (!spg) {
		pr_debug("sp free get group failed %d\n", fc->spg_id);
		return -EINVAL;
	}

	fc->state = FREE_CONT;

	spa = get_sp_area(spg, addr);
	sp_group_drop(spg);
	if (!spa) {
		pr_debug("sp free invalid input addr %lx\n", addr);
		return -EINVAL;
	}

	if (spa->type != SPA_TYPE_ALLOC) {
		ret = -EINVAL;
		pr_debug("sp free failed, %lx is not sp alloc addr\n", addr);
		goto drop_spa;
	}
	fc->spa = spa;

	if (!current->mm)
		goto check_spa;

	ret = sp_check_caller_permission(spa->spg, current->mm);
	if (ret < 0)
		goto drop_spa;

check_spa:
	if (is_local_group(spa->spg->id) && (current->tgid != spa->applier)) {
		ret = -EPERM;
		goto drop_spa;
	}

	down_write(&spa->spg->rw_lock);
	if (!spg_valid(spa->spg)) {
		fc->state = FREE_END;
		up_write(&spa->spg->rw_lock);
		goto drop_spa;
		/* we must return success(0) in this situation */
	}
	/* the life cycle of spa has a direct relation with sp group */
	if (unlikely(spa->is_dead)) {
		up_write(&spa->spg->rw_lock);
		pr_err_ratelimited("unexpected double sp free\n");
		dump_stack();
		ret = -EINVAL;
		goto drop_spa;
	}
	spa->is_dead = true;
	up_write(&spa->spg->rw_lock);

	return 0;

drop_spa:
	__sp_area_drop(spa);
	return ret;
}

/**
 * mg_sp_free() - Free the memory allocated by mg_sp_alloc().
 * @addr: the starting VA of the memory.
 * @id: Address space identifier, which is used to distinguish the addr.
 *
 * Return:
 * * 0		- success.
 * * -EINVAL	- the memory can't be found or was not allocated by share pool.
 * * -EPERM	- the caller has no permission to free the memory.
 */
int mg_sp_free(unsigned long addr, int id)
{
	int ret = 0;
	struct sp_free_context fc = {
		.addr = addr,
		.spg_id = id,
	};

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	if (current->flags & PF_KTHREAD)
		return -EINVAL;

	ret = sp_free_get_spa(&fc);
	if (ret || fc.state == FREE_END)
		goto out;

	sp_free_unmap_fallocate(fc.spa);

	if (current->mm == NULL)
		atomic64_sub(fc.spa->real_size, &kthread_stat.alloc_size);
	else
		sp_update_process_stat(current, false, fc.spa);

	__sp_area_drop(fc.spa);  /* match get_sp_area in sp_free_get_spa */
out:
	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_free);
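
/*
 * Example (illustrative sketch, not taken from a real caller): memory
 * obtained with spg_id == SPG_ID_DEFAULT (pass-through allocation) is
 * expected to be freed by the allocating process with the same id. `buf`
 * is an assumption made for the example.
 *
 *	int err = mg_sp_free((unsigned long)buf, SPG_ID_DEFAULT);
 *
 *	if (err)
 *		pr_err("sp free failed %d\n", err);
 */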

/* wrapper of __do_mmap() and the caller must hold down_write(&mm->mmap_lock). */
static unsigned long sp_mmap(struct mm_struct *mm, struct file *file,
			     struct sp_area *spa, unsigned long *populate,
			     unsigned long prot, struct vm_area_struct **pvma)
{
	unsigned long addr = spa->va_start;
	unsigned long size = spa_size(spa);
	unsigned long flags = MAP_FIXED | MAP_SHARED | MAP_POPULATE |
			      MAP_SHARE_POOL;
	unsigned long vm_flags = VM_NORESERVE | VM_SHARE_POOL | VM_DONTCOPY;
	unsigned long pgoff = addr_offset(spa) >> PAGE_SHIFT;
	struct vm_area_struct *vma;

	atomic_inc(&spa->use_count);
	addr = __do_mmap_mm(mm, file, addr, size, prot, flags, vm_flags, pgoff,
			 populate, NULL);
	if (IS_ERR_VALUE(addr)) {
		atomic_dec(&spa->use_count);
		pr_err("do_mmap fails %ld\n", addr);
	} else {
		BUG_ON(addr != spa->va_start);
		vma = find_vma(mm, addr);
		vma->vm_private_data = spa;
		if (pvma)
			*pvma = vma;
	}

	return addr;
}

#define ALLOC_NORMAL	1
#define ALLOC_RETRY	2
#define ALLOC_NOMEM	3
#define ALLOC_COREDUMP	4

struct sp_alloc_context {
	struct sp_group *spg;
	struct file *file;
	unsigned long size;
	unsigned long size_aligned;
	unsigned long sp_flags;
	unsigned long populate;
	int state;
	bool have_mbind;
	enum spa_type type;
};

static int sp_alloc_prepare(unsigned long size, unsigned long sp_flags,
	int spg_id, struct sp_alloc_context *ac)
{
	struct sp_group *spg;

	check_interrupt_context();

	if (current->flags & PF_KTHREAD) {
		pr_err_ratelimited("allocation failed, task is kthread\n");
		return -EINVAL;
	}

	if (unlikely(!size || (size >> PAGE_SHIFT) > totalram_pages())) {
		pr_err_ratelimited("allocation failed, invalid size %lu\n", size);
		return -EINVAL;
	}

	if (spg_id != SPG_ID_DEFAULT && (spg_id < SPG_ID_MIN || spg_id >= SPG_ID_AUTO)) {
		pr_err_ratelimited("allocation failed, invalid group id %d\n", spg_id);
		return -EINVAL;
	}

	if (sp_flags & (~SP_FLAG_MASK)) {
		pr_err_ratelimited("allocation failed, invalid flag %lx\n", sp_flags);
		return -EINVAL;
	}

	if (sp_flags & SP_HUGEPAGE_ONLY)
		sp_flags |= SP_HUGEPAGE;

	if (spg_id != SPG_ID_DEFAULT) {
		spg = __sp_find_spg(current->tgid, spg_id);
		if (!spg) {
			pr_err_ratelimited("allocation failed, can't find group\n");
			return -ENODEV;
		}

		/* up_read will be at the end of sp_alloc */
		down_read(&spg->rw_lock);
		if (!spg_valid(spg)) {
			up_read(&spg->rw_lock);
			sp_group_drop(spg);
			pr_err_ratelimited("allocation failed, spg is dead\n");
			return -ENODEV;
		}

		if (!is_process_in_group(spg, current->mm)) {
			up_read(&spg->rw_lock);
			sp_group_drop(spg);
			pr_err_ratelimited("allocation failed, task not in group\n");
			return -ENODEV;
		}
		ac->type = SPA_TYPE_ALLOC;
	} else {  /* allocation pass through scene */
		spg = sp_get_local_group(current, current->mm);
		if (IS_ERR(spg))
			return PTR_ERR(spg);
		down_read(&spg->rw_lock);
		ac->type = SPA_TYPE_ALLOC_PRIVATE;
	}

	if (sp_flags & SP_HUGEPAGE) {
		ac->file = spg->file_hugetlb;
		ac->size_aligned = ALIGN(size, PMD_SIZE);
	} else {
		ac->file = spg->file;
		ac->size_aligned = ALIGN(size, PAGE_SIZE);
	}

	ac->spg = spg;
	ac->size = size;
	ac->sp_flags = sp_flags;
	ac->state = ALLOC_NORMAL;
	ac->have_mbind = false;
	return 0;
}

static void sp_alloc_unmap(struct mm_struct *mm, struct sp_area *spa,
	struct sp_group_node *spg_node)
{
	__sp_free(spa->spg, spa->va_start, spa->real_size, mm);
}

static int sp_alloc_mmap(struct mm_struct *mm, struct sp_area *spa,
	struct sp_group_node *spg_node, struct sp_alloc_context *ac)
{
	int ret = 0;
	unsigned long mmap_addr;
	/* pass through default permission */
	unsigned long prot = PROT_READ | PROT_WRITE;
	unsigned long populate = 0;
	struct vm_area_struct *vma;

	down_write(&mm->mmap_lock);
	if (unlikely(mm->core_state)) {
		up_write(&mm->mmap_lock);
		ac->state = ALLOC_COREDUMP;
		pr_info("allocation encountered coredump\n");
		return -EFAULT;
	}

	if (spg_node)
		prot = spg_node->prot;

	if (ac->sp_flags & SP_PROT_RO)
		prot = PROT_READ;

	/* when success, mmap_addr == spa->va_start */
	mmap_addr = sp_mmap(mm, spa_file(spa), spa, &populate, prot, &vma);
	if (IS_ERR_VALUE(mmap_addr)) {
		up_write(&mm->mmap_lock);
		sp_alloc_unmap(mm, spa, spg_node);
		pr_err("sp mmap in allocation failed %ld\n", mmap_addr);
		return PTR_ERR((void *)mmap_addr);
	}

	if (unlikely(populate == 0)) {
		up_write(&mm->mmap_lock);
		pr_err("allocation sp mmap populate failed\n");
		ret = -EFAULT;
		goto unmap;
	}
	ac->populate = populate;

	if (ac->sp_flags & SP_PROT_RO)
		vma->vm_flags &= ~VM_MAYWRITE;

	/* clean PTE_RDONLY flags or trigger SMMU event */
	if (prot & PROT_WRITE)
		vma->vm_page_prot = __pgprot(((~PTE_RDONLY) & vma->vm_page_prot.pgprot) | PTE_DIRTY);
	up_write(&mm->mmap_lock);

	return ret;

unmap:
	sp_alloc_unmap(list_next_entry(spg_node, proc_node)->master->mm, spa, spg_node);
	return ret;
}

static void sp_alloc_fallback(struct sp_area *spa, struct sp_alloc_context *ac)
{
	if (ac->file == ac->spg->file) {
		ac->state = ALLOC_NOMEM;
		return;
	}

	atomic_inc(&ac->spg->instat.hugepage_failures);
	if (!(ac->sp_flags & SP_HUGEPAGE_ONLY)) {
		ac->file = ac->spg->file;
		ac->size_aligned = ALIGN(ac->size, PAGE_SIZE);
		ac->sp_flags &= ~SP_HUGEPAGE;
		ac->state = ALLOC_RETRY;
		__sp_area_drop(spa);
		return;
	}
	ac->state = ALLOC_NOMEM;
}

static int sp_alloc_populate(struct mm_struct *mm, struct sp_area *spa,
			     struct sp_alloc_context *ac)
{
	/*
	 * We are not ignoring errors, so if we fail to allocate
	 * physical memory we just return failure, so we won't encounter
	 * page fault later on, and more importantly sp_make_share_u2k()
	 * depends on this feature (and MAP_LOCKED) to work correctly.
	 */

	return do_mm_populate(mm, spa->va_start, ac->populate, 0);
}

static long sp_mbind(struct mm_struct *mm, unsigned long start, unsigned long len,
		unsigned long node)
{
	nodemask_t nmask;

	nodes_clear(nmask);
	node_set(node, nmask);
	return __do_mbind(start, len, MPOL_BIND, MPOL_F_STATIC_NODES,
			&nmask, MPOL_MF_STRICT, mm);
}

static int __sp_alloc_mmap_populate(struct mm_struct *mm, struct sp_area *spa,
	struct sp_group_node *spg_node, struct sp_alloc_context *ac)
{
	int ret;

	ret = sp_alloc_mmap(mm, spa, spg_node, ac);

	if (ret < 0)
		return ret;

	if (!ac->have_mbind) {
		ret = sp_mbind(mm, spa->va_start, spa->real_size, spa->node_id);
		if (ret < 0) {
			pr_err("cannot bind the memory range to specified node:%d, err:%d\n",
				spa->node_id, ret);
			return ret;
		}
		ac->have_mbind = true;
	}

	ret = sp_alloc_populate(mm, spa, ac);
	if (ret) {
		if (unlikely(fatal_signal_pending(current)))
			pr_warn_ratelimited("allocation failed, current thread is killed\n");
		else
			pr_warn_ratelimited("allocation failed due to mm populate failed(potential no enough memory when -12): %d\n",
					ret);
	}
	return ret;
}

static int sp_alloc_mmap_populate(struct sp_area *spa,
				  struct sp_alloc_context *ac)
{
	int ret = -EINVAL;
	int mmap_ret = 0;
	struct mm_struct *mm, *end_mm = NULL;
	struct sp_group_node *spg_node;

	/* create mapping for each process in the group */
	list_for_each_entry(spg_node, &spa->spg->procs, proc_node) {
		mm = spg_node->master->mm;
		mmap_ret = __sp_alloc_mmap_populate(mm, spa, spg_node, ac);
		if (mmap_ret) {

			/*
			 * Goto fallback procedure upon ERR_VALUE,
			 * but skip the coredump situation,
			 * because we don't want one misbehaving process to affect others.
			 */
			if (ac->state != ALLOC_COREDUMP)
				goto unmap;

			/* Reset state and discard the coredump error. */
			ac->state = ALLOC_NORMAL;
			continue;
		}
		ret = mmap_ret;
	}

	return ret;

unmap:
	/* use the next mm in proc list as end mark */
	if (!list_is_last(&spg_node->proc_node, &spa->spg->procs))
		end_mm = list_next_entry(spg_node, proc_node)->master->mm;
	sp_alloc_unmap(end_mm, spa, spg_node);

	/*
	 * Sometimes do_mm_populate() allocates some memory and then fails to
	 * allocate more (e.g. when memory use reaches a cgroup limit).
	 * In this case it returns -ENOMEM, but does not free the memory
	 * which has already been allocated.
	 *
	 * So if __sp_alloc_mmap_populate fails, always call sp_fallocate()
	 * to make sure the backing physical memory of the shared file is freed.
	 */
	sp_fallocate(spa);

	/* If hugepage allocation fails, fall back to normal pages and try
	 * again (only when SP_HUGEPAGE_ONLY is not set).
	 */
	sp_alloc_fallback(spa, ac);

	return mmap_ret;
}

/* spa maybe an error pointer, so introduce variable spg */
static void sp_alloc_finish(int result, struct sp_area *spa,
		struct sp_alloc_context *ac)
{
	struct sp_group *spg = ac->spg;

	/* match sp_alloc_prepare */
	up_read(&spg->rw_lock);

	if (!result)
		sp_update_process_stat(current, true, spa);

	/* this will free spa if mmap failed */
	if (spa && !IS_ERR(spa))
		__sp_area_drop(spa);

	sp_group_drop(spg);
}

/**
 * mg_sp_alloc() - Allocate shared memory for all the processes in a sp_group.
 * @size: the size of memory to allocate.
 * @sp_flags: how to allocate the memory.
 * @spg_id: the share group that the memory is allocated to.
 *
 * Use pass through allocation if spg_id == SPG_ID_DEFAULT in multi-group mode.
 *
 * Return:
 * * if succeed, return the starting address of the shared memory.
 * * if fail, return the pointer of -errno.
 */
void *mg_sp_alloc(unsigned long size, unsigned long sp_flags, int spg_id)
{
	struct sp_area *spa = NULL;
	int ret = 0;
	struct sp_alloc_context ac;

	if (!sp_is_enabled())
		return ERR_PTR(-EOPNOTSUPP);

	ret = sp_alloc_prepare(size, sp_flags, spg_id, &ac);
	if (ret)
		return ERR_PTR(ret);

try_again:
	spa = sp_alloc_area(ac.size_aligned, ac.sp_flags, ac.spg,
			    ac.type, current->tgid);
	if (IS_ERR(spa)) {
		pr_err_ratelimited("alloc spa failed in allocation(potential no enough virtual memory when -75): %ld\n",
			PTR_ERR(spa));
		ret = PTR_ERR(spa);
		goto out;
	}

	ret = sp_alloc_mmap_populate(spa, &ac);
	if (ret && ac.state == ALLOC_RETRY) {
		/*
		 * The mempolicy for shared memory is located at backend file, which varies
		 * between normal pages and huge pages. So we should set the mbind policy again
		 * when we retry using normal pages.
		 */
		ac.have_mbind = false;
		goto try_again;
	}

out:
	sp_alloc_finish(ret, spa, &ac);
	if (ret)
		return ERR_PTR(ret);
	else
		return (void *)(spa->va_start);
}
EXPORT_SYMBOL_GPL(mg_sp_alloc);
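
/*
 * Example (illustrative sketch): a hypothetical driver whose process has
 * already been added to group `spg_id` could allocate and release shared
 * memory as below. The size, flags and error handling are assumptions for
 * the example, not requirements of the interface.
 *
 *	void *buf = mg_sp_alloc(SZ_4M, SP_HUGEPAGE, spg_id);
 *
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	... every process in the group can now access buf ...
 *	mg_sp_free((unsigned long)buf, spg_id);
 */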

/**
 * is_vmap_hugepage() - Check if a kernel address belongs to vmalloc family.
 * @addr: the kernel space address to be checked.
 *
 * Return:
 * * >0		- a vmalloc hugepage addr.
 * * =0		- a normal vmalloc addr.
 * * -errno	- failure.
 */
static int is_vmap_hugepage(unsigned long addr)
{
	struct vm_struct *area;

	if (unlikely(!addr)) {
		pr_err_ratelimited("null vmap addr pointer\n");
		return -EINVAL;
	}

	area = find_vm_area((void *)addr);
	if (unlikely(!area)) {
		pr_debug("can't find vm area(%lx)\n", addr);
		return -EINVAL;
	}

	if (area->flags & VM_HUGE_PAGES)
		return 1;
	else
		return 0;
}

static unsigned long __sp_remap_get_pfn(unsigned long kva)
{
	unsigned long pfn = -EINVAL;

	/* sp_make_share_k2u only support vmalloc address */
	if (is_vmalloc_addr((void *)kva))
		pfn = vmalloc_to_pfn((void *)kva);

	return pfn;
}

/* when called by k2u to group, always make sure rw_lock of spg is down */
static unsigned long sp_remap_kva_to_vma(struct sp_area *spa, struct mm_struct *mm,
					unsigned long prot, struct sp_k2u_context *kc)
{
	struct vm_area_struct *vma;
	unsigned long ret_addr;
	unsigned long populate = 0;
	int ret = 0;
	unsigned long addr, buf, offset;
	unsigned long kva = spa->kva;

	down_write(&mm->mmap_lock);
	if (unlikely(mm->core_state)) {
		pr_err("k2u mmap: encountered coredump, abort\n");
		ret_addr = -EBUSY;
		if (kc)
			kc->state = K2U_COREDUMP;
		goto put_mm;
	}

	if (kc && (kc->sp_flags & SP_PROT_RO))
		prot = PROT_READ;

	ret_addr = sp_mmap(mm, spa_file(spa), spa, &populate, prot, &vma);
	if (IS_ERR_VALUE(ret_addr)) {
		pr_debug("k2u mmap failed %lx\n", ret_addr);
		goto put_mm;
	}

	if (prot & PROT_WRITE)
		vma->vm_page_prot = __pgprot(((~PTE_RDONLY) & vma->vm_page_prot.pgprot) | PTE_DIRTY);

	if (kc && (kc->sp_flags & SP_PROT_RO))
		vma->vm_flags &= ~VM_MAYWRITE;

	if (is_vm_hugetlb_page(vma)) {
		ret = remap_vmalloc_hugepage_range(vma, (void *)kva, 0);
		if (ret) {
			do_munmap(mm, ret_addr, spa_size(spa), NULL);
			pr_debug("remap vmalloc hugepage failed, ret %d, kva is %lx\n",
				 ret, (unsigned long)kva);
			ret_addr = ret;
			goto put_mm;
		}
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	} else {
		buf = ret_addr;
		addr = kva;
		offset = 0;
		do {
			ret = remap_pfn_range(vma, buf, __sp_remap_get_pfn(addr), PAGE_SIZE,
					__pgprot(vma->vm_page_prot.pgprot));
			if (ret) {
				do_munmap(mm, ret_addr, spa_size(spa), NULL);
				pr_err("remap_pfn_range failed %d\n", ret);
				ret_addr = ret;
				goto put_mm;
			}
			offset += PAGE_SIZE;
			buf += PAGE_SIZE;
			addr += PAGE_SIZE;
		} while (offset < spa_size(spa));
	}

put_mm:
	up_write(&mm->mmap_lock);

	return ret_addr;
}

/**
 * Share kernel memory to a spg, the current process must be in that group
 * @kc: the context for k2u, including kva, size, flags...
 * @spg: the sp group to be shared with
 *
 * Return: the shared user address to start at
 */
static void *sp_make_share_kva_to_spg(struct sp_k2u_context *kc, struct sp_group *spg)
{
	struct sp_area *spa;
	struct mm_struct *mm;
	struct sp_group_node *spg_node;
	unsigned long ret_addr = -ENODEV;

	down_read(&spg->rw_lock);
	spa = sp_alloc_area(kc->size_aligned, kc->sp_flags, spg, kc->type, current->tgid);
	if (IS_ERR(spa)) {
		up_read(&spg->rw_lock);
		pr_err("alloc spa failed in k2u_spg (potential no enough virtual memory when -75): %ld\n",
				PTR_ERR(spa));
		return spa;
	}

	spa->kva = kc->kva_aligned;
	list_for_each_entry(spg_node, &spg->procs, proc_node) {
		mm = spg_node->master->mm;
		kc->state = K2U_NORMAL;
		ret_addr = sp_remap_kva_to_vma(spa, mm, spg_node->prot, kc);
		if (IS_ERR_VALUE(ret_addr)) {
			if (kc->state == K2U_COREDUMP)
				continue;
			pr_err("remap k2u to spg failed %ld\n", ret_addr);
			__sp_free(spg, spa->va_start, spa_size(spa), mm);
			goto out;
		}
	}

out:
	up_read(&spg->rw_lock);
	if (!IS_ERR_VALUE(ret_addr))
		sp_update_process_stat(current, true, spa);
	__sp_area_drop(spa);

	return (void *)ret_addr;
}

static bool vmalloc_area_set_flag(unsigned long kva, unsigned long flags)
{
	struct vm_struct *area;

	area = find_vm_area((void *)kva);
	if (area) {
		area->flags |= flags;
		return true;
	}

	return false;
}

static int sp_k2u_prepare(unsigned long kva, unsigned long size,
	unsigned long sp_flags, int spg_id, struct sp_k2u_context *kc)
{
	int is_hugepage;
	unsigned int page_size = PAGE_SIZE;
	unsigned long kva_aligned, size_aligned;

	if (!size) {
		pr_err_ratelimited("k2u input size is 0.\n");
		return -EINVAL;
	}

	if (sp_flags & ~SP_FLAG_MASK) {
		pr_err_ratelimited("k2u sp_flags %lx error\n", sp_flags);
		return -EINVAL;
	}
	sp_flags &= ~SP_HUGEPAGE;

	if (!current->mm) {
		pr_err_ratelimited("k2u: kthread is not allowed\n");
		return -EPERM;
	}

	is_hugepage = is_vmap_hugepage(kva);
	if (is_hugepage > 0) {
		sp_flags |= SP_HUGEPAGE;
		page_size = PMD_SIZE;
	} else if (is_hugepage == 0) {
		/* do nothing */
	} else {
		pr_err_ratelimited("k2u kva is not vmalloc address\n");
		return is_hugepage;
	}

	/* aligned down kva is convenient for caller to start with any valid kva */
	kva_aligned = ALIGN_DOWN(kva, page_size);
	size_aligned = ALIGN(kva + size, page_size) - kva_aligned;

	if (!vmalloc_area_set_flag(kva_aligned, VM_SHAREPOOL)) {
		pr_debug("k2u_task kva %lx is not valid\n", kva_aligned);
		return -EINVAL;
	}

	kc->kva          = kva;
	kc->kva_aligned  = kva_aligned;
	kc->size         = size;
	kc->size_aligned = size_aligned;
	kc->sp_flags     = sp_flags;
	kc->type         = (spg_id == SPG_ID_DEFAULT || spg_id == SPG_ID_NONE)
				? SPA_TYPE_K2TASK : SPA_TYPE_K2SPG;

	return 0;
}

static void *sp_k2u_finish(void *uva, struct sp_k2u_context *kc)
{
	if (IS_ERR(uva))
		vmalloc_area_clr_flag(kc->kva_aligned, VM_SHAREPOOL);
	else
		uva = uva + (kc->kva - kc->kva_aligned);

	return uva;
}

/**
 * mg_sp_make_share_k2u() - Share kernel memory to current process or an sp_group.
 * @kva: the VA of shared kernel memory.
 * @size: the size of shared kernel memory.
 * @sp_flags: how to allocate the memory. We only support SP_DVPP.
 * @tgid:  the tgid of the specified process (Not currently in use).
 * @spg_id: the share group that the memory is shared to.
 *
 * Return: the shared target user address to start at
 *
 * Share kernel memory to current task if spg_id == SPG_ID_NONE
 * or SPG_ID_DEFAULT in multi-group mode.
 *
 * Return:
 * * if succeed, return the shared user address to start at.
 * * if fail, return the pointer of -errno.
 */
void *mg_sp_make_share_k2u(unsigned long kva, unsigned long size,
			unsigned long sp_flags, int tgid, int spg_id)
{
	void *uva;
	int ret;
	struct sp_k2u_context kc;
	struct sp_group *spg;

	if (!sp_is_enabled())
		return ERR_PTR(-EOPNOTSUPP);

	check_interrupt_context();

	ret = sp_k2u_prepare(kva, size, sp_flags, spg_id, &kc);
	if (ret)
		return ERR_PTR(ret);

	if (kc.type == SPA_TYPE_K2TASK) {
		down_write(&sp_group_sem);
		ret = sp_init_group_master_locked(current, current->mm);
		up_write(&sp_group_sem);
		if (ret) {
			pr_err("k2u_task init local mapping failed %d\n", ret);
			uva = ERR_PTR(ret);
			goto out;
		}
		/* the caller could use SPG_ID_NONE */
		spg_id = SPG_ID_DEFAULT;
	}

	spg = __sp_find_spg(current->tgid, spg_id);
	if (spg) {
		ret = sp_check_caller_permission(spg, current->mm);
		if (ret < 0) {
			sp_group_drop(spg);
			uva = ERR_PTR(ret);
			goto out;
		}
		uva = sp_make_share_kva_to_spg(&kc, spg);
		sp_group_drop(spg);
	} else {
		uva = ERR_PTR(-ENODEV);
	}

out:
	return sp_k2u_finish(uva, &kc);
}
EXPORT_SYMBOL_GPL(mg_sp_make_share_k2u);
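
/*
 * Example (illustrative sketch): sharing a vmalloc buffer with the calling
 * process. `kbuf` and the SZ_2M size are assumptions made for the example;
 * only vmalloc family addresses are accepted.
 *
 *	void *kbuf = vmalloc(SZ_2M);
 *	void *uva;
 *
 *	uva = mg_sp_make_share_k2u((unsigned long)kbuf, SZ_2M, 0,
 *				   current->tgid, SPG_ID_NONE);
 *	if (IS_ERR(uva))
 *		return PTR_ERR(uva);
 *	... hand uva to userspace ...
 *	mg_sp_unshare((unsigned long)uva, SZ_2M, SPG_ID_DEFAULT);
 */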

static int sp_pmd_entry(pmd_t *pmd, unsigned long addr,
			unsigned long next, struct mm_walk *walk)
{
	struct page *page;
	struct sp_walk_data *sp_walk_data = walk->private;

	/*
	 * There are cases in DVPP where the page table uses a huge page but the
	 * vma doesn't record it, similar to THP.
	 * So we cannot tell whether it is a hugepage mapping until we access the
	 * pmd here. If mixed page sizes appear, just return an error.
	 */
	if (pmd_huge(*pmd)) {
		if (!sp_walk_data->is_page_type_set) {
			sp_walk_data->is_page_type_set = true;
			sp_walk_data->is_hugepage = true;
		} else if (!sp_walk_data->is_hugepage) {
			return -EFAULT;
		}

		/* To skip pte level walk */
		walk->action = ACTION_CONTINUE;

		page = pmd_page(*pmd);
		get_page(page);
		sp_walk_data->pages[sp_walk_data->page_count++] = page;

		return 0;
	}

	if (!sp_walk_data->is_page_type_set) {
		sp_walk_data->is_page_type_set = true;
		sp_walk_data->is_hugepage = false;
	} else if (sp_walk_data->is_hugepage)
		return -EFAULT;

	sp_walk_data->pmd = pmd;

	return 0;
}

static int sp_pte_entry(pte_t *pte, unsigned long addr,
			unsigned long next, struct mm_walk *walk)
{
	struct page *page;
	struct sp_walk_data *sp_walk_data = walk->private;
	pmd_t *pmd = sp_walk_data->pmd;

retry:
	if (unlikely(!pte_present(*pte))) {
		swp_entry_t entry;

		if (pte_none(*pte))
			goto no_page;
		entry = pte_to_swp_entry(*pte);
		if (!is_migration_entry(entry))
			goto no_page;
		migration_entry_wait(walk->mm, pmd, addr);
		goto retry;
	}

	page = pte_page(*pte);
	get_page(page);
	sp_walk_data->pages[sp_walk_data->page_count++] = page;
	return 0;

no_page:
	pr_debug("the page of addr %lx unexpectedly not in RAM\n",
		 (unsigned long)addr);
	return -EFAULT;
}

static int sp_test_walk(unsigned long addr, unsigned long next,
			struct mm_walk *walk)
{
	/*
	 * FIXME: The devmm driver uses remap_pfn_range() but actually there
	 * are associated struct pages, so they should use vm_map_pages() or
	 * similar APIs. Before the driver has been converted to correct APIs
	 * we use this test_walk() callback so we can treat VM_PFNMAP VMAs as
	 * normal VMAs.
	 */
	return 0;
}

static int sp_pte_hole(unsigned long start, unsigned long end,
		       int depth, struct mm_walk *walk)
{
	pr_debug("hole [%lx, %lx) appeared unexpectedly\n", (unsigned long)start, (unsigned long)end);
	return -EFAULT;
}

static int sp_hugetlb_entry(pte_t *ptep, unsigned long hmask,
			    unsigned long addr, unsigned long next,
			    struct mm_walk *walk)
{
	pte_t pte = huge_ptep_get(ptep);
	struct page *page = pte_page(pte);
	struct sp_walk_data *sp_walk_data;

	if (unlikely(!pte_present(pte))) {
		pr_debug("the page of addr %lx unexpectedly not in RAM\n", (unsigned long)addr);
		return -EFAULT;
	}

	sp_walk_data = walk->private;
	get_page(page);
	sp_walk_data->pages[sp_walk_data->page_count++] = page;
	return 0;
}

/*
 * __sp_walk_page_range() - Walk page table with caller specific callbacks.
 * @uva: the start VA of user memory.
 * @size: the size of user memory.
 * @mm: mm struct of the target task.
 * @sp_walk_data: a structure of a page pointer array.
 *
 * the caller must hold mm->mmap_lock
 *
 * Notes for parameter alignment:
 * When size == 0, let it be page_size, so that at least one page is walked.
 *
 * When size > 0, for convenience, usually the parameters of uva and
 * size are not page aligned. There are four different alignment scenarios and
 * we must handler all of them correctly.
 *
 * The basic idea is to align down uva and align up size so all the pages
 * in range [uva, uva + size) are walked. However, there are special cases.
 *
 * Considering a 2M-hugepage addr scenario. Assuming the caller wants to
 * traverse range [1001M, 1004.5M), so uva and size is 1001M and 3.5M
 * accordingly. The aligned-down uva is 1000M and the aligned-up size is 4M.
 * The traverse range will be [1000M, 1004M). Obviously, the final page for
 * [1004M, 1004.5M) is not covered.
 *
 * To fix this problem, we need to walk an additional page, size should be
 * ALIGN(uva+size) - uva_aligned
 */
static int __sp_walk_page_range(unsigned long uva, unsigned long size,
	struct mm_struct *mm, struct sp_walk_data *sp_walk_data)
{
	int ret = 0;
	struct vm_area_struct *vma;
	unsigned long page_nr;
	struct page **pages = NULL;
	bool is_hugepage = false;
	unsigned long uva_aligned;
	unsigned long size_aligned;
	unsigned int page_size = PAGE_SIZE;
	struct mm_walk_ops sp_walk = {};

	/*
	 * Here we also support non share pool memory in this interface
	 * because the caller can't distinguish whether a uva is from the
	 * share pool or not. It is not the best idea to do so, but currently
	 * it simplifies overall design.
	 *
	 * In this situation, the correctness of the parameters is mainly
	 * guaranteed by the caller.
	 */
	vma = find_vma(mm, uva);
	if (!vma) {
		pr_debug("u2k input uva %lx is invalid\n", (unsigned long)uva);
		return -EINVAL;
	}
	if (is_vm_hugetlb_page(vma))
		is_hugepage = true;

	sp_walk.pte_hole = sp_pte_hole;
	sp_walk.test_walk = sp_test_walk;
	if (is_hugepage) {
		sp_walk_data->is_hugepage = true;
		sp_walk.hugetlb_entry = sp_hugetlb_entry;
		page_size = PMD_SIZE;
	} else {
		sp_walk_data->is_hugepage = false;
		sp_walk.pte_entry = sp_pte_entry;
		sp_walk.pmd_entry = sp_pmd_entry;
	}

	sp_walk_data->is_page_type_set = false;
	sp_walk_data->page_count = 0;
	sp_walk_data->page_size = page_size;
	uva_aligned = ALIGN_DOWN(uva, page_size);
	sp_walk_data->uva_aligned = uva_aligned;
	if (size == 0)
		size_aligned = page_size;
	else
		/* special alignment handling */
		size_aligned = ALIGN(uva + size, page_size) - uva_aligned;

	if (uva_aligned + size_aligned < uva_aligned) {
		pr_err_ratelimited("overflow happened in walk page range\n");
		return -EINVAL;
	}

	page_nr = size_aligned / page_size;
	pages = kvmalloc(page_nr * sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		pr_err_ratelimited("alloc page array failed in walk page range\n");
		return -ENOMEM;
	}
	sp_walk_data->pages = pages;

	ret = walk_page_range(mm, uva_aligned, uva_aligned + size_aligned,
			      &sp_walk, sp_walk_data);
	if (ret) {
		while (sp_walk_data->page_count--)
			put_page(pages[sp_walk_data->page_count]);
		kvfree(pages);
		sp_walk_data->pages = NULL;
	}

	if (sp_walk_data->is_hugepage)
		sp_walk_data->uva_aligned = ALIGN_DOWN(uva, PMD_SIZE);

	return ret;
}

static void __sp_walk_page_free(struct sp_walk_data *data)
{
	int i = 0;
	struct page *page;

	while (i < data->page_count) {
		page = data->pages[i++];
		put_page(page);
	}

	kvfree(data->pages);
	/* prevent repeated release */
	data->page_count = 0;
	data->pages = NULL;
}

/**
 * mg_sp_make_share_u2k() - Share user memory of a specified process to kernel.
 * @uva: the VA of shared user memory
 * @size: the size of shared user memory
 * @tgid: the tgid of the specified process(Not currently in use)
 *
 * Return:
 * * if success, return the starting kernel address of the shared memory.
 * * if failed, return the pointer of -errno.
 */
void *mg_sp_make_share_u2k(unsigned long uva, unsigned long size, int tgid)
{
	int ret = 0;
	struct mm_struct *mm = current->mm;
	void *p = ERR_PTR(-ESRCH);
	struct sp_walk_data sp_walk_data;
	struct vm_struct *area;

	if (!sp_is_enabled())
		return ERR_PTR(-EOPNOTSUPP);

	check_interrupt_context();

	if (mm == NULL) {
		pr_err("u2k: kthread is not allowed\n");
		return ERR_PTR(-EPERM);
	}

	down_write(&mm->mmap_lock);
	if (unlikely(mm->core_state)) {
		up_write(&mm->mmap_lock);
		pr_err("u2k: encountered coredump, abort\n");
		return p;
	}

	ret = __sp_walk_page_range(uva, size, mm, &sp_walk_data);
	if (ret) {
		pr_err_ratelimited("walk page range failed %d\n", ret);
		up_write(&mm->mmap_lock);
		return ERR_PTR(ret);
	}

	if (sp_walk_data.is_hugepage)
		p = vmap_hugepage(sp_walk_data.pages, sp_walk_data.page_count,
				  VM_MAP, PAGE_KERNEL);
	else
		p = vmap(sp_walk_data.pages, sp_walk_data.page_count, VM_MAP,
			 PAGE_KERNEL);
	up_write(&mm->mmap_lock);

	if (!p) {
		pr_err("vmap(huge) in u2k failed\n");
		__sp_walk_page_free(&sp_walk_data);
		return ERR_PTR(-ENOMEM);
	}

	p = p + (uva - sp_walk_data.uva_aligned);

	/*
	 * kva p may be used later in k2u. Since p comes from uva originally,
	 * it's reasonable to add flag VM_USERMAP so that p can be remapped
	 * into userspace again.
	 */
	area = find_vm_area(p);
	area->flags |= VM_USERMAP;

	kvfree(sp_walk_data.pages);
	return p;
}
EXPORT_SYMBOL_GPL(mg_sp_make_share_u2k);
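
/*
 * Example (illustrative sketch): mapping a userspace buffer of the current
 * process into the kernel and releasing it again. `uva` and `len` are
 * assumptions supplied by the hypothetical caller.
 *
 *	void *kva = mg_sp_make_share_u2k(uva, len, current->tgid);
 *
 *	if (IS_ERR(kva))
 *		return PTR_ERR(kva);
 *	... access the user pages through kva ...
 *	mg_sp_unshare((unsigned long)kva, len, SPG_ID_DEFAULT);
 */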

/*
 * Input parameters uva, tgid and spg_id are now useless. spg_id will be useful
 * when supporting a process in multiple sp groups.
 *
 * Procedure of unshare uva must be compatible with:
 *
 * 1. DVPP channel destroy procedure:
 * do_exit() -> exit_mm() (mm no longer in spg and current->mm == NULL) ->
 * exit_task_work() -> task_work_run() -> __fput() -> ... -> vdec_close() ->
 * sp_unshare(uva, SPG_ID_DEFAULT)
 *
 * 2. Process A once was the target of k2u(to group), then it exits.
 * Guard worker kthread tries to free this uva and it must succeed, otherwise
 * spa of this uva leaks.
 *
 * This also means we must trust DVPP channel destroy and guard worker code.
 */
static int sp_unshare_uva(unsigned long uva, unsigned long size, int group_id)
{
	int ret = 0;
	struct mm_struct *mm;
	struct sp_area *spa;
	unsigned long uva_aligned;
	unsigned long size_aligned;
	unsigned int page_size;
	struct sp_group *spg;

	spg = __sp_find_spg(current->tgid, group_id);
	if (!spg) {
		pr_debug("sp unshare find group failed %d\n", group_id);
		return -EINVAL;
	}

	/*
	 * at first we guess it's a hugepage addr
	 * we can tolerate at most PMD_SIZE or PAGE_SIZE which is matched in k2u
	 */
	spa = get_sp_area(spg, ALIGN_DOWN(uva, PMD_SIZE));
	if (!spa) {
		spa = get_sp_area(spg, ALIGN_DOWN(uva, PAGE_SIZE));
		if (!spa) {
			ret = -EINVAL;
			pr_debug("invalid input uva %lx in unshare uva\n", (unsigned long)uva);
			goto out;
		}
	}

	if (spa->type != SPA_TYPE_K2TASK && spa->type != SPA_TYPE_K2SPG) {
		pr_err_ratelimited("unshare wrong type spa\n");
		ret = -EINVAL;
		goto out_drop_area;
	}
	/*
	 * 1. overflow actually won't happen because the spa must be valid.
	 * 2. we must unshare [spa->va_start, spa->va_start + spa->real_size) completely
	 *    because an spa is in one-to-one correspondence with a vma.
	 *    Thus the input parameter size is not strictly needed.
	 */
	page_size = (spa->is_hugepage ? PMD_SIZE : PAGE_SIZE);
	uva_aligned = spa->va_start;
	size_aligned = spa->real_size;

	if (size_aligned < ALIGN(size, page_size)) {
		ret = -EINVAL;
		pr_err_ratelimited("unshare uva failed, invalid parameter size %lu\n", size);
		goto out_drop_area;
	}

	if (spa->type == SPA_TYPE_K2TASK) {
		if (spa->applier != current->tgid) {
			pr_err_ratelimited("unshare uva(to task) no permission\n");
			ret = -EPERM;
			goto out_drop_area;
		}

		/*
		 * current thread may be exiting in a multithread process
		 *
		 * 1. never need a kthread to make unshare when process has exited
		 * 2. in dvpp channel destroy procedure, exit_mm() has been called
		 *    and don't need to make unshare
		 */
		mm = get_task_mm(current->group_leader);
		if (!mm) {
			pr_info_ratelimited("no need to unshare uva(to task), target process mm is exiting\n");
			goto out_clr_flag;
		}

		down_write(&mm->mmap_lock);
		if (unlikely(mm->core_state)) {
			ret = 0;
			up_write(&mm->mmap_lock);
			mmput(mm);
			goto out_drop_area;
		}

		ret = do_munmap(mm, uva_aligned, size_aligned, NULL);
		up_write(&mm->mmap_lock);
		mmput(mm);
		/* we are not supposed to fail */
		if (ret)
			pr_err("failed to unmap VA %pK when munmap in unshare uva\n",
			       (void *)uva_aligned);
		sp_update_process_stat(current, false, spa);

	} else if (spa->type == SPA_TYPE_K2SPG) {
		down_read(&spa->spg->rw_lock);
		/* always allow kthread and dvpp channel destroy procedure */
		if (current->mm) {
			if (!is_process_in_group(spa->spg, current->mm)) {
				up_read(&spa->spg->rw_lock);
				pr_err_ratelimited("unshare uva(to group) failed, caller process doesn't belong to target group\n");
				ret = -EPERM;
				goto out_drop_area;
			}
		}
		up_read(&spa->spg->rw_lock);

		down_write(&spa->spg->rw_lock);
		if (!spg_valid(spa->spg)) {
			up_write(&spa->spg->rw_lock);
			pr_info_ratelimited("share pool: no need to unshare uva(to group), sp group of spa is dead\n");
			goto out_clr_flag;
		}
		/* the life cycle of spa has a direct relation with sp group */
		if (unlikely(spa->is_dead)) {
			up_write(&spa->spg->rw_lock);
			pr_err_ratelimited("unexpected double sp unshare\n");
			dump_stack();
			ret = -EINVAL;
			goto out_drop_area;
		}
		spa->is_dead = true;
		up_write(&spa->spg->rw_lock);

		down_read(&spa->spg->rw_lock);
		__sp_free(spa->spg, uva_aligned, size_aligned, NULL);
		up_read(&spa->spg->rw_lock);

		if (current->mm == NULL)
			atomic64_sub(spa->real_size, &kthread_stat.k2u_size);
		else
			sp_update_process_stat(current, false, spa);
	} else {
		WARN(1, "unshare uva invalid spa type");
	}

out_clr_flag:
	if (!vmalloc_area_clr_flag(spa->kva, VM_SHAREPOOL))
		pr_debug("clear spa->kva %ld is not valid\n", spa->kva);
	spa->kva = 0;

out_drop_area:
	__sp_area_drop(spa);
out:
	sp_group_drop(spg);
	return ret;
}

/* No possible concurrent protection, take care when use */
static int sp_unshare_kva(unsigned long kva, unsigned long size)
{
	unsigned long addr, kva_aligned;
	struct page *page;
	unsigned long size_aligned;
	unsigned long step;
	bool is_hugepage = true;
	int ret;

	ret = is_vmap_hugepage(kva);
	if (ret > 0) {
		kva_aligned = ALIGN_DOWN(kva, PMD_SIZE);
		size_aligned = ALIGN(kva + size, PMD_SIZE) - kva_aligned;
		step = PMD_SIZE;
	} else if (ret == 0) {
		kva_aligned = ALIGN_DOWN(kva, PAGE_SIZE);
		size_aligned = ALIGN(kva + size, PAGE_SIZE) - kva_aligned;
		step = PAGE_SIZE;
		is_hugepage = false;
	} else {
		pr_err_ratelimited("check vmap hugepage failed %d\n", ret);
		return -EINVAL;
	}

	if (kva_aligned + size_aligned < kva_aligned) {
		pr_err_ratelimited("overflow happened in unshare kva\n");
		return -EINVAL;
	}

	for (addr = kva_aligned; addr < (kva_aligned + size_aligned); addr += step) {
		page = vmalloc_to_page((void *)addr);
		if (page)
			put_page(page);
		else
			WARN(1, "vmalloc %pK to page/hugepage failed\n",
			       (void *)addr);
	}

	vunmap((void *)kva_aligned);

	return 0;
}

/**
 * mg_sp_unshare() - Unshare the kernel or user memory which shared by calling
 *                sp_make_share_{k2u,u2k}().
 * @va: the specified virtual address of memory
 * @size: the size of unshared memory
 *
 * Use spg_id of current thread if spg_id == SPG_ID_DEFAULT.
 *
 * Return: 0 for success, -errno on failure.
 */
int mg_sp_unshare(unsigned long va, unsigned long size, int spg_id)
{
	int ret = 0;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	if (current->flags & PF_KTHREAD)
		return -EINVAL;

	if (va < TASK_SIZE) {
		/* user address */
		ret = sp_unshare_uva(va, size, spg_id);
	} else if (va >= PAGE_OFFSET) {
		/* kernel address */
		ret = sp_unshare_kva(va, size);
	} else {
		/* regard user and kernel address ranges as bad address */
		pr_debug("unshare addr %lx is not a user or kernel addr\n", (unsigned long)va);
		ret = -EFAULT;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_unshare);
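
/*
 * Example (illustrative note): the same entry point undoes both directions
 * of sharing; which path is taken depends only on the address range, so a
 * hypothetical caller simply passes back whichever VA it received.
 *
 *	mg_sp_unshare(uva, size, SPG_ID_DEFAULT);	user VA, handled by sp_unshare_uva()
 *	mg_sp_unshare((unsigned long)kva, size, 0);	kernel VA, handled by sp_unshare_kva()
 */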

/**
 * mg_sp_walk_page_range() - Walk page table with caller specific callbacks.
 * @uva: the start VA of user memory.
 * @size: the size of user memory.
 * @tsk: task struct of the target task.
 * @sp_walk_data: a structure of a page pointer array.
 *
 * Return: 0 for success, -errno on failure.
 *
 * When return 0, sp_walk_data describing [uva, uva+size) can be used.
 * When return -errno, information in sp_walk_data is useless.
 */
int mg_sp_walk_page_range(unsigned long uva, unsigned long size,
	struct task_struct *tsk, struct sp_walk_data *sp_walk_data)
{
	struct mm_struct *mm;
	int ret = 0;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	if (unlikely(!sp_walk_data)) {
		pr_err_ratelimited("null pointer when walk page range\n");
		return -EINVAL;
	}
	if (!tsk || (tsk->flags & PF_EXITING))
		return -ESRCH;

	get_task_struct(tsk);
	mm = get_task_mm(tsk);
	if (!mm) {
		put_task_struct(tsk);
		return -ESRCH;
	}

	down_write(&mm->mmap_lock);
	if (likely(!mm->core_state)) {
		ret = __sp_walk_page_range(uva, size, mm, sp_walk_data);
	} else {
		pr_err("walk page range: encountered coredump\n");
		ret = -ESRCH;
	}
	up_write(&mm->mmap_lock);

	mmput(mm);
	put_task_struct(tsk);

	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_walk_page_range);

/**
 * mg_sp_walk_page_free() - Free the sp_walk_data structure.
 * @sp_walk_data: a structure of a page pointer array to be freed.
 */
void mg_sp_walk_page_free(struct sp_walk_data *sp_walk_data)
{
	if (!sp_is_enabled())
		return;

	check_interrupt_context();

	if (!sp_walk_data)
		return;

	__sp_walk_page_free(sp_walk_data);
}
EXPORT_SYMBOL_GPL(mg_sp_walk_page_free);
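
/*
 * Example (illustrative sketch): pinning the pages behind a user buffer of
 * a target task and releasing them afterwards. `tsk`, `uva` and `len` are
 * assumptions made for the example.
 *
 *	struct sp_walk_data wdata = { 0 };
 *
 *	if (!mg_sp_walk_page_range(uva, len, tsk, &wdata)) {
 *		... use wdata.pages[0 .. wdata.page_count - 1] ...
 *		mg_sp_walk_page_free(&wdata);
 *	}
 */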

int sp_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&sp_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(sp_register_notifier);

int sp_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&sp_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(sp_unregister_notifier);

static bool is_sp_dynamic_dvpp_addr(unsigned long addr);
/**
 * mg_sp_config_dvpp_range() - User can config the share pool start address
 *                          of each Da-vinci device.
 * @start: the value of share pool start
 * @size: the value of share pool
 * @device_id: the num of Da-vinci device
 * @tgid: the tgid of device process
 *
 * Return true for success.
 * Return false if a parameter is invalid or the range has already been set up.
 * This function has no concurrency problem.
 */
bool mg_sp_config_dvpp_range(size_t start, size_t size, int device_id, int tgid)
{
	int ret;
	bool err = false;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct sp_group *spg;
	struct sp_mapping *spm;
	unsigned long default_start;

	if (!sp_is_enabled())
		return false;

	/* NOTE: check the start address */
	if (tgid < 0 || size <= 0 || size > MMAP_SHARE_POOL_16G_SIZE ||
	    device_id < 0 || device_id >= MAX_DEVID || !is_online_node_id(device_id)
		|| !is_sp_dynamic_dvpp_addr(start) || !is_sp_dynamic_dvpp_addr(start + size - 1))
		return false;

	ret = get_task(tgid, &tsk);
	if (ret)
		return false;

	mm = get_task_mm(tsk->group_leader);
	if (!mm)
		goto put_task;

	spg = sp_get_local_group(tsk, mm);
	if (IS_ERR(spg))
		goto put_mm;

	spm = spg->mapping[SP_MAPPING_DVPP];
	default_start = MMAP_SHARE_POOL_DVPP_START + device_id * MMAP_SHARE_POOL_16G_SIZE;
	/* The dvpp range of each group can be configured only once */
	if (spm->start[device_id] != default_start)
		goto put_spg;

	spm->start[device_id] = start;
	spm->end[device_id] = start + size;

	err = true;

put_spg:
	sp_group_drop(spg);
put_mm:
	mmput(mm);
put_task:
	put_task_struct(tsk);

	return err;
}
EXPORT_SYMBOL_GPL(mg_sp_config_dvpp_range);
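
/*
 * Example (illustrative sketch): limiting device 0 of a target process to a
 * 4G window. `start` and `tgid` are assumptions; `start` must fall inside a
 * device region of the dynamic DVPP space and the range can be configured
 * only once per group.
 *
 *	if (!mg_sp_config_dvpp_range(start, SZ_4G, 0, tgid))
 *		pr_err("dvpp range config rejected\n");
 */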

static bool is_sp_reserve_addr(unsigned long addr)
{
	return addr >= MMAP_SHARE_POOL_START && addr < MMAP_SHARE_POOL_END;
}

/*
 *	| 16G host | 16G device | ... |     |
 *	^
 *	|
 *	MMAP_SHARE_POOL_DVPP_BASE + 16G * 64
 *	We only check the device regions.
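 *	Each 32G slot is a 16G host half followed by a 16G device half, so
 *	(addr - MMAP_SHARE_POOL_DYNAMIC_DVPP_BASE) & MMAP_SHARE_POOL_16G_SIZE
 *	is non-zero exactly in the device half (assuming the 16G regions are
 *	power-of-two sized and aligned).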
 */
static bool is_sp_dynamic_dvpp_addr(unsigned long addr)
{
	if (addr < MMAP_SHARE_POOL_DYNAMIC_DVPP_BASE || addr >= MMAP_SHARE_POOL_DYNAMIC_DVPP_END)
		return false;

	return (addr - MMAP_SHARE_POOL_DYNAMIC_DVPP_BASE) & MMAP_SHARE_POOL_16G_SIZE;
}

/**
 * mg_is_sharepool_addr() - Check if a user memory address belongs to share pool.
 * @addr: the userspace address to be checked.
 *
 * Return true if addr belongs to share pool, or false vice versa.
 */
bool mg_is_sharepool_addr(unsigned long addr)
{
	return sp_is_enabled() &&
		((is_sp_reserve_addr(addr) || is_sp_dynamic_dvpp_addr(addr)));
}
EXPORT_SYMBOL_GPL(mg_is_sharepool_addr);
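
/*
 * Example (illustrative note): this is only a cheap VA-range test, e.g. for
 * a hypothetical debug or fault path; it does not guarantee that an sp_area
 * is currently mapped at the address.
 *
 *	if (mg_is_sharepool_addr(addr))
 *		pr_debug("%lx lies in share pool address space\n", addr);
 */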

int sp_node_id(struct vm_area_struct *vma)
{
	struct sp_area *spa;
	int node_id = numa_node_id();

	if (!sp_is_enabled())
		return node_id;

	if (vma && (vma->vm_flags & VM_SHARE_POOL) && vma->vm_private_data) {
		spa = vma->vm_private_data;
		node_id = spa->node_id;
	}

	return node_id;
}

/*** Statistical and maintenance functions ***/

static void get_mm_rss_info(struct mm_struct *mm, unsigned long *anon,
	unsigned long *file, unsigned long *shmem, unsigned long *total_rss)
{
	*anon = get_mm_counter(mm, MM_ANONPAGES);
	*file = get_mm_counter(mm, MM_FILEPAGES);
	*shmem = get_mm_counter(mm, MM_SHMEMPAGES);
	*total_rss = *anon + *file + *shmem;
}

static long get_proc_k2u(struct sp_proc_stat *stat)
{
	return byte2kb(atomic64_read(&stat->k2u_size));
}

static long get_proc_alloc(struct sp_proc_stat *stat)
{
	return byte2kb(atomic64_read(&stat->alloc_nsize) +
			atomic64_read(&stat->alloc_hsize));
}

static void get_process_sp_res(struct sp_group_master *master,
		long *sp_res_out, long *sp_res_nsize_out)
{
	struct sp_group *spg;
	struct sp_group_node *spg_node;

	*sp_res_out = 0;
	*sp_res_nsize_out = 0;

	list_for_each_entry(spg_node, &master->node_list, group_node) {
		spg = spg_node->spg;
		*sp_res_out += byte2kb(atomic64_read(&spg->instat.alloc_nsize));
		*sp_res_out += byte2kb(atomic64_read(&spg->instat.alloc_hsize));
		*sp_res_nsize_out += byte2kb(atomic64_read(&spg->instat.alloc_nsize));
	}
}

static long get_sp_res_by_spg_proc(struct sp_group_node *spg_node)
{
	return byte2kb(atomic64_read(&spg_node->spg->instat.alloc_nsize) +
			atomic64_read(&spg_node->spg->instat.alloc_hsize));
}

/*
 * RSS statistics may deviate by up to 64 pages (256KB).
 * See check_sync_rss_stat().
 */
static void get_process_non_sp_res(unsigned long total_rss, unsigned long shmem,
	long sp_res_nsize, long *non_sp_res_out, long *non_sp_shm_out)
{
	long non_sp_res, non_sp_shm;

	non_sp_res = page2kb(total_rss) - sp_res_nsize;
	non_sp_res = non_sp_res < 0 ? 0 : non_sp_res;
	non_sp_shm = page2kb(shmem) - sp_res_nsize;
	non_sp_shm = non_sp_shm < 0 ? 0 : non_sp_shm;
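	/*
	 * Example with illustrative numbers: a total_rss of 1000 pages is
	 * 4000 KB; with sp_res_nsize of 1000 KB the non-share-pool RSS is
	 * 3000 KB. Negative intermediate results are clamped to zero above.
	 */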

	*non_sp_res_out = non_sp_res;
	*non_sp_shm_out = non_sp_shm;
}

static long get_spg_proc_alloc(struct sp_group_node *spg_node)
{
	return byte2kb(atomic64_read(&spg_node->instat.alloc_nsize) +
				atomic64_read(&spg_node->instat.alloc_hsize));
}

static long get_spg_proc_k2u(struct sp_group_node *spg_node)
{
	return byte2kb(atomic64_read(&spg_node->instat.k2u_size));
}

static void print_process_prot(struct seq_file *seq, unsigned long prot)
{
	if (prot == PROT_READ)
		seq_puts(seq, "R");
	else if (prot == (PROT_READ | PROT_WRITE))
		seq_puts(seq, "RW");
	else
		seq_puts(seq, "-");
}

int proc_sp_group_state(struct seq_file *m, struct pid_namespace *ns,
			struct pid *pid, struct task_struct *task)
{
	struct mm_struct *mm;
	struct sp_group_master *master;
	struct sp_proc_stat *proc_stat;
	struct sp_group_node *spg_node;
	unsigned long anon, file, shmem, total_rss;
	long sp_res, sp_res_nsize, non_sp_res, non_sp_shm;

	if (!sp_is_enabled())
		return 0;

	mm = get_task_mm(task);
	if (!mm)
		return 0;

	down_read(&sp_group_sem);
	down_read(&mm->mmap_lock);
	master = mm->sp_group_master;
	if (!master)
		goto out;

	get_mm_rss_info(mm, &anon, &file, &shmem, &total_rss);
	proc_stat = &master->instat;
	get_process_sp_res(master, &sp_res, &sp_res_nsize);
	get_process_non_sp_res(total_rss, shmem, sp_res_nsize,
			       &non_sp_res, &non_sp_shm);

	seq_puts(m, "Share Pool Aggregate Data of This Process\n\n");
	seq_printf(m, "%-8s %-16s %-9s %-9s %-9s %-10s %-10s %-8s\n",
		   "PID", "COMM", "SP_ALLOC", "SP_K2U", "SP_RES", "Non-SP_RES",
		   "Non-SP_Shm", "VIRT");
	seq_printf(m, "%-8d %-16s %-9ld %-9ld %-9ld %-10ld %-10ld %-8ld\n",
		   proc_stat->tgid, proc_stat->comm,
		   get_proc_alloc(proc_stat),
		   get_proc_k2u(proc_stat),
		   sp_res, non_sp_res, non_sp_shm,
		   page2kb(mm->total_vm));

	seq_puts(m, "\n\nProcess in Each SP Group\n\n");
	seq_printf(m, "%-8s %-9s %-9s %-9s %-4s\n",
			"Group_ID", "SP_ALLOC", "SP_K2U", "SP_RES", "PROT");

	list_for_each_entry(spg_node, &master->node_list, group_node) {
		seq_printf(m, "%-8d %-9ld %-9ld %-9ld ",
				spg_node->spg->id,
				get_spg_proc_alloc(spg_node),
				get_spg_proc_k2u(spg_node),
				get_sp_res_by_spg_proc(spg_node));
		print_process_prot(m, spg_node->prot);
		seq_putc(m, '\n');
	}

out:
	up_read(&mm->mmap_lock);
	up_read(&sp_group_sem);
	mmput(mm);
	return 0;
}
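/*
 * The function above renders roughly the following layout (all values are
 * purely illustrative):
 *
 *	Share Pool Aggregate Data of This Process
 *
 *	PID      COMM             SP_ALLOC  SP_K2U    SP_RES    Non-SP_RES Non-SP_Shm VIRT
 *	1234     example          2048      0         2048      512        0          81920
 *
 *	Process in Each SP Group
 *
 *	Group_ID SP_ALLOC  SP_K2U    SP_RES    PROT
 *	1000     2048      0         2048      RW
 */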

static void spa_stat_of_mapping_show(struct seq_file *seq, struct sp_mapping *spm)
{
	struct rb_node *node;
	struct sp_area *spa, *prev = NULL;

	spin_lock(&sp_area_lock);
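	/*
	 * Hold a reference (use_count) on each spa so that sp_area_lock can
	 * be dropped while its line is printed; the previous entry is
	 * released once the lock is re-taken on the next iteration.
	 */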
	for (node = rb_first(&spm->area_root); node; node = rb_next(node)) {
		__sp_area_drop_locked(prev);

		spa = rb_entry(node, struct sp_area, rb_node);
		prev = spa;
		atomic_inc(&spa->use_count);
		spin_unlock(&sp_area_lock);

		if (spg_valid(spa->spg))  /* k2u to group */
			seq_printf(seq, "%-10d ", spa->spg->id);
		else  /* spg is dead */
			seq_printf(seq, "%-10s ", "Dead");

		seq_printf(seq, "%2s%-14lx %2s%-14lx %-10ld ",
			   "0x", spa->va_start,
			   "0x", spa->va_end,
			   byte2kb(spa->real_size));

		switch (spa->type) {
		case SPA_TYPE_ALLOC:
			seq_printf(seq, "%-7s ", "ALLOC");
			break;
		case SPA_TYPE_K2TASK:
			seq_printf(seq, "%-7s ", "TASK");
			break;
		case SPA_TYPE_K2SPG:
			seq_printf(seq, "%-7s ", "SPG");
			break;
		default:
			/* usually impossible, perhaps a developer's mistake */
			break;
		}

		if (spa->is_hugepage)
			seq_printf(seq, "%-5s ", "Y");
		else
			seq_printf(seq, "%-5s ", "N");

		seq_printf(seq, "%-8d ",  spa->applier);
		seq_printf(seq, "%-8d\n", atomic_read(&spa->use_count));

		spin_lock(&sp_area_lock);
	}
	__sp_area_drop_locked(prev);
	spin_unlock(&sp_area_lock);
}

static void spa_ro_stat_show(struct seq_file *seq)
{
	spa_stat_of_mapping_show(seq, sp_mapping_ro);
}

static void spa_normal_stat_show(struct seq_file *seq)
{
	spa_stat_of_mapping_show(seq, sp_mapping_normal);
}

static void spa_dvpp_stat_show(struct seq_file *seq)
{
	struct sp_mapping *spm;

	mutex_lock(&spm_list_lock);
	list_for_each_entry(spm, &spm_dvpp_list, spm_node)
		spa_stat_of_mapping_show(seq, spm);
	mutex_unlock(&spm_list_lock);
}

void spa_overview_show(struct seq_file *seq)
{
	unsigned int total_num, alloc_num, k2u_task_num, k2u_spg_num;
	unsigned long total_size, alloc_size, k2u_task_size, k2u_spg_size;
	unsigned long dvpp_size, dvpp_va_size;

	if (!sp_is_enabled())
		return;

	spin_lock(&sp_area_lock);
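	/* snapshot the counters under the lock; printing happens after it is dropped */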
	total_num     = spa_stat.total_num;
	alloc_num     = spa_stat.alloc_num;
	k2u_task_num  = spa_stat.k2u_task_num;
	k2u_spg_num   = spa_stat.k2u_spg_num;
	total_size    = spa_stat.total_size;
	alloc_size    = spa_stat.alloc_size;
	k2u_task_size = spa_stat.k2u_task_size;
	k2u_spg_size  = spa_stat.k2u_spg_size;
	dvpp_size     = spa_stat.dvpp_size;
	dvpp_va_size  = spa_stat.dvpp_va_size;
	spin_unlock(&sp_area_lock);

	SEQ_printf(seq, "Spa total num %u.\n", total_num);
	SEQ_printf(seq, "Spa alloc num %u, k2u(task) num %u, k2u(spg) num %u.\n",
		   alloc_num, k2u_task_num, k2u_spg_num);
	SEQ_printf(seq, "Spa total size:     %13lu KB\n", byte2kb(total_size));
	SEQ_printf(seq, "Spa alloc size:     %13lu KB\n", byte2kb(alloc_size));
	SEQ_printf(seq, "Spa k2u(task) size: %13lu KB\n", byte2kb(k2u_task_size));
	SEQ_printf(seq, "Spa k2u(spg) size:  %13lu KB\n", byte2kb(k2u_spg_size));
	SEQ_printf(seq, "Spa dvpp size:      %13lu KB\n", byte2kb(dvpp_size));
	SEQ_printf(seq, "Spa dvpp va size:   %13lu MB\n", byte2mb(dvpp_va_size));
	SEQ_printf(seq, "\n");
}

static int spg_info_show(int id, void *p, void *data)
{
	struct sp_group *spg = p;
	struct seq_file *seq = data;

	if (id >= SPG_ID_LOCAL_MIN && id <= SPG_ID_LOCAL_MAX)
		return 0;

	SEQ_printf(seq, "Group %6d ", id);

	down_read(&spg->rw_lock);
	SEQ_printf(seq, "size: %lld KB, spa num: %d, total alloc: %lld KB, normal alloc: %lld KB, huge alloc: %lld KB\n",
			byte2kb(atomic64_read(&spg->instat.size)),
			atomic_read(&spg->instat.spa_num),
			byte2kb(atomic64_read(&spg->instat.alloc_size)),
			byte2kb(atomic64_read(&spg->instat.alloc_nsize)),
			byte2kb(atomic64_read(&spg->instat.alloc_hsize)));
	up_read(&spg->rw_lock);

	return 0;
}

void spg_overview_show(struct seq_file *seq)
{
	if (!sp_is_enabled())
		return;

	SEQ_printf(seq, "Share pool total size: %lld KB, spa total num: %d.\n",
			byte2kb(atomic64_read(&sp_overall_stat.spa_total_size)),
			atomic_read(&sp_overall_stat.spa_total_num));

	down_read(&sp_group_sem);
	idr_for_each(&sp_group_idr, spg_info_show, seq);
	up_read(&sp_group_sem);

	SEQ_printf(seq, "\n");
}

static bool should_show_statistics(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return false;

	if (task_active_pid_ns(current) != &init_pid_ns)
		return false;

	return true;
}

static int spa_stat_show(struct seq_file *seq, void *offset)
{
	if (!should_show_statistics())
		return -EPERM;

	spg_overview_show(seq);
	spa_overview_show(seq);
	/* print the file header */
	seq_printf(seq, "%-10s %-16s %-16s %-10s %-7s %-5s %-8s %-8s\n",
			"Group ID", "va_start", "va_end", "Size(KB)", "Type", "Huge", "PID", "Ref");
	spa_ro_stat_show(seq);
	spa_normal_stat_show(seq);
	spa_dvpp_stat_show(seq);
	return 0;
}

static int proc_usage_by_group(int id, void *p, void *data)
{
	struct sp_group *spg = p;
	struct seq_file *seq = data;
	struct sp_group_node *spg_node;
	struct mm_struct *mm;
	struct sp_group_master *master;
	int tgid;
	unsigned long anon, file, shmem, total_rss;

	down_read(&spg->rw_lock);
	list_for_each_entry(spg_node, &spg->procs, proc_node) {
		master = spg_node->master;
		mm = master->mm;
		tgid = master->instat.tgid;

		get_mm_rss_info(mm, &anon, &file, &shmem, &total_rss);

		seq_printf(seq, "%-8d ", tgid);
		seq_printf(seq, "%-8d ", id);
		seq_printf(seq, "%-9ld %-9ld %-9ld %-8ld %-7ld %-7ld ",
				get_spg_proc_alloc(spg_node),
				get_spg_proc_k2u(spg_node),
				get_sp_res_by_spg_proc(spg_node),
				page2kb(mm->total_vm), page2kb(total_rss),
				page2kb(shmem));
		print_process_prot(seq, spg_node->prot);
		seq_putc(seq, '\n');
	}
	up_read(&spg->rw_lock);
	cond_resched();

	return 0;
}

static int proc_group_usage_show(struct seq_file *seq, void *offset)
{
	if (!should_show_statistics())
		return -EPERM;

	spg_overview_show(seq);
	spa_overview_show(seq);

	/* print the file header */
	seq_printf(seq, "%-8s %-8s %-9s %-9s %-9s %-8s %-7s %-7s %-4s\n",
			"PID", "Group_ID", "SP_ALLOC", "SP_K2U", "SP_RES",
			"VIRT", "RES", "Shm", "PROT");
	/* print kthread buff_module_guard_work */
	seq_printf(seq, "%-8s %-8s %-9lld %-9lld\n",
			"guard", "-",
			byte2kb(atomic64_read(&kthread_stat.alloc_size)),
			byte2kb(atomic64_read(&kthread_stat.k2u_size)));

	down_read(&sp_group_sem);
	idr_for_each(&sp_group_idr, proc_usage_by_group, seq);
	up_read(&sp_group_sem);

	return 0;
}

static int proc_usage_show(struct seq_file *seq, void *offset)
{
	struct sp_group_master *master = NULL;
	unsigned long anon, file, shmem, total_rss;
	long sp_res, sp_res_nsize, non_sp_res, non_sp_shm;
	struct sp_proc_stat *proc_stat;

	if (!should_show_statistics())
		return -EPERM;

	seq_printf(seq, "%-8s %-16s %-9s %-9s %-9s %-10s %-10s %-8s\n",
			"PID", "COMM", "SP_ALLOC", "SP_K2U", "SP_RES", "Non-SP_RES",
			"Non-SP_Shm", "VIRT");

	down_read(&sp_group_sem);
	mutex_lock(&master_list_lock);
	list_for_each_entry(master, &master_list, list_node) {
		proc_stat = &master->instat;
		get_mm_rss_info(master->mm, &anon, &file, &shmem, &total_rss);
		get_process_sp_res(master, &sp_res, &sp_res_nsize);
		get_process_non_sp_res(total_rss, shmem, sp_res_nsize,
				&non_sp_res, &non_sp_shm);
		seq_printf(seq, "%-8d %-16s %-9ld %-9ld %-9ld %-10ld %-10ld %-8ld\n",
				proc_stat->tgid, proc_stat->comm,
				get_proc_alloc(proc_stat),
				get_proc_k2u(proc_stat),
				sp_res, non_sp_res, non_sp_shm,
				page2kb(master->mm->total_vm));
	}
	mutex_unlock(&master_list_lock);
	up_read(&sp_group_sem);

	return 0;
}

static void __init proc_sharepool_init(void)
{
	if (!proc_mkdir("sharepool", NULL))
		return;

	proc_create_single_data("sharepool/spa_stat", 0400, NULL, spa_stat_show, NULL);
	proc_create_single_data("sharepool/proc_stat", 0400, NULL, proc_group_usage_show, NULL);
	proc_create_single_data("sharepool/proc_overview", 0400, NULL, proc_usage_show, NULL);
}

/*** End of statistical and maintenance functions ***/

bool sp_check_addr(unsigned long addr)
{
	if (sp_is_enabled() && mg_is_sharepool_addr(addr) &&
	    !check_aoscore_process(current))
		return true;
	else
		return false;
}

bool sp_check_mmap_addr(unsigned long addr, unsigned long flags)
{
	if (sp_is_enabled() && mg_is_sharepool_addr(addr) &&
	    !check_aoscore_process(current) && !(flags & MAP_SHARE_POOL))
		return true;
	else
		return false;
}

vm_fault_t sharepool_no_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			struct address_space *mapping, pgoff_t idx,
			unsigned long address, pte_t *ptep, unsigned int flags)
{
	struct hstate *h = hstate_vma(vma);
	vm_fault_t ret = VM_FAULT_SIGBUS;
	unsigned long size;
	struct page *page;
	pte_t new_pte;
	spinlock_t *ptl;
	unsigned long haddr = address & huge_page_mask(h);
	bool new_page = false;
	int err;
	int node_id;
	struct sp_area *spa;

	spa = vma->vm_private_data;
	if (!spa) {
		pr_err("share pool: vma is invalid, not from sp mmap\n");
		return ret;
	}
	node_id = spa->node_id;

retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		if (idx >= size)
			goto out;

		page = alloc_huge_page(vma, haddr, 0);
		if (IS_ERR(page)) {
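			/*
			 * The hugetlb pool could not satisfy the request;
			 * fall back to allocating a huge page directly from
			 * the buddy allocator on the spa's node without
			 * triggering reclaim.
			 */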
			page = hugetlb_alloc_hugepage(node_id,
					HUGETLB_ALLOC_BUDDY | HUGETLB_ALLOC_NORECLAIM);
			if (!page)
				page = ERR_PTR(-ENOMEM);
		}
		if (IS_ERR(page)) {
			ptl = huge_pte_lock(h, mm, ptep);
			if (!huge_pte_none(huge_ptep_get(ptep))) {
				ret = 0;
				spin_unlock(ptl);
				goto out;
			}
			spin_unlock(ptl);
			ret = vmf_error(PTR_ERR(page));
			goto out;
		}
		__SetPageUptodate(page);
		new_page = true;

		/* sharepool pages are all shared */
		err = huge_add_to_page_cache(page, mapping, idx);
		if (err) {
			put_page(page);
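			/* another task instantiated the page first; retry the lookup */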
			if (err == -EEXIST)
				goto retry;
			goto out;
		}
	}


	ptl = huge_pte_lock(h, mm, ptep);
	size = i_size_read(mapping->host) >> huge_page_shift(h);
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!huge_pte_none(huge_ptep_get(ptep)))
		goto backout;

	page_dup_rmap(page, true);
	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, haddr, ptep, new_pte);

	hugetlb_count_add(pages_per_huge_page(h), mm);

	spin_unlock(ptl);

	if (new_page)
		SetPagePrivate(&page[1]);

	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(ptl);
	unlock_page(page);
	put_page(page);
	goto out;
}

/*
 * The caller must ensure that this function is called
 * when the last thread in the thread group exits.
 */
int sp_group_exit(void)
{
	struct mm_struct *mm;
	struct sp_group *spg;
	struct sp_group_master *master;
	struct sp_group_node *spg_node, *tmp;
	bool is_alive = true;

	if (!sp_is_enabled())
		return 0;

	if (current->flags & PF_KTHREAD)
		return 0;

	mm = current->mm;
	down_write(&sp_group_sem);

	master = mm->sp_group_master;
	if (!master) {
		up_write(&sp_group_sem);
		return 0;
	}

	list_for_each_entry_safe(spg_node, tmp, &master->node_list, group_node) {
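		/*
		 * Detach this process from every group it joined; if it is
		 * the last member, mark the group dead and notify listeners.
		 */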
		spg = spg_node->spg;

		down_write(&spg->rw_lock);
		/* a dead group should NOT be reactive again */
		if (spg_valid(spg) && list_is_singular(&spg->procs))
			is_alive = spg->is_alive = false;
		spg->proc_num--;
		list_del(&spg_node->proc_node);
		up_write(&spg->rw_lock);

		if (!is_alive)
			blocking_notifier_call_chain(&sp_notifier_chain, 0,
						     spg);
	}

	/* match with get_task_mm() in sp_group_add_task() */
	if (atomic_sub_and_test(master->count, &mm->mm_users)) {
		up_write(&sp_group_sem);
		WARN(1, "Invalid user counting\n");
		return 1;
	}

	up_write(&sp_group_sem);
	return 0;
}

void sp_group_post_exit(struct mm_struct *mm)
{
	struct sp_proc_stat *stat;
	long alloc_size, k2u_size;
	/* lockless visit */
	struct sp_group_master *master = mm->sp_group_master;
	struct sp_group_node *spg_node, *tmp;
	struct sp_group *spg;

	if (!sp_is_enabled() || !master)
		return;

	/*
	 * There are two basic scenarios when a process in the share pool is
	 * exiting but its share pool memory usage is not 0.
	 * 1. Process A called sp_alloc(), but terminated without calling
	 *    sp_free(). Then its share pool memory usage is a positive number.
	 * 2. Process A never called sp_alloc(), but process B in the same spg
	 *    called sp_alloc() to get an addr u. A obtained u somehow and
	 *    called sp_free(u). Now A's share pool memory usage is a negative
	 *    number. Notice B's memory usage will be a positive number.
	 *
	 * An informational message is printed in either scenario.
	 *
	 * A process not in any sp group needs no message, because it cannot
	 * have unfreed share pool memory.
	 */
	stat = &master->instat;
	if (stat) {
		alloc_size = atomic64_read(&stat->alloc_nsize) + atomic64_read(&stat->alloc_hsize);
		k2u_size = atomic64_read(&stat->k2u_size);

		if (alloc_size != 0 || k2u_size != 0)
			pr_info("process %s(%d) exits. It applied %ld aligned KB, k2u shared %ld aligned KB\n",
				stat->comm, stat->tgid,
				byte2kb(alloc_size), byte2kb(k2u_size));
	}

	down_write(&sp_group_sem);
	list_for_each_entry_safe(spg_node, tmp, &master->node_list, group_node) {
		spg = spg_node->spg;
		/* match with refcount inc in sp_group_add_task */
		if (atomic_dec_and_test(&spg->use_count))
			free_sp_group_locked(spg);
		list_del(&spg_node->group_node);
		kfree(spg_node);
	}
	up_write(&sp_group_sem);

	sp_del_group_master(master);

	kfree(master);
}

DEFINE_STATIC_KEY_FALSE(share_pool_enabled_key);

static int __init enable_share_pool(char *s)
{
	static_branch_enable(&share_pool_enabled_key);
	pr_info("Ascend enable share pool features via bootargs\n");

	return 1;
}
__setup("enable_ascend_share_pool", enable_share_pool);
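/* The feature stays off unless "enable_ascend_share_pool" is passed on the kernel command line. */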

static int __init share_pool_init(void)
{
	if (!sp_is_enabled())
		return 0;

	sp_mapping_normal = sp_mapping_create(SP_MAPPING_NORMAL);
	if (IS_ERR(sp_mapping_normal))
		goto fail;
	atomic_inc(&sp_mapping_normal->user);

	sp_mapping_ro = sp_mapping_create(SP_MAPPING_RO);
	if (IS_ERR(sp_mapping_ro))
		goto free_normal;
	atomic_inc(&sp_mapping_ro->user);

	proc_sharepool_init();

	return 0;

free_normal:
	kfree(sp_mapping_normal);
fail:
	pr_err("Ascend share pool initialization failed\n");
	static_branch_disable(&share_pool_enabled_key);
	return 1;
}
late_initcall(share_pool_init);