/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Huawei Ascend Share Pool Memory
 *
 * Copyright (C) 2020 Huawei Limited
 * Author: Tang Yizhou <tangyizhou@huawei.com>
 *         Zefan Li <lizefan@huawei.com>
 *         Wu Peng <wupeng58@huawei.com>
 *         Ding Tianhong <dingtgianhong@huawei.com>
 *         Zhou Guanghui <zhouguanghui1@huawei.com>
 *         Li Ming <limingming.li@huawei.com>
 *
 * This code is based on the HiSilicon Ascend platform.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) "share pool: " fmt

#include <linux/share_pool.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/mm_types.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/printk.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/atomic.h>
#include <linux/lockdep.h>
#include <linux/kernel.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/rmap.h>
#include <linux/preempt.h>
#include <linux/swapops.h>
#include <linux/mmzone.h>
#include <linux/timekeeping.h>
#include <linux/time64.h>
#include <linux/pagewalk.h>

#define spg_valid(spg)		((spg)->is_alive == true)

/* Use the spa va address as the mmap offset. This works because spa_file
 * is set up with a 64-bit address space, so the va is always covered.
 */
#define addr_offset(spa)	((spa)->va_start)

#define byte2kb(size)		((size) >> 10)
#define byte2mb(size)		((size) >> 20)
#define page2kb(page_num)	((page_num) << (PAGE_SHIFT - 10))
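
/*
 * Worked example (illustrative, assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):
 * byte2kb(1UL << 20) == 1024 and page2kb(256) == 1024, so 256 pages make up
 * 1 MiB.
 */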

#define MAX_GROUP_FOR_SYSTEM	50000
#define MAX_GROUP_FOR_TASK	3000
#define MAX_PROC_PER_GROUP	1024

#define GROUP_NONE		0

#define SEC2US(sec)		((sec) * 1000000)
#define NS2US(ns)		((ns) / 1000)

#define PF_DOMAIN_CORE		0x10000000	/* AOS CORE processes in sched.h */

static int system_group_count;

/* idr of all sp_groups */
static DEFINE_IDR(sp_group_idr);
/* rw semaphore for sp_group_idr and mm->sp_group_master */
static DECLARE_RWSEM(sp_group_sem);

static BLOCKING_NOTIFIER_HEAD(sp_notifier_chain);

static DEFINE_IDA(sp_group_id_ida);

/*** Statistical and maintenance tools ***/

/* list of all sp_group_masters */
static LIST_HEAD(master_list);
/* mutex to protect insert/delete ops from master_list */
static DEFINE_MUTEX(master_list_lock);

/* list of all spm-dvpp */
static LIST_HEAD(spm_dvpp_list);
/* mutex to protect insert/delete ops from spm_dvpp_list */
static DEFINE_MUTEX(spm_list_lock);

/* for kthread buff_module_guard_work */
static struct sp_meminfo kthread_stat;

#define SEQ_printf(m, x...)			\
do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_info(x);			\
} while (0)
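
/*
 * Usage sketch: SEQ_printf(seq, "total size: %ld\n", size); when the
 * seq_file pointer is NULL (no procfs buffer), the same format string
 * falls back to pr_info().
 */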

struct sp_meminfo {
	/* total size from sp_alloc and k2u */
	atomic64_t	size;
	/* not huge page size from sp_alloc */
	atomic64_t	alloc_nsize;
	/* huge page size from sp_alloc */
	atomic64_t	alloc_hsize;
	/* total size from sp_alloc */
	atomic64_t	alloc_size;
	/* total size from sp_k2u */
	atomic64_t	k2u_size;
};

#ifndef __GENKSYMS__

enum sp_mapping_type {
	SP_MAPPING_START,
	SP_MAPPING_DVPP		= SP_MAPPING_START,
	SP_MAPPING_NORMAL,
	SP_MAPPING_RO,
	SP_MAPPING_END,
};

/*
 * address space management
 */
struct sp_mapping {
	unsigned long type;
	atomic_t user;
	unsigned long start[MAX_DEVID];
	unsigned long end[MAX_DEVID];
	struct rb_root area_root;

	struct rb_node *free_area_cache;
	unsigned long cached_hole_size;
	unsigned long cached_vstart;

	/* list head for all groups attached to this mapping, dvpp mapping only */
	struct list_head group_head;
	struct list_head spm_node;
};

/* Processes in the same sp_group can share memory.
 * Memory layout for share pool:
 *
 * |-------------------- 8T -------------------|---|------ 8T ------------|
 * |		Device 0	   |  Device 1 |...|                      |
 * |----------------------------------------------------------------------|
 * |------------- 16G -------------|    16G    |   |                      |
 * | DVPP GROUP0   | DVPP GROUP1   | ... | ... |...|  sp normal memory    |
 * |     sp        |    sp         |     |     |   |                      |
 * |----------------------------------------------------------------------|
 *
 * The host SVM feature reserves 8T of virtual memory via mmap. Due to a
 * DVPP restriction, when both SVM and share pool allocate memory for DVPP,
 * the memory has to be in the same 32G range.
 *
 * Share pool reserves 16T of memory, with 8T for normal use and 8T for
 * DVPP. Within this 8T DVPP memory, SVM calls sp_config_dvpp_range() to
 * tell us which 16G memory range is reserved for the share pool.
 *
 * In some scenarios where there is no host SVM feature, share pool uses
 * the default 8G memory setting for DVPP.
 */
struct sp_group {
	int		 id;
	unsigned long	 flag;
	struct file	 *file;
	struct file	 *file_hugetlb;
	/* number of processes in this group */
	int		 proc_num;
	/* list head of processes (sp_group_node, each represents a process) */
	struct list_head procs;
	/* list head of sp_area. it is protected by spin_lock sp_area_lock */
	struct list_head spa_list;
	/* group statistics */
	struct sp_meminfo meminfo;
	/* is_alive == false means it's being destroyed */
	bool		 is_alive;
	atomic_t	 use_count;
	atomic_t	 spa_num;
	/* protect the group internal elements, except spa_list */
	struct rw_semaphore	rw_lock;
	/* list node for dvpp mapping */
	struct list_head	mnode;
	struct sp_mapping       *mapping[SP_MAPPING_END];
};

/* a per-process(per mm) struct which manages a sp_group_node list */
struct sp_group_master {
	pid_t tgid;
	/*
	 * number of sp groups the process belongs to,
	 * a.k.a the number of sp_node in node_list
	 */
	unsigned int count;
	/* list head of sp_node */
	struct list_head node_list;
	struct mm_struct *mm;
	/*
	 * Used to apply for the shared pool memory of the current process.
	 * For example, sp_alloc non-share memory or k2task.
	 */
	struct sp_group *local;
	struct sp_meminfo meminfo;
	struct list_head list_node;
	char comm[TASK_COMM_LEN];
};

/*
 * each instance represents an sp group the process belongs to
 * sp_group_master    : sp_group_node   = 1 : N
 * sp_group_node->spg : sp_group        = 1 : 1
 * sp_group_node      : sp_group->procs = N : 1
 */
struct sp_group_node {
	/* list node in sp_group->procs */
	struct list_head proc_node;
	/* list node in sp_group_master->node_list */
	struct list_head group_node;
	struct sp_group_master *master;
	struct sp_group *spg;
	unsigned long prot;

	/*
	 * alloc amount minus free amount, may be negative when freed by
	 * another task in the same sp group.
	 */
	struct sp_meminfo meminfo;
};
#endif

static inline void sp_add_group_master(struct sp_group_master *master)
{
	mutex_lock(&master_list_lock);
	list_add_tail(&master->list_node, &master_list);
	mutex_unlock(&master_list_lock);
}

static inline void sp_del_group_master(struct sp_group_master *master)
{
	mutex_lock(&master_list_lock);
	list_del(&master->list_node);
	mutex_unlock(&master_list_lock);
}

static void meminfo_init(struct sp_meminfo *meminfo)
{
	memset(meminfo, 0, sizeof(struct sp_meminfo));
}

static void meminfo_inc_usage(unsigned long size, bool huge, struct sp_meminfo *meminfo)
{
	atomic64_add(size, &meminfo->size);
	atomic64_add(size, &meminfo->alloc_size);
	if (huge)
		atomic64_add(size, &meminfo->alloc_hsize);
	else
		atomic64_add(size, &meminfo->alloc_nsize);
}

static void meminfo_dec_usage(unsigned long size, bool huge, struct sp_meminfo *meminfo)
{
	atomic64_sub(size, &meminfo->size);
	atomic64_sub(size, &meminfo->alloc_size);
	if (huge)
		atomic64_sub(size, &meminfo->alloc_hsize);
	else
		atomic64_sub(size, &meminfo->alloc_nsize);
}

static void meminfo_inc_k2u(unsigned long size, struct sp_meminfo *meminfo)
{
	atomic64_add(size, &meminfo->size);
	atomic64_add(size, &meminfo->k2u_size);
}

static void meminfo_dec_k2u(unsigned long size, struct sp_meminfo *meminfo)
{
	atomic64_sub(size, &meminfo->size);
	atomic64_sub(size, &meminfo->k2u_size);
}

static unsigned long sp_mapping_type(struct sp_mapping *spm)
{
	return spm->type;
}

static void sp_mapping_set_type(struct sp_mapping *spm, unsigned long type)
{
	spm->type = type;
}

static struct sp_mapping *sp_mapping_normal;
static struct sp_mapping *sp_mapping_ro;

static void sp_mapping_add_to_list(struct sp_mapping *spm)
{
	mutex_lock(&spm_list_lock);
	if (sp_mapping_type(spm) == SP_MAPPING_DVPP)
		list_add_tail(&spm->spm_node, &spm_dvpp_list);
	mutex_unlock(&spm_list_lock);
}

static void sp_mapping_remove_from_list(struct sp_mapping *spm)
{
	mutex_lock(&spm_list_lock);
	if (sp_mapping_type(spm) == SP_MAPPING_DVPP)
		list_del(&spm->spm_node);
	mutex_unlock(&spm_list_lock);
}

static void sp_mapping_range_init(struct sp_mapping *spm)
{
	int i;

	for (i = 0; i < MAX_DEVID; i++) {
		switch (sp_mapping_type(spm)) {
		case SP_MAPPING_RO:
			spm->start[i] = MMAP_SHARE_POOL_RO_START;
			spm->end[i]   = MMAP_SHARE_POOL_RO_END;
			break;
		case SP_MAPPING_NORMAL:
			spm->start[i] = MMAP_SHARE_POOL_NORMAL_START;
			spm->end[i]   = MMAP_SHARE_POOL_NORMAL_END;
			break;
		case SP_MAPPING_DVPP:
			spm->start[i] = MMAP_SHARE_POOL_DVPP_START + i * MMAP_SHARE_POOL_16G_SIZE;
			spm->end[i]   = spm->start[i] + MMAP_SHARE_POOL_16G_SIZE;
			break;
		default:
			pr_err("Invalid sp_mapping type [%lu]\n", sp_mapping_type(spm));
			break;
		}
	}
}
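
/*
 * Illustrative result of the initialization above: a DVPP mapping gives
 * device i the range
 *   [MMAP_SHARE_POOL_DVPP_START + i * MMAP_SHARE_POOL_16G_SIZE,
 *    MMAP_SHARE_POOL_DVPP_START + (i + 1) * MMAP_SHARE_POOL_16G_SIZE),
 * while the RO and normal mappings use one fixed range for all devices.
 */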

static struct sp_mapping *sp_mapping_create(unsigned long type)
{
	struct sp_mapping *spm;

	spm = kzalloc(sizeof(struct sp_mapping), GFP_KERNEL);
	if (!spm)
		return ERR_PTR(-ENOMEM);

	sp_mapping_set_type(spm, type);
	sp_mapping_range_init(spm);
	atomic_set(&spm->user, 0);
	spm->area_root = RB_ROOT;
	INIT_LIST_HEAD(&spm->group_head);
	sp_mapping_add_to_list(spm);

	return spm;
}

static void sp_mapping_destroy(struct sp_mapping *spm)
{
	sp_mapping_remove_from_list(spm);
	kfree(spm);
}

static void sp_mapping_attach(struct sp_group *spg, struct sp_mapping *spm)
{
	unsigned long type = sp_mapping_type(spm);
	atomic_inc(&spm->user);

	spg->mapping[type] = spm;
	if (type == SP_MAPPING_DVPP)
		list_add_tail(&spg->mnode, &spm->group_head);
}

static void sp_mapping_detach(struct sp_group *spg, struct sp_mapping *spm)
{
	unsigned long type;

	if (!spm)
		return;

	type = sp_mapping_type(spm);
	if (type == SP_MAPPING_DVPP)
		list_del(&spg->mnode);
	if (atomic_dec_and_test(&spm->user))
		sp_mapping_destroy(spm);

	spg->mapping[type] = NULL;
}

/* merge the old mapping into the new one; the old mapping is destroyed */
static void sp_mapping_merge(struct sp_mapping *new, struct sp_mapping *old)
{
	struct sp_group *spg, *tmp;

	if (new == old)
		return;

	list_for_each_entry_safe(spg, tmp, &old->group_head, mnode) {
		list_move_tail(&spg->mnode, &new->group_head);
		spg->mapping[SP_MAPPING_DVPP] = new;
	}

	atomic_add(atomic_read(&old->user), &new->user);
	sp_mapping_destroy(old);
}

static bool is_mapping_empty(struct sp_mapping *spm)
{
	return RB_EMPTY_ROOT(&spm->area_root);
}

static bool can_mappings_merge(struct sp_mapping *m1, struct sp_mapping *m2)
{
	int i;

	for (i = 0; i < MAX_DEVID; i++)
		if (m1->start[i] != m2->start[i] || m1->end[i] != m2->end[i])
			return false;

	return true;
}

/*
 * 1. The mapping of the local group is set on creation.
 * 2. This is used to set up the mappings for groups created during add_task.
 * 3. The normal mapping exists for all groups.
 * 4. The dvpp mappings of the new group and the local group can merge _iff_
 *    at least one of the mappings is empty.
 * The caller must hold sp_group_sem.
 * NOTE: undo the merging when a later step fails.
 */
static int sp_mapping_group_setup(struct mm_struct *mm, struct sp_group *spg)
{
	struct sp_mapping *local_dvpp_mapping, *spg_dvpp_mapping;

	local_dvpp_mapping = mm->sp_group_master->local->mapping[SP_MAPPING_DVPP];
	spg_dvpp_mapping = spg->mapping[SP_MAPPING_DVPP];

	if (!list_empty(&spg->procs) && !(spg->flag & SPG_FLAG_NON_DVPP)) {
		/*
		 * Don't return an error when the mappings' address ranges conflict.
		 * As long as one mapping is unused, we can drop the empty mapping.
		 * This may implicitly change the address range for the task or
		 * group, so give a warning for it.
		 */
		bool is_conflict = !can_mappings_merge(local_dvpp_mapping, spg_dvpp_mapping);

		if (is_mapping_empty(local_dvpp_mapping)) {
			sp_mapping_merge(spg_dvpp_mapping, local_dvpp_mapping);
			if (is_conflict)
				pr_warn_ratelimited("task address space conflict, spg_id=%d\n", spg->id);
		} else if (is_mapping_empty(spg_dvpp_mapping)) {
			sp_mapping_merge(local_dvpp_mapping, spg_dvpp_mapping);
			if (is_conflict)
				pr_warn_ratelimited("group address space conflict, spg_id=%d\n", spg->id);
		} else {
			pr_info_ratelimited("Duplicate address space, id=%d\n", spg->id);
			return -EINVAL;
		}
	} else {
		if (!(spg->flag & SPG_FLAG_NON_DVPP))
			/* the mapping of local group is always set */
			sp_mapping_attach(spg, local_dvpp_mapping);
		if (!spg->mapping[SP_MAPPING_NORMAL])
			sp_mapping_attach(spg, sp_mapping_normal);
		if (!spg->mapping[SP_MAPPING_RO])
			sp_mapping_attach(spg, sp_mapping_ro);
	}

	return 0;
}

static struct sp_mapping *sp_mapping_find(struct sp_group *spg,
						 unsigned long addr)
{
	if (addr >= MMAP_SHARE_POOL_NORMAL_START && addr < MMAP_SHARE_POOL_NORMAL_END)
		return spg->mapping[SP_MAPPING_NORMAL];

	if (addr >= MMAP_SHARE_POOL_RO_START && addr < MMAP_SHARE_POOL_RO_END)
		return spg->mapping[SP_MAPPING_RO];

	return spg->mapping[SP_MAPPING_DVPP];
}

static struct sp_group *create_spg(int spg_id, unsigned long flag);
static void free_new_spg_id(bool new, int spg_id);
static void free_sp_group_locked(struct sp_group *spg);
static int local_group_add_task(struct mm_struct *mm, struct sp_group *spg);
static int init_local_group(struct mm_struct *mm)
{
	int spg_id, ret;
	struct sp_group *spg;
	struct sp_mapping *spm;
	struct sp_group_master *master = mm->sp_group_master;

	spg_id = ida_alloc_range(&sp_group_id_ida, SPG_ID_LOCAL_MIN,
				 SPG_ID_LOCAL_MAX, GFP_ATOMIC);
	if (spg_id < 0) {
		pr_err_ratelimited("generate local group id failed %d\n", spg_id);
		return spg_id;
	}

	spg = create_spg(spg_id, 0);
	if (IS_ERR(spg)) {
		free_new_spg_id(true, spg_id);
		return PTR_ERR(spg);
	}

	master->local = spg;
	spm = sp_mapping_create(SP_MAPPING_DVPP);
	if (IS_ERR(spm)) {
		ret = PTR_ERR(spm);
		goto free_spg;
	}
	sp_mapping_attach(master->local, spm);
	sp_mapping_attach(master->local, sp_mapping_normal);
	sp_mapping_attach(master->local, sp_mapping_ro);

	ret = local_group_add_task(mm, spg);
	if (ret < 0)
		/* The spm would be released while destroying the spg */
		goto free_spg;

	return 0;

free_spg:
	/* spg_id is freed in free_sp_group_locked */
	free_sp_group_locked(spg);
	master->local = NULL;
	return ret;
}

/* The caller must hold sp_group_sem */
static int sp_init_group_master_locked(struct task_struct *tsk, struct mm_struct *mm)
{
	int ret;
	struct sp_group_master *master;

	if (mm->sp_group_master)
		return 0;

	master = kmalloc(sizeof(struct sp_group_master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	INIT_LIST_HEAD(&master->node_list);
	master->count = 0;
	master->mm = mm;
	master->tgid = tsk->tgid;
	get_task_comm(master->comm, current);
	meminfo_init(&master->meminfo);
	mm->sp_group_master = master;
	sp_add_group_master(master);

	ret = init_local_group(mm);
	if (ret)
		goto free_master;

	return 0;

free_master:
	sp_del_group_master(master);
	mm->sp_group_master = NULL;
	kfree(master);

	return ret;
}

static inline bool is_local_group(int spg_id)
{
	return spg_id >= SPG_ID_LOCAL_MIN && spg_id <= SPG_ID_LOCAL_MAX;
}

static struct sp_group *sp_get_local_group(struct task_struct *tsk, struct mm_struct *mm)
{
	int ret;
	struct sp_group_master *master;

	down_read(&sp_group_sem);
	master = mm->sp_group_master;
	if (master && master->local) {
		atomic_inc(&master->local->use_count);
		up_read(&sp_group_sem);
		return master->local;
	}
	up_read(&sp_group_sem);

	down_write(&sp_group_sem);
	ret = sp_init_group_master_locked(tsk, mm);
	if (ret) {
		up_write(&sp_group_sem);
		return ERR_PTR(ret);
	}
	master = mm->sp_group_master;
	atomic_inc(&master->local->use_count);
	up_write(&sp_group_sem);

	return master->local;
}

static void update_mem_usage_alloc(unsigned long size, bool inc,
		bool is_hugepage, struct sp_group_node *spg_node)
{
	if (inc) {
		meminfo_inc_usage(size, is_hugepage, &spg_node->meminfo);
		meminfo_inc_usage(size, is_hugepage, &spg_node->master->meminfo);
	} else {
		meminfo_dec_usage(size, is_hugepage, &spg_node->meminfo);
		meminfo_dec_usage(size, is_hugepage, &spg_node->master->meminfo);
	}
}

static void update_mem_usage_k2u(unsigned long size, bool inc,
		struct sp_group_node *spg_node)
{
	if (inc) {
		meminfo_inc_k2u(size, &spg_node->meminfo);
		meminfo_inc_k2u(size, &spg_node->master->meminfo);
	} else {
		meminfo_dec_k2u(size, &spg_node->meminfo);
		meminfo_dec_k2u(size, &spg_node->master->meminfo);
	}
}

/* statistics of all sp areas, protected by sp_area_lock */
struct sp_spa_stat {
	unsigned int total_num;
	unsigned int alloc_num;
	unsigned int k2u_task_num;
	unsigned int k2u_spg_num;
	unsigned long total_size;
	unsigned long alloc_size;
	unsigned long k2u_task_size;
	unsigned long k2u_spg_size;
	unsigned long dvpp_size;
	unsigned long dvpp_va_size;
};

static struct sp_spa_stat spa_stat;

/* statistics of all sp groups born from sp_alloc and k2u(spg) */
struct sp_overall_stat {
	atomic_t spa_total_num;
	atomic64_t spa_total_size;
};

static struct sp_overall_stat sp_overall_stat;

/*** Global share pool VA allocator ***/

enum spa_type {
	SPA_TYPE_ALLOC = 1,
	/* NOTE: reorganize after the statistical structure is reconstructed. */
	SPA_TYPE_ALLOC_PRIVATE = SPA_TYPE_ALLOC,
	SPA_TYPE_K2TASK,
	SPA_TYPE_K2SPG,
};

/*
 * We bump the reference when each mmap succeeds, and it is dropped when
 * the vma is about to be released, so the sp_area object is automatically
 * freed once all tasks in the sp group have exited.
 */
struct sp_area {
	unsigned long va_start;
	unsigned long va_end;		/* va_end always align to hugepage */
	unsigned long real_size;	/* real size with alignment */
	unsigned long region_vstart;	/* belong to normal region or DVPP region */
	unsigned long flags;
	bool is_hugepage;
	bool is_dead;
	atomic_t use_count;		/* How many vmas use this VA region */
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head link;		/* link to the spg->head */
	struct sp_group *spg;
	enum spa_type type;		/* where spa born from */
	struct mm_struct *mm;		/* owner of k2u(task) */
	unsigned long kva;		/* shared kva */
	pid_t applier;			/* the original applier process */
	int node_id;			/* memory node */
	int device_id;
};
static DEFINE_SPINLOCK(sp_area_lock);

static unsigned long spa_size(struct sp_area *spa)
{
	return spa->real_size;
}

static struct file *spa_file(struct sp_area *spa)
{
	if (spa->is_hugepage)
		return spa->spg->file_hugetlb;
	else
		return spa->spg->file;
}

/* the caller should hold sp_area_lock */
static void spa_inc_usage(struct sp_area *spa)
{
	enum spa_type type = spa->type;
	unsigned long size = spa->real_size;
	bool is_dvpp = spa->flags & SP_DVPP;
	bool is_huge = spa->is_hugepage;

	switch (type) {
	case SPA_TYPE_ALLOC:
		spa_stat.alloc_num += 1;
		spa_stat.alloc_size += size;
		meminfo_inc_usage(size, is_huge, &spa->spg->meminfo);
		break;
	case SPA_TYPE_K2TASK:
		spa_stat.k2u_task_num += 1;
		spa_stat.k2u_task_size += size;
		meminfo_inc_k2u(size, &spa->spg->meminfo);
		break;
	case SPA_TYPE_K2SPG:
		spa_stat.k2u_spg_num += 1;
		spa_stat.k2u_spg_size += size;
		meminfo_inc_k2u(size, &spa->spg->meminfo);
		break;
	default:
		WARN(1, "invalid spa type");
	}

	if (is_dvpp) {
		spa_stat.dvpp_size += size;
		spa_stat.dvpp_va_size += ALIGN(size, PMD_SIZE);
	}

	atomic_inc(&spa->spg->spa_num);
	/*
	 * all the calculations won't overflow due to system limitation and
	 * parameter checking in sp_alloc_area()
	 */
	spa_stat.total_num += 1;
	spa_stat.total_size += size;

	if (!is_local_group(spa->spg->id)) {
		atomic_inc(&sp_overall_stat.spa_total_num);
		atomic64_add(size, &sp_overall_stat.spa_total_size);
	}
}

/* the caller should hold sp_area_lock */
static void spa_dec_usage(struct sp_area *spa)
{
	enum spa_type type = spa->type;
	unsigned long size = spa->real_size;
	bool is_dvpp = spa->flags & SP_DVPP;
	bool is_huge = spa->is_hugepage;

	switch (type) {
	case SPA_TYPE_ALLOC:
		spa_stat.alloc_num -= 1;
		spa_stat.alloc_size -= size;
		meminfo_dec_usage(size, is_huge, &spa->spg->meminfo);
		break;
	case SPA_TYPE_K2TASK:
		spa_stat.k2u_task_num -= 1;
		spa_stat.k2u_task_size -= size;
		meminfo_dec_k2u(size, &spa->spg->meminfo);
		break;
	case SPA_TYPE_K2SPG:
		spa_stat.k2u_spg_num -= 1;
		spa_stat.k2u_spg_size -= size;
		meminfo_dec_k2u(size, &spa->spg->meminfo);
		break;
	default:
		WARN(1, "invalid spa type");
	}

	if (is_dvpp) {
		spa_stat.dvpp_size -= size;
		spa_stat.dvpp_va_size -= ALIGN(size, PMD_SIZE);
	}

	atomic_dec(&spa->spg->spa_num);
	spa_stat.total_num -= 1;
	spa_stat.total_size -= size;

	if (!is_local_group(spa->spg->id)) {
		atomic_dec(&sp_overall_stat.spa_total_num);
		atomic64_sub(spa->real_size, &sp_overall_stat.spa_total_size);
	}
}

static void update_mem_usage(unsigned long size, bool inc, bool is_hugepage,
	struct sp_group_node *spg_node, enum spa_type type)
{
	switch (type) {
	case SPA_TYPE_ALLOC:
		update_mem_usage_alloc(size, inc, is_hugepage, spg_node);
		break;
	case SPA_TYPE_K2TASK:
	case SPA_TYPE_K2SPG:
		update_mem_usage_k2u(size, inc, spg_node);
		break;
	default:
		WARN(1, "invalid stat type\n");
	}
}

struct sp_group_node *find_spg_node_by_spg(struct mm_struct *mm,
		struct sp_group *spg)
{
	struct sp_group_node *spg_node;

	list_for_each_entry(spg_node, &mm->sp_group_master->node_list, group_node) {
		if (spg_node->spg == spg)
			return spg_node;
	}
	return NULL;
}

static void sp_update_process_stat(struct task_struct *tsk, bool inc,
	struct sp_area *spa)
{
	struct sp_group_node *spg_node;
	unsigned long size = spa->real_size;
	enum spa_type type = spa->type;

	spg_node = find_spg_node_by_spg(tsk->mm, spa->spg);
	update_mem_usage(size, inc, spa->is_hugepage, spg_node, type);
}

static inline void check_interrupt_context(void)
{
	if (unlikely(in_interrupt()))
		panic("function can't be used in interrupt context\n");
}

static inline bool check_aoscore_process(struct task_struct *tsk)
{
	if (tsk->flags & PF_DOMAIN_CORE)
		return true;
	else
		return false;
}

static unsigned long sp_mmap(struct mm_struct *mm, struct file *file,
			     struct sp_area *spa, unsigned long *populate,
			     unsigned long prot, struct vm_area_struct **pvma);
static void sp_munmap(struct mm_struct *mm, unsigned long addr, unsigned long size);

#define K2U_NORMAL	0
#define K2U_COREDUMP	1

struct sp_k2u_context {
	unsigned long kva;
	unsigned long kva_aligned;
	unsigned long size;
	unsigned long size_aligned;
	unsigned long sp_flags;
	int state;
	enum spa_type type;
};

static unsigned long sp_remap_kva_to_vma(struct sp_area *spa, struct mm_struct *mm,
					unsigned long prot, struct sp_k2u_context *kc);

static void free_sp_group_id(int spg_id)
{
	/* ida operation is protected by an internal spin_lock */
	if ((spg_id >= SPG_ID_AUTO_MIN && spg_id <= SPG_ID_AUTO_MAX) ||
	    (spg_id >= SPG_ID_LOCAL_MIN && spg_id <= SPG_ID_LOCAL_MAX))
		ida_free(&sp_group_id_ida, spg_id);
}

static void free_new_spg_id(bool new, int spg_id)
{
	if (new)
		free_sp_group_id(spg_id);
}

static void free_sp_group_locked(struct sp_group *spg)
{
	int type;

	fput(spg->file);
	fput(spg->file_hugetlb);
	idr_remove(&sp_group_idr, spg->id);
	free_sp_group_id((unsigned int)spg->id);

	for (type = SP_MAPPING_START; type < SP_MAPPING_END; type++)
		sp_mapping_detach(spg, spg->mapping[type]);

	if (!is_local_group(spg->id))
		system_group_count--;

	kfree(spg);
	WARN(system_group_count < 0, "unexpected group count\n");
}

static void free_sp_group(struct sp_group *spg)
{
	down_write(&sp_group_sem);
	free_sp_group_locked(spg);
	up_write(&sp_group_sem);
}

static void sp_group_put_locked(struct sp_group *spg)
{
	lockdep_assert_held_write(&sp_group_sem);

	if (atomic_dec_and_test(&spg->use_count))
		free_sp_group_locked(spg);
}

static void sp_group_put(struct sp_group *spg)
{
	if (atomic_dec_and_test(&spg->use_count))
		free_sp_group(spg);
}

/* use with put_task_struct(task) */
static int get_task(int tgid, struct task_struct **task)
{
	struct task_struct *tsk;
	struct pid *p;

	rcu_read_lock();
	p = find_pid_ns(tgid, &init_pid_ns);
	tsk = pid_task(p, PIDTYPE_TGID);
	if (!tsk || (tsk->flags & PF_EXITING)) {
		rcu_read_unlock();
		return -ESRCH;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	*task = tsk;
	return 0;
}
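
/*
 * Typical pairing for get_task() (illustrative sketch):
 *
 *	struct task_struct *tsk;
 *
 *	if (!get_task(tgid, &tsk)) {
 *		...use tsk...
 *		put_task_struct(tsk);
 *	}
 */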

/*
 * the caller must:
 * 1. hold spg->rw_lock
 * 2. ensure no concurrency problem for mm_struct
 */
static bool is_process_in_group(struct sp_group *spg,
						 struct mm_struct *mm)
{
	struct sp_group_node *spg_node;

	list_for_each_entry(spg_node, &spg->procs, proc_node)
		if (spg_node->master->mm == mm)
			return true;

	return false;
}

/* user must call sp_group_put() after use */
static struct sp_group *sp_group_get_locked(int tgid, int spg_id)
{
	struct sp_group *spg = NULL;
	struct task_struct *tsk = NULL;
	int ret = 0;

	if (spg_id == SPG_ID_DEFAULT) {
		ret = get_task(tgid, &tsk);
		if (ret)
			return NULL;

		task_lock(tsk);
		if (tsk->mm == NULL)
			spg = NULL;
		else if (tsk->mm->sp_group_master)
			spg = tsk->mm->sp_group_master->local;
		task_unlock(tsk);

		put_task_struct(tsk);
	} else {
		spg = idr_find(&sp_group_idr, spg_id);
	}

	if (!spg || !atomic_inc_not_zero(&spg->use_count))
		return NULL;

	return spg;
}

static struct sp_group *sp_group_get(int tgid, int spg_id)
{
	struct sp_group *spg;

	down_read(&sp_group_sem);
	spg = sp_group_get_locked(tgid, spg_id);
	up_read(&sp_group_sem);
	return spg;
}

/**
 * mg_sp_group_id_by_pid() - Get the sp_group ID array of a process.
 * @tgid: tgid of the target process.
 * @spg_ids: points to an array that receives the IDs of the groups the
 *           process belongs to
 * @num: on input, the size of the spg_ids array; on output, the number
 *       of groups the process belongs to
 *
 * Return:
 * 0		- success, the group IDs are stored in @spg_ids.
 * -ENODEV	- target process doesn't belong to any sp_group.
 * -EINVAL	- spg_ids or num is NULL.
 * -E2BIG	- the number of groups the process belongs to exceeds *num.
 */
int mg_sp_group_id_by_pid(int tgid, int *spg_ids, int *num)
{
	int ret = 0, real_count;
	struct sp_group_node *node;
	struct sp_group_master *master = NULL;
	struct task_struct *tsk;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	if (!spg_ids || !num || *num <= 0)
		return -EINVAL;

	ret = get_task(tgid, &tsk);
	if (ret)
		return ret;

	down_read(&sp_group_sem);
	task_lock(tsk);
	if (tsk->mm)
		master = tsk->mm->sp_group_master;
	task_unlock(tsk);

	if (!master) {
		ret = -ENODEV;
		goto out_up_read;
	}

	/*
	 * There is a local group for each process, used for passthrough
	 * allocation. The local group is an internal implementation for
	 * convenience and is not intended to bother the user.
	 */
	real_count = master->count - 1;
	if (real_count <= 0) {
		ret = -ENODEV;
		goto out_up_read;
	}
	if ((unsigned int)*num < real_count) {
		ret = -E2BIG;
		goto out_up_read;
	}
	*num = real_count;

	list_for_each_entry(node, &master->node_list, group_node) {
		if (is_local_group(node->spg->id))
			continue;
		*(spg_ids++) = node->spg->id;
	}

out_up_read:
	up_read(&sp_group_sem);
	put_task_struct(tsk);
	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_group_id_by_pid);
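
/*
 * Example (illustrative): query the groups of a process.
 *
 *	int ids[16];
 *	int num = ARRAY_SIZE(ids);
 *	int ret = mg_sp_group_id_by_pid(tgid, ids, &num);
 *
 * On success, @num holds the group count; -E2BIG means the array was too
 * small, so retry with a larger one.
 */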

static bool is_online_node_id(int node_id)
{
	return node_id >= 0 && node_id < MAX_NUMNODES && node_online(node_id);
}

static void sp_group_init(struct sp_group *spg, int spg_id, unsigned long flag)
{
	spg->id = spg_id;
	spg->flag = flag;
	spg->is_alive = true;
	spg->proc_num = 0;
	atomic_set(&spg->use_count, 1);
	atomic_set(&spg->spa_num, 0);
	INIT_LIST_HEAD(&spg->procs);
	INIT_LIST_HEAD(&spg->spa_list);
	INIT_LIST_HEAD(&spg->mnode);
	init_rwsem(&spg->rw_lock);
	meminfo_init(&spg->meminfo);
}

static struct sp_group *create_spg(int spg_id, unsigned long flag)
{
	int ret;
	struct sp_group *spg;
	char name[DNAME_INLINE_LEN];
	struct user_struct *user = NULL;
	int hsize_log = MAP_HUGE_2MB >> MAP_HUGE_SHIFT;

	if (unlikely(system_group_count + 1 == MAX_GROUP_FOR_SYSTEM &&
		     !is_local_group(spg_id))) {
		pr_err("reach system max group num\n");
		return ERR_PTR(-ENOSPC);
	}

	spg = kzalloc(sizeof(*spg), GFP_KERNEL);
	if (spg == NULL)
		return ERR_PTR(-ENOMEM);

	sprintf(name, "sp_group_%d", spg_id);
	spg->file = shmem_kernel_file_setup(name, MAX_LFS_FILESIZE, VM_NORESERVE);
	if (IS_ERR(spg->file)) {
		pr_err("spg file setup failed %ld\n", PTR_ERR(spg->file));
		ret = PTR_ERR(spg->file);
		goto out_kfree;
	}

	sprintf(name, "sp_group_%d_huge", spg_id);
	spg->file_hugetlb = hugetlb_file_setup(name, MAX_LFS_FILESIZE,
				VM_NORESERVE, &user, HUGETLB_ANONHUGE_INODE, hsize_log);
	if (IS_ERR(spg->file_hugetlb)) {
		pr_err("spg file_hugetlb setup failed %ld\n", PTR_ERR(spg->file_hugetlb));
		ret = PTR_ERR(spg->file_hugetlb);
		goto out_fput;
	}

	sp_group_init(spg, spg_id, flag);

	ret = idr_alloc(&sp_group_idr, spg, spg_id, spg_id + 1, GFP_KERNEL);
	if (ret < 0) {
		pr_err("group %d idr alloc failed %d\n", spg_id, ret);
		goto out_fput_huge;
	}

	if (!is_local_group(spg_id))
		system_group_count++;

	return spg;

out_fput_huge:
	fput(spg->file_hugetlb);
out_fput:
	fput(spg->file);
out_kfree:
	kfree(spg);
	return ERR_PTR(ret);
}

/* the caller must hold sp_group_sem */
static struct sp_group *find_or_alloc_sp_group(int spg_id, unsigned long flag)
{
	struct sp_group *spg;

	spg = sp_group_get_locked(current->tgid, spg_id);

	if (!spg) {
		spg = create_spg(spg_id, flag);
	} else {
		down_read(&spg->rw_lock);
		if (!spg_valid(spg)) {
			up_read(&spg->rw_lock);
			sp_group_put_locked(spg);
			return ERR_PTR(-ENODEV);
		}
		up_read(&spg->rw_lock);
		/* spg->use_count has been increased by sp_group_get_locked() */
	}

	return spg;
}

static void __sp_area_drop_locked(struct sp_area *spa);

/* The caller must down_write(&mm->mmap_lock) */
static void sp_munmap_task_areas(struct mm_struct *mm, struct sp_group *spg, struct list_head *stop)
{
	struct sp_area *spa, *prev = NULL;
	int err;

	spin_lock(&sp_area_lock);
	list_for_each_entry(spa, &spg->spa_list, link) {
		if (&spa->link == stop)
			break;

		__sp_area_drop_locked(prev);
		prev = spa;

		atomic_inc(&spa->use_count);
		spin_unlock(&sp_area_lock);

		err = do_munmap(mm, spa->va_start, spa_size(spa), NULL);
		if (err) {
			/* we are not supposed to fail */
			pr_err("failed to unmap VA %pK when munmap task areas\n",
			       (void *)spa->va_start);
		}

		spin_lock(&sp_area_lock);
	}
	__sp_area_drop_locked(prev);

	spin_unlock(&sp_area_lock);
}

/* the caller must hold sp_group_sem */
static int mm_add_group_init(struct task_struct *tsk, struct mm_struct *mm,
			     struct sp_group *spg)
{
	int ret;
	struct sp_group_master *master;

	if (!mm->sp_group_master) {
		ret = sp_init_group_master_locked(tsk, mm);
		if (ret)
			return ret;
	} else {
		if (is_process_in_group(spg, mm)) {
			pr_err_ratelimited("task already in target group, id=%d\n", spg->id);
			return -EEXIST;
		}

		master = mm->sp_group_master;
		if (master->count == MAX_GROUP_FOR_TASK) {
			pr_err("task reaches max group num\n");
			return -ENOSPC;
		}
	}

	return 0;
}

/* the caller must hold sp_group_sem */
static struct sp_group_node *create_spg_node(struct mm_struct *mm,
	unsigned long prot, struct sp_group *spg)
{
	struct sp_group_master *master = mm->sp_group_master;
	struct sp_group_node *spg_node;

	spg_node = kzalloc(sizeof(struct sp_group_node), GFP_KERNEL);
	if (spg_node == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&spg_node->group_node);
	INIT_LIST_HEAD(&spg_node->proc_node);
	spg_node->spg = spg;
	spg_node->master = master;
	spg_node->prot = prot;
	meminfo_init(&spg_node->meminfo);

	list_add_tail(&spg_node->group_node, &master->node_list);
	master->count++;

	return spg_node;
}

/* the caller must down_write(&spg->rw_lock) */
static int insert_spg_node(struct sp_group *spg, struct sp_group_node *node)
{
	if (spg->proc_num + 1 == MAX_PROC_PER_GROUP) {
		pr_err_ratelimited("add group: group reaches max process num\n");
		return -ENOSPC;
	}

	spg->proc_num++;
	list_add_tail(&node->proc_node, &spg->procs);

	return 0;
}

/* the caller must down_write(&spg->rw_lock) */
static void delete_spg_node(struct sp_group *spg, struct sp_group_node *node)
{
	list_del(&node->proc_node);
	spg->proc_num--;
}

/* the caller must hold sp_group_sem */
static void free_spg_node(struct mm_struct *mm, struct sp_group *spg,
	struct sp_group_node *spg_node)
{
	struct sp_group_master *master = mm->sp_group_master;

	list_del(&spg_node->group_node);
	master->count--;

	kfree(spg_node);
}

static int local_group_add_task(struct mm_struct *mm, struct sp_group *spg)
{
	struct sp_group_node *node;

	node = create_spg_node(mm, PROT_READ | PROT_WRITE, spg);
	if (IS_ERR(node))
		return PTR_ERR(node);

	insert_spg_node(spg, node);
	mmget(mm);

	return 0;
}

/**
 * mg_sp_group_add_task() - Add a process to a share group (sp_group).
 * @tgid: the tgid of the task to be added.
 * @prot: the prot of the task for this spg.
 * @spg_id: the ID of the sp_group.
 *
 * A process can't be added to more than one sp_group in single-group mode
 * but can be in multiple-group mode.
 *
 * Return: A positive group number for success, -errno on failure.
 *
 * The manually specified ID is between [SPG_ID_MIN, SPG_ID_MAX].
 * The automatically allocated ID is between [SPG_ID_AUTO_MIN, SPG_ID_AUTO_MAX].
 * When negative, the return value is -errno.
 */
int mg_sp_group_add_task(int tgid, unsigned long prot, int spg_id)
{
	unsigned long flag = 0;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct sp_group *spg;
	struct sp_group_node *node = NULL;
	int ret = 0;
	bool id_newly_generated = false;
	struct sp_area *spa, *prev = NULL;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	/* only allow READ, READ | WRITE */
	if (!((prot == PROT_READ)
	      || (prot == (PROT_READ | PROT_WRITE)))) {
		pr_err_ratelimited("prot is invalid 0x%lx\n", prot);
		return -EINVAL;
	}

	if (spg_id < SPG_ID_MIN || spg_id > SPG_ID_AUTO) {
		pr_err_ratelimited("add group failed, invalid group id %d\n", spg_id);
		return -EINVAL;
	}

	if (spg_id >= SPG_ID_AUTO_MIN && spg_id <= SPG_ID_AUTO_MAX) {
		spg = sp_group_get(tgid, spg_id);

		if (!spg) {
			pr_err_ratelimited("spg %d hasn't been created\n", spg_id);
			return -EINVAL;
		}

		down_read(&spg->rw_lock);
		if (!spg_valid(spg)) {
			up_read(&spg->rw_lock);
			pr_err_ratelimited("add group failed, group id %d is dead\n", spg_id);
			sp_group_put(spg);
			return -EINVAL;
		}
		up_read(&spg->rw_lock);

		sp_group_put(spg);
	}

	if (spg_id == SPG_ID_AUTO) {
		spg_id = ida_alloc_range(&sp_group_id_ida, SPG_ID_AUTO_MIN,
					 SPG_ID_AUTO_MAX, GFP_ATOMIC);
		if (spg_id < 0) {
			pr_err_ratelimited("add group failed, auto generate group id failed\n");
			return spg_id;
		}
		id_newly_generated = true;
	}

	down_write(&sp_group_sem);

	ret = get_task(tgid, &tsk);
	if (ret) {
		up_write(&sp_group_sem);
		free_new_spg_id(id_newly_generated, spg_id);
		goto out;
	}

	if (check_aoscore_process(tsk)) {
		up_write(&sp_group_sem);
		ret = -EACCES;
		free_new_spg_id(id_newly_generated, spg_id);
		goto out_put_task;
	}

	/*
	 * group_leader: current thread may be exiting in a multithread process
	 *
	 * DESIGN IDEA
	 * We increase mm->mm_users deliberately to ensure it's decreased in
	 * share pool under only 2 circumstances, which simplifies the overall
	 * design as mm won't be freed unexpectedly.
	 *
	 * The corresponding refcount decrements are as follows:
	 * 1. the error handling branch of THIS function.
	 * 2. In sp_group_exit(). It's called only when process is exiting.
	 */
	mm = get_task_mm(tsk->group_leader);
	if (!mm) {
		up_write(&sp_group_sem);
		ret = -ESRCH;
		free_new_spg_id(id_newly_generated, spg_id);
		goto out_put_task;
	}

	if (mm->sp_group_master && mm->sp_group_master->tgid != tgid) {
		up_write(&sp_group_sem);
		pr_err("add: task(%d) is a vfork child of the original task(%d)\n",
			tgid, mm->sp_group_master->tgid);
		ret = -EINVAL;
		free_new_spg_id(id_newly_generated, spg_id);
		goto out_put_mm;
	}

	spg = find_or_alloc_sp_group(spg_id, flag);
	if (IS_ERR(spg)) {
		up_write(&sp_group_sem);
		ret = PTR_ERR(spg);
		free_new_spg_id(id_newly_generated, spg_id);
		goto out_put_mm;
	}

	down_write(&spg->rw_lock);
	ret = mm_add_group_init(tsk, mm, spg);
	if (ret) {
		up_write(&spg->rw_lock);
		goto out_drop_group;
	}

	ret = sp_mapping_group_setup(mm, spg);
	if (ret) {
		up_write(&spg->rw_lock);
		goto out_drop_group;
	}

	node = create_spg_node(mm, prot, spg);
	if (unlikely(IS_ERR(node))) {
		up_write(&spg->rw_lock);
		ret = PTR_ERR(node);
		goto out_drop_group;
	}

	ret = insert_spg_node(spg, node);
	if (unlikely(ret)) {
		up_write(&spg->rw_lock);
		goto out_drop_spg_node;
	}

	/*
	 * create mappings of existing shared memory segments into this
	 * new process' page table.
	 */
	spin_lock(&sp_area_lock);

	list_for_each_entry(spa, &spg->spa_list, link) {
		unsigned long populate = 0;
		struct file *file = spa_file(spa);
		unsigned long addr;
		unsigned long prot_spa = prot;

		if ((spa->flags & (SP_PROT_RO | SP_PROT_FOCUS)) == (SP_PROT_RO | SP_PROT_FOCUS))
			prot_spa &= ~PROT_WRITE;

		__sp_area_drop_locked(prev);
		prev = spa;

		atomic_inc(&spa->use_count);

		if (spa->is_dead == true)
			continue;

		spin_unlock(&sp_area_lock);

		if (spa->type == SPA_TYPE_K2SPG && spa->kva) {
			addr = sp_remap_kva_to_vma(spa, mm, prot_spa, NULL);
			if (IS_ERR_VALUE(addr))
				pr_warn("add group remap k2u failed %ld\n", addr);

			spin_lock(&sp_area_lock);
			continue;
		}

		down_write(&mm->mmap_lock);
		if (unlikely(mm->core_state)) {
			sp_munmap_task_areas(mm, spg, &spa->link);
			up_write(&mm->mmap_lock);
			ret = -EBUSY;
			pr_err("add group: encountered coredump, abort\n");
			spin_lock(&sp_area_lock);
			break;
		}

		addr = sp_mmap(mm, file, spa, &populate, prot_spa, NULL);
		if (IS_ERR_VALUE(addr)) {
			sp_munmap_task_areas(mm, spg, &spa->link);
			up_write(&mm->mmap_lock);
			ret = addr;
			pr_err("add group: sp mmap failed %d\n", ret);
			spin_lock(&sp_area_lock);
			break;
		}
		up_write(&mm->mmap_lock);

		if (populate) {
			ret = do_mm_populate(mm, spa->va_start, populate, 0);
			if (ret) {
				if (unlikely(fatal_signal_pending(current)))
					pr_warn_ratelimited("add group failed, current thread is killed\n");
				else
					pr_warn_ratelimited("add group failed, mm populate failed (potential no enough memory when -12): %d, spa type is %d\n",
					ret, spa->type);
				down_write(&mm->mmap_lock);
				sp_munmap_task_areas(mm, spg, spa->link.next);
				up_write(&mm->mmap_lock);
				spin_lock(&sp_area_lock);
				break;
			}
		}

		spin_lock(&sp_area_lock);
	}
	__sp_area_drop_locked(prev);
	spin_unlock(&sp_area_lock);

	if (unlikely(ret))
		delete_spg_node(spg, node);
	up_write(&spg->rw_lock);

out_drop_spg_node:
	if (unlikely(ret))
		free_spg_node(mm, spg, node);
	/*
	 * to simplify design, we don't release the resource of
	 * group_master and proc_stat, they will be freed when
	 * process is exiting.
	 */
out_drop_group:
	if (unlikely(ret)) {
		up_write(&sp_group_sem);
		sp_group_put(spg);
	} else
		up_write(&sp_group_sem);
out_put_mm:
	/* No need to put the mm if the sp group adds this mm successfully */
	if (unlikely(ret))
		mmput(mm);
out_put_task:
	put_task_struct(tsk);
out:
	return ret == 0 ? spg_id : ret;
}
EXPORT_SYMBOL_GPL(mg_sp_group_add_task);
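
/*
 * Example (illustrative): add the current process to an automatically
 * allocated group.
 *
 *	int spg_id = mg_sp_group_add_task(current->tgid,
 *					  PROT_READ | PROT_WRITE, SPG_ID_AUTO);
 *	if (spg_id < 0)
 *		return spg_id;
 *
 * The positive return value is the ID of the group that was joined.
 */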

/**
 * mg_sp_group_del_task() - delete a process from a sp group.
 * @tgid: the tgid of the task to be deleted
 * @spg_id: sharepool group id
 *
 * The group's spa list must be empty, or the deletion will fail.
 *
 * Return:
 * * 0 on success.
 * * -EINVAL, spg_id invalid, spa_list not empty, or spg dead
 * * -ESRCH, the task of tgid is not in the group / process dead
 */
int mg_sp_group_del_task(int tgid, int spg_id)
{
	int ret = 0;
	struct sp_group *spg;
	struct sp_group_node *spg_node;
	struct task_struct *tsk = NULL;
	struct mm_struct *mm = NULL;
	bool is_alive = true;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	if (spg_id < SPG_ID_MIN || spg_id > SPG_ID_AUTO) {
		pr_err("del from group failed, invalid group id %d\n", spg_id);
		return -EINVAL;
	}

	spg = sp_group_get(tgid, spg_id);
	if (!spg) {
		pr_err("spg not found or get task failed, tgid:%d, spg_id:%d\n",
			tgid, spg_id);
		return -EINVAL;
	}
	down_write(&sp_group_sem);

	if (!spg_valid(spg)) {
		up_write(&sp_group_sem);
		pr_err("spg dead, spg_id:%d\n", spg_id);
		ret = -EINVAL;
		goto out;
	}

	ret = get_task(tgid, &tsk);
	if (ret) {
		up_write(&sp_group_sem);
		pr_err("task is not found, tgid:%d\n", tgid);
		goto out;
	}
	mm = get_task_mm(tsk->group_leader);
	if (!mm) {
		up_write(&sp_group_sem);
		pr_err("mm is not found, tgid:%d\n", tgid);
		ret = -ESRCH;
		goto out_put_task;
	}

	if (!mm->sp_group_master) {
		up_write(&sp_group_sem);
		pr_err("task(%d) is not in any group(%d)\n", tgid, spg_id);
		ret = -EINVAL;
		goto out_put_mm;
	}

	if (mm->sp_group_master->tgid != tgid) {
		up_write(&sp_group_sem);
		pr_err("del: task(%d) is a vfork child of the original task(%d)\n",
			tgid, mm->sp_group_master->tgid);
		ret = -EINVAL;
		goto out_put_mm;
	}

	spg_node = find_spg_node_by_spg(mm, spg);
	if (!spg_node) {
		up_write(&sp_group_sem);
		pr_err("task(%d) not in group(%d)\n", tgid, spg_id);
		ret = -ESRCH;
		goto out_put_mm;
	}

	down_write(&spg->rw_lock);

	if (!list_empty(&spg->spa_list)) {
		up_write(&spg->rw_lock);
		up_write(&sp_group_sem);
		pr_err("spa is not empty, task:%d, spg_id:%d\n", tgid, spg_id);
		ret = -EINVAL;
		goto out_put_mm;
	}

	if (list_is_singular(&spg->procs))
		is_alive = spg->is_alive = false;
	spg->proc_num--;
	list_del(&spg_node->proc_node);
	sp_group_put(spg);
	up_write(&spg->rw_lock);
	if (!is_alive)
		blocking_notifier_call_chain(&sp_notifier_chain, 0, spg);

	list_del(&spg_node->group_node);
	mm->sp_group_master->count--;
	kfree(spg_node);
	atomic_dec(&mm->mm_users);

	up_write(&sp_group_sem);

out_put_mm:
	mmput(mm);
out_put_task:
	put_task_struct(tsk);
out:
	sp_group_put(spg); /* if spg dead, freed here */
	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_group_del_task);
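
/*
 * Example (illustrative): mg_sp_group_del_task(tgid, spg_id) succeeds only
 * after every sp_area of the group has been freed; it returns -EINVAL while
 * spg->spa_list is still populated.
 */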

int mg_sp_id_of_current(void)
{
	int ret, spg_id;
	struct sp_group_master *master;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	if ((current->flags & PF_KTHREAD) || !current->mm)
		return -EINVAL;

	down_read(&sp_group_sem);
	master = current->mm->sp_group_master;
	if (master) {
		spg_id = master->local->id;
		up_read(&sp_group_sem);
		return spg_id;
	}
	up_read(&sp_group_sem);

	down_write(&sp_group_sem);
	ret = sp_init_group_master_locked(current, current->mm);
	if (ret) {
		up_write(&sp_group_sem);
		return ret;
	}
	master = current->mm->sp_group_master;
	spg_id = master->local->id;
	up_write(&sp_group_sem);

	return spg_id;
}
EXPORT_SYMBOL_GPL(mg_sp_id_of_current);
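
/*
 * Example (illustrative):
 *
 *	int id = mg_sp_id_of_current();
 *
 * Returns the local group ID of the calling process, creating the local
 * group on first use; kernel threads get -EINVAL.
 */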

/* the caller must hold sp_area_lock */
static void insert_sp_area(struct sp_mapping *spm, struct sp_area *spa)
{
	struct rb_node **p = &spm->area_root.rb_node;
	struct rb_node *parent = NULL;

	while (*p) {
		struct sp_area *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct sp_area, rb_node);
		if (spa->va_start < tmp->va_end)
			p = &(*p)->rb_left;
		else if (spa->va_end > tmp->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&spa->rb_node, parent, p);
	rb_insert_color(&spa->rb_node, &spm->area_root);
}

/**
 * sp_alloc_area() - Allocate a region of VA from the share pool.
 * @size: the size of VA to allocate.
 * @flags: how to allocate the memory.
 * @spg: the share group that the memory is allocated to.
 * @type: the type of the region.
 * @applier: the tgid of the task which allocates the region.
 *
 * Return: a valid pointer on success, an ERR_PTR()-encoded errno on failure.
 */
static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags,
				     struct sp_group *spg, enum spa_type type,
				     pid_t applier)
{
	struct sp_area *spa, *first, *err;
	struct rb_node *n;
	unsigned long vstart;
	unsigned long vend;
	unsigned long addr;
	unsigned long size_align = ALIGN(size, PMD_SIZE); /* va aligned to 2M */
	int device_id, node_id;
	struct sp_mapping *mapping;

	device_id = sp_flags_device_id(flags);
	node_id = flags & SP_SPEC_NODE_ID ? sp_flags_node_id(flags) : device_id;

	if (!is_online_node_id(node_id)) {
		pr_err_ratelimited("invalid numa node id %d\n", node_id);
		return ERR_PTR(-EINVAL);
	}

	if (flags & SP_PROT_FOCUS) {
		if ((flags & (SP_DVPP | SP_PROT_RO)) != SP_PROT_RO) {
			pr_err("invalid sp_flags [%lx]\n", flags);
			return ERR_PTR(-EINVAL);
		}
		mapping = spg->mapping[SP_MAPPING_RO];
	} else if (flags & SP_DVPP) {
		mapping = spg->mapping[SP_MAPPING_DVPP];
	} else {
		mapping = spg->mapping[SP_MAPPING_NORMAL];
	}

	if (!mapping) {
		pr_err_ratelimited("non DVPP spg, id %d\n", spg->id);
		return ERR_PTR(-EINVAL);
	}

	vstart = mapping->start[device_id];
	vend = mapping->end[device_id];
	spa = __kmalloc_node(sizeof(struct sp_area), GFP_KERNEL, node_id);
	if (unlikely(!spa))
		return ERR_PTR(-ENOMEM);

	spin_lock(&sp_area_lock);

	/*
	 * Invalidate cache if we have more permissive parameters.
	 * cached_hole_size notes the largest hole noticed _below_
	 * the sp_area cached in free_area_cache: if size fits
	 * into that hole, we want to scan from vstart to reuse
	 * the hole instead of allocating above free_area_cache.
	 * Note that sp_free_area may update free_area_cache
	 * without updating cached_hole_size.
	 */
	if (!mapping->free_area_cache || size_align < mapping->cached_hole_size ||
	    vstart != mapping->cached_vstart) {
		mapping->cached_hole_size = 0;
		mapping->free_area_cache = NULL;
	}

	/* record if we encounter less permissive parameters */
	mapping->cached_vstart = vstart;

	/* find starting point for our search */
	if (mapping->free_area_cache) {
		first = rb_entry(mapping->free_area_cache, struct sp_area, rb_node);
		addr = first->va_end;
		if (addr + size_align < addr) {
			err = ERR_PTR(-EOVERFLOW);
			goto error;
		}
	} else {
		addr = vstart;
		if (addr + size_align < addr) {
			err = ERR_PTR(-EOVERFLOW);
			goto error;
		}

		n = mapping->area_root.rb_node;
		first = NULL;

		while (n) {
			struct sp_area *tmp;

			tmp = rb_entry(n, struct sp_area, rb_node);
			if (tmp->va_end >= addr) {
				first = tmp;
				if (tmp->va_start <= addr)
					break;
				n = n->rb_left;
			} else
				n = n->rb_right;
		}

		if (!first)
			goto found;
	}

	/* from the starting point, traverse areas until a suitable hole is found */
	while (addr + size_align > first->va_start && addr + size_align <= vend) {
		if (addr + mapping->cached_hole_size < first->va_start)
			mapping->cached_hole_size = first->va_start - addr;
		addr = first->va_end;
		if (addr + size_align < addr) {
			err = ERR_PTR(-EOVERFLOW);
			goto error;
		}

		n = rb_next(&first->rb_node);
		if (n)
			first = rb_entry(n, struct sp_area, rb_node);
		else
			goto found;
	}

found:
	if (addr + size_align > vend) {
		err = ERR_PTR(-EOVERFLOW);
		goto error;
	}

	spa->va_start = addr;
	spa->va_end = addr + size_align;
	spa->real_size = size;
	spa->region_vstart = vstart;
	spa->flags = flags;
	spa->is_hugepage = (flags & SP_HUGEPAGE);
	spa->is_dead = false;
	spa->spg = spg;
	atomic_set(&spa->use_count, 1);
	spa->type = type;
	spa->mm = NULL;
	spa->kva = 0;   /* NULL pointer */
	spa->applier = applier;
	spa->node_id = node_id;
	spa->device_id = device_id;

	spa_inc_usage(spa);
	insert_sp_area(mapping, spa);
	mapping->free_area_cache = &spa->rb_node;
	list_add_tail(&spa->link, &spg->spa_list);

	spin_unlock(&sp_area_lock);

	return spa;

error:
	spin_unlock(&sp_area_lock);
	kfree(spa);
	return err;
}

/* the caller should hold sp_area_lock */
static struct sp_area *find_sp_area_locked(struct sp_group *spg,
		unsigned long addr)
{
	struct sp_mapping *spm = sp_mapping_find(spg, addr);
	struct rb_node *n = spm->area_root.rb_node;
1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920
	while (n) {
		struct sp_area *spa;

		spa = rb_entry(n, struct sp_area, rb_node);
		if (addr < spa->va_start) {
			n = n->rb_left;
		} else if (addr > spa->va_start) {
			n = n->rb_right;
		} else {
			return spa;
		}
	}

	return NULL;
}

static struct sp_area *get_sp_area(struct sp_group *spg, unsigned long addr)
{
	struct sp_area *n;

	spin_lock(&sp_area_lock);
	n = find_sp_area_locked(spg, addr);
	if (n)
		atomic_inc(&n->use_count);
	spin_unlock(&sp_area_lock);
	return n;
}

static bool vmalloc_area_clr_flag(unsigned long kva, unsigned long flags)
{
	struct vm_struct *area;

	area = find_vm_area((void *)kva);
	if (area) {
		area->flags &= ~flags;
		return true;
	}

	return false;
}

/*
 * Return the VA region starting at addr back to the share pool
 */
static void sp_free_area(struct sp_area *spa)
{
	unsigned long addr = spa->va_start;
	struct sp_mapping *spm;

	lockdep_assert_held(&sp_area_lock);

	spm = sp_mapping_find(spa->spg, addr);
	if (spm->free_area_cache) {
		struct sp_area *cache;

		cache = rb_entry(spm->free_area_cache, struct sp_area, rb_node);
		if (spa->va_start <= cache->va_start) {
			spm->free_area_cache = rb_prev(&spa->rb_node);
			/*
			 * the new cache node may be changed to another region,
			 * i.e. from DVPP region to normal region
			 */
			if (spm->free_area_cache) {
				cache = rb_entry(spm->free_area_cache,
						 struct sp_area, rb_node);
				spm->cached_vstart = cache->region_vstart;
			}
			/*
			 * We don't try to update cached_hole_size,
			 * but it won't go very wrong.
			 */
		}
	}

	if (spa->kva && !vmalloc_area_clr_flag(spa->kva, VM_SHAREPOOL))
		pr_debug("clear spa->kva %lx is not valid\n", spa->kva);

	spa_dec_usage(spa);
	list_del(&spa->link);

	rb_erase(&spa->rb_node, &spm->area_root);
	RB_CLEAR_NODE(&spa->rb_node);
	kfree(spa);
}

static void __sp_area_drop_locked(struct sp_area *spa)
{
	/*
	 * Considering a situation where task A and B are in the same spg.
	 * A is exiting and calling remove_vma(). Before A calls this func,
	 * B calls sp_free() to free the same spa. So spa maybe NULL when A
	 * calls this func later.
	 */
	if (!spa)
		return;

	if (atomic_dec_and_test(&spa->use_count))
		sp_free_area(spa);
}

static void __sp_area_drop(struct sp_area *spa)
{
	spin_lock(&sp_area_lock);
	__sp_area_drop_locked(spa);
	spin_unlock(&sp_area_lock);
}

void sp_area_drop(struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARE_POOL))
		return;

	/*
	 * Considering a situation where task A and B are in the same spg.
	 * A is exiting and calling remove_vma() -> ... -> sp_area_drop().
	 * Concurrently, B is calling sp_free() to free the same spa.
	 * find_sp_area_locked() and __sp_area_drop_locked() should be
	 * an atomic operation.
	 */
	spin_lock(&sp_area_lock);
	__sp_area_drop_locked(vma->vm_private_data);
	spin_unlock(&sp_area_lock);
}

/*
 * The function calls of do_munmap() won't change any non-atomic member
 * of struct sp_group. Please review the following chain:
 * do_munmap -> remove_vma_list -> remove_vma -> sp_area_drop ->
 * __sp_area_drop_locked -> sp_free_area
 */
static void sp_munmap(struct mm_struct *mm, unsigned long addr,
			   unsigned long size)
{
	int err;

	down_write(&mm->mmap_lock);
	if (unlikely(mm->core_state)) {
		up_write(&mm->mmap_lock);
		pr_info("munmap: encoutered coredump\n");
		return;
	}

	err = do_munmap(mm, addr, size, NULL);
	/* we are not supposed to fail */
	if (err)
		pr_err("failed to unmap VA %pK when sp munmap\n", (void *)addr);

	up_write(&mm->mmap_lock);
}

static void __sp_free(struct sp_group *spg, unsigned long addr,
		      unsigned long size, struct mm_struct *stop)
{
	struct mm_struct *mm;
	struct sp_group_node *spg_node = NULL;

	list_for_each_entry(spg_node, &spg->procs, proc_node) {
		mm = spg_node->master->mm;
		if (mm == stop)
			break;
		sp_munmap(mm, addr, size);
	}
}

/* Free the memory of the backing shmem or hugetlbfs */
static void sp_fallocate(struct sp_area *spa)
{
	int ret;
	unsigned long mode = FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE;
	unsigned long offset = addr_offset(spa);

	ret = vfs_fallocate(spa_file(spa), mode, offset, spa_size(spa));
	if (ret)
		WARN(1, "sp fallocate failed %d\n", ret);
}

static void sp_free_unmap_fallocate(struct sp_area *spa)
{
	down_read(&spa->spg->rw_lock);
	__sp_free(spa->spg, spa->va_start, spa_size(spa), NULL);
	sp_fallocate(spa);
	up_read(&spa->spg->rw_lock);
}

static int sp_check_caller_permission(struct sp_group *spg, struct mm_struct *mm)
{
	int ret = 0;

	down_read(&spg->rw_lock);
	if (!is_process_in_group(spg, mm))
		ret = -EPERM;
	up_read(&spg->rw_lock);

	return ret;
}

#define FREE_CONT	1
#define FREE_END	2

struct sp_free_context {
	unsigned long addr;
	struct sp_area *spa;
	int state;
	int spg_id;
};

/* when success, __sp_area_drop(spa) should be used */
static int sp_free_get_spa(struct sp_free_context *fc)
{
	int ret = 0;
	unsigned long addr = fc->addr;
	struct sp_area *spa;
	struct sp_group *spg;

	spg = sp_group_get(current->tgid, fc->spg_id);
	if (!spg) {
		pr_debug("sp free get group failed %d\n", fc->spg_id);
		return -EINVAL;
	}

	fc->state = FREE_CONT;

	spa = get_sp_area(spg, addr);
	sp_group_put(spg);
	if (!spa) {
		pr_debug("sp free invalid input addr %lx\n", addr);
		return -EINVAL;
	}

	if (spa->type != SPA_TYPE_ALLOC) {
		ret = -EINVAL;
		pr_debug("sp free failed, %lx is not sp alloc addr\n", addr);
		goto drop_spa;
	}
	fc->spa = spa;

	if (!current->mm)
		goto check_spa;

	ret = sp_check_caller_permission(spa->spg, current->mm);
	if (ret < 0)
		goto drop_spa;

check_spa:
	if (is_local_group(spa->spg->id) && (current->tgid != spa->applier)) {
		ret = -EPERM;
		goto drop_spa;
	}

	down_write(&spa->spg->rw_lock);
	if (!spg_valid(spa->spg)) {
		fc->state = FREE_END;
		up_write(&spa->spg->rw_lock);
		goto drop_spa;
		/* we must return success(0) in this situation */
	}
	/* the life cycle of spa has a direct relation with sp group */
	if (unlikely(spa->is_dead)) {
		up_write(&spa->spg->rw_lock);
		pr_err_ratelimited("unexpected double sp free\n");
		dump_stack();
		ret = -EINVAL;
		goto drop_spa;
	}
	spa->is_dead = true;
	up_write(&spa->spg->rw_lock);

	return 0;

drop_spa:
	__sp_area_drop(spa);
	return ret;
}

/**
 * mg_sp_free() - Free the memory allocated by mg_sp_alloc().
 * @addr: the starting VA of the memory.
 * @id: the ID of the sp_group the memory belongs to, used to distinguish addr.
 *
 * Return:
 * * 0		- success.
 * * -EINVAL	- the memory can't be found or was not allocated by share pool.
 * * -EPERM	- the caller has no permission to free the memory.
 */
int mg_sp_free(unsigned long addr, int id)
{
	int ret = 0;
	struct sp_free_context fc = {
		.addr = addr,
		.spg_id = id,
	};

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

2202 2203 2204
	if (current->flags & PF_KTHREAD)
		return -EINVAL;

	ret = sp_free_get_spa(&fc);
	if (ret || fc.state == FREE_END)
		goto out;

	sp_free_unmap_fallocate(fc.spa);

	if (current->mm == NULL)
		atomic64_sub(fc.spa->real_size, &kthread_stat.alloc_size);
	else
		sp_update_process_stat(current, false, fc.spa);

	__sp_area_drop(fc.spa);  /* match get_sp_area in sp_free_get_spa */
out:
	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_free);
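
/*
 * Illustrative usage sketch of the alloc/free pair (not compiled in; the
 * group ID below is an assumption and must name an existing sp_group):
 *
 *	int spg_id = ...;
 *	void *p = mg_sp_alloc(4096, 0, spg_id);
 *
 *	if (!IS_ERR(p))
 *		mg_sp_free((unsigned long)p, spg_id);
 */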

/* wrapper of __do_mmap() and the caller must hold down_write(&mm->mmap_lock). */
static unsigned long sp_mmap(struct mm_struct *mm, struct file *file,
			     struct sp_area *spa, unsigned long *populate,
			     unsigned long prot, struct vm_area_struct **pvma)
{
	unsigned long addr = spa->va_start;
	unsigned long size = spa_size(spa);
	unsigned long flags = MAP_FIXED | MAP_SHARED | MAP_POPULATE |
			      MAP_SHARE_POOL;
	unsigned long vm_flags = VM_NORESERVE | VM_SHARE_POOL | VM_DONTCOPY;
	unsigned long pgoff = addr_offset(spa) >> PAGE_SHIFT;
	struct vm_area_struct *vma;

	atomic_inc(&spa->use_count);
	addr = __do_mmap_mm(mm, file, addr, size, prot, flags, vm_flags, pgoff,
			 populate, NULL);
	if (IS_ERR_VALUE(addr)) {
		atomic_dec(&spa->use_count);
		pr_err("do_mmap fails %ld\n", addr);
	} else {
		BUG_ON(addr != spa->va_start);
		vma = find_vma(mm, addr);
		vma->vm_private_data = spa;
		if (pvma)
			*pvma = vma;
	}

	return addr;
}

#define ALLOC_NORMAL	1
#define ALLOC_RETRY	2
#define ALLOC_NOMEM	3
#define ALLOC_COREDUMP	4

struct sp_alloc_context {
	struct sp_group *spg;
	struct file *file;
	unsigned long size;
	unsigned long size_aligned;
	unsigned long sp_flags;
	unsigned long populate;
	int state;
	bool have_mbind;
	enum spa_type type;
};

static int sp_alloc_prepare(unsigned long size, unsigned long sp_flags,
	int spg_id, struct sp_alloc_context *ac)
{
	struct sp_group *spg;

	check_interrupt_context();

	if (current->flags & PF_KTHREAD) {
		pr_err_ratelimited("allocation failed, task is kthread\n");
		return -EINVAL;
	}

	if (unlikely(!size || (size >> PAGE_SHIFT) > totalram_pages())) {
		pr_err_ratelimited("allocation failed, invalid size %lu\n", size);
		return -EINVAL;
	}

	if (spg_id != SPG_ID_DEFAULT && (spg_id < SPG_ID_MIN || spg_id >= SPG_ID_AUTO)) {
		pr_err_ratelimited("allocation failed, invalid group id %d\n", spg_id);
		return -EINVAL;
	}

	if (sp_flags & (~SP_FLAG_MASK)) {
		pr_err_ratelimited("allocation failed, invalid flag %lx\n", sp_flags);
		return -EINVAL;
	}

	if (sp_flags & SP_HUGEPAGE_ONLY)
		sp_flags |= SP_HUGEPAGE;

	if (spg_id != SPG_ID_DEFAULT) {
		spg = sp_group_get(current->tgid, spg_id);
		if (!spg) {
			pr_err_ratelimited("allocation failed, can't find group\n");
			return -ENODEV;
		}

		/* up_read will be at the end of sp_alloc */
		down_read(&spg->rw_lock);
		if (!spg_valid(spg)) {
			up_read(&spg->rw_lock);
			sp_group_put(spg);
			pr_err_ratelimited("allocation failed, spg is dead\n");
			return -ENODEV;
		}

		if (!is_process_in_group(spg, current->mm)) {
			up_read(&spg->rw_lock);
			sp_group_put(spg);
			pr_err_ratelimited("allocation failed, task not in group\n");
			return -ENODEV;
		}
		ac->type = SPA_TYPE_ALLOC;
	} else {  /* allocation pass-through scenario */
		spg = sp_get_local_group(current, current->mm);
		if (IS_ERR(spg))
			return PTR_ERR(spg);
		down_read(&spg->rw_lock);
		ac->type = SPA_TYPE_ALLOC_PRIVATE;
	}

	if (sp_flags & SP_HUGEPAGE) {
		ac->file = spg->file_hugetlb;
		ac->size_aligned = ALIGN(size, PMD_SIZE);
	} else {
		ac->file = spg->file;
		ac->size_aligned = ALIGN(size, PAGE_SIZE);
	}

	ac->spg = spg;
	ac->size = size;
	ac->sp_flags = sp_flags;
	ac->state = ALLOC_NORMAL;
	ac->have_mbind = false;
	return 0;
}

static void sp_alloc_unmap(struct mm_struct *mm, struct sp_area *spa,
	struct sp_group_node *spg_node)
{
	__sp_free(spa->spg, spa->va_start, spa->real_size, mm);
}

static int sp_alloc_mmap(struct mm_struct *mm, struct sp_area *spa,
	struct sp_group_node *spg_node, struct sp_alloc_context *ac)
{
	int ret = 0;
	unsigned long mmap_addr;
	/* pass through default permission */
	unsigned long prot = PROT_READ | PROT_WRITE;
	unsigned long populate = 0;
	struct vm_area_struct *vma;

	down_write(&mm->mmap_lock);
	if (unlikely(mm->core_state)) {
		up_write(&mm->mmap_lock);
		ac->state = ALLOC_COREDUMP;
		pr_info("allocation encountered coredump\n");
		return -EFAULT;
	}

	if (spg_node)
		prot = spg_node->prot;

	if (ac->sp_flags & SP_PROT_RO)
		prot = PROT_READ;

	/* on success, mmap_addr == spa->va_start */
	mmap_addr = sp_mmap(mm, spa_file(spa), spa, &populate, prot, &vma);
	if (IS_ERR_VALUE(mmap_addr)) {
		up_write(&mm->mmap_lock);
		sp_alloc_unmap(mm, spa, spg_node);
		pr_err("sp mmap in allocation failed %ld\n", mmap_addr);
		return PTR_ERR((void *)mmap_addr);
	}

	if (unlikely(populate == 0)) {
		up_write(&mm->mmap_lock);
		pr_err("allocation sp mmap populate failed\n");
		ret = -EFAULT;
		goto unmap;
	}
	ac->populate = populate;

	if (ac->sp_flags & SP_PROT_RO)
		vma->vm_flags &= ~VM_MAYWRITE;

	/* clean PTE_RDONLY flags or trigger SMMU event */
	if (prot & PROT_WRITE)
		vma->vm_page_prot = __pgprot(((~PTE_RDONLY) & vma->vm_page_prot.pgprot) | PTE_DIRTY);
	up_write(&mm->mmap_lock);

	return ret;

unmap:
	sp_alloc_unmap(list_next_entry(spg_node, proc_node)->master->mm, spa, spg_node);
	return ret;
}

static void sp_alloc_fallback(struct sp_area *spa, struct sp_alloc_context *ac)
{
	if (ac->file == ac->spg->file) {
		ac->state = ALLOC_NOMEM;
		return;
	}

	if (!(ac->sp_flags & SP_HUGEPAGE_ONLY)) {
		ac->file = ac->spg->file;
		ac->size_aligned = ALIGN(ac->size, PAGE_SIZE);
		ac->sp_flags &= ~SP_HUGEPAGE;
		ac->state = ALLOC_RETRY;
		__sp_area_drop(spa);
		return;
	}
	ac->state = ALLOC_NOMEM;
}

static int sp_alloc_populate(struct mm_struct *mm, struct sp_area *spa,
			     struct sp_alloc_context *ac)
{
	/*
	 * We are not ignoring errors, so if we fail to allocate
	 * physical memory we just return failure, so we won't encounter
	 * page fault later on, and more importantly sp_make_share_u2k()
	 * depends on this feature (and MAP_LOCKED) to work correctly.
	 */
	return do_mm_populate(mm, spa->va_start, ac->populate, 0);
}

static long sp_mbind(struct mm_struct *mm, unsigned long start, unsigned long len,
		unsigned long node)
{
	nodemask_t nmask;

	nodes_clear(nmask);
	node_set(node, nmask);
	return __do_mbind(start, len, MPOL_BIND, MPOL_F_STATIC_NODES,
			&nmask, MPOL_MF_STRICT, mm);
}

static int __sp_alloc_mmap_populate(struct mm_struct *mm, struct sp_area *spa,
	struct sp_group_node *spg_node, struct sp_alloc_context *ac)
{
	int ret;

	ret = sp_alloc_mmap(mm, spa, spg_node, ac);
	if (ret < 0)
		return ret;

	if (!ac->have_mbind) {
		ret = sp_mbind(mm, spa->va_start, spa->real_size, spa->node_id);
		if (ret < 0) {
			pr_err("cannot bind the memory range to specified node:%d, err:%d\n",
				spa->node_id, ret);
			return ret;
		}
		ac->have_mbind = true;
	}

	ret = sp_alloc_populate(mm, spa, ac);
	if (ret) {
		if (unlikely(fatal_signal_pending(current)))
			pr_warn_ratelimited("allocation failed, current thread is killed\n");
		else
			pr_warn_ratelimited("allocation failed due to mm populate failed(potential no enough memory when -12): %d\n",
2476
					ret);
2477
	}
	return ret;
}

static int sp_alloc_mmap_populate(struct sp_area *spa,
				  struct sp_alloc_context *ac)
{
	int ret = -EINVAL;
	int mmap_ret = 0;
	struct mm_struct *mm, *end_mm = NULL;
	struct sp_group_node *spg_node;

	/* create mapping for each process in the group */
	list_for_each_entry(spg_node, &spa->spg->procs, proc_node) {
		mm = spg_node->master->mm;
		mmap_ret = __sp_alloc_mmap_populate(mm, spa, spg_node, ac);
		if (mmap_ret) {
			/*
			 * Go to the fallback procedure upon error, but skip
			 * the coredump situation, because we don't want one
			 * misbehaving process to affect the others.
			 */
			if (ac->state != ALLOC_COREDUMP)
				goto unmap;

			/* Reset state and discard the coredump error. */
			ac->state = ALLOC_NORMAL;
			continue;
		}
		ret = mmap_ret;
	}

	return ret;

unmap:
	/* use the next mm in proc list as end mark */
	if (!list_is_last(&spg_node->proc_node, &spa->spg->procs))
		end_mm = list_next_entry(spg_node, proc_node)->master->mm;
	sp_alloc_unmap(end_mm, spa, spg_node);

	/*
	 * Sometimes do_mm_populate() allocates some memory and then fails to
	 * allocate more (e.g. when memory use reaches the cgroup limit).
	 * In this case it returns -ENOMEM, but it does not free the memory
	 * which has already been allocated.
	 *
	 * So if __sp_alloc_mmap_populate() fails, always call sp_fallocate()
	 * to make sure the backing physical memory of the shared file is freed.
	 */
	sp_fallocate(spa);

	/*
	 * If hugepage allocation fails, fall back to normal pages and try
	 * again (only if SP_HUGEPAGE_ONLY is not flagged).
	 */
	sp_alloc_fallback(spa, ac);

	return mmap_ret;
}

/* spa maybe an error pointer, so introduce variable spg */
static void sp_alloc_finish(int result, struct sp_area *spa,
		struct sp_alloc_context *ac)
{
	struct sp_group *spg = ac->spg;

	/* match sp_alloc_prepare */
	up_read(&spg->rw_lock);

	if (!result)
		sp_update_process_stat(current, true, spa);

	/* this will free spa if mmap failed */
	if (spa && !IS_ERR(spa))
		__sp_area_drop(spa);

	sp_group_put(spg);
}

/**
 * mg_sp_alloc() - Allocate shared memory for all the processes in a sp_group.
 * @size: the size of memory to allocate.
 * @sp_flags: how to allocate the memory.
 * @spg_id: the share group that the memory is allocated to.
 *
 * Use pass-through allocation if spg_id == SPG_ID_DEFAULT in multi-group mode.
 *
 * Return:
 * * if succeed, return the starting address of the shared memory.
 * * if fail, return the pointer of -errno.
 */
void *mg_sp_alloc(unsigned long size, unsigned long sp_flags, int spg_id)
{
	struct sp_area *spa = NULL;
	int ret = 0;
	struct sp_alloc_context ac;

	if (!sp_is_enabled())
		return ERR_PTR(-EOPNOTSUPP);

	ret = sp_alloc_prepare(size, sp_flags, spg_id, &ac);
	if (ret)
		return ERR_PTR(ret);

try_again:
	spa = sp_alloc_area(ac.size_aligned, ac.sp_flags, ac.spg,
			    ac.type, current->tgid);
	if (IS_ERR(spa)) {
		pr_err_ratelimited("alloc spa failed in allocation (potentially not enough virtual memory when -75): %ld\n",
			PTR_ERR(spa));
		ret = PTR_ERR(spa);
		goto out;
	}

	ret = sp_alloc_mmap_populate(spa, &ac);
	if (ret && ac.state == ALLOC_RETRY) {
		/*
		 * The mempolicy for shared memory is located at backend file, which varies
		 * between normal pages and huge pages. So we should set the mbind policy again
		 * when we retry using normal pages.
		 */
		ac.have_mbind = false;
		goto try_again;
	}

out:
	sp_alloc_finish(ret, spa, &ac);
	if (ret)
		return ERR_PTR(ret);
	else
		return (void *)(spa->va_start);
}
EXPORT_SYMBOL_GPL(mg_sp_alloc);
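
/*
 * Illustrative sketch of the hugepage fallback described above (spg_id is
 * an assumed, existing group): SP_HUGEPAGE requests huge pages but falls
 * back to normal pages on failure, while SP_HUGEPAGE_ONLY suppresses the
 * fallback and makes the allocation fail instead.
 *
 *	void *p = mg_sp_alloc(PMD_SIZE, SP_HUGEPAGE, spg_id);
 */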

/**
 * is_vmap_hugepage() - Check if a kernel address belongs to vmalloc family.
 * @addr: the kernel space address to be checked.
 *
 * Return:
 * * >0		- a vmalloc hugepage addr.
 * * =0		- a normal vmalloc addr.
 * * -errno	- failure.
 */
static int is_vmap_hugepage(unsigned long addr)
{
	struct vm_struct *area;

	if (unlikely(!addr)) {
		pr_err_ratelimited("null vmap addr pointer\n");
		return -EINVAL;
	}

	area = find_vm_area((void *)addr);
	if (unlikely(!area)) {
		pr_debug("can't find vm area(%lx)\n", addr);
		return -EINVAL;
	}

	if (area->flags & VM_HUGE_PAGES)
		return 1;
	else
		return 0;
}

static unsigned long __sp_remap_get_pfn(unsigned long kva)
{
	unsigned long pfn = -EINVAL;

	/* sp_make_share_k2u only supports vmalloc addresses */
	if (is_vmalloc_addr((void *)kva))
		pfn = vmalloc_to_pfn((void *)kva);

	return pfn;
}

/* when called by k2u to group, always make sure rw_lock of spg is down */
static unsigned long sp_remap_kva_to_vma(struct sp_area *spa, struct mm_struct *mm,
					unsigned long prot, struct sp_k2u_context *kc)
{
	struct vm_area_struct *vma;
	unsigned long ret_addr;
	unsigned long populate = 0;
	int ret = 0;
	unsigned long addr, buf, offset;
	unsigned long kva = spa->kva;

	down_write(&mm->mmap_lock);
	if (unlikely(mm->core_state)) {
		pr_err("k2u mmap: encountered coredump, abort\n");
		ret_addr = -EBUSY;
		if (kc)
			kc->state = K2U_COREDUMP;
		goto put_mm;
	}

	if (kc && (kc->sp_flags & SP_PROT_RO))
		prot = PROT_READ;

	ret_addr = sp_mmap(mm, spa_file(spa), spa, &populate, prot, &vma);
	if (IS_ERR_VALUE(ret_addr)) {
		pr_debug("k2u mmap failed %lx\n", ret_addr);
		goto put_mm;
	}

	if (prot & PROT_WRITE)
		vma->vm_page_prot = __pgprot(((~PTE_RDONLY) & vma->vm_page_prot.pgprot) | PTE_DIRTY);

	if (kc && (kc->sp_flags & SP_PROT_RO))
		vma->vm_flags &= ~VM_MAYWRITE;

	if (is_vm_hugetlb_page(vma)) {
		ret = remap_vmalloc_hugepage_range(vma, (void *)kva, 0);
		if (ret) {
			do_munmap(mm, ret_addr, spa_size(spa), NULL);
			pr_debug("remap vmalloc hugepage failed, ret %d, kva is %lx\n",
				 ret, (unsigned long)kva);
			ret_addr = ret;
			goto put_mm;
		}
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	} else {
		buf = ret_addr;
		addr = kva;
		offset = 0;
		do {
			ret = remap_pfn_range(vma, buf, __sp_remap_get_pfn(addr), PAGE_SIZE,
					__pgprot(vma->vm_page_prot.pgprot));
			if (ret) {
				do_munmap(mm, ret_addr, spa_size(spa), NULL);
				pr_err("remap_pfn_range failed %d\n", ret);
				ret_addr = ret;
				goto put_mm;
			}
			offset += PAGE_SIZE;
			buf += PAGE_SIZE;
			addr += PAGE_SIZE;
		} while (offset < spa_size(spa));
	}

put_mm:
	up_write(&mm->mmap_lock);

	return ret_addr;
}

/**
 * Share kernel memory to an spg; the current process must be in that group.
 * @kc: the context for k2u, including kva, size, flags...
 * @spg: the sp group to be shared with
 *
 * Return: the shared user address to start at
 */
static void *sp_make_share_kva_to_spg(struct sp_k2u_context *kc, struct sp_group *spg)
{
	struct sp_area *spa;
	struct mm_struct *mm;
	struct sp_group_node *spg_node;
	unsigned long ret_addr = -ENODEV;

	down_read(&spg->rw_lock);
	spa = sp_alloc_area(kc->size_aligned, kc->sp_flags, spg, kc->type, current->tgid);
	if (IS_ERR(spa)) {
		up_read(&spg->rw_lock);
		pr_err("alloc spa failed in k2u_spg (potentially not enough virtual memory when -75): %ld\n",
				PTR_ERR(spa));
		return spa;
	}

	spa->kva = kc->kva_aligned;
	list_for_each_entry(spg_node, &spg->procs, proc_node) {
		mm = spg_node->master->mm;
		kc->state = K2U_NORMAL;
		ret_addr = sp_remap_kva_to_vma(spa, mm, spg_node->prot, kc);
		if (IS_ERR_VALUE(ret_addr)) {
			if (kc->state == K2U_COREDUMP)
				continue;
			pr_err("remap k2u to spg failed %ld\n", ret_addr);
			__sp_free(spg, spa->va_start, spa_size(spa), mm);
			goto out;
		}
	}

out:
	up_read(&spg->rw_lock);
	if (!IS_ERR_VALUE(ret_addr))
		sp_update_process_stat(current, true, spa);
	__sp_area_drop(spa);

	return (void *)ret_addr;
}

static bool vmalloc_area_set_flag(unsigned long kva, unsigned long flags)
{
	struct vm_struct *area;

	area = find_vm_area((void *)kva);
	if (area) {
		area->flags |= flags;
		return true;
	}

	return false;
}

static int sp_k2u_prepare(unsigned long kva, unsigned long size,
	unsigned long sp_flags, int spg_id, struct sp_k2u_context *kc)
{
	int is_hugepage;
	unsigned int page_size = PAGE_SIZE;
	unsigned long kva_aligned, size_aligned;

	if (!size) {
		pr_err_ratelimited("k2u input size is 0.\n");
		return -EINVAL;
	}

	if (sp_flags & ~SP_FLAG_MASK) {
		pr_err_ratelimited("k2u sp_flags %lx error\n", sp_flags);
		return -EINVAL;
	}
	sp_flags &= ~SP_HUGEPAGE;

	if (!current->mm) {
		pr_err_ratelimited("k2u: kthread is not allowed\n");
		return -EPERM;
	}

	is_hugepage = is_vmap_hugepage(kva);
	if (is_hugepage > 0) {
		sp_flags |= SP_HUGEPAGE;
		page_size = PMD_SIZE;
	} else if (is_hugepage == 0) {
		/* do nothing */
	} else {
		pr_err_ratelimited("k2u kva is not vmalloc address\n");
		return is_hugepage;
	}

	/* aligned down kva is convenient for caller to start with any valid kva */
	kva_aligned = ALIGN_DOWN(kva, page_size);
	size_aligned = ALIGN(kva + size, page_size) - kva_aligned;

	if (!vmalloc_area_set_flag(kva_aligned, VM_SHAREPOOL)) {
		pr_debug("k2u_task kva %lx is not valid\n", kva_aligned);
		return -EINVAL;
	}

	kc->kva          = kva;
	kc->kva_aligned  = kva_aligned;
	kc->size         = size;
	kc->size_aligned = size_aligned;
	kc->sp_flags     = sp_flags;
	kc->type         = (spg_id == SPG_ID_DEFAULT || spg_id == SPG_ID_NONE)
				? SPA_TYPE_K2TASK : SPA_TYPE_K2SPG;

	return 0;
}

static void *sp_k2u_finish(void *uva, struct sp_k2u_context *kc)
{
	if (IS_ERR(uva))
		vmalloc_area_clr_flag(kc->kva_aligned, VM_SHAREPOOL);
	else
		uva = uva + (kc->kva - kc->kva_aligned);

	return uva;
}

/**
 * mg_sp_make_share_k2u() - Share kernel memory to current process or an sp_group.
 * @kva: the VA of shared kernel memory.
 * @size: the size of shared kernel memory.
 * @sp_flags: how to allocate the memory. We only support SP_DVPP.
 * @tgid: the tgid of the specified process (not currently in use).
 * @spg_id: the share group that the memory is shared to.
 *
 * Share kernel memory to current task if spg_id == SPG_ID_NONE
 * or SPG_ID_DEFAULT in multi-group mode.
 *
 * Return:
 * * if succeed, return the shared user address to start at.
 * * if fail, return the pointer of -errno.
 */
void *mg_sp_make_share_k2u(unsigned long kva, unsigned long size,
			unsigned long sp_flags, int tgid, int spg_id)
{
	void *uva;
	int ret;
	struct sp_k2u_context kc;
	struct sp_group *spg;

	if (!sp_is_enabled())
		return ERR_PTR(-EOPNOTSUPP);

	check_interrupt_context();

	ret = sp_k2u_prepare(kva, size, sp_flags, spg_id, &kc);
	if (ret)
		return ERR_PTR(ret);

	if (kc.type == SPA_TYPE_K2TASK) {
		down_write(&sp_group_sem);
		ret = sp_init_group_master_locked(current, current->mm);
		up_write(&sp_group_sem);
		if (ret) {
			pr_err("k2u_task init local mapping failed %d\n", ret);
			uva = ERR_PTR(ret);
			goto out;
		}
		/* the caller could use SPG_ID_NONE */
		spg_id = SPG_ID_DEFAULT;
	}

	spg = sp_group_get(current->tgid, spg_id);
	if (spg) {
		ret = sp_check_caller_permission(spg, current->mm);
		if (ret < 0) {
			sp_group_put(spg);
			uva = ERR_PTR(ret);
			goto out;
		}
		uva = sp_make_share_kva_to_spg(&kc, spg);
		sp_group_put(spg);
	} else {
		uva = ERR_PTR(-ENODEV);
	}

out:
	return sp_k2u_finish(uva, &kc);
}
EXPORT_SYMBOL_GPL(mg_sp_make_share_k2u);
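
/*
 * Illustrative k2u sketch (kbuf and spg_id are assumptions; the returned
 * uva must later be released with mg_sp_unshare()):
 *
 *	void *kbuf = vmalloc(SZ_64K);
 *	void *uva = mg_sp_make_share_k2u((unsigned long)kbuf, SZ_64K,
 *					 0, 0, spg_id);
 *
 *	if (!IS_ERR(uva))
 *		mg_sp_unshare((unsigned long)uva, SZ_64K, spg_id);
 */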

static int sp_pmd_entry(pmd_t *pmd, unsigned long addr,
			unsigned long next, struct mm_walk *walk)
{
	struct page *page;
	struct sp_walk_data *sp_walk_data = walk->private;

	/*
	 * There exists a case in DVPP where the page table uses huge pages but
	 * the vma doesn't record it, something like THP.
	 * So we cannot tell whether it is a hugepage mapping until we access
	 * the pmd here. If mixed sizes of pages appear, just return an error.
	 */
	if (pmd_huge(*pmd)) {
		if (!sp_walk_data->is_page_type_set) {
			sp_walk_data->is_page_type_set = true;
			sp_walk_data->is_hugepage = true;
		} else if (!sp_walk_data->is_hugepage) {
			return -EFAULT;
		}

		/* To skip pte level walk */
		walk->action = ACTION_CONTINUE;

		page = pmd_page(*pmd);
		get_page(page);
		sp_walk_data->pages[sp_walk_data->page_count++] = page;

		return 0;
	}

	if (!sp_walk_data->is_page_type_set) {
		sp_walk_data->is_page_type_set = true;
		sp_walk_data->is_hugepage = false;
	} else if (sp_walk_data->is_hugepage)
		return -EFAULT;

	sp_walk_data->pmd = pmd;

	return 0;
}

static int sp_pte_entry(pte_t *pte, unsigned long addr,
			unsigned long next, struct mm_walk *walk)
{
	struct page *page;
	struct sp_walk_data *sp_walk_data = walk->private;
	pmd_t *pmd = sp_walk_data->pmd;

retry:
	if (unlikely(!pte_present(*pte))) {
		swp_entry_t entry;

		if (pte_none(*pte))
			goto no_page;
		entry = pte_to_swp_entry(*pte);
		if (!is_migration_entry(entry))
			goto no_page;
		migration_entry_wait(walk->mm, pmd, addr);
		goto retry;
	}

	page = pte_page(*pte);
	get_page(page);
	sp_walk_data->pages[sp_walk_data->page_count++] = page;
	return 0;

no_page:
	pr_debug("the page of addr %lx unexpectedly not in RAM\n",
		 (unsigned long)addr);
	return -EFAULT;
}

static int sp_test_walk(unsigned long addr, unsigned long next,
			struct mm_walk *walk)
{
	/*
	 * FIXME: The devmm driver uses remap_pfn_range() but actually there
	 * are associated struct pages, so they should use vm_map_pages() or
	 * similar APIs. Before the driver has been converted to correct APIs
	 * we use this test_walk() callback so we can treat VM_PFNMAP VMAs as
	 * normal VMAs.
	 */
	return 0;
}

static int sp_pte_hole(unsigned long start, unsigned long end,
		       int depth, struct mm_walk *walk)
{
	pr_debug("hole [%lx, %lx) appeared unexpectedly\n", (unsigned long)start, (unsigned long)end);
	return -EFAULT;
}

static int sp_hugetlb_entry(pte_t *ptep, unsigned long hmask,
			    unsigned long addr, unsigned long next,
			    struct mm_walk *walk)
{
	pte_t pte = huge_ptep_get(ptep);
	struct page *page = pte_page(pte);
	struct sp_walk_data *sp_walk_data;

	if (unlikely(!pte_present(pte))) {
		pr_debug("the page of addr %lx unexpectedly not in RAM\n", (unsigned long)addr);
		return -EFAULT;
	}

	sp_walk_data = walk->private;
	get_page(page);
	sp_walk_data->pages[sp_walk_data->page_count++] = page;
	return 0;
}

/*
 * __sp_walk_page_range() - Walk page table with caller specific callbacks.
 * @uva: the start VA of user memory.
 * @size: the size of user memory.
 * @mm: mm struct of the target task.
 * @sp_walk_data: a structure of a page pointer array.
 *
 * the caller must hold mm->mmap_lock
 *
 * Notes for parameter alignment:
 * When size == 0, let it be page_size, so that at least one page is walked.
 *
 * When size > 0, for convenience, usually the parameters of uva and
 * size are not page aligned. There are four different alignment scenarios and
 * we must handle all of them correctly.
 *
 * The basic idea is to align down uva and align up size so all the pages
 * in range [uva, uva + size) are walked. However, there are special cases.
 *
 * Considering a 2M-hugepage addr scenario. Assuming the caller wants to
 * traverse range [1001M, 1004.5M), so uva and size is 1001M and 3.5M
 * accordingly. The aligned-down uva is 1000M and the aligned-up size is 4M.
 * The traverse range will be [1000M, 1004M). Obviously, the final page for
 * [1004M, 1004.5M) is not covered.
 *
 * To fix this problem, we need to walk an additional page, size should be
 * ALIGN(uva+size) - uva_aligned
 */
static int __sp_walk_page_range(unsigned long uva, unsigned long size,
	struct mm_struct *mm, struct sp_walk_data *sp_walk_data)
{
	int ret = 0;
	struct vm_area_struct *vma;
	unsigned long page_nr;
	struct page **pages = NULL;
	bool is_hugepage = false;
	unsigned long uva_aligned;
	unsigned long size_aligned;
	unsigned int page_size = PAGE_SIZE;
	struct mm_walk_ops sp_walk = {};

	/*
	 * Here we also support non share pool memory in this interface
	 * because the caller can't distinguish whether a uva is from the
	 * share pool or not. It is not the best idea to do so, but currently
	 * it simplifies overall design.
	 *
	 * In this situation, the correctness of the parameters is mainly
	 * guaranteed by the caller.
	 */
	vma = find_vma(mm, uva);
	if (!vma) {
		pr_debug("u2k input uva %lx is invalid\n", (unsigned long)uva);
		return -EINVAL;
	}
	if (is_vm_hugetlb_page(vma))
		is_hugepage = true;

	sp_walk.pte_hole = sp_pte_hole;
	sp_walk.test_walk = sp_test_walk;
	if (is_hugepage) {
		sp_walk_data->is_hugepage = true;
		sp_walk.hugetlb_entry = sp_hugetlb_entry;
		page_size = PMD_SIZE;
	} else {
		sp_walk_data->is_hugepage = false;
		sp_walk.pte_entry = sp_pte_entry;
		sp_walk.pmd_entry = sp_pmd_entry;
	}

	sp_walk_data->is_page_type_set = false;
	sp_walk_data->page_count = 0;
	sp_walk_data->page_size = page_size;
	uva_aligned = ALIGN_DOWN(uva, page_size);
	sp_walk_data->uva_aligned = uva_aligned;
	if (size == 0)
		size_aligned = page_size;
	else
		/* special alignment handling */
		size_aligned = ALIGN(uva + size, page_size) - uva_aligned;

	if (uva_aligned + size_aligned < uva_aligned) {
		pr_err_ratelimited("overflow happened in walk page range\n");
		return -EINVAL;
	}

	page_nr = size_aligned / page_size;
	pages = kvmalloc(page_nr * sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		pr_err_ratelimited("alloc page array failed in walk page range\n");
		return -ENOMEM;
	}
	sp_walk_data->pages = pages;

	ret = walk_page_range(mm, uva_aligned, uva_aligned + size_aligned,
			      &sp_walk, sp_walk_data);
	if (ret) {
		while (sp_walk_data->page_count--)
			put_page(pages[sp_walk_data->page_count]);
		kvfree(pages);
		sp_walk_data->pages = NULL;
	}

	if (sp_walk_data->is_hugepage)
		sp_walk_data->uva_aligned = ALIGN_DOWN(uva, PMD_SIZE);

	return ret;
}

static void __sp_walk_page_free(struct sp_walk_data *data)
{
	int i = 0;
	struct page *page;

	while (i < data->page_count) {
		page = data->pages[i++];
		put_page(page);
	}

	kvfree(data->pages);
	/* prevent repeated release */
	data->page_count = 0;
	data->pages = NULL;
}

/**
 * mg_sp_make_share_u2k() - Share user memory of a specified process to kernel.
 * @uva: the VA of shared user memory
 * @size: the size of shared user memory
 * @tgid: the tgid of the specified process (not currently in use)
 *
 * Return:
 * * if success, return the starting kernel address of the shared memory.
 * * if failed, return the pointer of -errno.
 */
void *mg_sp_make_share_u2k(unsigned long uva, unsigned long size, int tgid)
{
	int ret = 0;
	struct mm_struct *mm = current->mm;
	void *p = ERR_PTR(-ESRCH);
	struct sp_walk_data sp_walk_data;
	struct vm_struct *area;

	if (!sp_is_enabled())
		return ERR_PTR(-EOPNOTSUPP);

	check_interrupt_context();

	if (mm == NULL) {
		pr_err("u2k: kthread is not allowed\n");
		return ERR_PTR(-EPERM);
	}

	down_write(&mm->mmap_lock);
	if (unlikely(mm->core_state)) {
		up_write(&mm->mmap_lock);
		pr_err("u2k: encountered coredump, abort\n");
		return p;
	}

	ret = __sp_walk_page_range(uva, size, mm, &sp_walk_data);
	if (ret) {
		pr_err_ratelimited("walk page range failed %d\n", ret);
		up_write(&mm->mmap_lock);
		return ERR_PTR(ret);
	}

	if (sp_walk_data.is_hugepage)
		p = vmap_hugepage(sp_walk_data.pages, sp_walk_data.page_count,
				  VM_MAP, PAGE_KERNEL);
	else
		p = vmap(sp_walk_data.pages, sp_walk_data.page_count, VM_MAP,
			 PAGE_KERNEL);
	up_write(&mm->mmap_lock);

	if (!p) {
		pr_err("vmap(huge) in u2k failed\n");
		__sp_walk_page_free(&sp_walk_data);
		return ERR_PTR(-ENOMEM);
	}

	p = p + (uva - sp_walk_data.uva_aligned);

	/*
	 * kva p may be used later in k2u. Since p comes from uva originally,
	 * it's reasonable to add flag VM_USERMAP so that p can be remapped
	 * into userspace again.
	 */
	area = find_vm_area(p);
	area->flags |= VM_USERMAP;

	kvfree(sp_walk_data.pages);
	return p;
}
EXPORT_SYMBOL_GPL(mg_sp_make_share_u2k);
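
/*
 * Illustrative u2k sketch (uva and size are assumptions; the kernel
 * mapping is later dropped by passing the kernel address to
 * mg_sp_unshare(), which routes it to sp_unshare_kva()):
 *
 *	void *kva = mg_sp_make_share_u2k(uva, size, current->tgid);
 *
 *	if (!IS_ERR(kva))
 *		mg_sp_unshare((unsigned long)kva, size, SPG_ID_NONE);
 */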

/*
 * sp_unshare_uva - unshare a uva from sp_make_share_k2u
 * @uva: the uva to be unshared
 * @size: not used actually and we just check it
 * @group_id: specify the spg of the uva; for local group, it can be SPG_ID_DEFAULT
 *            unless current process is exiting.
 *
 * Procedure of unshare uva must be compatible with:
 *
 * 1. DVPP channel destroy procedure:
 * do_exit() -> exit_mm() (mm no longer in spg and current->mm == NULL) ->
 * exit_task_work() -> task_work_run() -> __fput() -> ... -> vdec_close() ->
3230
 * sp_unshare(uva, local_spg_id)
3231
 */
3232
static int sp_unshare_uva(unsigned long uva, unsigned long size, int group_id)
3233
{
3234 3235 3236
	int ret = 0;
	struct sp_area *spa;
	unsigned int page_size;
3237 3238
	struct sp_group *spg;

3239
	spg = sp_group_get(current->tgid, group_id);
3240
	if (!spg) {
3241
		pr_err("sp unshare find group failed %d\n", group_id);
3242 3243
		return -EINVAL;
	}
3244

3245
	/* All the spa are aligned to 2M. */
3246
	spa = get_sp_area(spg, ALIGN_DOWN(uva, PMD_SIZE));
3247
	if (!spa) {
3248 3249 3250
		ret = -EINVAL;
		pr_err("invalid input uva %lx in unshare uva\n", (unsigned long)uva);
		goto out;
3251 3252 3253
	}

	if (spa->type != SPA_TYPE_K2TASK && spa->type != SPA_TYPE_K2SPG) {
		pr_err("unshare wrong type spa\n");
		ret = -EINVAL;
		goto out_drop_area;
	}
	/*
	 * 1. overflow actually won't happen because the spa must be valid.
	 * 2. we must unshare [spa->va_start, spa->va_start + spa->real_size)
	 *    completely because an spa is in one-to-one correspondence with a vma.
	 *    Thus the input parameter size is not strictly needed.
	 */
	page_size = (spa->is_hugepage ? PMD_SIZE : PAGE_SIZE);

	if (spa->real_size < ALIGN(size, page_size)) {
		ret = -EINVAL;
		pr_err("unshare uva failed, invalid parameter size %lu\n", size);
		goto out_drop_area;
	}

	down_read(&spa->spg->rw_lock);
	/* always allow dvpp channel destroy procedure */
	if (current->mm && !is_process_in_group(spa->spg, current->mm)) {
		up_read(&spa->spg->rw_lock);
		pr_err("unshare uva failed, caller process doesn't belong to target group\n");
		ret = -EPERM;
		goto out_drop_area;
	}
	up_read(&spa->spg->rw_lock);

	down_write(&spa->spg->rw_lock);
	if (!spg_valid(spa->spg)) {
		up_write(&spa->spg->rw_lock);
		pr_info("no need to unshare uva, sp group of spa is dead\n");
		goto out_clr_flag;
	}
	/* the life cycle of spa has a direct relation with sp group */
	if (unlikely(spa->is_dead)) {
		up_write(&spa->spg->rw_lock);
		pr_err("unexpected double sp unshare\n");
		dump_stack();
		ret = -EINVAL;
		goto out_drop_area;
	}
	spa->is_dead = true;
	up_write(&spa->spg->rw_lock);

	down_read(&spa->spg->rw_lock);
	__sp_free(spa->spg, spa->va_start, spa->real_size, NULL);
	up_read(&spa->spg->rw_lock);

	if (current->mm == NULL)
		atomic64_sub(spa->real_size, &kthread_stat.k2u_size);
	else
		sp_update_process_stat(current, false, spa);

out_clr_flag:
	if (!vmalloc_area_clr_flag(spa->kva, VM_SHAREPOOL))
		pr_info("clear spa->kva %lx is not valid\n", spa->kva);
	spa->kva = 0;

out_drop_area:
	__sp_area_drop(spa);
out:
	sp_group_put(spg);
	return ret;
}

/* No possible concurrent protection, take care when use */
static int sp_unshare_kva(unsigned long kva, unsigned long size)
{
	unsigned long addr, kva_aligned;
	struct page *page;
	unsigned long size_aligned;
	unsigned long step;
	bool is_hugepage = true;
	int ret;

	ret = is_vmap_hugepage(kva);
	if (ret > 0) {
		kva_aligned = ALIGN_DOWN(kva, PMD_SIZE);
		size_aligned = ALIGN(kva + size, PMD_SIZE) - kva_aligned;
		step = PMD_SIZE;
	} else if (ret == 0) {
		kva_aligned = ALIGN_DOWN(kva, PAGE_SIZE);
		size_aligned = ALIGN(kva + size, PAGE_SIZE) - kva_aligned;
		step = PAGE_SIZE;
		is_hugepage = false;
	} else {
		pr_err_ratelimited("check vmap hugepage failed %d\n", ret);
		return -EINVAL;
	}

	if (kva_aligned + size_aligned < kva_aligned) {
		pr_err_ratelimited("overflow happened in unshare kva\n");
		return -EINVAL;
	}

	for (addr = kva_aligned; addr < (kva_aligned + size_aligned); addr += step) {
		page = vmalloc_to_page((void *)addr);
		if (page)
			put_page(page);
		else
			WARN(1, "vmalloc %pK to page/hugepage failed\n",
			       (void *)addr);
	}

	vunmap((void *)kva_aligned);

	return 0;
}

/**
 * mg_sp_unshare() - Unshare the kernel or user memory which was shared by
 *                   calling sp_make_share_{k2u,u2k}().
 * @va: the specified virtual address of memory
 * @size: the size of unshared memory
 * @spg_id: the ID of the sp_group the user address belongs to
 *
 * Use spg_id of current thread if spg_id == SPG_ID_DEFAULT.
 *
 * Return: 0 for success, -errno on failure.
 */
int mg_sp_unshare(unsigned long va, unsigned long size, int spg_id)
{
	int ret = 0;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	if (current->flags & PF_KTHREAD)
		return -EINVAL;

	if (va < TASK_SIZE) {
		/* user address */
		ret = sp_unshare_uva(va, size, spg_id);
	} else if (va >= PAGE_OFFSET) {
		/* kernel address */
		ret = sp_unshare_kva(va, size);
	} else {
		/* regard user and kernel address ranges as bad address */
		pr_debug("unshare addr %lx is not a user or kernel addr\n", (unsigned long)va);
		ret = -EFAULT;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_unshare);

/**
 * mg_sp_walk_page_range() - Walk page table with caller specific callbacks.
 * @uva: the start VA of user memory.
 * @size: the size of user memory.
 * @tsk: task struct of the target task.
 * @sp_walk_data: a structure of a page pointer array.
 *
 * Return: 0 for success, -errno on failure.
 *
 * When return 0, sp_walk_data describing [uva, uva+size) can be used.
 * When return -errno, information in sp_walk_data is useless.
 */
int mg_sp_walk_page_range(unsigned long uva, unsigned long size,
	struct task_struct *tsk, struct sp_walk_data *sp_walk_data)
{
	struct mm_struct *mm;
	int ret = 0;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	if (unlikely(!sp_walk_data)) {
		pr_err_ratelimited("null pointer when walk page range\n");
		return -EINVAL;
	}
	if (!tsk || (tsk->flags & PF_EXITING))
		return -ESRCH;

	get_task_struct(tsk);
	mm = get_task_mm(tsk);
	if (!mm) {
		put_task_struct(tsk);
		return -ESRCH;
	}

	down_write(&mm->mmap_lock);
	if (likely(!mm->core_state)) {
		ret = __sp_walk_page_range(uva, size, mm, sp_walk_data);
	} else {
		pr_err("walk page range: encountered coredump\n");
		ret = -ESRCH;
	}
	up_write(&mm->mmap_lock);

	mmput(mm);
	put_task_struct(tsk);

	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_walk_page_range);
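
/*
 * Illustrative walk sketch (tsk, uva and size are assumptions; on success
 * the page array in data must be released with mg_sp_walk_page_free()):
 *
 *	struct sp_walk_data data;
 *
 *	if (!mg_sp_walk_page_range(uva, size, tsk, &data)) {
 *		... use data.pages[0 .. data.page_count) ...
 *		mg_sp_walk_page_free(&data);
 *	}
 */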

/**
 * mg_sp_walk_page_free() - Free the sp_walk_data structure.
 * @sp_walk_data: a structure of a page pointer array to be freed.
 */
void mg_sp_walk_page_free(struct sp_walk_data *sp_walk_data)
{
	if (!sp_is_enabled())
		return;

	check_interrupt_context();

	if (!sp_walk_data)
		return;

	__sp_walk_page_free(sp_walk_data);
}
EXPORT_SYMBOL_GPL(mg_sp_walk_page_free);

int sp_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&sp_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(sp_register_notifier);

int sp_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&sp_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(sp_unregister_notifier);
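
/*
 * Illustrative notifier sketch (my_cb and my_nb are assumed caller-side
 * definitions; the chain itself is the sp_notifier_chain blocking notifier):
 *
 *	static int my_cb(struct notifier_block *nb, unsigned long action,
 *			 void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_cb };
 *
 *	sp_register_notifier(&my_nb);
 *	...
 *	sp_unregister_notifier(&my_nb);
 */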

static bool is_sp_dynamic_dvpp_addr(unsigned long addr);
/**
 * mg_sp_config_dvpp_range() - User can configure the share pool start address
 *                          of each Da-vinci device.
 * @start: the value of share pool start
 * @size: the value of share pool
 * @device_id: the num of Da-vinci device
 * @tgid: the tgid of device process
 *
 * Return true for success.
 * Return false if a parameter is invalid or the range has already been set up.
 * This function has no concurrency problem.
 */
bool mg_sp_config_dvpp_range(size_t start, size_t size, int device_id, int tgid)
{
	int ret;
	bool err = false;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct sp_group *spg;
	struct sp_mapping *spm;
	unsigned long default_start;

	if (!sp_is_enabled())
		return false;

	/* NOTE: check the start address */
	if (tgid < 0 || size <= 0 || size > MMAP_SHARE_POOL_16G_SIZE ||
	    device_id < 0 || device_id >= MAX_DEVID || !is_online_node_id(device_id) ||
	    !is_sp_dynamic_dvpp_addr(start) || !is_sp_dynamic_dvpp_addr(start + size - 1))
		return false;

	ret = get_task(tgid, &tsk);
	if (ret)
		return false;

	mm = get_task_mm(tsk->group_leader);
	if (!mm)
		goto put_task;

	spg = sp_get_local_group(tsk, mm);
	if (IS_ERR(spg))
		goto put_mm;

	spm = spg->mapping[SP_MAPPING_DVPP];
	default_start = MMAP_SHARE_POOL_DVPP_START + device_id * MMAP_SHARE_POOL_16G_SIZE;
	/* The dvpp range of each group can be configured only once */
	if (spm->start[device_id] != default_start)
		goto put_spg;

	spm->start[device_id] = start;
	spm->end[device_id] = start + size;

	err = true;

put_spg:
	sp_group_put(spg);
put_mm:
	mmput(mm);
put_task:
	put_task_struct(tsk);

	return err;
}
EXPORT_SYMBOL_GPL(mg_sp_config_dvpp_range);
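
/*
 * Illustrative configuration sketch (start, size and tgid are assumptions;
 * start and start + size - 1 must both fall inside one dynamic DVPP device
 * region, and the call succeeds only once per group):
 *
 *	bool ok = mg_sp_config_dvpp_range(start, size, 0, tgid);
 */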

static bool is_sp_reserve_addr(unsigned long addr)
{
	return addr >= MMAP_SHARE_POOL_START && addr < MMAP_SHARE_POOL_END;
}

/*
 *	| 16G host | 16G device | ... |     |
 *	^
 *	|
 *	MMAP_SHARE_POOL_DVPP_BASE + 16G * 64
 *	We only check the device regions.
 */
static bool is_sp_dynamic_dvpp_addr(unsigned long addr)
{
	if (addr < MMAP_SHARE_POOL_DYNAMIC_DVPP_BASE || addr >= MMAP_SHARE_POOL_DYNAMIC_DVPP_END)
		return false;

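	/*
	 * Assuming MMAP_SHARE_POOL_16G_SIZE is a power of two, the AND below
	 * tests the address bit that flips every 16G block: non-zero means an
	 * odd block index, i.e. a device region in the layout above.
	 */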
	return (addr - MMAP_SHARE_POOL_DYNAMIC_DVPP_BASE) & MMAP_SHARE_POOL_16G_SIZE;
}

/**
 * mg_is_sharepool_addr() - Check if a user memory address belongs to share pool.
 * @addr: the userspace address to be checked.
 *
 * Return true if addr belongs to share pool, or false vice versa.
 */
bool mg_is_sharepool_addr(unsigned long addr)
{
	return sp_is_enabled() &&
		(is_sp_reserve_addr(addr) || is_sp_dynamic_dvpp_addr(addr));
}
EXPORT_SYMBOL_GPL(mg_is_sharepool_addr);

int sp_node_id(struct vm_area_struct *vma)
{
	struct sp_area *spa;
	int node_id = numa_node_id();

	if (!sp_is_enabled())
		return node_id;

	if (vma && (vma->vm_flags & VM_SHARE_POOL) && vma->vm_private_data) {
		spa = vma->vm_private_data;
		node_id = spa->node_id;
	}

	return node_id;
}

/*** Statistical and maintenance functions ***/

static void get_mm_rss_info(struct mm_struct *mm, unsigned long *anon,
	unsigned long *file, unsigned long *shmem, unsigned long *total_rss)
{
	*anon = get_mm_counter(mm, MM_ANONPAGES);
	*file = get_mm_counter(mm, MM_FILEPAGES);
	*shmem = get_mm_counter(mm, MM_SHMEMPAGES);
	*total_rss = *anon + *file + *shmem;
}

static long get_proc_k2u(struct sp_meminfo *meminfo)
{
	return byte2kb(atomic64_read(&meminfo->k2u_size));
}

static long get_proc_alloc(struct sp_meminfo *meminfo)
{
	return byte2kb(atomic64_read(&meminfo->alloc_nsize) +
			atomic64_read(&meminfo->alloc_hsize));
}

static void get_process_sp_res(struct sp_group_master *master,
		long *sp_res_out, long *sp_res_nsize_out)
{
	struct sp_group *spg;
	struct sp_group_node *spg_node;

	*sp_res_out = 0;
	*sp_res_nsize_out = 0;

	list_for_each_entry(spg_node, &master->node_list, group_node) {
		spg = spg_node->spg;
		*sp_res_out += byte2kb(atomic64_read(&spg->meminfo.alloc_nsize));
		*sp_res_out += byte2kb(atomic64_read(&spg->meminfo.alloc_hsize));
		*sp_res_nsize_out += byte2kb(atomic64_read(&spg->meminfo.alloc_nsize));
	}
}

static long get_sp_res_by_spg_proc(struct sp_group_node *spg_node)
{
	return byte2kb(atomic64_read(&spg_node->spg->meminfo.alloc_nsize) +
			atomic64_read(&spg_node->spg->meminfo.alloc_hsize));
}

/*
 *  Statistics of RSS has a maximum 64 pages deviation (256KB).
 *  Please check_sync_rss_stat().
 */
static void get_process_non_sp_res(unsigned long total_rss, unsigned long shmem,
	long sp_res_nsize, long *non_sp_res_out, long *non_sp_shm_out)
{
	long non_sp_res, non_sp_shm;

	non_sp_res = page2kb(total_rss) - sp_res_nsize;
	non_sp_res = non_sp_res < 0 ? 0 : non_sp_res;
	non_sp_shm = page2kb(shmem) - sp_res_nsize;
	non_sp_shm = non_sp_shm < 0 ? 0 : non_sp_shm;

	*non_sp_res_out = non_sp_res;
	*non_sp_shm_out = non_sp_shm;
}

static long get_spg_proc_alloc(struct sp_group_node *spg_node)
{
	return byte2kb(atomic64_read(&spg_node->meminfo.alloc_nsize) +
				atomic64_read(&spg_node->meminfo.alloc_hsize));
}

static long get_spg_proc_k2u(struct sp_group_node *spg_node)
{
	return byte2kb(atomic64_read(&spg_node->meminfo.k2u_size));
}

static void print_process_prot(struct seq_file *seq, unsigned long prot)
{
	if (prot == PROT_READ)
		seq_puts(seq, "R");
	else if (prot == (PROT_READ | PROT_WRITE))
		seq_puts(seq, "RW");
	else
		seq_puts(seq, "-");
}

int proc_sp_group_state(struct seq_file *m, struct pid_namespace *ns,
			struct pid *pid, struct task_struct *task)
{
	struct mm_struct *mm;
	struct sp_group_master *master;
	struct sp_meminfo *meminfo;
	struct sp_group_node *spg_node;
	unsigned long anon, file, shmem, total_rss;
	long sp_res, sp_res_nsize, non_sp_res, non_sp_shm;

	if (!sp_is_enabled())
		return 0;

	mm = get_task_mm(task);
	if (!mm)
		return 0;

	down_read(&sp_group_sem);
	down_read(&mm->mmap_lock);
	master = mm->sp_group_master;
	if (!master)
		goto out;

	get_mm_rss_info(mm, &anon, &file, &shmem, &total_rss);
	meminfo = &master->meminfo;
	get_process_sp_res(master, &sp_res, &sp_res_nsize);
	get_process_non_sp_res(total_rss, shmem, sp_res_nsize,
			       &non_sp_res, &non_sp_shm);

	seq_puts(m, "Share Pool Aggregate Data of This Process\n\n");
	seq_printf(m, "%-8s %-16s %-9s %-9s %-9s %-10s %-10s %-8s\n",
		   "PID", "COMM", "SP_ALLOC", "SP_K2U", "SP_RES", "Non-SP_RES",
		   "Non-SP_Shm", "VIRT");
	seq_printf(m, "%-8d %-16s %-9ld %-9ld %-9ld %-10ld %-10ld %-8ld\n",
		   master->tgid, master->comm,
		   get_proc_alloc(meminfo),
		   get_proc_k2u(meminfo),
		   sp_res, non_sp_res, non_sp_shm,
		   page2kb(mm->total_vm));

	seq_puts(m, "\n\nProcess in Each SP Group\n\n");
	seq_printf(m, "%-8s %-9s %-9s %-9s %-4s\n",
			"Group_ID", "SP_ALLOC", "SP_K2U", "SP_RES", "PROT");

	list_for_each_entry(spg_node, &master->node_list, group_node) {
		seq_printf(m, "%-8d %-9ld %-9ld %-9ld ",
				spg_node->spg->id,
				get_spg_proc_alloc(spg_node),
				get_spg_proc_k2u(spg_node),
				get_sp_res_by_spg_proc(spg_node));
		print_process_prot(m, spg_node->prot);
		seq_putc(m, '\n');
	}

out:
	up_read(&mm->mmap_lock);
	up_read(&sp_group_sem);
	mmput(mm);
	return 0;
}

static void spa_stat_of_mapping_show(struct seq_file *seq, struct sp_mapping *spm)
{
	struct rb_node *node;
	struct sp_area *spa, *prev = NULL;

	spin_lock(&sp_area_lock);
	for (node = rb_first(&spm->area_root); node; node = rb_next(node)) {
		__sp_area_drop_locked(prev);

		spa = rb_entry(node, struct sp_area, rb_node);
		prev = spa;
		atomic_inc(&spa->use_count);
		spin_unlock(&sp_area_lock);

		if (spg_valid(spa->spg))  /* k2u to group */
			seq_printf(seq, "%-10d ", spa->spg->id);
		else  /* spg is dead */
			seq_printf(seq, "%-10s ", "Dead");

		seq_printf(seq, "%2s%-14lx %2s%-14lx %-10ld ",
			   "0x", spa->va_start,
			   "0x", spa->va_end,
			   byte2kb(spa->real_size));

		switch (spa->type) {
		case SPA_TYPE_ALLOC:
			seq_printf(seq, "%-7s ", "ALLOC");
			break;
		case SPA_TYPE_K2TASK:
			seq_printf(seq, "%-7s ", "TASK");
			break;
		case SPA_TYPE_K2SPG:
			seq_printf(seq, "%-7s ", "SPG");
			break;
		default:
			/* usually impossible, perhaps a developer's mistake */
			break;
		}

		if (spa->is_hugepage)
			seq_printf(seq, "%-5s ", "Y");
		else
			seq_printf(seq, "%-5s ", "N");

		seq_printf(seq, "%-8d ",  spa->applier);
		seq_printf(seq, "%-8d\n", atomic_read(&spa->use_count));

		spin_lock(&sp_area_lock);
	}
	__sp_area_drop_locked(prev);
	spin_unlock(&sp_area_lock);
}

static void spa_ro_stat_show(struct seq_file *seq)
{
	spa_stat_of_mapping_show(seq, sp_mapping_ro);
}

static void spa_normal_stat_show(struct seq_file *seq)
{
	spa_stat_of_mapping_show(seq, sp_mapping_normal);
}

static void spa_dvpp_stat_show(struct seq_file *seq)
{
	struct sp_mapping *spm;

	mutex_lock(&spm_list_lock);
	list_for_each_entry(spm, &spm_dvpp_list, spm_node)
		spa_stat_of_mapping_show(seq, spm);
	mutex_unlock(&spm_list_lock);
}

void spa_overview_show(struct seq_file *seq)
{
	unsigned int total_num, alloc_num, k2u_task_num, k2u_spg_num;
	unsigned long total_size, alloc_size, k2u_task_size, k2u_spg_size;
	unsigned long dvpp_size, dvpp_va_size;

	if (!sp_is_enabled())
		return;

	spin_lock(&sp_area_lock);
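	/* Copy the counters to locals so printing can happen after unlock. */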
	total_num     = spa_stat.total_num;
	alloc_num     = spa_stat.alloc_num;
	k2u_task_num  = spa_stat.k2u_task_num;
	k2u_spg_num   = spa_stat.k2u_spg_num;
	total_size    = spa_stat.total_size;
	alloc_size    = spa_stat.alloc_size;
	k2u_task_size = spa_stat.k2u_task_size;
	k2u_spg_size  = spa_stat.k2u_spg_size;
	dvpp_size     = spa_stat.dvpp_size;
	dvpp_va_size  = spa_stat.dvpp_va_size;
	spin_unlock(&sp_area_lock);

	SEQ_printf(seq, "Spa total num %u.\n", total_num);
	SEQ_printf(seq, "Spa alloc num %u, k2u(task) num %u, k2u(spg) num %u.\n",
		   alloc_num, k2u_task_num, k2u_spg_num);
	SEQ_printf(seq, "Spa total size:     %13lu KB\n", byte2kb(total_size));
	SEQ_printf(seq, "Spa alloc size:     %13lu KB\n", byte2kb(alloc_size));
	SEQ_printf(seq, "Spa k2u(task) size: %13lu KB\n", byte2kb(k2u_task_size));
	SEQ_printf(seq, "Spa k2u(spg) size:  %13lu KB\n", byte2kb(k2u_spg_size));
	SEQ_printf(seq, "Spa dvpp size:      %13lu KB\n", byte2kb(dvpp_size));
	SEQ_printf(seq, "Spa dvpp va size:   %13lu MB\n", byte2mb(dvpp_va_size));
	SEQ_printf(seq, "\n");
}

static int spg_info_show(int id, void *p, void *data)
{
	struct sp_group *spg = p;
	struct seq_file *seq = data;

	if (id >= SPG_ID_LOCAL_MIN && id <= SPG_ID_LOCAL_MAX)
		return 0;

	SEQ_printf(seq, "Group %6d ", id);

	down_read(&spg->rw_lock);
	SEQ_printf(seq, "size: %lld KB, spa num: %d, total alloc: %lld KB, normal alloc: %lld KB, huge alloc: %lld KB\n",
			byte2kb(atomic64_read(&spg->meminfo.size)),
			atomic_read(&spg->spa_num),
			byte2kb(atomic64_read(&spg->meminfo.alloc_size)),
			byte2kb(atomic64_read(&spg->meminfo.alloc_nsize)),
			byte2kb(atomic64_read(&spg->meminfo.alloc_hsize)));
	up_read(&spg->rw_lock);

	return 0;
}

void spg_overview_show(struct seq_file *seq)
{
	if (!sp_is_enabled())
		return;

	SEQ_printf(seq, "Share pool total size: %lld KB, spa total num: %d.\n",
			byte2kb(atomic64_read(&sp_overall_stat.spa_total_size)),
			atomic_read(&sp_overall_stat.spa_total_num));

	down_read(&sp_group_sem);
	idr_for_each(&sp_group_idr, spg_info_show, seq);
	up_read(&sp_group_sem);

	SEQ_printf(seq, "\n");
}

static bool should_show_statistics(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return false;

	if (task_active_pid_ns(current) != &init_pid_ns)
		return false;

	return true;
}

static int spa_stat_show(struct seq_file *seq, void *offset)
{
	if (!should_show_statistics())
		return -EPERM;

	spg_overview_show(seq);
	spa_overview_show(seq);
	/* print the file header */
	seq_printf(seq, "%-10s %-16s %-16s %-10s %-7s %-5s %-8s %-8s\n",
			"Group ID", "va_start", "va_end", "Size(KB)", "Type", "Huge", "PID", "Ref");
	spa_ro_stat_show(seq);
	spa_normal_stat_show(seq);
	spa_dvpp_stat_show(seq);
	return 0;
}

static int proc_usage_by_group(int id, void *p, void *data)
{
	struct sp_group *spg = p;
	struct seq_file *seq = data;
	struct sp_group_node *spg_node;
	struct mm_struct *mm;
	struct sp_group_master *master;
	int tgid;
	unsigned long anon, file, shmem, total_rss;

	down_read(&spg->rw_lock);
	list_for_each_entry(spg_node, &spg->procs, proc_node) {
		master = spg_node->master;
		mm = master->mm;
		tgid = master->tgid;

		get_mm_rss_info(mm, &anon, &file, &shmem, &total_rss);

		seq_printf(seq, "%-8d ", tgid);
		seq_printf(seq, "%-8d ", id);
		seq_printf(seq, "%-9ld %-9ld %-9ld %-8ld %-7ld %-7ld ",
				get_spg_proc_alloc(spg_node),
				get_spg_proc_k2u(spg_node),
				get_sp_res_by_spg_proc(spg_node),
				page2kb(mm->total_vm), page2kb(total_rss),
				page2kb(shmem));
		print_process_prot(seq, spg_node->prot);
		seq_putc(seq, '\n');
	}
	up_read(&spg->rw_lock);
	cond_resched();

	return 0;
}

static int proc_group_usage_show(struct seq_file *seq, void *offset)
{
	if (!should_show_statistics())
		return -EPERM;

	spg_overview_show(seq);
	spa_overview_show(seq);

	/* print the file header */
	seq_printf(seq, "%-8s %-8s %-9s %-9s %-9s %-8s %-7s %-7s %-4s\n",
			"PID", "Group_ID", "SP_ALLOC", "SP_K2U", "SP_RES",
			"VIRT", "RES", "Shm", "PROT");
	/* print kthread buff_module_guard_work */
	seq_printf(seq, "%-8s %-8s %-9lld %-9lld\n",
			"guard", "-",
			byte2kb(atomic64_read(&kthread_stat.alloc_size)),
			byte2kb(atomic64_read(&kthread_stat.k2u_size)));

	down_read(&sp_group_sem);
	idr_for_each(&sp_group_idr, proc_usage_by_group, seq);
	up_read(&sp_group_sem);

	return 0;
}

static int proc_usage_show(struct seq_file *seq, void *offset)
{
	struct sp_group_master *master = NULL;
	unsigned long anon, file, shmem, total_rss;
	long sp_res, sp_res_nsize, non_sp_res, non_sp_shm;
	struct sp_meminfo *meminfo;

	if (!should_show_statistics())
		return -EPERM;

	seq_printf(seq, "%-8s %-16s %-9s %-9s %-9s %-10s %-10s %-8s\n",
			"PID", "COMM", "SP_ALLOC", "SP_K2U", "SP_RES", "Non-SP_RES",
			"Non-SP_Shm", "VIRT");

	down_read(&sp_group_sem);
	mutex_lock(&master_list_lock);
	list_for_each_entry(master, &master_list, list_node) {
		meminfo = &master->meminfo;
		get_mm_rss_info(master->mm, &anon, &file, &shmem, &total_rss);
		get_process_sp_res(master, &sp_res, &sp_res_nsize);
		get_process_non_sp_res(total_rss, shmem, sp_res_nsize,
				&non_sp_res, &non_sp_shm);
		seq_printf(seq, "%-8d %-16s %-9ld %-9ld %-9ld %-10ld %-10ld %-8ld\n",
				master->tgid, master->comm,
				get_proc_alloc(meminfo),
				get_proc_k2u(meminfo),
				sp_res, non_sp_res, non_sp_shm,
				page2kb(master->mm->total_vm));
	}
	mutex_unlock(&master_list_lock);
	up_read(&sp_group_sem);

	return 0;
}

static void __init proc_sharepool_init(void)
{
	if (!proc_mkdir("sharepool", NULL))
		return;

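	/*
	 * Create three read-only (0400) files under /proc/sharepool:
	 * spa_stat, proc_stat and proc_overview, each backed by the
	 * corresponding show function above.
	 */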
	proc_create_single_data("sharepool/spa_stat", 0400, NULL, spa_stat_show, NULL);
	proc_create_single_data("sharepool/proc_stat", 0400, NULL, proc_group_usage_show, NULL);
	proc_create_single_data("sharepool/proc_overview", 0400, NULL, proc_usage_show, NULL);
}

/*** End of statistical and maintenance functions ***/

bool sp_check_addr(unsigned long addr)
{
	return sp_is_enabled() && mg_is_sharepool_addr(addr) &&
	       !check_aoscore_process(current);
}

bool sp_check_mmap_addr(unsigned long addr, unsigned long flags)
{
	return sp_is_enabled() && mg_is_sharepool_addr(addr) &&
	       !check_aoscore_process(current) && !(flags & MAP_SHARE_POOL);
}

vm_fault_t sharepool_no_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			struct address_space *mapping, pgoff_t idx,
			unsigned long address, pte_t *ptep, unsigned int flags)
{
	struct hstate *h = hstate_vma(vma);
	vm_fault_t ret = VM_FAULT_SIGBUS;
	unsigned long size;
	struct page *page;
	pte_t new_pte;
	spinlock_t *ptl;
	unsigned long haddr = address & huge_page_mask(h);
	bool new_page = false;
	int err;
	int node_id;
	struct sp_area *spa;

	spa = vma->vm_private_data;
	if (!spa) {
		pr_err("vma is invalid, not from sp mmap\n");
		return ret;
	}
	node_id = spa->node_id;

retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		if (idx >= size)
			goto out;

		page = alloc_huge_page(vma, haddr, 0);
		if (IS_ERR(page)) {
			/* fall back to a buddy-allocated huge page on the spa's node */
			page = hugetlb_alloc_hugepage(node_id,
					HUGETLB_ALLOC_BUDDY | HUGETLB_ALLOC_NORECLAIM);
			if (!page)
				page = ERR_PTR(-ENOMEM);
		}
		if (IS_ERR(page)) {
			ptl = huge_pte_lock(h, mm, ptep);
			if (!huge_pte_none(huge_ptep_get(ptep))) {
				ret = 0;
				spin_unlock(ptl);
				goto out;
			}
			spin_unlock(ptl);
			ret = vmf_error(PTR_ERR(page));
			goto out;
		}
		__SetPageUptodate(page);
		new_page = true;

		/* sharepool pages are all shared */
		err = huge_add_to_page_cache(page, mapping, idx);
		if (err) {
			put_page(page);
			/* another task added the page concurrently; retry the lookup */
			if (err == -EEXIST)
				goto retry;
			goto out;
		}
	}

	ptl = huge_pte_lock(h, mm, ptep);
	size = i_size_read(mapping->host) >> huge_page_shift(h);
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!huge_pte_none(huge_ptep_get(ptep)))
		goto backout;

	page_dup_rmap(page, true);
	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, haddr, ptep, new_pte);

	hugetlb_count_add(pages_per_huge_page(h), mm);

	spin_unlock(ptl);

	if (new_page)
		SetPagePrivate(&page[1]);

	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(ptl);
	unlock_page(page);
	put_page(page);
	goto out;
}

/*
 * The caller must ensure that this function is called
 * when the last thread in the thread group exits.
 */
int sp_group_exit(void)
{
	struct mm_struct *mm;
	struct sp_group *spg;
	struct sp_group_master *master;
	struct sp_group_node *spg_node, *tmp;
	bool is_alive = true;

	if (!sp_is_enabled())
		return 0;

	if (current->flags & PF_KTHREAD)
		return 0;

	mm = current->mm;
	down_write(&sp_group_sem);

	master = mm->sp_group_master;
	if (!master) {
		up_write(&sp_group_sem);
		return 0;
	}

	if (master->tgid != current->tgid) {
		up_write(&sp_group_sem);
		return 0;
	}

	list_for_each_entry_safe(spg_node, tmp, &master->node_list, group_node) {
		spg = spg_node->spg;

		down_write(&spg->rw_lock);
		/* a dead group should NOT be reactive again */
		if (spg_valid(spg) && list_is_singular(&spg->procs))
			is_alive = spg->is_alive = false;
		spg->proc_num--;
		list_del(&spg_node->proc_node);
		up_write(&spg->rw_lock);

		if (!is_alive)
			blocking_notifier_call_chain(&sp_notifier_chain, 0,
						     spg);
	}

	/* match with get_task_mm() in sp_group_add_task() */
	if (atomic_sub_and_test(master->count, &mm->mm_users)) {
		up_write(&sp_group_sem);
		WARN(1, "Invalid user counting\n");
		return 1;
	}

	up_write(&sp_group_sem);
	return 0;
}

void sp_group_post_exit(struct mm_struct *mm)
{
	struct sp_meminfo *meminfo;
	long alloc_size, k2u_size;
	/* lockless visit */
	struct sp_group_master *master = mm->sp_group_master;
	struct sp_group_node *spg_node, *tmp;
	struct sp_group *spg;

	if (!sp_is_enabled() || !master)
		return;

	/*
	 * There are two basic scenarios when a process in the share pool is
	 * exiting but its share pool memory usage is not 0.
	 * 1. Process A called sp_alloc(), but it terminates without calling
	 *    sp_free(). Then its share pool memory usage is a positive number.
	 * 2. Process A never called sp_alloc(), and process B in the same spg
	 *    called sp_alloc() to get an addr u. Then A gets u somehow and
	 *    called sp_free(u). Now A's share pool memory usage is a negative
	 *    number. Notice B's memory usage will be a positive number.
	 *
	 * We print an informational message when either scenario is seen.
	 *
	 * A process not in any sp group needs no message, because it cannot
	 * have share pool memory left unfreed.
	 */
	meminfo = &master->meminfo;
	alloc_size = atomic64_read(&meminfo->alloc_nsize) + atomic64_read(&meminfo->alloc_hsize);
	k2u_size = atomic64_read(&meminfo->k2u_size);
	if (alloc_size != 0 || k2u_size != 0)
		pr_info("process %s(%d) exits with %ld aligned KB allocated and %ld aligned KB k2u shared\n",
			master->comm, master->tgid,
			byte2kb(alloc_size), byte2kb(k2u_size));

	down_write(&sp_group_sem);
	list_for_each_entry_safe(spg_node, tmp, &master->node_list, group_node) {
		spg = spg_node->spg;
		/* match with refcount inc in sp_group_add_task */
		if (atomic_dec_and_test(&spg->use_count))
			free_sp_group_locked(spg);
		list_del(&spg_node->group_node);
		kfree(spg_node);
	}
	up_write(&sp_group_sem);

	sp_del_group_master(master);

	kfree(master);
}

DEFINE_STATIC_KEY_FALSE(share_pool_enabled_key);

static int __init enable_share_pool(char *s)
{
	static_branch_enable(&share_pool_enabled_key);
	pr_info("enabled via bootargs\n");

	return 1;
}
__setup("enable_ascend_share_pool", enable_share_pool);
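/* Example: append "enable_ascend_share_pool" to the kernel boot arguments. */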

static int __init share_pool_init(void)
{
	if (!sp_is_enabled())
		return 0;

	sp_mapping_normal = sp_mapping_create(SP_MAPPING_NORMAL);
	if (IS_ERR(sp_mapping_normal))
		goto fail;
	atomic_inc(&sp_mapping_normal->user);

	sp_mapping_ro = sp_mapping_create(SP_MAPPING_RO);
	if (IS_ERR(sp_mapping_ro))
		goto free_normal;
	atomic_inc(&sp_mapping_ro->user);

	proc_sharepool_init();

	return 0;

free_normal:
	kfree(sp_mapping_normal);
fail:
	pr_err("initialization failed\n");
	static_branch_disable(&share_pool_enabled_key);
	return 1;
}
late_initcall(share_pool_init);