/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Huawei Ascend Share Pool Memory
 *
 * Copyright (C) 2020 Huawei Limited
 * Author: Tang Yizhou <tangyizhou@huawei.com>
 *         Zefan Li <lizefan@huawei.com>
 *         Wu Peng <wupeng58@huawei.com>
 *         Ding Tianhong <dingtgianhong@huawei.com>
 *         Zhou Guanghui <zhouguanghui1@huawei.com>
 *         Li Ming <limingming.li@huawei.com>
 *
 * This code is based on the hisilicon ascend platform.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) "share pool: " fmt

#include <linux/share_pool.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/mm_types.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/printk.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/atomic.h>
#include <linux/lockdep.h>
#include <linux/kernel.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/rmap.h>
#include <linux/preempt.h>
#include <linux/swapops.h>
#include <linux/mmzone.h>
#include <linux/timekeeping.h>
#include <linux/time64.h>
#include <linux/pagewalk.h>

#define spg_valid(spg)		((spg)->is_alive == true)

/* Use the spa va address as the mmap offset. This works because spa_file
 * is set up with a 64-bit address space, so the va is always covered.
 */
#define addr_offset(spa)	((spa)->va_start)

#define byte2kb(size)		((size) >> 10)
#define byte2mb(size)		((size) >> 20)
#define page2kb(page_num)	((page_num) << (PAGE_SHIFT - 10))

#define MAX_GROUP_FOR_SYSTEM	50000
#define MAX_GROUP_FOR_TASK	3000
#define MAX_PROC_PER_GROUP	1024

#define GROUP_NONE		0

#define SEC2US(sec)		((sec) * 1000000)
#define NS2US(ns)		((ns) / 1000)

#define PF_DOMAIN_CORE		0x10000000	/* AOS CORE processes in sched.h */

static int system_group_count;

/* idr of all sp_groups */
static DEFINE_IDR(sp_group_idr);
/* rw semaphore for sp_group_idr and mm->sp_group_master */
static DECLARE_RWSEM(sp_group_sem);

static BLOCKING_NOTIFIER_HEAD(sp_notifier_chain);

static DEFINE_IDA(sp_group_id_ida);

/*** Statistical and maintenance tools ***/

/* list of all sp_group_masters */
static LIST_HEAD(master_list);
/* mutex to protect insert/delete ops from master_list */
static DEFINE_MUTEX(master_list_lock);

/* list of all spm-dvpp */
static LIST_HEAD(spm_dvpp_list);
/* mutex to protect insert/delete ops on spm_dvpp_list */
static DEFINE_MUTEX(spm_list_lock);

/* for kthread buff_module_guard_work */
static struct sp_proc_stat kthread_stat;

#define SEQ_printf(m, x...)			\
do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_info(x);			\
} while (0)

#ifndef __GENKSYMS__
struct sp_spg_stat {
	/* number of sp_area */
	atomic_t	 spa_num;
	/* total size of all sp_area from sp_alloc and k2u */
	atomic64_t	 size;
	/* total size of all sp_area from sp_alloc 0-order page */
	atomic64_t	 alloc_nsize;
	/* total size of all sp_area from sp_alloc hugepage */
	atomic64_t	 alloc_hsize;
	/* total size of all sp_area from sp_alloc */
	atomic64_t	 alloc_size;
	/* total size of all sp_area from sp_k2u */
	atomic64_t	 k2u_size;
};

/* per process memory usage statistics indexed by tgid */
struct sp_proc_stat {
	int tgid;
	char comm[TASK_COMM_LEN];
	/*
	 * alloc amount minus free amount, may be negative when freed by
	 * another task in the same sp group.
	 */
	atomic64_t alloc_size;
	atomic64_t alloc_nsize;
	atomic64_t alloc_hsize;
	atomic64_t k2u_size;
};

/* per process/sp-group memory usage statistics */
struct spg_proc_stat {
	int tgid;
	int spg_id;  /* 0 for non-group data, such as k2u_task */
	/*
	 * alloc amount minus free amount, may be negative when freed by
	 * another task in the same sp group.
	 */
	atomic64_t alloc_size;
	atomic64_t alloc_nsize;
	atomic64_t alloc_hsize;
	atomic64_t k2u_size;
};

enum sp_mapping_type {
	SP_MAPPING_START,
	SP_MAPPING_DVPP		= SP_MAPPING_START,
	SP_MAPPING_NORMAL,
	SP_MAPPING_RO,
	SP_MAPPING_END,
};

/*
 * address space management
 */
struct sp_mapping {
	unsigned long type;
	atomic_t user;
	unsigned long start[MAX_DEVID];
	unsigned long end[MAX_DEVID];
	struct rb_root area_root;

	struct rb_node *free_area_cache;
	unsigned long cached_hole_size;
	unsigned long cached_vstart;

	/* list head for all groups attached to this mapping, dvpp mapping only */
	struct list_head group_head;
	struct list_head spm_node;
};

/* Processes in the same sp_group can share memory.
 * Memory layout for share pool:
 *
 * |-------------------- 8T -------------------|---|------ 8T ------------|
 * |		Device 0	   |  Device 1 |...|                      |
 * |----------------------------------------------------------------------|
 * |------------- 16G -------------|    16G    |   |                      |
 * | DVPP GROUP0   | DVPP GROUP1   | ... | ... |...|  sp normal memory    |
 * |     sp        |    sp         |     |     |   |                      |
 * |----------------------------------------------------------------------|
 *
 * The host SVM feature reserves 8T of virtual memory via mmap. Due to the
 * DVPP restriction, when SVM and share pool both allocate memory for DVPP,
 * the memory has to be in the same 32G range.
 *
 * Share pool reserves 16T of memory, with 8T for normal use and 8T for DVPP.
 * Within this 8T DVPP memory, SVM will call sp_config_dvpp_range() to
 * tell us which 16G memory range is reserved for share pool.
 *
 * In some scenarios where there is no host SVM feature, share pool uses
 * the default 8G memory setting for DVPP.
 */
struct sp_group {
	int		 id;
	unsigned long	 flag;
	struct file	 *file;
	struct file	 *file_hugetlb;
	/* number of processes in this group */
	int		 proc_num;
	/* list head of processes (sp_group_node, each represents a process) */
	struct list_head procs;
	/* list head of sp_area. it is protected by spin_lock sp_area_lock */
	struct list_head spa_list;
	/* group statistics */
	struct sp_spg_stat instat;
	/* is_alive == false means it's being destroyed */
	bool		 is_alive;
	atomic_t	 use_count;
	/* protect the group internal elements, except spa_list */
	struct rw_semaphore	rw_lock;
	/* list node for dvpp mapping */
	struct list_head	mnode;
	struct sp_mapping       *mapping[SP_MAPPING_END];
};

/* a per-process(per mm) struct which manages a sp_group_node list */
struct sp_group_master {
	/*
	 * number of sp groups the process belongs to,
	 * a.k.a the number of sp_node in node_list
	 */
	unsigned int count;
	/* list head of sp_node */
	struct list_head node_list;
	struct mm_struct *mm;
	/*
	 * Used to apply for the shared pool memory of the current process.
	 * For example, sp_alloc non-share memory or k2task.
	 */
	struct sp_group *local;
	struct sp_proc_stat instat;
	struct list_head list_node;
};

/*
 * each instance represents an sp group the process belongs to
 * sp_group_master    : sp_group_node   = 1 : N
 * sp_group_node->spg : sp_group        = 1 : 1
 * sp_group_node      : sp_group->procs = N : 1
 */
struct sp_group_node {
	/* list node in sp_group->procs */
	struct list_head proc_node;
	/* list node in sp_group_master->node_list */
	struct list_head group_node;
	struct sp_group_master *master;
	struct sp_group *spg;
	unsigned long prot;
	struct spg_proc_stat instat;
};
#endif

static inline void sp_add_group_master(struct sp_group_master *master)
{
	mutex_lock(&master_list_lock);
	list_add_tail(&master->list_node, &master_list);
	mutex_unlock(&master_list_lock);
}

static inline void sp_del_group_master(struct sp_group_master *master)
{
	mutex_lock(&master_list_lock);
	list_del(&master->list_node);
	mutex_unlock(&master_list_lock);
}

/* The caller should hold mmap_sem to protect master (TBD) */
static void sp_init_group_master_stat(int tgid, struct mm_struct *mm,
		struct sp_proc_stat *stat)
{
	atomic64_set(&stat->alloc_nsize, 0);
	atomic64_set(&stat->alloc_hsize, 0);
	atomic64_set(&stat->k2u_size, 0);
	stat->tgid = tgid;
	get_task_comm(stat->comm, current);
}

static unsigned long sp_mapping_type(struct sp_mapping *spm)
{
	return spm->type;
}

static void sp_mapping_set_type(struct sp_mapping *spm, unsigned long type)
{
	spm->type = type;
}

static struct sp_mapping *sp_mapping_normal;
static struct sp_mapping *sp_mapping_ro;

static void sp_mapping_add_to_list(struct sp_mapping *spm)
{
	mutex_lock(&spm_list_lock);
	if (sp_mapping_type(spm) == SP_MAPPING_DVPP)
		list_add_tail(&spm->spm_node, &spm_dvpp_list);
	mutex_unlock(&spm_list_lock);
}

static void sp_mapping_remove_from_list(struct sp_mapping *spm)
{
	mutex_lock(&spm_list_lock);
	if (sp_mapping_type(spm) == SP_MAPPING_DVPP)
		list_del(&spm->spm_node);
	mutex_unlock(&spm_list_lock);
}

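/*
 * Initialise the per-device [start, end) VA ranges of the mapping
 * according to its type (RO, NORMAL or DVPP).
 */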
static void sp_mapping_range_init(struct sp_mapping *spm)
{
	int i;

	for (i = 0; i < MAX_DEVID; i++) {
		switch (sp_mapping_type(spm)) {
		case SP_MAPPING_RO:
			spm->start[i] = MMAP_SHARE_POOL_RO_START;
			spm->end[i]   = MMAP_SHARE_POOL_RO_END;
			break;
		case SP_MAPPING_NORMAL:
			spm->start[i] = MMAP_SHARE_POOL_NORMAL_START;
			spm->end[i]   = MMAP_SHARE_POOL_NORMAL_END;
			break;
		case SP_MAPPING_DVPP:
			spm->start[i] = MMAP_SHARE_POOL_DVPP_START + i * MMAP_SHARE_POOL_16G_SIZE;
			spm->end[i]   = spm->start[i] + MMAP_SHARE_POOL_16G_SIZE;
			break;
		default:
			pr_err("Invalid sp_mapping type [%lu]\n", sp_mapping_type(spm));
			break;
		}
	}
}

static struct sp_mapping *sp_mapping_create(unsigned long type)
{
	struct sp_mapping *spm;

	spm = kzalloc(sizeof(struct sp_mapping), GFP_KERNEL);
	if (!spm)
		return ERR_PTR(-ENOMEM);

	sp_mapping_set_type(spm, type);
	sp_mapping_range_init(spm);
	atomic_set(&spm->user, 0);
	spm->area_root = RB_ROOT;
	INIT_LIST_HEAD(&spm->group_head);
	sp_mapping_add_to_list(spm);

	return spm;
}

static void sp_mapping_destroy(struct sp_mapping *spm)
{
	sp_mapping_remove_from_list(spm);
	kfree(spm);
}

static void sp_mapping_attach(struct sp_group *spg, struct sp_mapping *spm)
{
	unsigned long type = sp_mapping_type(spm);
	atomic_inc(&spm->user);

	spg->mapping[type] = spm;
	if (type == SP_MAPPING_DVPP)
		list_add_tail(&spg->mnode, &spm->group_head);
}

static void sp_mapping_detach(struct sp_group *spg, struct sp_mapping *spm)
{
	unsigned long type;

	if (!spm)
		return;

	type = sp_mapping_type(spm);
	if (type == SP_MAPPING_DVPP)
		list_del(&spg->mnode);
	if (atomic_dec_and_test(&spm->user))
		sp_mapping_destroy(spm);

	spg->mapping[type] = NULL;
}

/* merge old mapping to new, and the old mapping would be destroyed */
static void sp_mapping_merge(struct sp_mapping *new, struct sp_mapping *old)
{
	struct sp_group *spg, *tmp;

	if (new == old)
		return;

	list_for_each_entry_safe(spg, tmp, &old->group_head, mnode) {
		list_move_tail(&spg->mnode, &new->group_head);
		spg->mapping[SP_MAPPING_DVPP] = new;
	}

	atomic_add(atomic_read(&old->user), &new->user);
	sp_mapping_destroy(old);
}

static bool is_mapping_empty(struct sp_mapping *spm)
{
	return RB_EMPTY_ROOT(&spm->area_root);
}

static bool can_mappings_merge(struct sp_mapping *m1, struct sp_mapping *m2)
{
	int i;

	for (i = 0; i < MAX_DEVID; i++)
		if (m1->start[i] != m2->start[i] || m1->end[i] != m2->end[i])
			return false;

	return true;
}

/*
 * 1. The mapping of the local group is set when the group is created.
 * 2. This is used to set up the mapping for groups created during add_task.
 * 3. The normal mapping exists for all groups.
 * 4. The dvpp mappings of the new group and the local group can merge _iff_ at
 *    least one of the mappings is empty.
 * The caller must hold sp_group_sem.
 * NOTE: undo the merging if a later step fails.
 */
static int sp_mapping_group_setup(struct mm_struct *mm, struct sp_group *spg)
{
	struct sp_mapping *local_dvpp_mapping, *spg_dvpp_mapping;

	local_dvpp_mapping = mm->sp_group_master->local->mapping[SP_MAPPING_DVPP];
	spg_dvpp_mapping = spg->mapping[SP_MAPPING_DVPP];

	if (!list_empty(&spg->procs) && !(spg->flag & SPG_FLAG_NON_DVPP)) {
		/*
		 * Don't return an error when the mappings' address range conflict.
		 * As long as the mapping is unused, we can drop the empty mapping.
		 * This may change the address range for the task or group implicitly,
		 * give a warn for it.
		 */
		bool is_conflict = !can_mappings_merge(local_dvpp_mapping, spg_dvpp_mapping);

		if (is_mapping_empty(local_dvpp_mapping)) {
			sp_mapping_merge(spg_dvpp_mapping, local_dvpp_mapping);
			if (is_conflict)
				pr_warn_ratelimited("task address space conflict, spg_id=%d\n", spg->id);
		} else if (is_mapping_empty(spg_dvpp_mapping)) {
			sp_mapping_merge(local_dvpp_mapping, spg_dvpp_mapping);
			if (is_conflict)
				pr_warn_ratelimited("group address space conflict, spg_id=%d\n", spg->id);
		} else {
			pr_info_ratelimited("Duplicate address space, id=%d\n", spg->id);
			return -EINVAL;
		}
	} else {
		if (!(spg->flag & SPG_FLAG_NON_DVPP))
			/* the mapping of local group is always set */
			sp_mapping_attach(spg, local_dvpp_mapping);
		if (!spg->mapping[SP_MAPPING_NORMAL])
			sp_mapping_attach(spg, sp_mapping_normal);
		if (!spg->mapping[SP_MAPPING_RO])
			sp_mapping_attach(spg, sp_mapping_ro);
	}

	return 0;
}

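/* Select the mapping that covers @addr: normal, read-only or DVPP region. */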
static struct sp_mapping *sp_mapping_find(struct sp_group *spg,
						 unsigned long addr)
{
	if (addr >= MMAP_SHARE_POOL_NORMAL_START && addr < MMAP_SHARE_POOL_NORMAL_END)
		return spg->mapping[SP_MAPPING_NORMAL];

	if (addr >= MMAP_SHARE_POOL_RO_START && addr < MMAP_SHARE_POOL_RO_END)
		return spg->mapping[SP_MAPPING_RO];

	return spg->mapping[SP_MAPPING_DVPP];
}

static struct sp_group *create_spg(int spg_id, unsigned long flag);
static void free_new_spg_id(bool new, int spg_id);
static void free_sp_group_locked(struct sp_group *spg);
static int local_group_add_task(struct mm_struct *mm, struct sp_group *spg);
static int init_local_group(struct mm_struct *mm)
{
	int spg_id, ret;
	struct sp_group *spg;
	struct sp_mapping *spm;
	struct sp_group_master *master = mm->sp_group_master;

	spg_id = ida_alloc_range(&sp_group_id_ida, SPG_ID_LOCAL_MIN,
				 SPG_ID_LOCAL_MAX, GFP_ATOMIC);
	if (spg_id < 0) {
		pr_err_ratelimited("generate local group id failed %d\n", spg_id);
		return spg_id;
	}

	spg = create_spg(spg_id, 0);
	if (IS_ERR(spg)) {
		free_new_spg_id(true, spg_id);
		return PTR_ERR(spg);
	}

	master->local = spg;
	spm = sp_mapping_create(SP_MAPPING_DVPP);
	if (IS_ERR(spm)) {
		ret = PTR_ERR(spm);
		goto free_spg;
	}
	sp_mapping_attach(master->local, spm);
	sp_mapping_attach(master->local, sp_mapping_normal);
	sp_mapping_attach(master->local, sp_mapping_ro);

	ret = local_group_add_task(mm, spg);
	if (ret < 0)
		/* The spm would be released while destroying the spg */
		goto free_spg;

	return 0;

free_spg:
	/* spg_id is freed in free_sp_group_locked */
	free_sp_group_locked(spg);
	master->local = NULL;
	return ret;
}

/* The caller must hold sp_group_sem */
static int sp_init_group_master_locked(struct task_struct *tsk, struct mm_struct *mm)
{
	int ret;
	struct sp_group_master *master;

	if (mm->sp_group_master)
		return 0;

	master = kmalloc(sizeof(struct sp_group_master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	INIT_LIST_HEAD(&master->node_list);
	master->count = 0;
	master->mm = mm;
	sp_init_group_master_stat(tsk->tgid, mm, &master->instat);
	mm->sp_group_master = master;
	sp_add_group_master(master);

	ret = init_local_group(mm);
	if (ret)
		goto free_master;

	return 0;
560 561

free_master:
	sp_del_group_master(master);
	mm->sp_group_master = NULL;
	kfree(master);

	return ret;
}

static inline bool is_local_group(int spg_id)
{
	return spg_id >= SPG_ID_LOCAL_MIN && spg_id <= SPG_ID_LOCAL_MAX;
}

static struct sp_group *sp_get_local_group(struct task_struct *tsk, struct mm_struct *mm)
{
	int ret;
	struct sp_group_master *master;

	down_read(&sp_group_sem);
	master = mm->sp_group_master;
	if (master && master->local) {
		atomic_inc(&master->local->use_count);
		up_read(&sp_group_sem);
		return master->local;
	}
	up_read(&sp_group_sem);

	down_write(&sp_group_sem);
	ret = sp_init_group_master_locked(tsk, mm);
	if (ret) {
		up_write(&sp_group_sem);
		return ERR_PTR(ret);
	}
	master = mm->sp_group_master;
	atomic_inc(&master->local->use_count);
	up_write(&sp_group_sem);

	return master->local;
}

static void update_spg_stat_alloc(unsigned long size, bool inc,
	bool huge, struct sp_spg_stat *stat)
{
	if (inc) {
		atomic_inc(&stat->spa_num);
		atomic64_add(size, &stat->size);
		atomic64_add(size, &stat->alloc_size);
		if (huge)
			atomic64_add(size, &stat->alloc_hsize);
		else
			atomic64_add(size, &stat->alloc_nsize);
	} else {
		atomic_dec(&stat->spa_num);
		atomic64_sub(size, &stat->size);
		atomic64_sub(size, &stat->alloc_size);
		if (huge)
			atomic64_sub(size, &stat->alloc_hsize);
		else
			atomic64_sub(size, &stat->alloc_nsize);
	}
}

static void update_spg_stat_k2u(unsigned long size, bool inc,
	struct sp_spg_stat *stat)
{
	if (inc) {
		atomic_inc(&stat->spa_num);
		atomic64_add(size, &stat->size);
		atomic64_add(size, &stat->k2u_size);
	} else {
		atomic_dec(&stat->spa_num);
		atomic64_sub(size, &stat->size);
		atomic64_sub(size, &stat->k2u_size);
	}
}

static void update_mem_usage_alloc(unsigned long size, bool inc,
		bool is_hugepage, struct sp_group_node *spg_node)
{
	struct sp_proc_stat *proc_stat = &spg_node->master->instat;

	if (inc) {
		if (is_hugepage) {
			atomic64_add(size, &spg_node->instat.alloc_hsize);
			atomic64_add(size, &proc_stat->alloc_hsize);
			return;
		}
		atomic64_add(size, &spg_node->instat.alloc_nsize);
		atomic64_add(size, &proc_stat->alloc_nsize);
		return;
	}

	if (is_hugepage) {
		atomic64_sub(size, &spg_node->instat.alloc_hsize);
		atomic64_sub(size, &proc_stat->alloc_hsize);
		return;
	}
	atomic64_sub(size, &spg_node->instat.alloc_nsize);
	atomic64_sub(size, &proc_stat->alloc_nsize);
	return;
}

static void update_mem_usage_k2u(unsigned long size, bool inc,
		struct sp_group_node *spg_node)
{
	struct sp_proc_stat *proc_stat = &spg_node->master->instat;

	if (inc) {
		atomic64_add(size, &spg_node->instat.k2u_size);
		atomic64_add(size, &proc_stat->k2u_size);
	} else {
		atomic64_sub(size, &spg_node->instat.k2u_size);
		atomic64_sub(size, &proc_stat->k2u_size);
	}
}

static void sp_init_spg_proc_stat(struct spg_proc_stat *stat, int spg_id)
{
	stat->tgid = current->tgid;
	stat->spg_id = spg_id;
	atomic64_set(&stat->alloc_nsize, 0);
	atomic64_set(&stat->alloc_hsize, 0);
	atomic64_set(&stat->k2u_size, 0);
}

static void sp_init_group_stat(struct sp_spg_stat *stat)
{
	atomic_set(&stat->spa_num, 0);
	atomic64_set(&stat->size, 0);
	atomic64_set(&stat->alloc_nsize, 0);
	atomic64_set(&stat->alloc_hsize, 0);
	atomic64_set(&stat->alloc_size, 0);
	atomic64_set(&stat->k2u_size, 0);
}

/* statistics of all sp area, protected by sp_area_lock */
struct sp_spa_stat {
	unsigned int total_num;
	unsigned int alloc_num;
	unsigned int k2u_task_num;
	unsigned int k2u_spg_num;
	unsigned long total_size;
	unsigned long alloc_size;
	unsigned long k2u_task_size;
	unsigned long k2u_spg_size;
	unsigned long dvpp_size;
	unsigned long dvpp_va_size;
};

static struct sp_spa_stat spa_stat;

/* statistics of all sp group born from sp_alloc and k2u(spg) */
struct sp_overall_stat {
	atomic_t spa_total_num;
	atomic64_t spa_total_size;
};

static struct sp_overall_stat sp_overall_stat;

/*** Global share pool VA allocator ***/

enum spa_type {
	SPA_TYPE_ALLOC = 1,
	/* NOTE: reorganize after the statistical structure is reconstructed. */
	SPA_TYPE_ALLOC_PRIVATE = SPA_TYPE_ALLOC,
	SPA_TYPE_K2TASK,
	SPA_TYPE_K2SPG,
};

/*
 * We bump the reference when each mmap succeeds, and it will be dropped
 * when vma is about to release, so sp_area object will be automatically
 * freed when all tasks in the sp group has exited.
 */
struct sp_area {
	unsigned long va_start;
	unsigned long va_end;		/* va_end always align to hugepage */
	unsigned long real_size;	/* real size with alignment */
	unsigned long region_vstart;	/* belong to normal region or DVPP region */
	unsigned long flags;
	bool is_hugepage;
	bool is_dead;
	atomic_t use_count;		/* How many vmas use this VA region */
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head link;		/* link to the spg->head */
	struct sp_group *spg;
	enum spa_type type;		/* where spa born from */
	struct mm_struct *mm;		/* owner of k2u(task) */
	unsigned long kva;		/* shared kva */
	pid_t applier;			/* the original applier process */
	int node_id;			/* memory node */
	int device_id;
};
static DEFINE_SPINLOCK(sp_area_lock);

static unsigned long spa_size(struct sp_area *spa)
{
	return spa->real_size;
}

static struct file *spa_file(struct sp_area *spa)
{
	if (spa->is_hugepage)
		return spa->spg->file_hugetlb;
	else
		return spa->spg->file;
}

/* the caller should hold sp_area_lock */
static void spa_inc_usage(struct sp_area *spa)
{
	enum spa_type type = spa->type;
	unsigned long size = spa->real_size;
	bool is_dvpp = spa->flags & SP_DVPP;
	bool is_huge = spa->is_hugepage;

	switch (type) {
	case SPA_TYPE_ALLOC:
		spa_stat.alloc_num += 1;
		spa_stat.alloc_size += size;
		update_spg_stat_alloc(size, true, is_huge, &spa->spg->instat);
		break;
	case SPA_TYPE_K2TASK:
		spa_stat.k2u_task_num += 1;
		spa_stat.k2u_task_size += size;
		update_spg_stat_k2u(size, true, &spa->spg->instat);
		break;
	case SPA_TYPE_K2SPG:
		spa_stat.k2u_spg_num += 1;
		spa_stat.k2u_spg_size += size;
		update_spg_stat_k2u(size, true, &spa->spg->instat);
		break;
	default:
		WARN(1, "invalid spa type");
	}

	if (is_dvpp) {
		spa_stat.dvpp_size += size;
		spa_stat.dvpp_va_size += ALIGN(size, PMD_SIZE);
	}

	/*
	 * all the calculations won't overflow due to system limitation and
	 * parameter checking in sp_alloc_area()
	 */
	spa_stat.total_num += 1;
	spa_stat.total_size += size;

	if (!is_local_group(spa->spg->id)) {
		atomic_inc(&sp_overall_stat.spa_total_num);
		atomic64_add(size, &sp_overall_stat.spa_total_size);
	}
}

/* the caller should hold sp_area_lock */
static void spa_dec_usage(struct sp_area *spa)
{
	enum spa_type type = spa->type;
	unsigned long size = spa->real_size;
	bool is_dvpp = spa->flags & SP_DVPP;
	bool is_huge = spa->is_hugepage;

	switch (type) {
	case SPA_TYPE_ALLOC:
		spa_stat.alloc_num -= 1;
		spa_stat.alloc_size -= size;
		update_spg_stat_alloc(size, false, is_huge, &spa->spg->instat);
		break;
	case SPA_TYPE_K2TASK:
		spa_stat.k2u_task_num -= 1;
		spa_stat.k2u_task_size -= size;
		update_spg_stat_k2u(size, false, &spa->spg->instat);
		break;
	case SPA_TYPE_K2SPG:
		spa_stat.k2u_spg_num -= 1;
		spa_stat.k2u_spg_size -= size;
		update_spg_stat_k2u(size, false, &spa->spg->instat);
		break;
	default:
		WARN(1, "invalid spa type");
	}

	if (is_dvpp) {
		spa_stat.dvpp_size -= size;
		spa_stat.dvpp_va_size -= ALIGN(size, PMD_SIZE);
	}

	spa_stat.total_num -= 1;
	spa_stat.total_size -= size;

	if (!is_local_group(spa->spg->id)) {
		atomic_dec(&sp_overall_stat.spa_total_num);
		atomic64_sub(spa->real_size, &sp_overall_stat.spa_total_size);
	}
}

static void update_mem_usage(unsigned long size, bool inc, bool is_hugepage,
	struct sp_group_node *spg_node, enum spa_type type)
{
	switch (type) {
	case SPA_TYPE_ALLOC:
		update_mem_usage_alloc(size, inc, is_hugepage, spg_node);
		break;
	case SPA_TYPE_K2TASK:
	case SPA_TYPE_K2SPG:
		update_mem_usage_k2u(size, inc, spg_node);
		break;
	default:
		WARN(1, "invalid stat type\n");
	}
}

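/*
 * Return the sp_group_node that links @mm to @spg, or NULL if the mm is
 * not a member of the group.
 */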
struct sp_group_node *find_spg_node_by_spg(struct mm_struct *mm,
		struct sp_group *spg)
{
	struct sp_group_node *spg_node;

	list_for_each_entry(spg_node, &mm->sp_group_master->node_list, group_node) {
		if (spg_node->spg == spg)
			return spg_node;
	}
	return NULL;
}

static void sp_update_process_stat(struct task_struct *tsk, bool inc,
	struct sp_area *spa)
{
	struct sp_group_node *spg_node;
	unsigned long size = spa->real_size;
	enum spa_type type = spa->type;

	spg_node = find_spg_node_by_spg(tsk->mm, spa->spg);
	update_mem_usage(size, inc, spa->is_hugepage, spg_node, type);
}

static inline void check_interrupt_context(void)
{
	if (unlikely(in_interrupt()))
		panic("function can't be used in interrupt context\n");
}

static inline bool check_aoscore_process(struct task_struct *tsk)
{
	if (tsk->flags & PF_DOMAIN_CORE)
		return true;
	else
		return false;
}

static unsigned long sp_mmap(struct mm_struct *mm, struct file *file,
			     struct sp_area *spa, unsigned long *populate,
			     unsigned long prot, struct vm_area_struct **pvma);
static void sp_munmap(struct mm_struct *mm, unsigned long addr, unsigned long size);

#define K2U_NORMAL	0
#define K2U_COREDUMP	1

struct sp_k2u_context {
	unsigned long kva;
	unsigned long kva_aligned;
	unsigned long size;
	unsigned long size_aligned;
	unsigned long sp_flags;
	int state;
	enum spa_type type;
};

static unsigned long sp_remap_kva_to_vma(struct sp_area *spa, struct mm_struct *mm,
					unsigned long prot, struct sp_k2u_context *kc);

static void free_sp_group_id(int spg_id)
{
	/* ida operation is protected by an internal spin_lock */
	if ((spg_id >= SPG_ID_AUTO_MIN && spg_id <= SPG_ID_AUTO_MAX) ||
	    (spg_id >= SPG_ID_LOCAL_MIN && spg_id <= SPG_ID_LOCAL_MAX))
		ida_free(&sp_group_id_ida, spg_id);
}

static void free_new_spg_id(bool new, int spg_id)
{
	if (new)
		free_sp_group_id(spg_id);
}

static void free_sp_group_locked(struct sp_group *spg)
{
	int type;

	fput(spg->file);
	fput(spg->file_hugetlb);
	idr_remove(&sp_group_idr, spg->id);
	free_sp_group_id((unsigned int)spg->id);

	for (type = SP_MAPPING_START; type < SP_MAPPING_END; type++)
		sp_mapping_detach(spg, spg->mapping[type]);

	if (!is_local_group(spg->id))
		system_group_count--;

	kfree(spg);
	WARN(system_group_count < 0, "unexpected group count\n");
}

static void free_sp_group(struct sp_group *spg)
{
	down_write(&sp_group_sem);
	free_sp_group_locked(spg);
	up_write(&sp_group_sem);
}

static void sp_group_put_locked(struct sp_group *spg)
{
	lockdep_assert_held_write(&sp_group_sem);

	if (atomic_dec_and_test(&spg->use_count))
		free_sp_group_locked(spg);
}

static void sp_group_put(struct sp_group *spg)
{
	if (atomic_dec_and_test(&spg->use_count))
		free_sp_group(spg);
}

/* use with put_task_struct(task) */
static int get_task(int tgid, struct task_struct **task)
{
	struct task_struct *tsk;
	struct pid *p;

	rcu_read_lock();
	p = find_pid_ns(tgid, &init_pid_ns);
	tsk = pid_task(p, PIDTYPE_TGID);
	if (!tsk || (tsk->flags & PF_EXITING)) {
		rcu_read_unlock();
		return -ESRCH;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	*task = tsk;
	return 0;
}

/*
 * the caller must:
 * 1. hold spg->rw_lock
 * 2. ensure no concurrency problem for mm_struct
 */
static bool is_process_in_group(struct sp_group *spg,
						 struct mm_struct *mm)
{
	struct sp_group_node *spg_node;

	list_for_each_entry(spg_node, &spg->procs, proc_node)
		if (spg_node->master->mm == mm)
			return true;

	return false;
}

/* user must call sp_group_put() after use */
static struct sp_group *sp_group_get_locked(int tgid, int spg_id)
{
	struct sp_group *spg = NULL;
	struct task_struct *tsk = NULL;
	int ret = 0;

	if (spg_id == SPG_ID_DEFAULT) {
		ret = get_task(tgid, &tsk);
		if (ret)
			return NULL;

		task_lock(tsk);
		if (tsk->mm == NULL)
			spg = NULL;
		else if (tsk->mm->sp_group_master)
			spg = tsk->mm->sp_group_master->local;
		task_unlock(tsk);

		put_task_struct(tsk);
	} else {
		spg = idr_find(&sp_group_idr, spg_id);
	}

	if (!spg || !atomic_inc_not_zero(&spg->use_count))
		return NULL;

	return spg;
}

static struct sp_group *sp_group_get(int tgid, int spg_id)
{
	struct sp_group *spg;

	down_read(&sp_group_sem);
	spg = sp_group_get_locked(tgid, spg_id);
	up_read(&sp_group_sem);
	return spg;
}

/**
 * mg_sp_group_id_by_pid() - Get the sp_group ID array of a process.
 * @tgid: tgid of target process.
 * @spg_ids: point to an array to save the group ids the process belongs to
 * @num: input the spg_ids array size; output the spg number of the process
 *
 * Return:
 * >0		- the sp_group ID.
 * -ENODEV	- target process doesn't belong to any sp_group.
 * -EINVAL	- spg_ids or num is NULL.
 * -E2BIG	- the num of groups process belongs to is larger than *num
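 *
 * Illustrative usage sketch (error handling trimmed; the 16-entry array
 * size is an arbitrary choice for the example):
 *
 *	int ids[16];
 *	int num = 16;
 *
 *	if (!mg_sp_group_id_by_pid(tgid, ids, &num))
 *		pr_info("task belongs to %d group(s)\n", num);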
 */
int mg_sp_group_id_by_pid(int tgid, int *spg_ids, int *num)
{
	int ret = 0, real_count;
	struct sp_group_node *node;
	struct sp_group_master *master = NULL;
	struct task_struct *tsk;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	if (!spg_ids || !num || *num <= 0)
		return -EINVAL;

	ret = get_task(tgid, &tsk);
	if (ret)
		return ret;

	down_read(&sp_group_sem);
	task_lock(tsk);
	if (tsk->mm)
		master = tsk->mm->sp_group_master;
	task_unlock(tsk);

	if (!master) {
		ret = -ENODEV;
		goto out_up_read;
	}

	/*
	 * There is a local group for each process which is used for
	 * passthrough allocation. The local group is an internal
	 * implementation detail for convenience and is not intended to
	 * bother the user.
	 */
	real_count = master->count - 1;
	if (real_count <= 0) {
		ret = -ENODEV;
		goto out_up_read;
	}
	if ((unsigned int)*num < real_count) {
		ret = -E2BIG;
		goto out_up_read;
	}
	*num = real_count;

	list_for_each_entry(node, &master->node_list, group_node) {
		if (is_local_group(node->spg->id))
			continue;
		*(spg_ids++) = node->spg->id;
	}

out_up_read:
	up_read(&sp_group_sem);
	put_task_struct(tsk);
	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_group_id_by_pid);

static bool is_online_node_id(int node_id)
{
	return node_id >= 0 && node_id < MAX_NUMNODES && node_online(node_id);
}

static void sp_group_init(struct sp_group *spg, int spg_id, unsigned long flag)
{
	spg->id = spg_id;
	spg->flag = flag;
	spg->is_alive = true;
	spg->proc_num = 0;
	atomic_set(&spg->use_count, 1);
	INIT_LIST_HEAD(&spg->procs);
	INIT_LIST_HEAD(&spg->spa_list);
	INIT_LIST_HEAD(&spg->mnode);
	init_rwsem(&spg->rw_lock);
	sp_init_group_stat(&spg->instat);
}

static struct sp_group *create_spg(int spg_id, unsigned long flag)
{
	int ret;
	struct sp_group *spg;
	char name[DNAME_INLINE_LEN];
	struct user_struct *user = NULL;
	int hsize_log = MAP_HUGE_2MB >> MAP_HUGE_SHIFT;

	if (unlikely(system_group_count + 1 == MAX_GROUP_FOR_SYSTEM &&
		     !is_local_group(spg_id))) {
		pr_err("reach system max group num\n");
		return ERR_PTR(-ENOSPC);
	}

	spg = kzalloc(sizeof(*spg), GFP_KERNEL);
	if (spg == NULL)
		return ERR_PTR(-ENOMEM);

	sprintf(name, "sp_group_%d", spg_id);
	spg->file = shmem_kernel_file_setup(name, MAX_LFS_FILESIZE, VM_NORESERVE);
	if (IS_ERR(spg->file)) {
		pr_err("spg file setup failed %ld\n", PTR_ERR(spg->file));
		ret = PTR_ERR(spg->file);
		goto out_kfree;
	}

	sprintf(name, "sp_group_%d_huge", spg_id);
	spg->file_hugetlb = hugetlb_file_setup(name, MAX_LFS_FILESIZE,
				VM_NORESERVE, &user, HUGETLB_ANONHUGE_INODE, hsize_log);
	if (IS_ERR(spg->file_hugetlb)) {
		pr_err("spg file_hugetlb setup failed %ld\n", PTR_ERR(spg->file_hugetlb));
		ret = PTR_ERR(spg->file_hugetlb);
		goto out_fput;
	}

	sp_group_init(spg, spg_id, flag);

	ret = idr_alloc(&sp_group_idr, spg, spg_id, spg_id + 1, GFP_KERNEL);
	if (ret < 0) {
		pr_err("group %d idr alloc failed %d\n", spg_id, ret);
		goto out_fput_huge;
	}

	if (!is_local_group(spg_id))
		system_group_count++;

	return spg;

out_fput_huge:
	fput(spg->file_hugetlb);
out_fput:
	fput(spg->file);
out_kfree:
	kfree(spg);
	return ERR_PTR(ret);
}

/* the caller must hold sp_group_sem */
static struct sp_group *find_or_alloc_sp_group(int spg_id, unsigned long flag)
{
	struct sp_group *spg;

	spg = sp_group_get_locked(current->tgid, spg_id);

	if (!spg) {
		spg = create_spg(spg_id, flag);
	} else {
		down_read(&spg->rw_lock);
		if (!spg_valid(spg)) {
			up_read(&spg->rw_lock);
			sp_group_put_locked(spg);
			return ERR_PTR(-ENODEV);
		}
		up_read(&spg->rw_lock);
		/* spg->use_count has increased due to sp_group_get() */
	}

	return spg;
}

static void __sp_area_drop_locked(struct sp_area *spa);

/* The caller must down_write(&mm->mmap_lock) */
static void sp_munmap_task_areas(struct mm_struct *mm, struct sp_group *spg, struct list_head *stop)
{
	struct sp_area *spa, *prev = NULL;
	int err;


	spin_lock(&sp_area_lock);
	list_for_each_entry(spa, &spg->spa_list, link) {
		if (&spa->link == stop)
			break;

		__sp_area_drop_locked(prev);
		prev = spa;

		atomic_inc(&spa->use_count);
		spin_unlock(&sp_area_lock);

		err = do_munmap(mm, spa->va_start, spa_size(spa), NULL);
		if (err) {
			/* we are not supposed to fail */
			pr_err("failed to unmap VA %pK when munmap task areas\n",
			       (void *)spa->va_start);
		}

		spin_lock(&sp_area_lock);
	}
	__sp_area_drop_locked(prev);

	spin_unlock(&sp_area_lock);
}

/* the caller must hold sp_group_sem */
static int mm_add_group_init(struct task_struct *tsk, struct mm_struct *mm,
			     struct sp_group *spg)
{
	int ret;
	struct sp_group_master *master;

	if (!mm->sp_group_master) {
		ret = sp_init_group_master_locked(tsk, mm);
		if (ret)
			return ret;
	} else {
		if (is_process_in_group(spg, mm)) {
			pr_err_ratelimited("task already in target group, id=%d\n", spg->id);
			return -EEXIST;
		}

		master = mm->sp_group_master;
		if (master->count == MAX_GROUP_FOR_TASK) {
			pr_err("task reaches max group num\n");
			return -ENOSPC;
		}
	}

	return 0;
}

/* the caller must hold sp_group_sem */
static struct sp_group_node *create_spg_node(struct mm_struct *mm,
	unsigned long prot, struct sp_group *spg)
{
	struct sp_group_master *master = mm->sp_group_master;
	struct sp_group_node *spg_node;

	spg_node = kzalloc(sizeof(struct sp_group_node), GFP_KERNEL);
	if (spg_node == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&spg_node->group_node);
	INIT_LIST_HEAD(&spg_node->proc_node);
	spg_node->spg = spg;
	spg_node->master = master;
	spg_node->prot = prot;
1310
	sp_init_spg_proc_stat(&spg_node->instat, spg->id);
1311 1312 1313 1314 1315 1316 1317 1318 1319

	list_add_tail(&spg_node->group_node, &master->node_list);
	master->count++;

	return spg_node;
}

/* the caller must down_write(&spg->rw_lock) */
static int insert_spg_node(struct sp_group *spg, struct sp_group_node *node)
{
	if (spg->proc_num + 1 == MAX_PROC_PER_GROUP) {
		pr_err_ratelimited("add group: group reaches max process num\n");
		return -ENOSPC;
	}

	spg->proc_num++;
	list_add_tail(&node->proc_node, &spg->procs);
1328 1329 1330 1331

	return 0;
}

/* the caller must down_write(&spg->rw_lock) */
static void delete_spg_node(struct sp_group *spg, struct sp_group_node *node)
{
	list_del(&node->proc_node);
	spg->proc_num--;
}

/* the caller must hold sp_group_sem */
static void free_spg_node(struct mm_struct *mm, struct sp_group *spg,
	struct sp_group_node *spg_node)
{
	struct sp_group_master *master = mm->sp_group_master;

	list_del(&spg_node->group_node);
	master->count--;

	kfree(spg_node);
}

static int local_group_add_task(struct mm_struct *mm, struct sp_group *spg)
{
	struct sp_group_node *node;

	node = create_spg_node(mm, PROT_READ | PROT_WRITE, spg);
	if (IS_ERR(node))
		return PTR_ERR(node);

	insert_spg_node(spg, node);
	mmget(mm);

	return 0;
}

/**
 * mg_sp_group_add_task() - Add a process to a share group (sp_group).
 * @tgid: the tgid of the task to be added.
 * @prot: the prot of task for this spg.
 * @spg_id: the ID of the sp_group.
 * @flag: to give some special message.
 *
 * A process can't be added to more than one sp_group in single group mode
 * and can in multiple group mode.
 *
 * Return: A positive group number for success, -errno on failure.
 *
 * The manually specified ID is between [SPG_ID_MIN, SPG_ID_MAX].
 * The automatically allocated ID is between [SPG_ID_AUTO_MIN, SPG_ID_AUTO_MAX].
 * When negative, the return value is -errno.
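 *
 * Illustrative usage sketch (assumes a valid tgid; only the return code
 * is checked):
 *
 *	int spg_id;
 *
 *	spg_id = mg_sp_group_add_task(tgid, PROT_READ | PROT_WRITE, SPG_ID_AUTO);
 *	if (spg_id < 0)
 *		return spg_id;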
 */
int mg_sp_group_add_task(int tgid, unsigned long prot, int spg_id)
{
	unsigned long flag = 0;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct sp_group *spg;
	struct sp_group_node *node = NULL;
	int ret = 0;
	bool id_newly_generated = false;
	struct sp_area *spa, *prev = NULL;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	/* only allow READ, READ | WRITE */
	if (!((prot == PROT_READ)
	      || (prot == (PROT_READ | PROT_WRITE)))) {
		pr_err_ratelimited("prot is invalid 0x%lx\n", prot);
		return -EINVAL;
	}

	if (spg_id < SPG_ID_MIN || spg_id > SPG_ID_AUTO) {
		pr_err_ratelimited("add group failed, invalid group id %d\n", spg_id);
		return -EINVAL;
	}

	if (spg_id >= SPG_ID_AUTO_MIN && spg_id <= SPG_ID_AUTO_MAX) {
		spg = sp_group_get(tgid, spg_id);

		if (!spg) {
			pr_err_ratelimited("spg %d hasn't been created\n", spg_id);
			return -EINVAL;
		}

		down_read(&spg->rw_lock);
		if (!spg_valid(spg)) {
			up_read(&spg->rw_lock);
			pr_err_ratelimited("add group failed, group id %d is dead\n", spg_id);
			sp_group_put(spg);
			return -EINVAL;
		}
		up_read(&spg->rw_lock);

		sp_group_put(spg);
	}

	if (spg_id == SPG_ID_AUTO) {
		spg_id = ida_alloc_range(&sp_group_id_ida, SPG_ID_AUTO_MIN,
					 SPG_ID_AUTO_MAX, GFP_ATOMIC);
		if (spg_id < 0) {
			pr_err_ratelimited("add group failed, auto generate group id failed\n");
			return spg_id;
		}
		id_newly_generated = true;
	}

	down_write(&sp_group_sem);

	ret = get_task(tgid, &tsk);
	if (ret) {
		up_write(&sp_group_sem);
		free_new_spg_id(id_newly_generated, spg_id);
		goto out;
	}

	if (check_aoscore_process(tsk)) {
		up_write(&sp_group_sem);
		ret = -EACCES;
		free_new_spg_id(id_newly_generated, spg_id);
		goto out_put_task;
	}

	/*
	 * group_leader: current thread may be exiting in a multithread process
	 *
	 * DESIGN IDEA
	 * We increase mm->mm_users deliberately to ensure it's decreased in
	 * share pool under only 2 circumstances, which will simplify the overall
	 * design as mm won't be freed unexpectedly.
	 *
	 * The corresponding refcount decrements are as follows:
	 * 1. the error handling branch of THIS function.
	 * 2. In sp_group_exit(). It's called only when process is exiting.
	 */
	mm = get_task_mm(tsk->group_leader);
	if (!mm) {
		up_write(&sp_group_sem);
		ret = -ESRCH;
		free_new_spg_id(id_newly_generated, spg_id);
		goto out_put_task;
	}

	spg = find_or_alloc_sp_group(spg_id, flag);
	if (IS_ERR(spg)) {
		up_write(&sp_group_sem);
		ret = PTR_ERR(spg);
		free_new_spg_id(id_newly_generated, spg_id);
		goto out_put_mm;
	}

	down_write(&spg->rw_lock);
	ret = mm_add_group_init(tsk, mm, spg);
	if (ret) {
		up_write(&spg->rw_lock);
		goto out_drop_group;
	}

	ret = sp_mapping_group_setup(mm, spg);
	if (ret) {
		up_write(&spg->rw_lock);
		goto out_drop_group;
	}

	node = create_spg_node(mm, prot, spg);
	if (unlikely(IS_ERR(node))) {
		up_write(&spg->rw_lock);
		ret = PTR_ERR(node);
		goto out_drop_group;
	}

	ret = insert_spg_node(spg, node);
	if (unlikely(ret)) {
		up_write(&spg->rw_lock);
		goto out_drop_spg_node;
	}

	/*
	 * create mappings of existing shared memory segments into this
	 * new process' page table.
	 */
	spin_lock(&sp_area_lock);

	list_for_each_entry(spa, &spg->spa_list, link) {
		unsigned long populate = 0;
		struct file *file = spa_file(spa);
		unsigned long addr;
		unsigned long prot_spa = prot;

		if ((spa->flags & (SP_PROT_RO | SP_PROT_FOCUS)) == (SP_PROT_RO | SP_PROT_FOCUS))
			prot_spa &= ~PROT_WRITE;

		__sp_area_drop_locked(prev);
		prev = spa;

		atomic_inc(&spa->use_count);

		if (spa->is_dead == true)
			continue;

		spin_unlock(&sp_area_lock);

		if (spa->type == SPA_TYPE_K2SPG && spa->kva) {
			addr = sp_remap_kva_to_vma(spa, mm, prot_spa, NULL);
			if (IS_ERR_VALUE(addr))
				pr_warn("add group remap k2u failed %ld\n", addr);

			spin_lock(&sp_area_lock);
			continue;
		}

		down_write(&mm->mmap_lock);
		if (unlikely(mm->core_state)) {
			sp_munmap_task_areas(mm, spg, &spa->link);
			up_write(&mm->mmap_lock);
			ret = -EBUSY;
			pr_err("add group: encountered coredump, abort\n");
			spin_lock(&sp_area_lock);
			break;
		}

		addr = sp_mmap(mm, file, spa, &populate, prot_spa, NULL);
		if (IS_ERR_VALUE(addr)) {
			sp_munmap_task_areas(mm, spg, &spa->link);
			up_write(&mm->mmap_lock);
			ret = addr;
			pr_err("add group: sp mmap failed %d\n", ret);
			spin_lock(&sp_area_lock);
			break;
		}
		up_write(&mm->mmap_lock);

		if (populate) {
			ret = do_mm_populate(mm, spa->va_start, populate, 0);
			if (ret) {
				if (unlikely(fatal_signal_pending(current)))
					pr_warn_ratelimited("add group failed, current thread is killed\n");
				else
					pr_warn_ratelimited("add group failed, mm populate failed (potential no enough memory when -12): %d, spa type is %d\n",
					ret, spa->type);
				down_write(&mm->mmap_lock);
				sp_munmap_task_areas(mm, spg, spa->link.next);
				up_write(&mm->mmap_lock);
				spin_lock(&sp_area_lock);
				break;
			}
		}

		spin_lock(&sp_area_lock);
	}
	__sp_area_drop_locked(prev);
	spin_unlock(&sp_area_lock);

	if (unlikely(ret))
		delete_spg_node(spg, node);
	up_write(&spg->rw_lock);

out_drop_spg_node:
	if (unlikely(ret))
		free_spg_node(mm, spg, node);
	/*
	 * to simplify design, we don't release the resource of
	 * group_master and proc_stat, they will be freed when
	 * process is exiting.
	 */
out_drop_group:
	if (unlikely(ret)) {
		up_write(&sp_group_sem);
		sp_group_put(spg);
	} else
		up_write(&sp_group_sem);
out_put_mm:
	/* No need to put the mm if the sp group adds this mm successfully */
	if (unlikely(ret))
		mmput(mm);
out_put_task:
	put_task_struct(tsk);
out:
	return ret == 0 ? spg_id : ret;
}
EXPORT_SYMBOL_GPL(mg_sp_group_add_task);

/**
 * mg_sp_group_del_task() - delete a process from a sp group.
 * @tgid: the tgid of the task to be deleted
 * @spg_id: sharepool group id
 *
 * the group's spa list must be empty, or deletion will fail.
 *
 * Return:
 * * if success, return 0.
 * * -EINVAL, spg_id invalid or spa_list not empty or spg dead
 * * -ESRCH, the task group of tgid is not in group / process dead
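 *
 * Illustrative usage sketch (the group must have no sp_area left):
 *
 *	ret = mg_sp_group_del_task(tgid, spg_id);
 *	if (ret)
 *		pr_err("failed to leave group %d: %d\n", spg_id, ret);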
 */
int mg_sp_group_del_task(int tgid, int spg_id)
{
	int ret = 0;
	struct sp_group *spg;
	struct sp_group_node *spg_node;
	struct task_struct *tsk = NULL;
	struct mm_struct *mm = NULL;
	bool is_alive = true;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	if (spg_id < SPG_ID_MIN || spg_id > SPG_ID_AUTO) {
		pr_err("del from group failed, invalid group id %d\n", spg_id);
		return -EINVAL;
	}

	spg = sp_group_get(tgid, spg_id);
	if (!spg) {
		pr_err("spg not found or get task failed, tgid:%d, spg_id:%d\n",
			tgid, spg_id);
		return -EINVAL;
	}
	down_write(&sp_group_sem);

	if (!spg_valid(spg)) {
		up_write(&sp_group_sem);
		pr_err("spg dead, spg_id:%d\n", spg_id);
		ret = -EINVAL;
		goto out;
	}

	ret = get_task(tgid, &tsk);
	if (ret) {
		up_write(&sp_group_sem);
		pr_err("task is not found, tgid:%d\n", tgid);
		goto out;
	}
	mm = get_task_mm(tsk->group_leader);
	if (!mm) {
		up_write(&sp_group_sem);
		pr_err("mm is not found, tgid:%d\n", tgid);
		ret = -ESRCH;
		goto out_put_task;
	}

	if (!mm->sp_group_master) {
		up_write(&sp_group_sem);
		pr_err("task(%d) is not in any group(%d)\n", tgid, spg_id);
		ret = -EINVAL;
		goto out_put_mm;
	}

	spg_node = find_spg_node_by_spg(mm, spg);
	if (!spg_node) {
		up_write(&sp_group_sem);
		pr_err("task(%d) not in group(%d)\n", tgid, spg_id);
		ret = -ESRCH;
		goto out_put_mm;
	}

	down_write(&spg->rw_lock);

	if (!list_empty(&spg->spa_list)) {
		up_write(&spg->rw_lock);
		up_write(&sp_group_sem);
		pr_err("spa is not empty, task:%d, spg_id:%d\n", tgid, spg_id);
		ret = -EINVAL;
		goto out_put_mm;
	}

	if (list_is_singular(&spg->procs))
		is_alive = spg->is_alive = false;
	spg->proc_num--;
	list_del(&spg_node->proc_node);
	sp_group_put(spg);
	up_write(&spg->rw_lock);
	if (!is_alive)
		blocking_notifier_call_chain(&sp_notifier_chain, 0, spg);

	list_del(&spg_node->group_node);
	mm->sp_group_master->count--;
	kfree(spg_node);
	atomic_dec(&mm->mm_users);

	up_write(&sp_group_sem);

out_put_mm:
	mmput(mm);
out_put_task:
	put_task_struct(tsk);
out:
	sp_group_put(spg); /* if spg dead, freed here */
	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_group_del_task);

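/*
 * Return the ID of the local (pass-through) sp_group of the current process.
 * The sp_group_master and its local group are created on first use.
 * Returns -EOPNOTSUPP when share pool is disabled, or -EINVAL for kernel
 * threads and tasks without an mm.
 */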
int mg_sp_id_of_current(void)
{
	int ret, spg_id;
	struct sp_group_master *master;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	if ((current->flags & PF_KTHREAD) || !current->mm)
		return -EINVAL;

	down_read(&sp_group_sem);
	master = current->mm->sp_group_master;
	if (master) {
		spg_id = master->local->id;
		up_read(&sp_group_sem);
		return spg_id;
	}
	up_read(&sp_group_sem);

	down_write(&sp_group_sem);
	ret = sp_init_group_master_locked(current, current->mm);
	if (ret) {
		up_write(&sp_group_sem);
		return ret;
	}
	master = current->mm->sp_group_master;
	spg_id = master->local->id;
	up_write(&sp_group_sem);

	return spg_id;
}
EXPORT_SYMBOL_GPL(mg_sp_id_of_current);

/* the caller must hold sp_area_lock */
static void insert_sp_area(struct sp_mapping *spm, struct sp_area *spa)
{
	struct rb_node **p = &spm->area_root.rb_node;
	struct rb_node *parent = NULL;

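	/* walk the address-sorted rbtree; an overlapping range is a bug */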
	while (*p) {
		struct sp_area *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct sp_area, rb_node);
		if (spa->va_start < tmp->va_end)
			p = &(*p)->rb_left;
		else if (spa->va_end > tmp->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&spa->rb_node, parent, p);
	rb_insert_color(&spa->rb_node, &spm->area_root);
}

/**
 * sp_alloc_area() - Allocate a region of VA from the share pool.
 * @size: the size of VA to allocate.
 * @flags: how to allocate the memory.
 * @spg: the share group that the memory is allocated to.
 * @type: the type of the region.
 * @applier: the tgid of the task which allocates the region.
 *
 * Return: a valid pointer on success, an ERR_PTR()-encoded error on failure.
 */
static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags,
				     struct sp_group *spg, enum spa_type type,
				     pid_t applier)
{
	struct sp_area *spa, *first, *err;
	struct rb_node *n;
	unsigned long vstart;
	unsigned long vend;
	unsigned long addr;
	unsigned long size_align = ALIGN(size, PMD_SIZE); /* va aligned to 2M */
	int device_id, node_id;
	struct sp_mapping *mapping;

	device_id = sp_flags_device_id(flags);
	node_id = flags & SP_SPEC_NODE_ID ? sp_flags_node_id(flags) : device_id;

	if (!is_online_node_id(node_id)) {
		pr_err_ratelimited("invalid numa node id %d\n", node_id);
		return ERR_PTR(-EINVAL);
	}

	if (flags & SP_PROT_FOCUS) {
		if ((flags & (SP_DVPP | SP_PROT_RO)) != SP_PROT_RO) {
			pr_err("invalid sp_flags [%lx]\n", flags);
			return ERR_PTR(-EINVAL);
		}
		mapping = spg->mapping[SP_MAPPING_RO];
	} else if (flags & SP_DVPP) {
		mapping = spg->mapping[SP_MAPPING_DVPP];
	} else {
		mapping = spg->mapping[SP_MAPPING_NORMAL];
	}

	if (!mapping) {
		pr_err_ratelimited("non DVPP spg, id %d\n", spg->id);
		return ERR_PTR(-EINVAL);
	}

	vstart = mapping->start[device_id];
	vend = mapping->end[device_id];
	spa = __kmalloc_node(sizeof(struct sp_area), GFP_KERNEL, node_id);
	if (unlikely(!spa))
		return ERR_PTR(-ENOMEM);

	spin_lock(&sp_area_lock);

	/*
	 * Invalidate cache if we have more permissive parameters.
	 * cached_hole_size notes the largest hole noticed _below_
	 * the sp_area cached in free_area_cache: if size fits
	 * into that hole, we want to scan from vstart to reuse
	 * the hole instead of allocating above free_area_cache.
	 * Note that sp_free_area may update free_area_cache
	 * without updating cached_hole_size.
	 */
	if (!mapping->free_area_cache || size_align < mapping->cached_hole_size ||
	    vstart != mapping->cached_vstart) {
		mapping->cached_hole_size = 0;
		mapping->free_area_cache = NULL;
	}

	/* record if we encounter less permissive parameters */
	mapping->cached_vstart = vstart;

	/* find starting point for our search */
	if (mapping->free_area_cache) {
		first = rb_entry(mapping->free_area_cache, struct sp_area, rb_node);
		addr = first->va_end;
		if (addr + size_align < addr) {
			err = ERR_PTR(-EOVERFLOW);
			goto error;
		}
	} else {
		addr = vstart;
		if (addr + size_align < addr) {
			err = ERR_PTR(-EOVERFLOW);
			goto error;
		}

		n = mapping->area_root.rb_node;
		first = NULL;

		while (n) {
			struct sp_area *tmp;

			tmp = rb_entry(n, struct sp_area, rb_node);
			if (tmp->va_end >= addr) {
				first = tmp;
				if (tmp->va_start <= addr)
					break;
				n = n->rb_left;
			} else
				n = n->rb_right;
		}

		if (!first)
			goto found;
	}

	/* from the starting point, traverse areas until a suitable hole is found */
	while (addr + size_align > first->va_start && addr + size_align <= vend) {
		if (addr + mapping->cached_hole_size < first->va_start)
			mapping->cached_hole_size = first->va_start - addr;
		addr = first->va_end;
		if (addr + size_align < addr) {
			err = ERR_PTR(-EOVERFLOW);
			goto error;
		}

		n = rb_next(&first->rb_node);
		if (n)
			first = rb_entry(n, struct sp_area, rb_node);
		else
			goto found;
	}

found:
	if (addr + size_align > vend) {
		err = ERR_PTR(-EOVERFLOW);
		goto error;
	}

	spa->va_start = addr;
	spa->va_end = addr + size_align;
	spa->real_size = size;
	spa->region_vstart = vstart;
	spa->flags = flags;
	spa->is_hugepage = (flags & SP_HUGEPAGE);
	spa->is_dead = false;
	spa->spg = spg;
	atomic_set(&spa->use_count, 1);
	spa->type = type;
	spa->mm = NULL;
	spa->kva = 0;   /* NULL pointer */
	spa->applier = applier;
	spa->node_id = node_id;
	spa->device_id = device_id;

	spa_inc_usage(spa);
	insert_sp_area(mapping, spa);
	mapping->free_area_cache = &spa->rb_node;
	list_add_tail(&spa->link, &spg->spa_list);

	spin_unlock(&sp_area_lock);

	return spa;

error:
	spin_unlock(&sp_area_lock);
	kfree(spa);
	return err;
}

/* the caller should hold sp_area_lock */
static struct sp_area *find_sp_area_locked(struct sp_group *spg,
		unsigned long addr)
{
	struct sp_mapping *spm = sp_mapping_find(spg, addr);
	struct rb_node *n = spm->area_root.rb_node;
	while (n) {
		struct sp_area *spa;

		spa = rb_entry(n, struct sp_area, rb_node);
		if (addr < spa->va_start) {
			n = n->rb_left;
		} else if (addr > spa->va_start) {
			n = n->rb_right;
		} else {
			return spa;
		}
	}

	return NULL;
}

static struct sp_area *get_sp_area(struct sp_group *spg, unsigned long addr)
{
	struct sp_area *n;

	spin_lock(&sp_area_lock);
	n = find_sp_area_locked(spg, addr);
	if (n)
		atomic_inc(&n->use_count);
	spin_unlock(&sp_area_lock);
	return n;
}

static bool vmalloc_area_clr_flag(unsigned long kva, unsigned long flags)
{
	struct vm_struct *area;

	area = find_vm_area((void *)kva);
	if (area) {
		area->flags &= ~flags;
		return true;
	}

	return false;
}

/*
 * Return the VA region starting at addr back to the share pool
 */
static void sp_free_area(struct sp_area *spa)
{
	unsigned long addr = spa->va_start;
	struct sp_mapping *spm;

	lockdep_assert_held(&sp_area_lock);

	spm = sp_mapping_find(spa->spg, addr);
	if (spm->free_area_cache) {
		struct sp_area *cache;

		cache = rb_entry(spm->free_area_cache, struct sp_area, rb_node);
		if (spa->va_start <= cache->va_start) {
			spm->free_area_cache = rb_prev(&spa->rb_node);
			/*
			 * the new cache node may be changed to another region,
			 * i.e. from DVPP region to normal region
			 */
			if (spm->free_area_cache) {
				cache = rb_entry(spm->free_area_cache,
						 struct sp_area, rb_node);
				spm->cached_vstart = cache->region_vstart;
			}
			/*
			 * We don't try to update cached_hole_size,
			 * but it won't go very wrong.
			 */
		}
	}

	if (spa->kva && !vmalloc_area_clr_flag(spa->kva, VM_SHAREPOOL))
		pr_debug("clear spa->kva %ld is not valid\n", spa->kva);

	spa_dec_usage(spa);
	list_del(&spa->link);

	rb_erase(&spa->rb_node, &spm->area_root);
	RB_CLEAR_NODE(&spa->rb_node);
	kfree(spa);
}

static void __sp_area_drop_locked(struct sp_area *spa)
{
	/*
	 * Considering a situation where task A and B are in the same spg.
	 * A is exiting and calling remove_vma(). Before A calls this func,
	 * B calls sp_free() to free the same spa. So spa may be NULL when A
	 * calls this func later.
	 */
	if (!spa)
		return;

	if (atomic_dec_and_test(&spa->use_count))
		sp_free_area(spa);
}

static void __sp_area_drop(struct sp_area *spa)
{
	spin_lock(&sp_area_lock);
	__sp_area_drop_locked(spa);
	spin_unlock(&sp_area_lock);
}

void sp_area_drop(struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARE_POOL))
		return;

	/*
	 * Considering a situation where task A and B are in the same spg.
	 * A is exiting and calling remove_vma() -> ... -> sp_area_drop().
	 * Concurrently, B is calling sp_free() to free the same spa.
	 * find_sp_area_locked() and __sp_area_drop_locked() should be
	 * an atomic operation.
	 */
	spin_lock(&sp_area_lock);
	__sp_area_drop_locked(vma->vm_private_data);
	spin_unlock(&sp_area_lock);
}

/*
 * The function calls of do_munmap() won't change any non-atomic member
 * of struct sp_group. Please review the following chain:
 * do_munmap -> remove_vma_list -> remove_vma -> sp_area_drop ->
 * __sp_area_drop_locked -> sp_free_area
 */
static void sp_munmap(struct mm_struct *mm, unsigned long addr,
			   unsigned long size)
{
	int err;

	down_write(&mm->mmap_lock);
	if (unlikely(mm->core_state)) {
		up_write(&mm->mmap_lock);
		pr_info("munmap: encountered coredump\n");
		return;
	}

	err = do_munmap(mm, addr, size, NULL);
	/* we are not supposed to fail */
	if (err)
		pr_err("failed to unmap VA %pK when sp munmap\n", (void *)addr);

	up_write(&mm->mmap_lock);
}

static void __sp_free(struct sp_group *spg, unsigned long addr,
		      unsigned long size, struct mm_struct *stop)
{
	struct mm_struct *mm;
	struct sp_group_node *spg_node = NULL;

	list_for_each_entry(spg_node, &spg->procs, proc_node) {
		mm = spg_node->master->mm;
		if (mm == stop)
			break;
		sp_munmap(mm, addr, size);
	}
}

/* Free the memory of the backing shmem or hugetlbfs */
static void sp_fallocate(struct sp_area *spa)
{
	int ret;
	unsigned long mode = FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE;
	unsigned long offset = addr_offset(spa);

	ret = vfs_fallocate(spa_file(spa), mode, offset, spa_size(spa));
	if (ret)
		WARN(1, "sp fallocate failed %d\n", ret);
}

static void sp_free_unmap_fallocate(struct sp_area *spa)
{
	down_read(&spa->spg->rw_lock);
	__sp_free(spa->spg, spa->va_start, spa_size(spa), NULL);
	sp_fallocate(spa);
	up_read(&spa->spg->rw_lock);
}

static int sp_check_caller_permission(struct sp_group *spg, struct mm_struct *mm)
{
	int ret = 0;

	down_read(&spg->rw_lock);
	if (!is_process_in_group(spg, mm))
		ret = -EPERM;
	up_read(&spg->rw_lock);

	return ret;
}

#define FREE_CONT	1
#define FREE_END	2

struct sp_free_context {
	unsigned long addr;
	struct sp_area *spa;
	int state;
	int spg_id;
};

/* when success, __sp_area_drop(spa) should be used */
static int sp_free_get_spa(struct sp_free_context *fc)
{
	int ret = 0;
	unsigned long addr = fc->addr;
	struct sp_area *spa;
	struct sp_group *spg;

	spg = sp_group_get(current->tgid, fc->spg_id);
	if (!spg) {
		pr_debug("sp free get group failed %d\n", fc->spg_id);
		return -EINVAL;
	}

	fc->state = FREE_CONT;

	spa = get_sp_area(spg, addr);
	sp_group_put(spg);
	if (!spa) {
		pr_debug("sp free invalid input addr %lx\n", addr);
		return -EINVAL;
	}

	if (spa->type != SPA_TYPE_ALLOC) {
		ret = -EINVAL;
		pr_debug("sp free failed, %lx is not sp alloc addr\n", addr);
		goto drop_spa;
	}
	fc->spa = spa;

	if (!current->mm)
		goto check_spa;

	ret = sp_check_caller_permission(spa->spg, current->mm);
	if (ret < 0)
		goto drop_spa;

check_spa:
	if (is_local_group(spa->spg->id) && (current->tgid != spa->applier)) {
		ret = -EPERM;
		goto drop_spa;
	}

	down_write(&spa->spg->rw_lock);
	if (!spg_valid(spa->spg)) {
		fc->state = FREE_END;
		up_write(&spa->spg->rw_lock);
		goto drop_spa;
		/* we must return success(0) in this situation */
	}
	/* the life cycle of spa has a direct relation with sp group */
	if (unlikely(spa->is_dead)) {
		up_write(&spa->spg->rw_lock);
		pr_err_ratelimited("unexpected double sp free\n");
		dump_stack();
		ret = -EINVAL;
		goto drop_spa;
	}
	spa->is_dead = true;
	up_write(&spa->spg->rw_lock);

	return 0;

drop_spa:
	__sp_area_drop(spa);
	return ret;
}

/**
 * mg_sp_free() - Free the memory allocated by mg_sp_alloc().
 * @addr: the starting VA of the memory.
 * @id: Address space identifier, which is used to distinguish the addr.
 *
 * Return:
 * * 0		- success.
 * * -EINVAL	- the memory can't be found or was not allocated by share pool.
 * * -EPERM	- the caller has no permission to free the memory.
 */
int mg_sp_free(unsigned long addr, int id)
{
	int ret = 0;
	struct sp_free_context fc = {
		.addr = addr,
		.spg_id = id,
	};

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	if (current->flags & PF_KTHREAD)
		return -EINVAL;

	ret = sp_free_get_spa(&fc);
	if (ret || fc.state == FREE_END)
		goto out;

	sp_free_unmap_fallocate(fc.spa);

	if (current->mm == NULL)
		atomic64_sub(fc.spa->real_size, &kthread_stat.alloc_size);
	else
		sp_update_process_stat(current, false, fc.spa);

	__sp_area_drop(fc.spa);  /* match get_sp_area in sp_free_get_spa */
out:
	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_free);
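
/*
 * A minimal caller-side sketch of mg_sp_free() (illustrative only; "buf" is
 * assumed to be an address returned earlier by mg_sp_alloc() in group
 * "spg_id", both names hypothetical):
 *
 *	int err = mg_sp_free((unsigned long)buf, spg_id);
 *	if (err)
 *		pr_err("sp free failed: %d\n", err);
 *
 * A second free of the same address is rejected via spa->is_dead, and a
 * caller whose mm is not in the group gets -EPERM from
 * sp_check_caller_permission().
 */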

/* wrapper of __do_mmap() and the caller must hold down_write(&mm->mmap_lock). */
static unsigned long sp_mmap(struct mm_struct *mm, struct file *file,
			     struct sp_area *spa, unsigned long *populate,
			     unsigned long prot, struct vm_area_struct **pvma)
{
	unsigned long addr = spa->va_start;
	unsigned long size = spa_size(spa);
	unsigned long flags = MAP_FIXED | MAP_SHARED | MAP_POPULATE |
			      MAP_SHARE_POOL;
	unsigned long vm_flags = VM_NORESERVE | VM_SHARE_POOL | VM_DONTCOPY;
	unsigned long pgoff = addr_offset(spa) >> PAGE_SHIFT;
	struct vm_area_struct *vma;

	atomic_inc(&spa->use_count);
	addr = __do_mmap_mm(mm, file, addr, size, prot, flags, vm_flags, pgoff,
			 populate, NULL);
	if (IS_ERR_VALUE(addr)) {
		atomic_dec(&spa->use_count);
		pr_err("do_mmap fails %ld\n", addr);
	} else {
		BUG_ON(addr != spa->va_start);
		vma = find_vma(mm, addr);
		vma->vm_private_data = spa;
		if (pvma)
			*pvma = vma;
	}

	return addr;
}

#define ALLOC_NORMAL	1
#define ALLOC_RETRY	2
#define ALLOC_NOMEM	3
#define ALLOC_COREDUMP	4

struct sp_alloc_context {
	struct sp_group *spg;
	struct file *file;
	unsigned long size;
	unsigned long size_aligned;
	unsigned long sp_flags;
	unsigned long populate;
	int state;
	bool have_mbind;
	enum spa_type type;
};

static int sp_alloc_prepare(unsigned long size, unsigned long sp_flags,
	int spg_id, struct sp_alloc_context *ac)
{
	struct sp_group *spg;

	check_interrupt_context();

	if (current->flags & PF_KTHREAD) {
		pr_err_ratelimited("allocation failed, task is kthread\n");
		return -EINVAL;
	}

	if (unlikely(!size || (size >> PAGE_SHIFT) > totalram_pages())) {
		pr_err_ratelimited("allocation failed, invalid size %lu\n", size);
		return -EINVAL;
	}

	if (spg_id != SPG_ID_DEFAULT && (spg_id < SPG_ID_MIN || spg_id >= SPG_ID_AUTO)) {
		pr_err_ratelimited("allocation failed, invalid group id %d\n", spg_id);
		return -EINVAL;
	}

	if (sp_flags & (~SP_FLAG_MASK)) {
		pr_err_ratelimited("allocation failed, invalid flag %lx\n", sp_flags);
		return -EINVAL;
	}

	if (sp_flags & SP_HUGEPAGE_ONLY)
		sp_flags |= SP_HUGEPAGE;

	if (spg_id != SPG_ID_DEFAULT) {
		spg = sp_group_get(current->tgid, spg_id);
		if (!spg) {
			pr_err_ratelimited("allocation failed, can't find group\n");
			return -ENODEV;
		}

		/* up_read will be at the end of sp_alloc */
		down_read(&spg->rw_lock);
		if (!spg_valid(spg)) {
			up_read(&spg->rw_lock);
			sp_group_put(spg);
			pr_err_ratelimited("allocation failed, spg is dead\n");
			return -ENODEV;
		}

		if (!is_process_in_group(spg, current->mm)) {
			up_read(&spg->rw_lock);
			sp_group_put(spg);
			pr_err_ratelimited("allocation failed, task not in group\n");
			return -ENODEV;
		}
		ac->type = SPA_TYPE_ALLOC;
	} else {  /* allocation pass through scene */
		spg = sp_get_local_group(current, current->mm);
		if (IS_ERR(spg))
			return PTR_ERR(spg);
		down_read(&spg->rw_lock);
		ac->type = SPA_TYPE_ALLOC_PRIVATE;
	}

	if (sp_flags & SP_HUGEPAGE) {
		ac->file = spg->file_hugetlb;
		ac->size_aligned = ALIGN(size, PMD_SIZE);
	} else {
		ac->file = spg->file;
		ac->size_aligned = ALIGN(size, PAGE_SIZE);
	}

	ac->spg = spg;
	ac->size = size;
	ac->sp_flags = sp_flags;
	ac->state = ALLOC_NORMAL;
	ac->have_mbind = false;
	return 0;
}

static void sp_alloc_unmap(struct mm_struct *mm, struct sp_area *spa,
	struct sp_group_node *spg_node)
{
	__sp_free(spa->spg, spa->va_start, spa->real_size, mm);
}

static int sp_alloc_mmap(struct mm_struct *mm, struct sp_area *spa,
	struct sp_group_node *spg_node, struct sp_alloc_context *ac)
{
	int ret = 0;
	unsigned long mmap_addr;
	/* pass through default permission */
	unsigned long prot = PROT_READ | PROT_WRITE;
	unsigned long populate = 0;
	struct vm_area_struct *vma;

	down_write(&mm->mmap_lock);
	if (unlikely(mm->core_state)) {
		up_write(&mm->mmap_lock);
		ac->state = ALLOC_COREDUMP;
		pr_info("allocation encountered coredump\n");
		return -EFAULT;
	}

	if (spg_node)
		prot = spg_node->prot;

	if (ac->sp_flags & SP_PROT_RO)
		prot = PROT_READ;

	/* when success, mmap_addr == spa->va_start */
	mmap_addr = sp_mmap(mm, spa_file(spa), spa, &populate, prot, &vma);
	if (IS_ERR_VALUE(mmap_addr)) {
		up_write(&mm->mmap_lock);
		sp_alloc_unmap(mm, spa, spg_node);
		pr_err("sp mmap in allocation failed %ld\n", mmap_addr);
		return PTR_ERR((void *)mmap_addr);
	}

	if (unlikely(populate == 0)) {
		up_write(&mm->mmap_lock);
		pr_err("allocation sp mmap populate failed\n");
		ret = -EFAULT;
		goto unmap;
	}
	ac->populate = populate;

	if (ac->sp_flags & SP_PROT_RO)
		vma->vm_flags &= ~VM_MAYWRITE;

	/* clear PTE_RDONLY, otherwise an SMMU event may be triggered */
	if (prot & PROT_WRITE)
		vma->vm_page_prot = __pgprot(((~PTE_RDONLY) & vma->vm_page_prot.pgprot) | PTE_DIRTY);
	up_write(&mm->mmap_lock);

	return ret;

unmap:
	sp_alloc_unmap(list_next_entry(spg_node, proc_node)->master->mm, spa, spg_node);
	return ret;
}

static void sp_alloc_fallback(struct sp_area *spa, struct sp_alloc_context *ac)
{
	if (ac->file == ac->spg->file) {
		ac->state = ALLOC_NOMEM;
		return;
	}

	if (!(ac->sp_flags & SP_HUGEPAGE_ONLY)) {
		ac->file = ac->spg->file;
		ac->size_aligned = ALIGN(ac->size, PAGE_SIZE);
		ac->sp_flags &= ~SP_HUGEPAGE;
		ac->state = ALLOC_RETRY;
		__sp_area_drop(spa);
		return;
	}
	ac->state = ALLOC_NOMEM;
}

static int sp_alloc_populate(struct mm_struct *mm, struct sp_area *spa,
			     struct sp_alloc_context *ac)
{
	/*
	 * We are not ignoring errors, so if we fail to allocate
	 * physical memory we just return failure, so we won't encounter
	 * page fault later on, and more importantly sp_make_share_u2k()
	 * depends on this feature (and MAP_LOCKED) to work correctly.
	 */

	return do_mm_populate(mm, spa->va_start, ac->populate, 0);
}

static long sp_mbind(struct mm_struct *mm, unsigned long start, unsigned long len,
		unsigned long node)
{
	nodemask_t nmask;

	nodes_clear(nmask);
	node_set(node, nmask);
	return __do_mbind(start, len, MPOL_BIND, MPOL_F_STATIC_NODES,
			&nmask, MPOL_MF_STRICT, mm);
}

static int __sp_alloc_mmap_populate(struct mm_struct *mm, struct sp_area *spa,
	struct sp_group_node *spg_node, struct sp_alloc_context *ac)
{
	int ret;

	ret = sp_alloc_mmap(mm, spa, spg_node, ac);

	if (ret < 0)
		return ret;

	if (!ac->have_mbind) {
		ret = sp_mbind(mm, spa->va_start, spa->real_size, spa->node_id);
		if (ret < 0) {
			pr_err("cannot bind the memory range to specified node:%d, err:%d\n",
				spa->node_id, ret);
			return ret;
		}
		ac->have_mbind = true;
	}

	ret = sp_alloc_populate(mm, spa, ac);
	if (ret) {
		if (unlikely(fatal_signal_pending(current)))
			pr_warn_ratelimited("allocation failed, current thread is killed\n");
		else
			pr_warn_ratelimited("allocation failed, mm populate failed (potentially out of memory when -12): %d\n",
					ret);
	}
	return ret;
}

static int sp_alloc_mmap_populate(struct sp_area *spa,
				  struct sp_alloc_context *ac)
{
	int ret = -EINVAL;
	int mmap_ret = 0;
	struct mm_struct *mm, *end_mm = NULL;
	struct sp_group_node *spg_node;

	/* create mapping for each process in the group */
	list_for_each_entry(spg_node, &spa->spg->procs, proc_node) {
		mm = spg_node->master->mm;
		mmap_ret = __sp_alloc_mmap_populate(mm, spa, spg_node, ac);
		if (mmap_ret) {

			/*
			 * Goto fallback procedure upon ERR_VALUE,
			 * but skip the coredump situation,
			 * because we don't want one misbehaving process to affect others.
			 */
			if (ac->state != ALLOC_COREDUMP)
				goto unmap;

			/* Reset state and discard the coredump error. */
			ac->state = ALLOC_NORMAL;
			continue;
		}
		ret = mmap_ret;
	}

	return ret;

unmap:
	/* use the next mm in proc list as end mark */
	if (!list_is_last(&spg_node->proc_node, &spa->spg->procs))
		end_mm = list_next_entry(spg_node, proc_node)->master->mm;
	sp_alloc_unmap(end_mm, spa, spg_node);

	/*
	 * Sometimes do_mm_populate() allocates some memory and then fails to
	 * allocate more (e.g. when memory usage reaches a cgroup limit).
	 * In this case it returns -ENOMEM, but it does not free the memory
	 * which has already been allocated.
	 *
	 * So if __sp_alloc_mmap_populate fails, always call sp_fallocate()
	 * to make sure the backing physical memory of the shared file is freed.
	 */
	sp_fallocate(spa);

	/*
	 * If hugepage allocation fails, fall back to normal pages and try
	 * again (only if SP_HUGEPAGE_ONLY is not set).
	 */
	sp_alloc_fallback(spa, ac);

	return mmap_ret;
}

/* spa maybe an error pointer, so introduce variable spg */
static void sp_alloc_finish(int result, struct sp_area *spa,
		struct sp_alloc_context *ac)
{
	struct sp_group *spg = ac->spg;

	/* match sp_alloc_prepare */
	up_read(&spg->rw_lock);

	if (!result)
		sp_update_process_stat(current, true, spa);

	/* this will free spa if mmap failed */
	if (spa && !IS_ERR(spa))
		__sp_area_drop(spa);

	sp_group_put(spg);
}

/**
 * mg_sp_alloc() - Allocate shared memory for all the processes in a sp_group.
 * @size: the size of memory to allocate.
 * @sp_flags: how to allocate the memory.
 * @spg_id: the share group that the memory is allocated to.
 *
 * Use pass through allocation if spg_id == SPG_ID_DEFAULT in multi-group mode.
 *
 * Return:
 * * on success, return the starting address of the shared memory.
 * * on failure, return an ERR_PTR-encoded -errno.
 */
void *mg_sp_alloc(unsigned long size, unsigned long sp_flags, int spg_id)
{
	struct sp_area *spa = NULL;
	int ret = 0;
	struct sp_alloc_context ac;

	if (!sp_is_enabled())
		return ERR_PTR(-EOPNOTSUPP);

	ret = sp_alloc_prepare(size, sp_flags, spg_id, &ac);
	if (ret)
		return ERR_PTR(ret);

try_again:
	spa = sp_alloc_area(ac.size_aligned, ac.sp_flags, ac.spg,
			    ac.type, current->tgid);
	if (IS_ERR(spa)) {
		pr_err_ratelimited("alloc spa failed in allocation(potential no enough virtual memory when -75): %ld\n",
			PTR_ERR(spa));
		ret = PTR_ERR(spa);
		goto out;
	}

	ret = sp_alloc_mmap_populate(spa, &ac);
	if (ret && ac.state == ALLOC_RETRY) {
		/*
		 * The mempolicy for shared memory is stored in the backing file, which
		 * differs between normal pages and huge pages. So we must set the mbind
		 * policy again when we retry with normal pages.
		 */
		ac.have_mbind = false;
		goto try_again;
	}

out:
	sp_alloc_finish(ret, spa, &ac);
	if (ret)
		return ERR_PTR(ret);
	else
		return (void *)(spa->va_start);
}
EXPORT_SYMBOL_GPL(mg_sp_alloc);
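
/*
 * A minimal usage sketch pairing mg_sp_alloc() with mg_sp_free()
 * (illustrative only; "spg_id" is assumed to identify a group the calling
 * process has already joined, and "buf" is hypothetical):
 *
 *	void *buf = mg_sp_alloc(SZ_4M, SP_HUGEPAGE, spg_id);
 *
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	... the same VA is now mapped in every process of the group ...
 *	mg_sp_free((unsigned long)buf, spg_id);
 *
 * Passing SPG_ID_DEFAULT instead selects the pass-through path backed by
 * the local group set up in sp_get_local_group().
 */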

/**
 * is_vmap_hugepage() - Check if a kernel address belongs to vmalloc family.
 * @addr: the kernel space address to be checked.
 *
 * Return:
 * * >0		- a vmalloc hugepage addr.
 * * =0		- a normal vmalloc addr.
 * * -errno	- failure.
 */
static int is_vmap_hugepage(unsigned long addr)
{
	struct vm_struct *area;

	if (unlikely(!addr)) {
		pr_err_ratelimited("null vmap addr pointer\n");
		return -EINVAL;
	}

	area = find_vm_area((void *)addr);
	if (unlikely(!area)) {
		pr_debug("can't find vm area(%lx)\n", addr);
		return -EINVAL;
	}

	if (area->flags & VM_HUGE_PAGES)
		return 1;
	else
		return 0;
}

static unsigned long __sp_remap_get_pfn(unsigned long kva)
{
	unsigned long pfn = -EINVAL;

	/* sp_make_share_k2u only supports vmalloc addresses */
	if (is_vmalloc_addr((void *)kva))
		pfn = vmalloc_to_pfn((void *)kva);

	return pfn;
}

/* when called by k2u to group, always make sure rw_lock of spg is down */
static unsigned long sp_remap_kva_to_vma(struct sp_area *spa, struct mm_struct *mm,
					unsigned long prot, struct sp_k2u_context *kc)
{
	struct vm_area_struct *vma;
	unsigned long ret_addr;
	unsigned long populate = 0;
	int ret = 0;
	unsigned long addr, buf, offset;
	unsigned long kva = spa->kva;

	down_write(&mm->mmap_lock);
	if (unlikely(mm->core_state)) {
		pr_err("k2u mmap: encountered coredump, abort\n");
		ret_addr = -EBUSY;
		if (kc)
			kc->state = K2U_COREDUMP;
		goto put_mm;
	}

	if (kc && (kc->sp_flags & SP_PROT_RO))
		prot = PROT_READ;

	ret_addr = sp_mmap(mm, spa_file(spa), spa, &populate, prot, &vma);
	if (IS_ERR_VALUE(ret_addr)) {
		pr_debug("k2u mmap failed %lx\n", ret_addr);
		goto put_mm;
	}

	if (prot & PROT_WRITE)
		vma->vm_page_prot = __pgprot(((~PTE_RDONLY) & vma->vm_page_prot.pgprot) | PTE_DIRTY);

	if (kc && (kc->sp_flags & SP_PROT_RO))
		vma->vm_flags &= ~VM_MAYWRITE;

	if (is_vm_hugetlb_page(vma)) {
		ret = remap_vmalloc_hugepage_range(vma, (void *)kva, 0);
		if (ret) {
			do_munmap(mm, ret_addr, spa_size(spa), NULL);
			pr_debug("remap vmalloc hugepage failed, ret %d, kva is %lx\n",
				 ret, (unsigned long)kva);
			ret_addr = ret;
			goto put_mm;
		}
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	} else {
		buf = ret_addr;
		addr = kva;
		offset = 0;
		do {
			ret = remap_pfn_range(vma, buf, __sp_remap_get_pfn(addr), PAGE_SIZE,
					__pgprot(vma->vm_page_prot.pgprot));
			if (ret) {
				do_munmap(mm, ret_addr, spa_size(spa), NULL);
				pr_err("remap_pfn_range failed %d\n", ret);
				ret_addr = ret;
				goto put_mm;
			}
			offset += PAGE_SIZE;
			buf += PAGE_SIZE;
			addr += PAGE_SIZE;
		} while (offset < spa_size(spa));
	}

put_mm:
	up_write(&mm->mmap_lock);

	return ret_addr;
}

/**
 * Share kernel memory to an spg; the current process must be in that group.
 * @kc: the context for k2u, including kva, size, flags...
 * @spg: the sp group to be shared with
 *
 * Return: the shared user address to start at
 */
static void *sp_make_share_kva_to_spg(struct sp_k2u_context *kc, struct sp_group *spg)
{
	struct sp_area *spa;
	struct mm_struct *mm;
	struct sp_group_node *spg_node;
	unsigned long ret_addr = -ENODEV;

	down_read(&spg->rw_lock);
	spa = sp_alloc_area(kc->size_aligned, kc->sp_flags, spg, kc->type, current->tgid);
	if (IS_ERR(spa)) {
		up_read(&spg->rw_lock);
		pr_err("alloc spa failed in k2u_spg (potentially out of virtual memory when -75): %ld\n",
				PTR_ERR(spa));
		return spa;
	}

	spa->kva = kc->kva_aligned;
	list_for_each_entry(spg_node, &spg->procs, proc_node) {
		mm = spg_node->master->mm;
		kc->state = K2U_NORMAL;
		ret_addr = sp_remap_kva_to_vma(spa, mm, spg_node->prot, kc);
		if (IS_ERR_VALUE(ret_addr)) {
			if (kc->state == K2U_COREDUMP)
				continue;
			pr_err("remap k2u to spg failed %ld\n", ret_addr);
			__sp_free(spg, spa->va_start, spa_size(spa), mm);
			goto out;
		}
	}

out:
	up_read(&spg->rw_lock);
	if (!IS_ERR_VALUE(ret_addr))
		sp_update_process_stat(current, true, spa);
	__sp_area_drop(spa);

	return (void *)ret_addr;
}

static bool vmalloc_area_set_flag(unsigned long kva, unsigned long flags)
{
	struct vm_struct *area;

	area = find_vm_area((void *)kva);
	if (area) {
		area->flags |= flags;
		return true;
	}

	return false;
}

static int sp_k2u_prepare(unsigned long kva, unsigned long size,
	unsigned long sp_flags, int spg_id, struct sp_k2u_context *kc)
{
	int is_hugepage;
	unsigned int page_size = PAGE_SIZE;
	unsigned long kva_aligned, size_aligned;

	if (!size) {
		pr_err_ratelimited("k2u input size is 0.\n");
		return -EINVAL;
	}

	if (sp_flags & ~SP_FLAG_MASK) {
		pr_err_ratelimited("k2u sp_flags %lx error\n", sp_flags);
		return -EINVAL;
	}
	sp_flags &= ~SP_HUGEPAGE;

	if (!current->mm) {
		pr_err_ratelimited("k2u: kthread is not allowed\n");
		return -EPERM;
	}

	is_hugepage = is_vmap_hugepage(kva);
	if (is_hugepage > 0) {
		sp_flags |= SP_HUGEPAGE;
		page_size = PMD_SIZE;
	} else if (is_hugepage == 0) {
		/* do nothing */
	} else {
		pr_err_ratelimited("k2u kva is not vmalloc address\n");
		return is_hugepage;
	}

	/* aligned down kva is convenient for caller to start with any valid kva */
	kva_aligned = ALIGN_DOWN(kva, page_size);
	size_aligned = ALIGN(kva + size, page_size) - kva_aligned;

	if (!vmalloc_area_set_flag(kva_aligned, VM_SHAREPOOL)) {
		pr_debug("k2u_task kva %lx is not valid\n", kva_aligned);
		return -EINVAL;
	}

	kc->kva          = kva;
	kc->kva_aligned  = kva_aligned;
	kc->size         = size;
	kc->size_aligned = size_aligned;
	kc->sp_flags     = sp_flags;
	kc->type         = (spg_id == SPG_ID_DEFAULT || spg_id == SPG_ID_NONE)
				? SPA_TYPE_K2TASK : SPA_TYPE_K2SPG;

	return 0;
}

static void *sp_k2u_finish(void *uva, struct sp_k2u_context *kc)
{
	if (IS_ERR(uva))
		vmalloc_area_clr_flag(kc->kva_aligned, VM_SHAREPOOL);
	else
		uva = uva + (kc->kva - kc->kva_aligned);

	return uva;
}

/**
 * mg_sp_make_share_k2u() - Share kernel memory to current process or an sp_group.
 * @kva: the VA of shared kernel memory.
 * @size: the size of shared kernel memory.
 * @sp_flags: how to allocate the memory. We only support SP_DVPP.
 * @tgid:  the tgid of the specified process (Not currently in use).
 * @spg_id: the share group that the memory is shared to.
 *
 * Return: the shared target user address to start at
 *
 * Share kernel memory to current task if spg_id == SPG_ID_NONE
 * or SPG_ID_DEFAULT in multi-group mode.
 *
 * Return:
 * * on success, return the shared user address to start at.
 * * on failure, return an ERR_PTR-encoded -errno.
 */
void *mg_sp_make_share_k2u(unsigned long kva, unsigned long size,
			unsigned long sp_flags, int tgid, int spg_id)
{
	void *uva;
	int ret;
	struct sp_k2u_context kc;
	struct sp_group *spg;

	if (!sp_is_enabled())
		return ERR_PTR(-EOPNOTSUPP);

	check_interrupt_context();

	ret = sp_k2u_prepare(kva, size, sp_flags, spg_id, &kc);
	if (ret)
		return ERR_PTR(ret);

	if (kc.type == SPA_TYPE_K2TASK) {
		down_write(&sp_group_sem);
		ret = sp_init_group_master_locked(current, current->mm);
		up_write(&sp_group_sem);
		if (ret) {
			pr_err("k2u_task init local mapping failed %d\n", ret);
			uva = ERR_PTR(ret);
			goto out;
		}
		/* the caller could use SPG_ID_NONE */
		spg_id = SPG_ID_DEFAULT;
	}

	spg = sp_group_get(current->tgid, spg_id);
	if (spg) {
		ret = sp_check_caller_permission(spg, current->mm);
		if (ret < 0) {
			sp_group_put(spg);
			uva = ERR_PTR(ret);
			goto out;
		}
		uva = sp_make_share_kva_to_spg(&kc, spg);
		sp_group_put(spg);
	} else {
		uva = ERR_PTR(-ENODEV);
	}

out:
	return sp_k2u_finish(uva, &kc);
}
EXPORT_SYMBOL_GPL(mg_sp_make_share_k2u);
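
/*
 * A minimal k2u sketch (illustrative only; the vmalloc buffer and "spg_id"
 * are hypothetical). Only vmalloc-family kernel addresses can be shared,
 * which is what is_vmap_hugepage() and __sp_remap_get_pfn() rely on:
 *
 *	void *kva = vmalloc(SZ_2M);
 *	void *uva = mg_sp_make_share_k2u((unsigned long)kva, SZ_2M, 0,
 *					 current->tgid, spg_id);
 *	if (IS_ERR(uva))
 *		goto err_free_kva;
 *	... user space accesses uva ...
 *	mg_sp_unshare((unsigned long)uva, SZ_2M, spg_id);
 *	vfree(kva);
 */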

static int sp_pmd_entry(pmd_t *pmd, unsigned long addr,
			unsigned long next, struct mm_walk *walk)
{
	struct page *page;
	struct sp_walk_data *sp_walk_data = walk->private;

	/*
	 * There is a case in DVPP where the page table uses huge pages but the
	 * vma does not record it, similar to THP.
	 * So we cannot tell whether it is a hugepage mapping until we inspect
	 * the pmd here. If mixed page sizes appear, just return an error.
	 */
	if (pmd_huge(*pmd)) {
		if (!sp_walk_data->is_page_type_set) {
			sp_walk_data->is_page_type_set = true;
			sp_walk_data->is_hugepage = true;
		} else if (!sp_walk_data->is_hugepage) {
			return -EFAULT;
		}

		/* To skip pte level walk */
		walk->action = ACTION_CONTINUE;

		page = pmd_page(*pmd);
		get_page(page);
		sp_walk_data->pages[sp_walk_data->page_count++] = page;

		return 0;
	}

	if (!sp_walk_data->is_page_type_set) {
		sp_walk_data->is_page_type_set = true;
		sp_walk_data->is_hugepage = false;
	} else if (sp_walk_data->is_hugepage)
		return -EFAULT;

	sp_walk_data->pmd = pmd;

	return 0;
}

static int sp_pte_entry(pte_t *pte, unsigned long addr,
			unsigned long next, struct mm_walk *walk)
{
	struct page *page;
	struct sp_walk_data *sp_walk_data = walk->private;
	pmd_t *pmd = sp_walk_data->pmd;

retry:
	if (unlikely(!pte_present(*pte))) {
		swp_entry_t entry;

		if (pte_none(*pte))
			goto no_page;
		entry = pte_to_swp_entry(*pte);
		if (!is_migration_entry(entry))
			goto no_page;
		migration_entry_wait(walk->mm, pmd, addr);
		goto retry;
	}

	page = pte_page(*pte);
	get_page(page);
	sp_walk_data->pages[sp_walk_data->page_count++] = page;
	return 0;

no_page:
	pr_debug("the page of addr %lx unexpectedly not in RAM\n",
		 (unsigned long)addr);
	return -EFAULT;
}

static int sp_test_walk(unsigned long addr, unsigned long next,
			struct mm_walk *walk)
{
	/*
	 * FIXME: The devmm driver uses remap_pfn_range() but actually there
	 * are associated struct pages, so they should use vm_map_pages() or
	 * similar APIs. Before the driver has been converted to correct APIs
	 * we use this test_walk() callback so we can treat VM_PFNMAP VMAs as
	 * normal VMAs.
	 */
	return 0;
}

static int sp_pte_hole(unsigned long start, unsigned long end,
		       int depth, struct mm_walk *walk)
{
	pr_debug("hole [%lx, %lx) appeared unexpectedly\n", (unsigned long)start, (unsigned long)end);
	return -EFAULT;
}

static int sp_hugetlb_entry(pte_t *ptep, unsigned long hmask,
			    unsigned long addr, unsigned long next,
			    struct mm_walk *walk)
{
	pte_t pte = huge_ptep_get(ptep);
	struct page *page = pte_page(pte);
	struct sp_walk_data *sp_walk_data;

	if (unlikely(!pte_present(pte))) {
		pr_debug("the page of addr %lx unexpectedly not in RAM\n", (unsigned long)addr);
		return -EFAULT;
	}

	sp_walk_data = walk->private;
	get_page(page);
	sp_walk_data->pages[sp_walk_data->page_count++] = page;
	return 0;
}

/*
 * __sp_walk_page_range() - Walk page table with caller specific callbacks.
 * @uva: the start VA of user memory.
 * @size: the size of user memory.
 * @mm: mm struct of the target task.
 * @sp_walk_data: a structure of a page pointer array.
 *
 * the caller must hold mm->mmap_lock
 *
 * Notes for parameter alignment:
 * When size == 0, let it be page_size, so that at least one page is walked.
 *
 * When size > 0, for convenience, usually the parameters of uva and
 * size are not page aligned. There are four different alignment scenarios and
 * we must handle all of them correctly.
 *
 * The basic idea is to align down uva and align up size so all the pages
 * in range [uva, uva + size) are walked. However, there are special cases.
 *
 * Consider a 2M-hugepage address scenario. Assume the caller wants to
 * traverse the range [1001M, 1004.5M), so uva and size are 1001M and 3.5M
 * respectively. The aligned-down uva is 1000M and the aligned-up size is 4M.
 * The traverse range will be [1000M, 1004M). Obviously, the final page for
 * [1004M, 1004.5M) is not covered.
 *
 * To fix this problem, we need to walk an additional page, size should be
 * ALIGN(uva+size) - uva_aligned
 */
static int __sp_walk_page_range(unsigned long uva, unsigned long size,
	struct mm_struct *mm, struct sp_walk_data *sp_walk_data)
{
	int ret = 0;
	struct vm_area_struct *vma;
	unsigned long page_nr;
	struct page **pages = NULL;
	bool is_hugepage = false;
	unsigned long uva_aligned;
	unsigned long size_aligned;
	unsigned int page_size = PAGE_SIZE;
	struct mm_walk_ops sp_walk = {};

	/*
	 * Here we also support non share pool memory in this interface
	 * because the caller can't distinguish whether a uva is from the
	 * share pool or not. It is not the best idea to do so, but currently
	 * it simplifies overall design.
	 *
	 * In this situation, the correctness of the parameters is mainly
	 * guaranteed by the caller.
	 */
	vma = find_vma(mm, uva);
	if (!vma) {
		pr_debug("u2k input uva %lx is invalid\n", (unsigned long)uva);
		return -EINVAL;
	}
	if (is_vm_hugetlb_page(vma))
		is_hugepage = true;

	sp_walk.pte_hole = sp_pte_hole;
	sp_walk.test_walk = sp_test_walk;
	if (is_hugepage) {
		sp_walk_data->is_hugepage = true;
		sp_walk.hugetlb_entry = sp_hugetlb_entry;
		page_size = PMD_SIZE;
	} else {
		sp_walk_data->is_hugepage = false;
		sp_walk.pte_entry = sp_pte_entry;
		sp_walk.pmd_entry = sp_pmd_entry;
	}

	sp_walk_data->is_page_type_set = false;
	sp_walk_data->page_count = 0;
	sp_walk_data->page_size = page_size;
	uva_aligned = ALIGN_DOWN(uva, page_size);
	sp_walk_data->uva_aligned = uva_aligned;
	if (size == 0)
		size_aligned = page_size;
	else
		/* special alignment handling */
		size_aligned = ALIGN(uva + size, page_size) - uva_aligned;

	if (uva_aligned + size_aligned < uva_aligned) {
		pr_err_ratelimited("overflow happened in walk page range\n");
		return -EINVAL;
	}

	page_nr = size_aligned / page_size;
	pages = kvmalloc(page_nr * sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		pr_err_ratelimited("alloc page array failed in walk page range\n");
		return -ENOMEM;
	}
	sp_walk_data->pages = pages;

	ret = walk_page_range(mm, uva_aligned, uva_aligned + size_aligned,
			      &sp_walk, sp_walk_data);
	if (ret) {
		while (sp_walk_data->page_count--)
			put_page(pages[sp_walk_data->page_count]);
		kvfree(pages);
		sp_walk_data->pages = NULL;
	}

	if (sp_walk_data->is_hugepage)
		sp_walk_data->uva_aligned = ALIGN_DOWN(uva, PMD_SIZE);

	return ret;
}

static void __sp_walk_page_free(struct sp_walk_data *data)
{
	int i = 0;
	struct page *page;

	while (i < data->page_count) {
		page = data->pages[i++];
		put_page(page);
	}

	kvfree(data->pages);
	/* prevent repeated release */
	data->page_count = 0;
	data->pages = NULL;
}

/**
 * mg_sp_make_share_u2k() - Share user memory of a specified process to kernel.
 * @uva: the VA of shared user memory
 * @size: the size of shared user memory
 * @tgid: the tgid of the specified process(Not currently in use)
 *
 * Return:
 * * on success, return the starting kernel address of the shared memory.
 * * on failure, return an ERR_PTR-encoded -errno.
 */
void *mg_sp_make_share_u2k(unsigned long uva, unsigned long size, int tgid)
{
	int ret = 0;
	struct mm_struct *mm = current->mm;
	void *p = ERR_PTR(-ESRCH);
	struct sp_walk_data sp_walk_data;
	struct vm_struct *area;

	if (!sp_is_enabled())
		return ERR_PTR(-EOPNOTSUPP);

	check_interrupt_context();

	if (mm == NULL) {
		pr_err("u2k: kthread is not allowed\n");
		return ERR_PTR(-EPERM);
	}

	down_write(&mm->mmap_lock);
	if (unlikely(mm->core_state)) {
		up_write(&mm->mmap_lock);
		pr_err("u2k: encountered coredump, abort\n");
		return p;
	}

	ret = __sp_walk_page_range(uva, size, mm, &sp_walk_data);
	if (ret) {
		pr_err_ratelimited("walk page range failed %d\n", ret);
		up_write(&mm->mmap_lock);
		return ERR_PTR(ret);
	}

	if (sp_walk_data.is_hugepage)
		p = vmap_hugepage(sp_walk_data.pages, sp_walk_data.page_count,
				  VM_MAP, PAGE_KERNEL);
	else
		p = vmap(sp_walk_data.pages, sp_walk_data.page_count, VM_MAP,
			 PAGE_KERNEL);
	up_write(&mm->mmap_lock);

	if (!p) {
		pr_err("vmap(huge) in u2k failed\n");
		__sp_walk_page_free(&sp_walk_data);
		return ERR_PTR(-ENOMEM);
	}

	p = p + (uva - sp_walk_data.uva_aligned);

	/*
	 * kva p may be used later in k2u. Since p comes from uva originally,
	 * it's reasonable to add flag VM_USERMAP so that p can be remapped
	 * into userspace again.
	 */
	area = find_vm_area(p);
	area->flags |= VM_USERMAP;

	kvfree(sp_walk_data.pages);
	return p;
}
EXPORT_SYMBOL_GPL(mg_sp_make_share_u2k);
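
/*
 * A minimal u2k sketch (illustrative only; "uva" and "size" are assumed to
 * describe a valid range in the calling process, e.g. one returned by
 * mg_sp_alloc()):
 *
 *	void *kva = mg_sp_make_share_u2k(uva, size, current->tgid);
 *
 *	if (IS_ERR(kva))
 *		return PTR_ERR(kva);
 *	... the kernel reads or writes kva directly ...
 *	mg_sp_unshare((unsigned long)kva, size, SPG_ID_NONE);
 *
 * Unsharing a kernel address goes through sp_unshare_kva(), which drops the
 * page references taken by the walk and vunmap()s the range.
 */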

/*
 * sp_unshare_uva - unshare a uva from sp_make_share_k2u
 * @uva: the uva to be unshared
 * @size: only used for a sanity check against the size of the spa
 * @group_id: specify the spg of the uva; for local group, it can be SPG_ID_DEFAULT
 *            unless current process is exiting.
 *
 * Procedure of unshare uva must be compatible with:
 *
 * 1. DVPP channel destroy procedure:
 * do_exit() -> exit_mm() (mm no longer in spg and current->mm == NULL) ->
 * exit_task_work() -> task_work_run() -> __fput() -> ... -> vdec_close() ->
 * sp_unshare(uva, local_spg_id)
 */
static int sp_unshare_uva(unsigned long uva, unsigned long size, int group_id)
{
	int ret = 0;
	struct sp_area *spa;
	unsigned int page_size;
	struct sp_group *spg;

	spg = sp_group_get(current->tgid, group_id);
	if (!spg) {
		pr_err("sp unshare find group failed %d\n", group_id);
		return -EINVAL;
	}

	/* All the spa are aligned to 2M. */
	spa = get_sp_area(spg, ALIGN_DOWN(uva, PMD_SIZE));
	if (!spa) {
		ret = -EINVAL;
		pr_err("invalid input uva %lx in unshare uva\n", (unsigned long)uva);
		goto out;
	}

	if (spa->type != SPA_TYPE_K2TASK && spa->type != SPA_TYPE_K2SPG) {
		pr_err("unshare wrong type spa\n");
		ret = -EINVAL;
		goto out_drop_area;
	}
	/*
	 * 1. overflow actually won't happen due to an spa must be valid.
	 * 2. we must unshare [spa->va_start, spa->va_start + spa->real_size) completely
	 *    because an spa is one-to-one correspondence with an vma.
	 *    Thus input parameter size is not necessarily needed.
	 */
	page_size = (spa->is_hugepage ? PMD_SIZE : PAGE_SIZE);

	if (spa->real_size < ALIGN(size, page_size)) {
		ret = -EINVAL;
		pr_err("unshare uva failed, invalid parameter size %lu\n", size);
		goto out_drop_area;
	}

	down_read(&spa->spg->rw_lock);
	/* always allow dvpp channel destroy procedure */
	if (current->mm && !is_process_in_group(spa->spg, current->mm)) {
		up_read(&spa->spg->rw_lock);
		pr_err("unshare uva failed, caller process doesn't belong to target group\n");
		ret = -EPERM;
		goto out_drop_area;
	}
	up_read(&spa->spg->rw_lock);

	down_write(&spa->spg->rw_lock);
	if (!spg_valid(spa->spg)) {
		up_write(&spa->spg->rw_lock);
		pr_info("no need to unshare uva, sp group of spa is dead\n");
		goto out_clr_flag;
	}
	/* the life cycle of spa has a direct relation with sp group */
	if (unlikely(spa->is_dead)) {
		up_write(&spa->spg->rw_lock);
		pr_err("unexpected double sp unshare\n");
		dump_stack();
		ret = -EINVAL;
		goto out_drop_area;
	}
	spa->is_dead = true;
	up_write(&spa->spg->rw_lock);

	down_read(&spa->spg->rw_lock);
	__sp_free(spa->spg, spa->va_start, spa->real_size, NULL);
	up_read(&spa->spg->rw_lock);

	if (current->mm == NULL)
		atomic64_sub(spa->real_size, &kthread_stat.k2u_size);
	else
		sp_update_process_stat(current, false, spa);

out_clr_flag:
	if (!vmalloc_area_clr_flag(spa->kva, VM_SHAREPOOL))
		pr_info("clear spa->kva %ld is not valid\n", spa->kva);
	spa->kva = 0;

out_drop_area:
	__sp_area_drop(spa);
out:
	sp_group_put(spg);
	return ret;
}

/* No possible concurrent protection, take care when use */
static int sp_unshare_kva(unsigned long kva, unsigned long size)
{
	unsigned long addr, kva_aligned;
	struct page *page;
	unsigned long size_aligned;
	unsigned long step;
	bool is_hugepage = true;
	int ret;

	ret = is_vmap_hugepage(kva);
	if (ret > 0) {
		kva_aligned = ALIGN_DOWN(kva, PMD_SIZE);
		size_aligned = ALIGN(kva + size, PMD_SIZE) - kva_aligned;
		step = PMD_SIZE;
	} else if (ret == 0) {
		kva_aligned = ALIGN_DOWN(kva, PAGE_SIZE);
		size_aligned = ALIGN(kva + size, PAGE_SIZE) - kva_aligned;
		step = PAGE_SIZE;
		is_hugepage = false;
	} else {
		pr_err_ratelimited("check vmap hugepage failed %d\n", ret);
		return -EINVAL;
	}

	if (kva_aligned + size_aligned < kva_aligned) {
		pr_err_ratelimited("overflow happened in unshare kva\n");
		return -EINVAL;
	}

	for (addr = kva_aligned; addr < (kva_aligned + size_aligned); addr += step) {
		page = vmalloc_to_page((void *)addr);
		if (page)
			put_page(page);
		else
			WARN(1, "vmalloc %pK to page/hugepage failed\n",
			       (void *)addr);
	}

	vunmap((void *)kva_aligned);

	return 0;
}

/**
 * mg_sp_unshare() - Unshare the kernel or user memory which shared by calling
 *                sp_make_share_{k2u,u2k}().
 * @va: the specified virtual address of memory
 * @size: the size of unshared memory
 *
 * Use spg_id of current thread if spg_id == SPG_ID_DEFAULT.
 *
 * Return: 0 for success, -errno on failure.
 */
int mg_sp_unshare(unsigned long va, unsigned long size, int spg_id)
{
	int ret = 0;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	if (current->flags & PF_KTHREAD)
		return -EINVAL;

	if (va < TASK_SIZE) {
		/* user address */
		ret = sp_unshare_uva(va, size, spg_id);
	} else if (va >= PAGE_OFFSET) {
		/* kernel address */
		ret = sp_unshare_kva(va, size);
	} else {
		/* addresses outside both the user and kernel ranges are bad */
		pr_debug("unshare addr %lx is not a user or kernel addr\n", (unsigned long)va);
		ret = -EFAULT;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_unshare);

/**
 * mg_sp_walk_page_range() - Walk page table with caller specific callbacks.
 * @uva: the start VA of user memory.
 * @size: the size of user memory.
 * @tsk: task struct of the target task.
 * @sp_walk_data: a structure of a page pointer array.
 *
 * Return: 0 for success, -errno on failure.
 *
 * When return 0, sp_walk_data describing [uva, uva+size) can be used.
 * When return -errno, information in sp_walk_data is useless.
 */
int mg_sp_walk_page_range(unsigned long uva, unsigned long size,
	struct task_struct *tsk, struct sp_walk_data *sp_walk_data)
{
	struct mm_struct *mm;
	int ret = 0;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	if (unlikely(!sp_walk_data)) {
		pr_err_ratelimited("null pointer when walk page range\n");
		return -EINVAL;
	}
	if (!tsk || (tsk->flags & PF_EXITING))
		return -ESRCH;

	get_task_struct(tsk);
	mm = get_task_mm(tsk);
	if (!mm) {
		put_task_struct(tsk);
		return -ESRCH;
	}

	down_write(&mm->mmap_lock);
	if (likely(!mm->core_state)) {
		ret = __sp_walk_page_range(uva, size, mm, sp_walk_data);
	} else {
		pr_err("walk page range: encoutered coredump\n");
		ret = -ESRCH;
	}
	up_write(&mm->mmap_lock);

	mmput(mm);
	put_task_struct(tsk);

	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_walk_page_range);
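
/*
 * A minimal sketch of walking and releasing the pages of a target task
 * (illustrative only; "tsk", "uva" and "size" are hypothetical and must
 * describe a live task and a valid range within it):
 *
 *	struct sp_walk_data data = { 0 };
 *	int err = mg_sp_walk_page_range(uva, size, tsk, &data);
 *
 *	if (err)
 *		return err;
 *	... data.pages[0 .. data.page_count - 1] hold extra references ...
 *	mg_sp_walk_page_free(&data);
 */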

/**
 * mg_sp_walk_page_free() - Free the sp_walk_data structure.
 * @sp_walk_data: a structure of a page pointer array to be freed.
 */
void mg_sp_walk_page_free(struct sp_walk_data *sp_walk_data)
{
	if (!sp_is_enabled())
		return;

3508 3509 3510 3511 3512 3513
	check_interrupt_context();

	if (!sp_walk_data)
		return;

	__sp_walk_page_free(sp_walk_data);
}
EXPORT_SYMBOL_GPL(mg_sp_walk_page_free);

int sp_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&sp_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(sp_register_notifier);

int sp_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&sp_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(sp_unregister_notifier);

static bool is_sp_dynamic_dvpp_addr(unsigned long addr);
/**
 * mg_sp_config_dvpp_range() - User can config the share pool start address
 *                          of each Da-vinci device.
 * @start: the value of share pool start
 * @size: the value of share pool
 * @device_id: the num of Da-vinci device
 * @tgid: the tgid of device process
 *
 * Return true on success.
 * Return false if a parameter is invalid or the range has already been set up.
 * This function has no concurrency problem.
 */
bool mg_sp_config_dvpp_range(size_t start, size_t size, int device_id, int tgid)
{
	int ret;
	bool err = false;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct sp_group *spg;
	struct sp_mapping *spm;
	unsigned long default_start;

	if (!sp_is_enabled())
		return false;

	/* NOTE: check the start address */
	if (tgid < 0 || size <= 0 || size > MMAP_SHARE_POOL_16G_SIZE ||
	    device_id < 0 || device_id >= MAX_DEVID || !is_online_node_id(device_id)
		|| !is_sp_dynamic_dvpp_addr(start) || !is_sp_dynamic_dvpp_addr(start + size - 1))
		return false;

	ret = get_task(tgid, &tsk);
	if (ret)
		return false;

	mm = get_task_mm(tsk->group_leader);
	if (!mm)
		goto put_task;

	spg = sp_get_local_group(tsk, mm);
	if (IS_ERR(spg))
		goto put_mm;

	spm = spg->mapping[SP_MAPPING_DVPP];
	default_start = MMAP_SHARE_POOL_DVPP_START + device_id * MMAP_SHARE_POOL_16G_SIZE;
	/* The dvpp range of each group can be configured only once */
	if (spm->start[device_id] != default_start)
		goto put_spg;

	spm->start[device_id] = start;
	spm->end[device_id] = start + size;

	err = true;

put_spg:
	sp_group_put(spg);
put_mm:
	mmput(mm);
put_task:
	put_task_struct(tsk);

	return err;
}
EXPORT_SYMBOL_GPL(mg_sp_config_dvpp_range);
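
/*
 * A minimal configuration sketch (illustrative only; "dvpp_start" and "tgid"
 * are hypothetical and must pass the is_sp_dynamic_dvpp_addr() checks):
 *
 *	if (!mg_sp_config_dvpp_range(dvpp_start, SZ_4G, 0, tgid))
 *		pr_err("dvpp range config rejected\n");
 *
 * The DVPP range of a group can be configured only once: a second call for
 * the same device_id returns false because spm->start[device_id] no longer
 * equals its default value.
 */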

static bool is_sp_reserve_addr(unsigned long addr)
{
	return addr >= MMAP_SHARE_POOL_START && addr < MMAP_SHARE_POOL_END;
}

/*
 *	| 16G host | 16G device | ... |     |
 *	^
 *	|
 *	MMAP_SHARE_POOL_DVPP_BASE + 16G * 64
 *	We only check the device regions.
 */
static bool is_sp_dynamic_dvpp_addr(unsigned long addr)
{
	if (addr < MMAP_SHARE_POOL_DYNAMIC_DVPP_BASE || addr >= MMAP_SHARE_POOL_DYNAMIC_DVPP_END)
		return false;

	return (addr - MMAP_SHARE_POOL_DYNAMIC_DVPP_BASE) & MMAP_SHARE_POOL_16G_SIZE;
}

/**
 * mg_is_sharepool_addr() - Check if a user memory address belongs to share pool.
 * @addr: the userspace address to be checked.
 *
 * Return true if addr belongs to share pool, or false vice versa.
 */
bool mg_is_sharepool_addr(unsigned long addr)
{
	return sp_is_enabled() &&
		((is_sp_reserve_addr(addr) || is_sp_dynamic_dvpp_addr(addr)));
}
EXPORT_SYMBOL_GPL(mg_is_sharepool_addr);

int sp_node_id(struct vm_area_struct *vma)
{
	struct sp_area *spa;
	int node_id = numa_node_id();

	if (!sp_is_enabled())
		return node_id;

	if (vma && (vma->vm_flags & VM_SHARE_POOL) && vma->vm_private_data) {
		spa = vma->vm_private_data;
		node_id = spa->node_id;
	}

	return node_id;
}

/*** Statistical and maintenance functions ***/

static void get_mm_rss_info(struct mm_struct *mm, unsigned long *anon,
	unsigned long *file, unsigned long *shmem, unsigned long *total_rss)
{
	*anon = get_mm_counter(mm, MM_ANONPAGES);
	*file = get_mm_counter(mm, MM_FILEPAGES);
	*shmem = get_mm_counter(mm, MM_SHMEMPAGES);
	*total_rss = *anon + *file + *shmem;
}

static long get_proc_k2u(struct sp_proc_stat *stat)
{
	return byte2kb(atomic64_read(&stat->k2u_size));
}

static long get_proc_alloc(struct sp_proc_stat *stat)
{
	return byte2kb(atomic64_read(&stat->alloc_nsize) +
			atomic64_read(&stat->alloc_hsize));
}

static void get_process_sp_res(struct sp_group_master *master,
		long *sp_res_out, long *sp_res_nsize_out)
{
	struct sp_group *spg;
	struct sp_group_node *spg_node;

	*sp_res_out = 0;
	*sp_res_nsize_out = 0;

	list_for_each_entry(spg_node, &master->node_list, group_node) {
		spg = spg_node->spg;
		*sp_res_out += byte2kb(atomic64_read(&spg->instat.alloc_nsize));
		*sp_res_out += byte2kb(atomic64_read(&spg->instat.alloc_hsize));
		*sp_res_nsize_out += byte2kb(atomic64_read(&spg->instat.alloc_nsize));
	}
}

static long get_sp_res_by_spg_proc(struct sp_group_node *spg_node)
{
	return byte2kb(atomic64_read(&spg_node->spg->instat.alloc_nsize) +
			atomic64_read(&spg_node->spg->instat.alloc_hsize));
}

/*
 *  RSS statistics have a maximum deviation of 64 pages (256KB).
 *  Please see check_sync_rss_stat().
 */
static void get_process_non_sp_res(unsigned long total_rss, unsigned long shmem,
	long sp_res_nsize, long *non_sp_res_out, long *non_sp_shm_out)
{
	long non_sp_res, non_sp_shm;

	non_sp_res = page2kb(total_rss) - sp_res_nsize;
	non_sp_res = non_sp_res < 0 ? 0 : non_sp_res;
	non_sp_shm = page2kb(shmem) - sp_res_nsize;
	non_sp_shm = non_sp_shm < 0 ? 0 : non_sp_shm;

	*non_sp_res_out = non_sp_res;
	*non_sp_shm_out = non_sp_shm;
}

static long get_spg_proc_alloc(struct sp_group_node *spg_node)
{
	return byte2kb(atomic64_read(&spg_node->instat.alloc_nsize) +
				atomic64_read(&spg_node->instat.alloc_hsize));
3711 3712
}

static long get_spg_proc_k2u(struct sp_group_node *spg_node)
{
	return byte2kb(atomic64_read(&spg_node->instat.k2u_size));
}

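/* Render a group member's mapping protection as "R", "RW" or "-". */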
static void print_process_prot(struct seq_file *seq, unsigned long prot)
{
	if (prot == PROT_READ)
		seq_puts(seq, "R");
	else if (prot == (PROT_READ | PROT_WRITE))
		seq_puts(seq, "RW");
	else
		seq_puts(seq, "-");
}

int proc_sp_group_state(struct seq_file *m, struct pid_namespace *ns,
			struct pid *pid, struct task_struct *task)
{
	struct mm_struct *mm;
	struct sp_group_master *master;
	struct sp_proc_stat *proc_stat;
	struct sp_group_node *spg_node;
	unsigned long anon, file, shmem, total_rss;
	long sp_res, sp_res_nsize, non_sp_res, non_sp_shm;

	if (!sp_is_enabled())
		return 0;

	mm = get_task_mm(task);
	if (!mm)
		return 0;

	down_read(&sp_group_sem);
	down_read(&mm->mmap_lock);
	master = mm->sp_group_master;
	if (!master)
		goto out;

	get_mm_rss_info(mm, &anon, &file, &shmem, &total_rss);
	proc_stat = &master->instat;
	get_process_sp_res(master, &sp_res, &sp_res_nsize);
	get_process_non_sp_res(total_rss, shmem, sp_res_nsize,
			       &non_sp_res, &non_sp_shm);

	seq_puts(m, "Share Pool Aggregate Data of This Process\n\n");
	seq_printf(m, "%-8s %-16s %-9s %-9s %-9s %-10s %-10s %-8s\n",
		   "PID", "COMM", "SP_ALLOC", "SP_K2U", "SP_RES", "Non-SP_RES",
		   "Non-SP_Shm", "VIRT");
	seq_printf(m, "%-8d %-16s %-9ld %-9ld %-9ld %-10ld %-10ld %-8ld\n",
		   proc_stat->tgid, proc_stat->comm,
		   get_proc_alloc(proc_stat),
		   get_proc_k2u(proc_stat),
		   sp_res, non_sp_res, non_sp_shm,
		   page2kb(mm->total_vm));

	seq_puts(m, "\n\nProcess in Each SP Group\n\n");
	seq_printf(m, "%-8s %-9s %-9s %-9s %-4s\n",
			"Group_ID", "SP_ALLOC", "SP_K2U", "SP_RES", "PROT");

	list_for_each_entry(spg_node, &master->node_list, group_node) {
		seq_printf(m, "%-8d %-9ld %-9ld %-9ld ",
				spg_node->spg->id,
				get_spg_proc_alloc(spg_node),
				get_spg_proc_k2u(spg_node),
				get_sp_res_by_spg_proc(spg_node));
		print_process_prot(m, spg_node->prot);
		seq_putc(m, '\n');
	}

out:
	up_read(&mm->mmap_lock);
	up_read(&sp_group_sem);
	mmput(mm);
	return 0;
}

static void spa_stat_of_mapping_show(struct seq_file *seq, struct sp_mapping *spm)
{
	struct rb_node *node;
	struct sp_area *spa, *prev = NULL;

	spin_lock(&sp_area_lock);
	for (node = rb_first(&spm->area_root); node; node = rb_next(node)) {
		__sp_area_drop_locked(prev);

		spa = rb_entry(node, struct sp_area, rb_node);
		prev = spa;
		atomic_inc(&spa->use_count);
		spin_unlock(&sp_area_lock);

		if (spg_valid(spa->spg))  /* k2u to group */
			seq_printf(seq, "%-10d ", spa->spg->id);
		else  /* spg is dead */
			seq_printf(seq, "%-10s ", "Dead");

		seq_printf(seq, "%2s%-14lx %2s%-14lx %-10ld ",
			   "0x", spa->va_start,
			   "0x", spa->va_end,
			   byte2kb(spa->real_size));

		switch (spa->type) {
		case SPA_TYPE_ALLOC:
			seq_printf(seq, "%-7s ", "ALLOC");
			break;
		case SPA_TYPE_K2TASK:
			seq_printf(seq, "%-7s ", "TASK");
			break;
		case SPA_TYPE_K2SPG:
			seq_printf(seq, "%-7s ", "SPG");
			break;
		default:
			/* usually impossible, perhaps a developer's mistake */
			break;
		}

		if (spa->is_hugepage)
			seq_printf(seq, "%-5s ", "Y");
		else
			seq_printf(seq, "%-5s ", "N");

		seq_printf(seq, "%-8d ",  spa->applier);
		seq_printf(seq, "%-8d\n", atomic_read(&spa->use_count));

		spin_lock(&sp_area_lock);
	}
	__sp_area_drop_locked(prev);
	spin_unlock(&sp_area_lock);
}

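/*
 * The helpers below dump the sp_areas of the global RO and normal mappings
 * and of every DVPP mapping; they all feed /proc/sharepool/spa_stat.
 */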
static void spa_ro_stat_show(struct seq_file *seq)
{
	spa_stat_of_mapping_show(seq, sp_mapping_ro);
}

static void spa_normal_stat_show(struct seq_file *seq)
{
	spa_stat_of_mapping_show(seq, sp_mapping_normal);
}

static void spa_dvpp_stat_show(struct seq_file *seq)
{
	struct sp_mapping *spm;

	mutex_lock(&spm_list_lock);
	list_for_each_entry(spm, &spm_dvpp_list, spm_node)
		spa_stat_of_mapping_show(seq, spm);
	mutex_unlock(&spm_list_lock);
}

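/*
 * Print the global sp_area counters. The counters are copied under
 * sp_area_lock so that one consistent snapshot is printed.
 */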
void spa_overview_show(struct seq_file *seq)
{
	unsigned int total_num, alloc_num, k2u_task_num, k2u_spg_num;
	unsigned long total_size, alloc_size, k2u_task_size, k2u_spg_size;
	unsigned long dvpp_size, dvpp_va_size;

	if (!sp_is_enabled())
		return;

	spin_lock(&sp_area_lock);
	total_num     = spa_stat.total_num;
	alloc_num     = spa_stat.alloc_num;
	k2u_task_num  = spa_stat.k2u_task_num;
	k2u_spg_num   = spa_stat.k2u_spg_num;
	total_size    = spa_stat.total_size;
	alloc_size    = spa_stat.alloc_size;
	k2u_task_size = spa_stat.k2u_task_size;
	k2u_spg_size  = spa_stat.k2u_spg_size;
	dvpp_size     = spa_stat.dvpp_size;
	dvpp_va_size  = spa_stat.dvpp_va_size;
	spin_unlock(&sp_area_lock);

	SEQ_printf(seq, "Spa total num %u.\n", total_num);
	SEQ_printf(seq, "Spa alloc num %u, k2u(task) num %u, k2u(spg) num %u.\n",
		   alloc_num, k2u_task_num, k2u_spg_num);
	SEQ_printf(seq, "Spa total size:     %13lu KB\n", byte2kb(total_size));
	SEQ_printf(seq, "Spa alloc size:     %13lu KB\n", byte2kb(alloc_size));
	SEQ_printf(seq, "Spa k2u(task) size: %13lu KB\n", byte2kb(k2u_task_size));
	SEQ_printf(seq, "Spa k2u(spg) size:  %13lu KB\n", byte2kb(k2u_spg_size));
	SEQ_printf(seq, "Spa dvpp size:      %13lu KB\n", byte2kb(dvpp_size));
	SEQ_printf(seq, "Spa dvpp va size:   %13lu MB\n", byte2mb(dvpp_va_size));
	SEQ_printf(seq, "\n");
}

static int spg_info_show(int id, void *p, void *data)
{
	struct sp_group *spg = p;
	struct seq_file *seq = data;

	if (id >= SPG_ID_LOCAL_MIN && id <= SPG_ID_LOCAL_MAX)
		return 0;

	SEQ_printf(seq, "Group %6d ", id);

	down_read(&spg->rw_lock);
	SEQ_printf(seq, "size: %lld KB, spa num: %d, total alloc: %lld KB, normal alloc: %lld KB, huge alloc: %lld KB\n",
			byte2kb(atomic64_read(&spg->instat.size)),
			atomic_read(&spg->instat.spa_num),
			byte2kb(atomic64_read(&spg->instat.alloc_size)),
			byte2kb(atomic64_read(&spg->instat.alloc_nsize)),
			byte2kb(atomic64_read(&spg->instat.alloc_hsize)));
	up_read(&spg->rw_lock);

	return 0;
}

void spg_overview_show(struct seq_file *seq)
{
	if (!sp_is_enabled())
		return;

	SEQ_printf(seq, "Share pool total size: %lld KB, spa total num: %d.\n",
			byte2kb(atomic64_read(&sp_overall_stat.spa_total_size)),
			atomic_read(&sp_overall_stat.spa_total_num));

	down_read(&sp_group_sem);
	idr_for_each(&sp_group_idr, spg_info_show, seq);
	up_read(&sp_group_sem);

	SEQ_printf(seq, "\n");
}

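/* Only CAP_SYS_ADMIN tasks in the init pid namespace may read the statistics files. */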
static bool should_show_statistics(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return false;

	if (task_active_pid_ns(current) != &init_pid_ns)
		return false;

	return true;
}

static int spa_stat_show(struct seq_file *seq, void *offset)
{
	if (!should_show_statistics())
		return -EPERM;

	spg_overview_show(seq);
	spa_overview_show(seq);
	/* print the file header */
	seq_printf(seq, "%-10s %-16s %-16s %-10s %-7s %-5s %-8s %-8s\n",
			"Group ID", "va_start", "va_end", "Size(KB)", "Type", "Huge", "PID", "Ref");
	spa_ro_stat_show(seq);
	spa_normal_stat_show(seq);
	spa_dvpp_stat_show(seq);
	return 0;
}

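/*
 * idr callback for /proc/sharepool/proc_stat: print one line for every
 * process that is a member of group @id.
 */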
static int proc_usage_by_group(int id, void *p, void *data)
{
	struct sp_group *spg = p;
	struct seq_file *seq = data;
	struct sp_group_node *spg_node;
	struct mm_struct *mm;
	struct sp_group_master *master;
	int tgid;
	unsigned long anon, file, shmem, total_rss;

	down_read(&spg->rw_lock);
	list_for_each_entry(spg_node, &spg->procs, proc_node) {
		master = spg_node->master;
		mm = master->mm;
		tgid = master->instat.tgid;

		get_mm_rss_info(mm, &anon, &file, &shmem, &total_rss);

		seq_printf(seq, "%-8d ", tgid);
		seq_printf(seq, "%-8d ", id);
		seq_printf(seq, "%-9ld %-9ld %-9ld %-8ld %-7ld %-7ld ",
				get_spg_proc_alloc(spg_node),
				get_spg_proc_k2u(spg_node),
				get_sp_res_by_spg_proc(spg_node),
				page2kb(mm->total_vm), page2kb(total_rss),
				page2kb(shmem));
		print_process_prot(seq, spg_node->prot);
		seq_putc(seq, '\n');
	}
	up_read(&spg->rw_lock);
	cond_resched();

	return 0;
}

static int proc_group_usage_show(struct seq_file *seq, void *offset)
{
	if (!should_show_statistics())
		return -EPERM;

	spg_overview_show(seq);
	spa_overview_show(seq);

	/* print the file header */
	seq_printf(seq, "%-8s %-8s %-9s %-9s %-9s %-8s %-7s %-7s %-4s\n",
			"PID", "Group_ID", "SP_ALLOC", "SP_K2U", "SP_RES",
			"VIRT", "RES", "Shm", "PROT");
	/* print kthread buff_module_guard_work */
	seq_printf(seq, "%-8s %-8s %-9lld %-9lld\n",
			"guard", "-",
			byte2kb(atomic64_read(&kthread_stat.alloc_size)),
			byte2kb(atomic64_read(&kthread_stat.k2u_size)));

	down_read(&sp_group_sem);
	idr_for_each(&sp_group_idr, proc_usage_by_group, seq);
	up_read(&sp_group_sem);

	return 0;
}

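/* Implements /proc/sharepool/proc_overview: one summary line per process that uses the share pool. */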
static int proc_usage_show(struct seq_file *seq, void *offset)
{
	struct sp_group_master *master = NULL;
	unsigned long anon, file, shmem, total_rss;
	long sp_res, sp_res_nsize, non_sp_res, non_sp_shm;
	struct sp_proc_stat *proc_stat;

	if (!should_show_statistics())
		return -EPERM;

	seq_printf(seq, "%-8s %-16s %-9s %-9s %-9s %-10s %-10s %-8s\n",
			"PID", "COMM", "SP_ALLOC", "SP_K2U", "SP_RES", "Non-SP_RES",
			"Non-SP_Shm", "VIRT");

	down_read(&sp_group_sem);
	mutex_lock(&master_list_lock);
	list_for_each_entry(master, &master_list, list_node) {
		proc_stat = &master->instat;
		get_mm_rss_info(master->mm, &anon, &file, &shmem, &total_rss);
		get_process_sp_res(master, &sp_res, &sp_res_nsize);
		get_process_non_sp_res(total_rss, shmem, sp_res_nsize,
				&non_sp_res, &non_sp_shm);
		seq_printf(seq, "%-8d %-16s %-9ld %-9ld %-9ld %-10ld %-10ld %-8ld\n",
				proc_stat->tgid, proc_stat->comm,
				get_proc_alloc(proc_stat),
				get_proc_k2u(proc_stat),
				sp_res, non_sp_res, non_sp_shm,
				page2kb(master->mm->total_vm));
	}
	mutex_unlock(&master_list_lock);
	up_read(&sp_group_sem);

	return 0;
}

static void __init proc_sharepool_init(void)
{
	if (!proc_mkdir("sharepool", NULL))
		return;

	proc_create_single_data("sharepool/spa_stat", 0400, NULL, spa_stat_show, NULL);
	proc_create_single_data("sharepool/proc_stat", 0400, NULL, proc_group_usage_show, NULL);
	proc_create_single_data("sharepool/proc_overview", 0400, NULL, proc_usage_show, NULL);
}

/*** End of statistical and maintenance functions ***/

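/*
 * Helpers for address checks: return true when @addr lies in the share pool
 * range and the caller is not an AOS core process (and, for the mmap variant,
 * did not pass MAP_SHARE_POOL).
 */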
bool sp_check_addr(unsigned long addr)
{
	return sp_is_enabled() && mg_is_sharepool_addr(addr) &&
	       !check_aoscore_process(current);
}

bool sp_check_mmap_addr(unsigned long addr, unsigned long flags)
{
	return sp_is_enabled() && mg_is_sharepool_addr(addr) &&
	       !check_aoscore_process(current) && !(flags & MAP_SHARE_POOL);
}

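/*
 * Hugetlb fault handler for share pool VMAs: look the page up in the page
 * cache and, if it is absent, allocate a huge page on the sp_area's NUMA
 * node and map it shared.
 */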
vm_fault_t sharepool_no_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			struct address_space *mapping, pgoff_t idx,
			unsigned long address, pte_t *ptep, unsigned int flags)
{
	struct hstate *h = hstate_vma(vma);
	vm_fault_t ret = VM_FAULT_SIGBUS;
	unsigned long size;
	struct page *page;
	pte_t new_pte;
	spinlock_t *ptl;
	unsigned long haddr = address & huge_page_mask(h);
	bool new_page = false;
	int err;
	int node_id;
	struct sp_area *spa;

	spa = vma->vm_private_data;
	if (!spa) {
		pr_err("share pool: vma is invalid, not from sp mmap\n");
		return ret;
	}
	node_id = spa->node_id;

retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		if (idx >= size)
			goto out;

		page = alloc_huge_page(vma, haddr, 0);
		if (IS_ERR(page)) {
			page = hugetlb_alloc_hugepage(node_id,
					HUGETLB_ALLOC_BUDDY | HUGETLB_ALLOC_NORECLAIM);
			if (!page)
				page = ERR_PTR(-ENOMEM);
		}
		if (IS_ERR(page)) {
			ptl = huge_pte_lock(h, mm, ptep);
			if (!huge_pte_none(huge_ptep_get(ptep))) {
				ret = 0;
				spin_unlock(ptl);
				goto out;
			}
			spin_unlock(ptl);
			ret = vmf_error(PTR_ERR(page));
			goto out;
		}
		__SetPageUptodate(page);
		new_page = true;

		/* sharepool pages are all shared */
		err = huge_add_to_page_cache(page, mapping, idx);
		if (err) {
			put_page(page);
			if (err == -EEXIST)
				goto retry;
			goto out;
		}
	}

	ptl = huge_pte_lock(h, mm, ptep);
	size = i_size_read(mapping->host) >> huge_page_shift(h);
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!huge_pte_none(huge_ptep_get(ptep)))
		goto backout;

	page_dup_rmap(page, true);
	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, haddr, ptep, new_pte);

	hugetlb_count_add(pages_per_huge_page(h), mm);

	spin_unlock(ptl);

	if (new_page)
		SetPagePrivate(&page[1]);

	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(ptl);
	unlock_page(page);
	put_page(page);
	goto out;
}

/*
 * The caller must ensure that this function is called
 * when the last thread in the thread group exits.
 */
int sp_group_exit(void)
{
	struct mm_struct *mm;
	struct sp_group *spg;
	struct sp_group_master *master;
	struct sp_group_node *spg_node, *tmp;
	bool is_alive = true;

	if (!sp_is_enabled())
		return 0;

	if (current->flags & PF_KTHREAD)
		return 0;

	mm = current->mm;
	down_write(&sp_group_sem);

	master = mm->sp_group_master;
	if (!master) {
		up_write(&sp_group_sem);
		return 0;
	}

	list_for_each_entry_safe(spg_node, tmp, &master->node_list, group_node) {
		spg = spg_node->spg;

		down_write(&spg->rw_lock);
		/* a dead group must NOT be reactivated */
		if (spg_valid(spg) && list_is_singular(&spg->procs))
			is_alive = spg->is_alive = false;
		spg->proc_num--;
		list_del(&spg_node->proc_node);
		up_write(&spg->rw_lock);

		if (!is_alive)
			blocking_notifier_call_chain(&sp_notifier_chain, 0,
						     spg);
	}

	/* match with get_task_mm() in sp_group_add_task() */
	if (atomic_sub_and_test(master->count, &mm->mm_users)) {
		up_write(&sp_group_sem);
		WARN(1, "Invalid user counting\n");
		return 1;
	}

	up_write(&sp_group_sem);
	return 0;
}

void sp_group_post_exit(struct mm_struct *mm)
{
	struct sp_proc_stat *stat;
	long alloc_size, k2u_size;
	/* lockless visit */
	struct sp_group_master *master = mm->sp_group_master;
	struct sp_group_node *spg_node, *tmp;
	struct sp_group *spg;

	if (!sp_is_enabled() || !master)
		return;

	/*
	 * There are two basic scenarios when a process in the share pool is
	 * exiting but its share pool memory usage is not 0.
	 * 1. Process A called sp_alloc(), but it terminates without calling
	 *    sp_free(). Then its share pool memory usage is a positive number.
	 * 2. Process A never called sp_alloc(), and process B in the same spg
	 *    called sp_alloc() to get an addr u. Then A gets u somehow and
	 *    called sp_free(u). Now A's share pool memory usage is a negative
	 *    number. Notice B's memory usage will be a positive number.
	 *
	 * We print an informational message in both scenarios.
	 *
	 * A process that never joined an sp group needs no message because
	 * it cannot have any unfreed share pool memory.
	 */
	stat = &master->instat;
	alloc_size = atomic64_read(&stat->alloc_nsize) + atomic64_read(&stat->alloc_hsize);
	k2u_size = atomic64_read(&stat->k2u_size);

	if (alloc_size != 0 || k2u_size != 0)
		pr_info("process %s(%d) exits. It applied %ld aligned KB, k2u shared %ld aligned KB\n",
			stat->comm, stat->tgid,
			byte2kb(alloc_size), byte2kb(k2u_size));

	down_write(&sp_group_sem);
	list_for_each_entry_safe(spg_node, tmp, &master->node_list, group_node) {
		spg = spg_node->spg;
		/* match with refcount inc in sp_group_add_task */
		if (atomic_dec_and_test(&spg->use_count))
			free_sp_group_locked(spg);
		list_del(&spg_node->group_node);
		kfree(spg_node);
	}
	up_write(&sp_group_sem);

	sp_del_group_master(master);

	kfree(master);
}

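/* The share pool stays disabled unless "enable_ascend_share_pool" is passed on the kernel command line. */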
DEFINE_STATIC_KEY_FALSE(share_pool_enabled_key);

static int __init enable_share_pool(char *s)
{
	static_branch_enable(&share_pool_enabled_key);
	pr_info("Ascend enable share pool features via bootargs\n");

	return 1;
}
__setup("enable_ascend_share_pool", enable_share_pool);

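/* Create the global normal and RO mappings and register the procfs entries. */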
static int __init share_pool_init(void)
{
	if (!sp_is_enabled())
		return 0;

	sp_mapping_normal = sp_mapping_create(SP_MAPPING_NORMAL);
	if (IS_ERR(sp_mapping_normal))
		goto fail;
	atomic_inc(&sp_mapping_normal->user);

	sp_mapping_ro = sp_mapping_create(SP_MAPPING_RO);
	if (IS_ERR(sp_mapping_ro))
		goto free_normal;
	atomic_inc(&sp_mapping_ro->user);

	proc_sharepool_init();

	return 0;

free_normal:
	kfree(sp_mapping_normal);
fail:
	pr_err("Ascend share pool initialization failed\n");
	static_branch_disable(&share_pool_enabled_key);
	return 1;
}
late_initcall(share_pool_init);