/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Huawei Ascend Share Pool Memory
 *
 * Copyright (C) 2020 Huawei Limited
 * Author: Tang Yizhou <tangyizhou@huawei.com>
 *         Zefan Li <lizefan@huawei.com>
 *         Wu Peng <wupeng58@huawei.com>
 *         Ding Tianhong <dingtgianhong@huawei.com>
 *         Zhou Guanghui <zhouguanghui1@huawei.com>
 *         Li Ming <limingming.li@huawei.com>
 *
 * This code is based on the hisilicon ascend platform.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) "share pool: " fmt

#include <linux/share_pool.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/mm_types.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/printk.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/atomic.h>
#include <linux/lockdep.h>
#include <linux/kernel.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/rmap.h>
#include <linux/preempt.h>
#include <linux/swapops.h>
#include <linux/mmzone.h>
#include <linux/timekeeping.h>
#include <linux/time64.h>
#include <linux/pagewalk.h>

#define spg_valid(spg)		((spg)->is_alive == true)

/* Use the spa VA address as the mmap offset. This works because spa_file
 * is set up with a 64-bit address space, so the VA is fully covered.
 */
#define addr_offset(spa)	((spa)->va_start)

#define byte2kb(size)		((size) >> 10)
#define byte2mb(size)		((size) >> 20)
#define page2kb(page_num)	((page_num) << (PAGE_SHIFT - 10))
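/*
 * Illustrative conversions (assuming 4K base pages): byte2kb(SZ_1M) == 1024,
 * byte2mb(SZ_1G) == 1024, page2kb(1) == 4.
 */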

#define MAX_GROUP_FOR_SYSTEM	50000
#define MAX_GROUP_FOR_TASK	3000
#define MAX_PROC_PER_GROUP	1024

#define GROUP_NONE		0

#define SEC2US(sec)		((sec) * 1000000)
#define NS2US(ns)		((ns) / 1000)

#define PF_DOMAIN_CORE		0x10000000	/* AOS CORE processes in sched.h */

static int system_group_count;

/* idr of all sp_groups */
static DEFINE_IDR(sp_group_idr);
/* rw semaphore for sp_group_idr and mm->sp_group_master */
static DECLARE_RWSEM(sp_group_sem);

static BLOCKING_NOTIFIER_HEAD(sp_notifier_chain);

static DEFINE_IDA(sp_group_id_ida);

/*** Statistical and maintenance tools ***/

/* list of all sp_group_masters */
static LIST_HEAD(master_list);
/* mutex to protect insert/delete ops from master_list */
static DEFINE_MUTEX(master_list_lock);

/* list of all spm-dvpp */
static LIST_HEAD(spm_dvpp_list);
/* mutex to protect insert/delete ops from spm_dvpp_list */
static DEFINE_MUTEX(spm_list_lock);

/* for kthread buff_module_guard_work */
static struct sp_proc_stat kthread_stat;

#define SEQ_printf(m, x...)			\
do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_info(x);			\
} while (0)
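
/*
 * Usage sketch (hypothetical call sites): the same statement serves both the
 * procfs seq_file path and the plain kernel log:
 *
 *	SEQ_printf(seq, "spa_total_num: %d\n", num);	seq != NULL: seq_printf()
 *	SEQ_printf(NULL, "spa_total_num: %d\n", num);	NULL: falls back to pr_info()
 */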

#ifndef __GENKSYMS__
struct sp_spg_stat {
	/* total size of all sp_area from sp_alloc and k2u */
	atomic64_t	 size;
	/* total size of all sp_area from sp_alloc 0-order page */
	atomic64_t	 alloc_nsize;
	/* total size of all sp_area from sp_alloc hugepage */
	atomic64_t	 alloc_hsize;
	/* total size of all sp_area from sp_alloc */
	atomic64_t	 alloc_size;
	/* total size of all sp_area from sp_k2u */
	atomic64_t	 k2u_size;
};

/* per process memory usage statistics indexed by tgid */
struct sp_proc_stat {
	int tgid;
	char comm[TASK_COMM_LEN];
	/*
	 * alloc amount minus free amount, may be negative when freed by
	 * another task in the same sp group.
	 */
	atomic64_t alloc_size;
	atomic64_t alloc_nsize;
	atomic64_t alloc_hsize;
	atomic64_t k2u_size;
};

/* per process/sp-group memory usage statistics */
struct spg_proc_stat {
	int tgid;
	int spg_id;  /* 0 for non-group data, such as k2u_task */
	/*
	 * alloc amount minus free amount, may be negative when freed by
	 * another task in the same sp group.
	 */
	atomic64_t alloc_size;
	atomic64_t alloc_nsize;
	atomic64_t alloc_hsize;
	atomic64_t k2u_size;
};

enum sp_mapping_type {
	SP_MAPPING_START,
	SP_MAPPING_DVPP		= SP_MAPPING_START,
	SP_MAPPING_NORMAL,
	SP_MAPPING_RO,
	SP_MAPPING_END,
};

/*
 * address space management
 */
struct sp_mapping {
	unsigned long type;
	atomic_t user;
	unsigned long start[MAX_DEVID];
	unsigned long end[MAX_DEVID];
	struct rb_root area_root;

	struct rb_node *free_area_cache;
	unsigned long cached_hole_size;
	unsigned long cached_vstart;

	/* list head for all groups attached to this mapping, dvpp mapping only */
	struct list_head group_head;
	struct list_head spm_node;
};
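
/*
 * Lifetime note (a summary of the helpers below): the normal and RO mappings
 * are shared, file-scope instances (sp_mapping_normal / sp_mapping_ro) that
 * every group attaches to, while a DVPP mapping is created for each local
 * group and may later be merged with the DVPP mapping of another group when
 * a task is added to that group.
 */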

/* Processes in the same sp_group can share memory.
 * Memory layout for share pool:
 *
 * |-------------------- 8T -------------------|---|------ 8T ------------|
 * |		Device 0	   |  Device 1 |...|                      |
 * |----------------------------------------------------------------------|
 * |------------- 16G -------------|    16G    |   |                      |
 * | DVPP GROUP0   | DVPP GROUP1   | ... | ... |...|  sp normal memory    |
 * |     sp        |    sp         |     |     |   |                      |
 * |----------------------------------------------------------------------|
 *
 * The host SVM feature reserves 8T of virtual memory via mmap. Due to the
 * DVPP restriction, when both SVM and share pool allocate memory for DVPP,
 * the memory has to be in the same 32G range.
 *
 * Share pool reserves 16T of memory, with 8T for normal use and 8T for DVPP.
 * Within this 8T DVPP memory, SVM calls sp_config_dvpp_range() to tell us
 * which 16G memory range is reserved for share pool.
 *
 * In some scenarios where there is no host SVM feature, share pool uses
 * the default 8G memory setting for DVPP.
 */
struct sp_group {
	int		 id;
	unsigned long	 flag;
	struct file	 *file;
	struct file	 *file_hugetlb;
	/* number of processes in this group */
	int		 proc_num;
	/* list head of processes (sp_group_node, each represents a process) */
	struct list_head procs;
	/* list head of sp_area. it is protected by spin_lock sp_area_lock */
	struct list_head spa_list;
	/* group statistics */
	struct sp_spg_stat instat;
	/* is_alive == false means it's being destroyed */
	bool		 is_alive;
	atomic_t	 use_count;
	atomic_t	 spa_num;
	/* protect the group internal elements, except spa_list */
	struct rw_semaphore	rw_lock;
	/* list node for dvpp mapping */
	struct list_head	mnode;
	struct sp_mapping       *mapping[SP_MAPPING_END];
};

/* a per-process(per mm) struct which manages a sp_group_node list */
struct sp_group_master {
	/*
	 * number of sp groups the process belongs to,
	 * a.k.a. the number of sp_group_nodes in node_list
	 */
	unsigned int count;
	/* list head of sp_node */
	struct list_head node_list;
	struct mm_struct *mm;
	/*
	 * Used for the share pool allocations of the current process itself,
	 * e.g. sp_alloc of non-shared memory or k2task.
	 */
	struct sp_group *local;
	struct sp_proc_stat instat;
	struct list_head list_node;
};

/*
 * each instance represents an sp group the process belongs to
 * sp_group_master    : sp_group_node   = 1 : N
 * sp_group_node->spg : sp_group        = 1 : 1
 * sp_group_node      : sp_group->procs = N : 1
 */
struct sp_group_node {
	/* list node in sp_group->procs */
	struct list_head proc_node;
	/* list node in sp_group_master->node_list */
	struct list_head group_node;
	struct sp_group_master *master;
	struct sp_group *spg;
	unsigned long prot;
	struct spg_proc_stat instat;
};
#endif

static inline void sp_add_group_master(struct sp_group_master *master)
{
	mutex_lock(&master_list_lock);
	list_add_tail(&master->list_node, &master_list);
	mutex_unlock(&master_list_lock);
}

static inline void sp_del_group_master(struct sp_group_master *master)
{
	mutex_lock(&master_list_lock);
	list_del(&master->list_node);
	mutex_unlock(&master_list_lock);
}

/* The caller should hold mmap_sem to protect master (TBD) */
static void sp_init_group_master_stat(int tgid, struct mm_struct *mm,
		struct sp_proc_stat *stat)
{
	atomic64_set(&stat->alloc_nsize, 0);
	atomic64_set(&stat->alloc_hsize, 0);
	atomic64_set(&stat->k2u_size, 0);
	stat->tgid = tgid;
	get_task_comm(stat->comm, current);
}

static unsigned long sp_mapping_type(struct sp_mapping *spm)
{
	return spm->type;
}

static void sp_mapping_set_type(struct sp_mapping *spm, unsigned long type)
{
	spm->type = type;
}

static struct sp_mapping *sp_mapping_normal;
static struct sp_mapping *sp_mapping_ro;

static void sp_mapping_add_to_list(struct sp_mapping *spm)
{
	mutex_lock(&spm_list_lock);
	if (sp_mapping_type(spm) == SP_MAPPING_DVPP)
		list_add_tail(&spm->spm_node, &spm_dvpp_list);
	mutex_unlock(&spm_list_lock);
}

static void sp_mapping_remove_from_list(struct sp_mapping *spm)
{
	mutex_lock(&spm_list_lock);
	if (sp_mapping_type(spm) == SP_MAPPING_DVPP)
		list_del(&spm->spm_node);
	mutex_unlock(&spm_list_lock);
}

static void sp_mapping_range_init(struct sp_mapping *spm)
{
	int i;

	for (i = 0; i < MAX_DEVID; i++) {
		switch (sp_mapping_type(spm)) {
		case SP_MAPPING_RO:
			spm->start[i] = MMAP_SHARE_POOL_RO_START;
			spm->end[i]   = MMAP_SHARE_POOL_RO_END;
			break;
		case SP_MAPPING_NORMAL:
			spm->start[i] = MMAP_SHARE_POOL_NORMAL_START;
			spm->end[i]   = MMAP_SHARE_POOL_NORMAL_END;
			break;
		case SP_MAPPING_DVPP:
			spm->start[i] = MMAP_SHARE_POOL_DVPP_START + i * MMAP_SHARE_POOL_16G_SIZE;
			spm->end[i]   = spm->start[i] + MMAP_SHARE_POOL_16G_SIZE;
			break;
		default:
			pr_err("Invalid sp_mapping type [%lu]\n", sp_mapping_type(spm));
			break;
		}
	}
}
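
/*
 * Illustrative result of the loop above for the DVPP case: device i is given
 * a disjoint window (the MMAP_SHARE_POOL_* limits come from the share pool
 * headers):
 *
 *	start[i] = MMAP_SHARE_POOL_DVPP_START + i * MMAP_SHARE_POOL_16G_SIZE
 *	end[i]   = start[i] + MMAP_SHARE_POOL_16G_SIZE
 */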

static struct sp_mapping *sp_mapping_create(unsigned long type)
{
	struct sp_mapping *spm;

	spm = kzalloc(sizeof(struct sp_mapping), GFP_KERNEL);
	if (!spm)
		return ERR_PTR(-ENOMEM);

	sp_mapping_set_type(spm, type);
	sp_mapping_range_init(spm);
	atomic_set(&spm->user, 0);
	spm->area_root = RB_ROOT;
	INIT_LIST_HEAD(&spm->group_head);
	sp_mapping_add_to_list(spm);

	return spm;
}

static void sp_mapping_destroy(struct sp_mapping *spm)
{
	sp_mapping_remove_from_list(spm);
	kfree(spm);
}

static void sp_mapping_attach(struct sp_group *spg, struct sp_mapping *spm)
{
	unsigned long type = sp_mapping_type(spm);
	atomic_inc(&spm->user);

	spg->mapping[type] = spm;
	if (type == SP_MAPPING_DVPP)
		list_add_tail(&spg->mnode, &spm->group_head);
}

static void sp_mapping_detach(struct sp_group *spg, struct sp_mapping *spm)
{
	unsigned long type;

	if (!spm)
		return;

	type = sp_mapping_type(spm);
	if (type == SP_MAPPING_DVPP)
		list_del(&spg->mnode);
	if (atomic_dec_and_test(&spm->user))
		sp_mapping_destroy(spm);

	spg->mapping[type] = NULL;
}

/* merge old mapping to new, and the old mapping would be destroyed */
static void sp_mapping_merge(struct sp_mapping *new, struct sp_mapping *old)
{
	struct sp_group *spg, *tmp;

	if (new == old)
		return;

	list_for_each_entry_safe(spg, tmp, &old->group_head, mnode) {
		list_move_tail(&spg->mnode, &new->group_head);
		spg->mapping[SP_MAPPING_DVPP] = new;
	}

	atomic_add(atomic_read(&old->user), &new->user);
	sp_mapping_destroy(old);
}

static bool is_mapping_empty(struct sp_mapping *spm)
{
	return RB_EMPTY_ROOT(&spm->area_root);
}

static bool can_mappings_merge(struct sp_mapping *m1, struct sp_mapping *m2)
{
	int i;

	for (i = 0; i < MAX_DEVID; i++)
		if (m1->start[i] != m2->start[i] || m1->end[i] != m2->end[i])
			return false;

	return true;
}

/*
 * 1. The mappings of the local group are set at creation.
 * 2. This is used to set up the mappings for groups created during add_task.
 * 3. The normal mapping exists for all groups.
 * 4. The dvpp mappings of the new group and the local group can merge _iff_ at
 *    least one of the mappings is empty.
 * the caller must hold sp_group_sem
 * NOTE: undo the merging if a later step fails.
 */
static int sp_mapping_group_setup(struct mm_struct *mm, struct sp_group *spg)
{
	struct sp_mapping *local_dvpp_mapping, *spg_dvpp_mapping;

	local_dvpp_mapping = mm->sp_group_master->local->mapping[SP_MAPPING_DVPP];
	spg_dvpp_mapping = spg->mapping[SP_MAPPING_DVPP];

	if (!list_empty(&spg->procs) && !(spg->flag & SPG_FLAG_NON_DVPP)) {
		/*
		 * Don't return an error when the mappings' address ranges conflict.
		 * As long as the mapping is unused, we can drop the empty mapping.
		 * This may change the address range for the task or group implicitly,
		 * so emit a warning for it.
		 */
		bool is_conflict = !can_mappings_merge(local_dvpp_mapping, spg_dvpp_mapping);

		if (is_mapping_empty(local_dvpp_mapping)) {
			sp_mapping_merge(spg_dvpp_mapping, local_dvpp_mapping);
			if (is_conflict)
				pr_warn_ratelimited("task address space conflict, spg_id=%d\n", spg->id);
		} else if (is_mapping_empty(spg_dvpp_mapping)) {
			sp_mapping_merge(local_dvpp_mapping, spg_dvpp_mapping);
			if (is_conflict)
				pr_warn_ratelimited("group address space conflict, spg_id=%d\n", spg->id);
		} else {
			pr_info_ratelimited("Duplicate address space, id=%d\n", spg->id);
			return -EINVAL;
		}
	} else {
		if (!(spg->flag & SPG_FLAG_NON_DVPP))
			/* the mapping of local group is always set */
			sp_mapping_attach(spg, local_dvpp_mapping);
		if (!spg->mapping[SP_MAPPING_NORMAL])
			sp_mapping_attach(spg, sp_mapping_normal);
		if (!spg->mapping[SP_MAPPING_RO])
			sp_mapping_attach(spg, sp_mapping_ro);
	}

	return 0;
}

static struct sp_mapping *sp_mapping_find(struct sp_group *spg,
						 unsigned long addr)
{
	if (addr >= MMAP_SHARE_POOL_NORMAL_START && addr < MMAP_SHARE_POOL_NORMAL_END)
		return spg->mapping[SP_MAPPING_NORMAL];

	if (addr >= MMAP_SHARE_POOL_RO_START && addr < MMAP_SHARE_POOL_RO_END)
		return spg->mapping[SP_MAPPING_RO];

	return spg->mapping[SP_MAPPING_DVPP];
}

static struct sp_group *create_spg(int spg_id, unsigned long flag);
static void free_new_spg_id(bool new, int spg_id);
static void free_sp_group_locked(struct sp_group *spg);
static int local_group_add_task(struct mm_struct *mm, struct sp_group *spg);
static int init_local_group(struct mm_struct *mm)
{
	int spg_id, ret;
	struct sp_group *spg;
	struct sp_mapping *spm;
	struct sp_group_master *master = mm->sp_group_master;

	spg_id = ida_alloc_range(&sp_group_id_ida, SPG_ID_LOCAL_MIN,
				 SPG_ID_LOCAL_MAX, GFP_ATOMIC);
	if (spg_id < 0) {
		pr_err_ratelimited("generate local group id failed %d\n", spg_id);
		return spg_id;
	}

	spg = create_spg(spg_id, 0);
	if (IS_ERR(spg)) {
		free_new_spg_id(true, spg_id);
		return PTR_ERR(spg);
	}

	master->local = spg;
	spm = sp_mapping_create(SP_MAPPING_DVPP);
	if (IS_ERR(spm)) {
		ret = PTR_ERR(spm);
		goto free_spg;
	}
	sp_mapping_attach(master->local, spm);
	sp_mapping_attach(master->local, sp_mapping_normal);
	sp_mapping_attach(master->local, sp_mapping_ro);

	ret = local_group_add_task(mm, spg);
	if (ret < 0)
		/* The spm would be released while destroying the spg */
		goto free_spg;

	return 0;

free_spg:
	/* spg_id is freed in free_sp_group_locked */
	free_sp_group_locked(spg);
	master->local = NULL;
	return ret;
}

/* The caller must hold sp_group_sem */
static int sp_init_group_master_locked(struct task_struct *tsk, struct mm_struct *mm)
{
	int ret;
	struct sp_group_master *master;

	if (mm->sp_group_master)
		return 0;

	master = kmalloc(sizeof(struct sp_group_master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	INIT_LIST_HEAD(&master->node_list);
	master->count = 0;
	master->mm = mm;
	sp_init_group_master_stat(tsk->tgid, mm, &master->instat);
	mm->sp_group_master = master;
	sp_add_group_master(master);

	ret = init_local_group(mm);
	if (ret)
		goto free_master;

	return 0;

free_master:
	sp_del_group_master(master);
	mm->sp_group_master = NULL;
	kfree(master);

	return ret;
}

static inline bool is_local_group(int spg_id)
{
	return spg_id >= SPG_ID_LOCAL_MIN && spg_id <= SPG_ID_LOCAL_MAX;
}

static struct sp_group *sp_get_local_group(struct task_struct *tsk, struct mm_struct *mm)
{
	int ret;
	struct sp_group_master *master;

	down_read(&sp_group_sem);
	master = mm->sp_group_master;
	if (master && master->local) {
		atomic_inc(&master->local->use_count);
		up_read(&sp_group_sem);
		return master->local;
	}
	up_read(&sp_group_sem);

	down_write(&sp_group_sem);
	ret = sp_init_group_master_locked(tsk, mm);
	if (ret) {
		up_write(&sp_group_sem);
		return ERR_PTR(ret);
	}
	master = mm->sp_group_master;
	atomic_inc(&master->local->use_count);
	up_write(&sp_group_sem);

	return master->local;
}

static void update_spg_stat_alloc(unsigned long size, bool inc,
	bool huge, struct sp_spg_stat *stat)
{
	if (inc) {
		atomic64_add(size, &stat->size);
		atomic64_add(size, &stat->alloc_size);
		if (huge)
			atomic64_add(size, &stat->alloc_hsize);
		else
			atomic64_add(size, &stat->alloc_nsize);
	} else {
		atomic64_sub(size, &stat->size);
		atomic64_sub(size, &stat->alloc_size);
		if (huge)
			atomic64_sub(size, &stat->alloc_hsize);
		else
			atomic64_sub(size, &stat->alloc_nsize);
	}
}

static void update_spg_stat_k2u(unsigned long size, bool inc,
	struct sp_spg_stat *stat)
{
	if (inc) {
		atomic64_add(size, &stat->size);
		atomic64_add(size, &stat->k2u_size);
	} else {
		atomic64_sub(size, &stat->size);
		atomic64_sub(size, &stat->k2u_size);
	}
}

static void update_mem_usage_alloc(unsigned long size, bool inc,
		bool is_hugepage, struct sp_group_node *spg_node)
{
	struct sp_proc_stat *proc_stat = &spg_node->master->instat;

	if (inc) {
		if (is_hugepage) {
			atomic64_add(size, &spg_node->instat.alloc_hsize);
			atomic64_add(size, &proc_stat->alloc_hsize);
			return;
		}
		atomic64_add(size, &spg_node->instat.alloc_nsize);
		atomic64_add(size, &proc_stat->alloc_nsize);
		return;
	}

	if (is_hugepage) {
		atomic64_sub(size, &spg_node->instat.alloc_hsize);
		atomic64_sub(size, &proc_stat->alloc_hsize);
		return;
	}
	atomic64_sub(size, &spg_node->instat.alloc_nsize);
	atomic64_sub(size, &proc_stat->alloc_nsize);
	return;
}

static void update_mem_usage_k2u(unsigned long size, bool inc,
		struct sp_group_node *spg_node)
{
	struct sp_proc_stat *proc_stat = &spg_node->master->instat;

	if (inc) {
		atomic64_add(size, &spg_node->instat.k2u_size);
		atomic64_add(size, &proc_stat->k2u_size);
	} else {
		atomic64_sub(size, &spg_node->instat.k2u_size);
		atomic64_sub(size, &proc_stat->k2u_size);
	}
}

static void sp_init_spg_proc_stat(struct spg_proc_stat *stat, int spg_id)
{
	stat->tgid = current->tgid;
	stat->spg_id = spg_id;
	atomic64_set(&stat->alloc_nsize, 0);
	atomic64_set(&stat->alloc_hsize, 0);
	atomic64_set(&stat->k2u_size, 0);
}

static void sp_init_group_stat(struct sp_spg_stat *stat)
{
	atomic64_set(&stat->size, 0);
	atomic64_set(&stat->alloc_nsize, 0);
	atomic64_set(&stat->alloc_hsize, 0);
	atomic64_set(&stat->alloc_size, 0);
	atomic64_set(&stat->k2u_size, 0);
}

/* statistics of all sp area, protected by sp_area_lock */
struct sp_spa_stat {
	unsigned int total_num;
	unsigned int alloc_num;
	unsigned int k2u_task_num;
	unsigned int k2u_spg_num;
	unsigned long total_size;
	unsigned long alloc_size;
	unsigned long k2u_task_size;
	unsigned long k2u_spg_size;
	unsigned long dvpp_size;
	unsigned long dvpp_va_size;
};

static struct sp_spa_stat spa_stat;

/* statistics of all sp group born from sp_alloc and k2u(spg) */
struct sp_overall_stat {
	atomic_t spa_total_num;
	atomic64_t spa_total_size;
};

static struct sp_overall_stat sp_overall_stat;

/*** Global share pool VA allocator ***/

enum spa_type {
	SPA_TYPE_ALLOC = 1,
	/* NOTE: reorganize after the statistical structure is reconstructed. */
	SPA_TYPE_ALLOC_PRIVATE = SPA_TYPE_ALLOC,
	SPA_TYPE_K2TASK,
	SPA_TYPE_K2SPG,
};

/*
 * We bump the reference when each mmap succeeds, and it will be dropped
 * when the vma is about to be released, so the sp_area object will be
 * automatically freed when all tasks in the sp group have exited.
 */
struct sp_area {
	unsigned long va_start;
	unsigned long va_end;		/* va_end always align to hugepage */
	unsigned long real_size;	/* real size with alignment */
	unsigned long region_vstart;	/* belong to normal region or DVPP region */
	unsigned long flags;
	bool is_hugepage;
	bool is_dead;
	atomic_t use_count;		/* How many vmas use this VA region */
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head link;		/* link to the spg->head */
	struct sp_group *spg;
	enum spa_type type;		/* where spa born from */
	struct mm_struct *mm;		/* owner of k2u(task) */
	unsigned long kva;		/* shared kva */
	pid_t applier;			/* the original applier process */
	int node_id;			/* memory node */
	int device_id;
};
static DEFINE_SPINLOCK(sp_area_lock);

static unsigned long spa_size(struct sp_area *spa)
{
	return spa->real_size;
}

static struct file *spa_file(struct sp_area *spa)
{
	if (spa->is_hugepage)
		return spa->spg->file_hugetlb;
	else
		return spa->spg->file;
}

/* the caller should hold sp_area_lock */
static void spa_inc_usage(struct sp_area *spa)
{
	enum spa_type type = spa->type;
	unsigned long size = spa->real_size;
	bool is_dvpp = spa->flags & SP_DVPP;
	bool is_huge = spa->is_hugepage;

	switch (type) {
	case SPA_TYPE_ALLOC:
		spa_stat.alloc_num += 1;
		spa_stat.alloc_size += size;
		update_spg_stat_alloc(size, true, is_huge, &spa->spg->instat);
		break;
	case SPA_TYPE_K2TASK:
		spa_stat.k2u_task_num += 1;
		spa_stat.k2u_task_size += size;
		update_spg_stat_k2u(size, true, &spa->spg->instat);
		break;
	case SPA_TYPE_K2SPG:
		spa_stat.k2u_spg_num += 1;
		spa_stat.k2u_spg_size += size;
		update_spg_stat_k2u(size, true, &spa->spg->instat);
		break;
	default:
		WARN(1, "invalid spa type");
	}

	if (is_dvpp) {
		spa_stat.dvpp_size += size;
		spa_stat.dvpp_va_size += ALIGN(size, PMD_SIZE);
	}

	atomic_inc(&spa->spg->spa_num);
	/*
	 * all the calculations won't overflow due to system limitation and
	 * parameter checking in sp_alloc_area()
	 */
	spa_stat.total_num += 1;
	spa_stat.total_size += size;

	if (!is_local_group(spa->spg->id)) {
		atomic_inc(&sp_overall_stat.spa_total_num);
		atomic64_add(size, &sp_overall_stat.spa_total_size);
	}
}

/* the caller should hold sp_area_lock */
static void spa_dec_usage(struct sp_area *spa)
{
	enum spa_type type = spa->type;
	unsigned long size = spa->real_size;
	bool is_dvpp = spa->flags & SP_DVPP;
	bool is_huge = spa->is_hugepage;

	switch (type) {
	case SPA_TYPE_ALLOC:
		spa_stat.alloc_num -= 1;
		spa_stat.alloc_size -= size;
		update_spg_stat_alloc(size, false, is_huge, &spa->spg->instat);
		break;
	case SPA_TYPE_K2TASK:
		spa_stat.k2u_task_num -= 1;
		spa_stat.k2u_task_size -= size;
		update_spg_stat_k2u(size, false, &spa->spg->instat);
		break;
	case SPA_TYPE_K2SPG:
		spa_stat.k2u_spg_num -= 1;
		spa_stat.k2u_spg_size -= size;
		update_spg_stat_k2u(size, false, &spa->spg->instat);
		break;
	default:
		WARN(1, "invalid spa type");
	}

	if (is_dvpp) {
		spa_stat.dvpp_size -= size;
		spa_stat.dvpp_va_size -= ALIGN(size, PMD_SIZE);
	}

	atomic_dec(&spa->spg->spa_num);
	spa_stat.total_num -= 1;
	spa_stat.total_size -= size;

	if (!is_local_group(spa->spg->id)) {
		atomic_dec(&sp_overall_stat.spa_total_num);
		atomic64_sub(spa->real_size, &sp_overall_stat.spa_total_size);
	}
}

static void update_mem_usage(unsigned long size, bool inc, bool is_hugepage,
	struct sp_group_node *spg_node, enum spa_type type)
{
	switch (type) {
	case SPA_TYPE_ALLOC:
		update_mem_usage_alloc(size, inc, is_hugepage, spg_node);
		break;
	case SPA_TYPE_K2TASK:
	case SPA_TYPE_K2SPG:
		update_mem_usage_k2u(size, inc, spg_node);
		break;
	default:
		WARN(1, "invalid stat type\n");
	}
}

struct sp_group_node *find_spg_node_by_spg(struct mm_struct *mm,
		struct sp_group *spg)
{
	struct sp_group_node *spg_node;

	list_for_each_entry(spg_node, &mm->sp_group_master->node_list, group_node) {
		if (spg_node->spg == spg)
			return spg_node;
	}
	return NULL;
}

static void sp_update_process_stat(struct task_struct *tsk, bool inc,
	struct sp_area *spa)
{
	struct sp_group_node *spg_node;
	unsigned long size = spa->real_size;
	enum spa_type type = spa->type;

	spg_node = find_spg_node_by_spg(tsk->mm, spa->spg);
	update_mem_usage(size, inc, spa->is_hugepage, spg_node, type);
}

static inline void check_interrupt_context(void)
{
	if (unlikely(in_interrupt()))
		panic("function can't be used in interrupt context\n");
}

static inline bool check_aoscore_process(struct task_struct *tsk)
{
	if (tsk->flags & PF_DOMAIN_CORE)
		return true;
	else
		return false;
}

static unsigned long sp_mmap(struct mm_struct *mm, struct file *file,
			     struct sp_area *spa, unsigned long *populate,
			     unsigned long prot, struct vm_area_struct **pvma);
static void sp_munmap(struct mm_struct *mm, unsigned long addr, unsigned long size);

#define K2U_NORMAL	0
#define K2U_COREDUMP	1

struct sp_k2u_context {
	unsigned long kva;
	unsigned long kva_aligned;
	unsigned long size;
	unsigned long size_aligned;
	unsigned long sp_flags;
	int state;
	enum spa_type type;
};

static unsigned long sp_remap_kva_to_vma(struct sp_area *spa, struct mm_struct *mm,
					unsigned long prot, struct sp_k2u_context *kc);

static void free_sp_group_id(int spg_id)
{
	/* ida operation is protected by an internal spin_lock */
	if ((spg_id >= SPG_ID_AUTO_MIN && spg_id <= SPG_ID_AUTO_MAX) ||
	    (spg_id >= SPG_ID_LOCAL_MIN && spg_id <= SPG_ID_LOCAL_MAX))
		ida_free(&sp_group_id_ida, spg_id);
}

static void free_new_spg_id(bool new, int spg_id)
{
	if (new)
		free_sp_group_id(spg_id);
}

static void free_sp_group_locked(struct sp_group *spg)
{
	int type;

	fput(spg->file);
	fput(spg->file_hugetlb);
	idr_remove(&sp_group_idr, spg->id);
	free_sp_group_id((unsigned int)spg->id);

	for (type = SP_MAPPING_START; type < SP_MAPPING_END; type++)
		sp_mapping_detach(spg, spg->mapping[type]);

	if (!is_local_group(spg->id))
		system_group_count--;

	kfree(spg);
	WARN(system_group_count < 0, "unexpected group count\n");
}

static void free_sp_group(struct sp_group *spg)
{
	down_write(&sp_group_sem);
	free_sp_group_locked(spg);
	up_write(&sp_group_sem);
}

static void sp_group_put_locked(struct sp_group *spg)
{
	lockdep_assert_held_write(&sp_group_sem);

	if (atomic_dec_and_test(&spg->use_count))
		free_sp_group_locked(spg);
}

static void sp_group_put(struct sp_group *spg)
{
	if (atomic_dec_and_test(&spg->use_count))
		free_sp_group(spg);
}

/* use with put_task_struct(task) */
static int get_task(int tgid, struct task_struct **task)
{
	struct task_struct *tsk;
	struct pid *p;

	rcu_read_lock();
	p = find_pid_ns(tgid, &init_pid_ns);
	tsk = pid_task(p, PIDTYPE_TGID);
	if (!tsk || (tsk->flags & PF_EXITING)) {
		rcu_read_unlock();
		return -ESRCH;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	*task = tsk;
	return 0;
}

/*
 * the caller must:
 * 1. hold spg->rw_lock
 * 2. ensure no concurrency problem for mm_struct
 */
static bool is_process_in_group(struct sp_group *spg,
						 struct mm_struct *mm)
{
	struct sp_group_node *spg_node;

	list_for_each_entry(spg_node, &spg->procs, proc_node)
		if (spg_node->master->mm == mm)
			return true;

	return false;
}

/* user must call sp_group_put() after use */
static struct sp_group *sp_group_get_locked(int tgid, int spg_id)
{
	struct sp_group *spg = NULL;
	struct task_struct *tsk = NULL;
	int ret = 0;

	if (spg_id == SPG_ID_DEFAULT) {
		ret = get_task(tgid, &tsk);
		if (ret)
			return NULL;

		task_lock(tsk);
		if (tsk->mm == NULL)
			spg = NULL;
		else if (tsk->mm->sp_group_master)
			spg = tsk->mm->sp_group_master->local;
		task_unlock(tsk);

		put_task_struct(tsk);
	} else {
		spg = idr_find(&sp_group_idr, spg_id);
	}

	if (!spg || !atomic_inc_not_zero(&spg->use_count))
		return NULL;

	return spg;
}

static struct sp_group *sp_group_get(int tgid, int spg_id)
{
	struct sp_group *spg;

	down_read(&sp_group_sem);
	spg = sp_group_get_locked(tgid, spg_id);
	up_read(&sp_group_sem);
	return spg;
}

/**
 * mg_sp_group_id_by_pid() - Get the sp_group ID array of a process.
 * @tgid: tgid of target process.
 * @spg_ids: pointer to an array that receives the group ids the process belongs to
 * @num: input: the size of the spg_ids array; output: the number of groups of the process
 *
 * Return:
 * >0		- the sp_group ID.
 * -ENODEV	- target process doesn't belong to any sp_group.
 * -EINVAL	- spg_ids or num is NULL.
 * -E2BIG	- the number of groups the process belongs to is larger than *num
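 *
 * A minimal usage sketch (hypothetical caller, error handling elided):
 *
 *	int ids[16];
 *	int num = ARRAY_SIZE(ids);
 *	int err = mg_sp_group_id_by_pid(tgid, ids, &num);
 *
 * On success, ids[0 .. num - 1] hold the IDs of the groups the process is in.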
 */
int mg_sp_group_id_by_pid(int tgid, int *spg_ids, int *num)
{
	int ret = 0, real_count;
	struct sp_group_node *node;
	struct sp_group_master *master = NULL;
	struct task_struct *tsk;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	if (!spg_ids || !num || *num <= 0)
		return -EINVAL;

	ret = get_task(tgid, &tsk);
	if (ret)
		return ret;

	down_read(&sp_group_sem);
	task_lock(tsk);
	if (tsk->mm)
		master = tsk->mm->sp_group_master;
	task_unlock(tsk);

	if (!master) {
		ret = -ENODEV;
		goto out_up_read;
	}

	/*
	 * There is a local group for each process, which is used for
	 * pass-through allocation. The local group is an internal
	 * implementation detail for convenience and is not intended
	 * to bother the user.
	 */
	real_count = master->count - 1;
	if (real_count <= 0) {
		ret = -ENODEV;
		goto out_up_read;
	}
	if ((unsigned int)*num < real_count) {
		ret = -E2BIG;
		goto out_up_read;
	}
	*num = real_count;

	list_for_each_entry(node, &master->node_list, group_node) {
		if (is_local_group(node->spg->id))
			continue;
		*(spg_ids++) = node->spg->id;
	}

out_up_read:
	up_read(&sp_group_sem);
	put_task_struct(tsk);
	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_group_id_by_pid);

static bool is_online_node_id(int node_id)
{
	return node_id >= 0 && node_id < MAX_NUMNODES && node_online(node_id);
}

static void sp_group_init(struct sp_group *spg, int spg_id, unsigned long flag)
{
	spg->id = spg_id;
	spg->flag = flag;
	spg->is_alive = true;
	spg->proc_num = 0;
	atomic_set(&spg->use_count, 1);
	atomic_set(&spg->spa_num, 0);
	INIT_LIST_HEAD(&spg->procs);
	INIT_LIST_HEAD(&spg->spa_list);
	INIT_LIST_HEAD(&spg->mnode);
	init_rwsem(&spg->rw_lock);
	sp_init_group_stat(&spg->instat);
}

static struct sp_group *create_spg(int spg_id, unsigned long flag)
{
	int ret;
	struct sp_group *spg;
	char name[DNAME_INLINE_LEN];
	struct user_struct *user = NULL;
	int hsize_log = MAP_HUGE_2MB >> MAP_HUGE_SHIFT;

	if (unlikely(system_group_count + 1 == MAX_GROUP_FOR_SYSTEM &&
		     !is_local_group(spg_id))) {
		pr_err("reach system max group num\n");
		return ERR_PTR(-ENOSPC);
	}

	spg = kzalloc(sizeof(*spg), GFP_KERNEL);
	if (spg == NULL)
		return ERR_PTR(-ENOMEM);

	sprintf(name, "sp_group_%d", spg_id);
	spg->file = shmem_kernel_file_setup(name, MAX_LFS_FILESIZE, VM_NORESERVE);
	if (IS_ERR(spg->file)) {
		pr_err("spg file setup failed %ld\n", PTR_ERR(spg->file));
		ret = PTR_ERR(spg->file);
		goto out_kfree;
	}

	sprintf(name, "sp_group_%d_huge", spg_id);
	spg->file_hugetlb = hugetlb_file_setup(name, MAX_LFS_FILESIZE,
				VM_NORESERVE, &user, HUGETLB_ANONHUGE_INODE, hsize_log);
	if (IS_ERR(spg->file_hugetlb)) {
		pr_err("spg file_hugetlb setup failed %ld\n", PTR_ERR(spg->file_hugetlb));
		ret = PTR_ERR(spg->file_hugetlb);
		goto out_fput;
	}

	sp_group_init(spg, spg_id, flag);

	ret = idr_alloc(&sp_group_idr, spg, spg_id, spg_id + 1, GFP_KERNEL);
	if (ret < 0) {
		pr_err("group %d idr alloc failed %d\n", spg_id, ret);
		goto out_fput_huge;
	}

	if (!is_local_group(spg_id))
		system_group_count++;

	return spg;

out_fput_huge:
	fput(spg->file_hugetlb);
out_fput:
	fput(spg->file);
out_kfree:
	kfree(spg);
	return ERR_PTR(ret);
}

/* the caller must hold sp_group_sem */
static struct sp_group *find_or_alloc_sp_group(int spg_id, unsigned long flag)
{
	struct sp_group *spg;

	spg = sp_group_get_locked(current->tgid, spg_id);

	if (!spg) {
		spg = create_spg(spg_id, flag);
	} else {
		down_read(&spg->rw_lock);
		if (!spg_valid(spg)) {
			up_read(&spg->rw_lock);
			sp_group_put_locked(spg);
			return ERR_PTR(-ENODEV);
		}
		up_read(&spg->rw_lock);
		/* spg->use_count has increased due to sp_group_get() */
	}

	return spg;
}

static void __sp_area_drop_locked(struct sp_area *spa);

/* The caller must down_write(&mm->mmap_lock) */
static void sp_munmap_task_areas(struct mm_struct *mm, struct sp_group *spg, struct list_head *stop)
{
	struct sp_area *spa, *prev = NULL;
	int err;


	spin_lock(&sp_area_lock);
	list_for_each_entry(spa, &spg->spa_list, link) {
		if (&spa->link == stop)
			break;

		__sp_area_drop_locked(prev);
		prev = spa;

		atomic_inc(&spa->use_count);
		spin_unlock(&sp_area_lock);

		err = do_munmap(mm, spa->va_start, spa_size(spa), NULL);
		if (err) {
			/* we are not supposed to fail */
			pr_err("failed to unmap VA %pK when munmap task areas\n",
			       (void *)spa->va_start);
		}

		spin_lock(&sp_area_lock);
	}
	__sp_area_drop_locked(prev);

	spin_unlock(&sp_area_lock);
}

/* the caller must hold sp_group_sem */
static int mm_add_group_init(struct task_struct *tsk, struct mm_struct *mm,
			     struct sp_group *spg)
{
	int ret;
	struct sp_group_master *master;

	if (!mm->sp_group_master) {
		ret = sp_init_group_master_locked(tsk, mm);
		if (ret)
			return ret;
	} else {
		if (is_process_in_group(spg, mm)) {
			pr_err_ratelimited("task already in target group, id=%d\n", spg->id);
			return -EEXIST;
		}

		master = mm->sp_group_master;
		if (master->count == MAX_GROUP_FOR_TASK) {
			pr_err("task reaches max group num\n");
			return -ENOSPC;
		}
	}

	return 0;
}

/* the caller must hold sp_group_sem */
static struct sp_group_node *create_spg_node(struct mm_struct *mm,
	unsigned long prot, struct sp_group *spg)
{
	struct sp_group_master *master = mm->sp_group_master;
	struct sp_group_node *spg_node;

	spg_node = kzalloc(sizeof(struct sp_group_node), GFP_KERNEL);
	if (spg_node == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&spg_node->group_node);
	INIT_LIST_HEAD(&spg_node->proc_node);
	spg_node->spg = spg;
	spg_node->master = master;
	spg_node->prot = prot;
	sp_init_spg_proc_stat(&spg_node->instat, spg->id);

	list_add_tail(&spg_node->group_node, &master->node_list);
	master->count++;

	return spg_node;
}

/* the caller must down_write(&spg->rw_lock) */
static int insert_spg_node(struct sp_group *spg, struct sp_group_node *node)
{
	if (spg->proc_num + 1 == MAX_PROC_PER_GROUP) {
		pr_err_ratelimited("add group: group reaches max process num\n");
		return -ENOSPC;
	}

	spg->proc_num++;
	list_add_tail(&node->proc_node, &spg->procs);

	return 0;
}

/* the caller must down_write(&spg->rw_lock) */
static void delete_spg_node(struct sp_group *spg, struct sp_group_node *node)
{
	list_del(&node->proc_node);
	spg->proc_num--;
}

/* the caller must hold sp_group_sem */
static void free_spg_node(struct mm_struct *mm, struct sp_group *spg,
	struct sp_group_node *spg_node)
{
	struct sp_group_master *master = mm->sp_group_master;

	list_del(&spg_node->group_node);
	master->count--;

	kfree(spg_node);
}

static int local_group_add_task(struct mm_struct *mm, struct sp_group *spg)
{
	struct sp_group_node *node;

	node = create_spg_node(mm, PROT_READ | PROT_WRITE, spg);
	if (IS_ERR(node))
		return PTR_ERR(node);

	insert_spg_node(spg, node);
	mmget(mm);

	return 0;
}

/**
 * mg_sp_group_add_task() - Add a process to a share group (sp_group).
 * @tgid: the tgid of the task to be added.
 * @prot: the prot of task for this spg.
 * @spg_id: the ID of the sp_group.
 * @flag: to give some special message.
 *
 * A process can't be added to more than one sp_group in single-group mode,
 * but it can be in multi-group mode.
 *
 * Return: A positive group number for success, -errno on failure.
 *
 * The manually specified ID is between [SPG_ID_MIN, SPG_ID_MAX].
 * The automatically allocated ID is between [SPG_ID_AUTO_MIN, SPG_ID_AUTO_MAX].
 * When negative, the return value is -errno.
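 *
 * A minimal usage sketch (hypothetical caller, error handling elided);
 * SPG_ID_AUTO asks the share pool to pick a free group ID:
 *
 *	spg_id = mg_sp_group_add_task(tgid, PROT_READ | PROT_WRITE, SPG_ID_AUTO);
 *	if (spg_id < 0)
 *		return spg_id;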
 */
int mg_sp_group_add_task(int tgid, unsigned long prot, int spg_id)
{
	unsigned long flag = 0;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct sp_group *spg;
	struct sp_group_node *node = NULL;
	int ret = 0;
	bool id_newly_generated = false;
	struct sp_area *spa, *prev = NULL;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	/* only allow READ, READ | WRITE */
	if (!((prot == PROT_READ)
	      || (prot == (PROT_READ | PROT_WRITE)))) {
		pr_err_ratelimited("prot is invalid 0x%lx\n", prot);
		return -EINVAL;
	}

	if (spg_id < SPG_ID_MIN || spg_id > SPG_ID_AUTO) {
		pr_err_ratelimited("add group failed, invalid group id %d\n", spg_id);
		return -EINVAL;
	}

	if (spg_id >= SPG_ID_AUTO_MIN && spg_id <= SPG_ID_AUTO_MAX) {
		spg = sp_group_get(tgid, spg_id);

		if (!spg) {
			pr_err_ratelimited("spg %d hasn't been created\n", spg_id);
			return -EINVAL;
		}

		down_read(&spg->rw_lock);
		if (!spg_valid(spg)) {
			up_read(&spg->rw_lock);
			pr_err_ratelimited("add group failed, group id %d is dead\n", spg_id);
			sp_group_put(spg);
			return -EINVAL;
		}
		up_read(&spg->rw_lock);

		sp_group_put(spg);
	}

	if (spg_id == SPG_ID_AUTO) {
		spg_id = ida_alloc_range(&sp_group_id_ida, SPG_ID_AUTO_MIN,
					 SPG_ID_AUTO_MAX, GFP_ATOMIC);
		if (spg_id < 0) {
			pr_err_ratelimited("add group failed, auto generate group id failed\n");
			return spg_id;
		}
		id_newly_generated = true;
	}

	down_write(&sp_group_sem);

	ret = get_task(tgid, &tsk);
	if (ret) {
		up_write(&sp_group_sem);
		free_new_spg_id(id_newly_generated, spg_id);
		goto out;
	}

	if (check_aoscore_process(tsk)) {
		up_write(&sp_group_sem);
		ret = -EACCES;
		free_new_spg_id(id_newly_generated, spg_id);
		goto out_put_task;
	}

	/*
	 * group_leader: current thread may be exiting in a multithread process
	 *
	 * DESIGN IDEA
	 * We increase mm->mm_users deliberately to ensure it's decreased in
	 * share pool under only 2 circumstances, which will simplify the overall
	 * design as mm won't be freed unexpectedly.
	 *
	 * The corresponding refcount decrements are as follows:
	 * 1. the error handling branch of THIS function.
	 * 2. In sp_group_exit(). It's called only when process is exiting.
	 */
	mm = get_task_mm(tsk->group_leader);
	if (!mm) {
		up_write(&sp_group_sem);
		ret = -ESRCH;
		free_new_spg_id(id_newly_generated, spg_id);
		goto out_put_task;
	}

	spg = find_or_alloc_sp_group(spg_id, flag);
	if (IS_ERR(spg)) {
		up_write(&sp_group_sem);
		ret = PTR_ERR(spg);
		free_new_spg_id(id_newly_generated, spg_id);
		goto out_put_mm;
	}

	down_write(&spg->rw_lock);
	ret = mm_add_group_init(tsk, mm, spg);
	if (ret) {
		up_write(&spg->rw_lock);
		goto out_drop_group;
	}

	ret = sp_mapping_group_setup(mm, spg);
	if (ret) {
		up_write(&spg->rw_lock);
		goto out_drop_group;
	}

	node = create_spg_node(mm, prot, spg);
	if (unlikely(IS_ERR(node))) {
		up_write(&spg->rw_lock);
		ret = PTR_ERR(node);
		goto out_drop_group;
	}

	ret = insert_spg_node(spg, node);
	if (unlikely(ret)) {
		up_write(&spg->rw_lock);
		goto out_drop_spg_node;
	}

	/*
	 * create mappings of existing shared memory segments into this
	 * new process' page table.
	 */
	spin_lock(&sp_area_lock);

	list_for_each_entry(spa, &spg->spa_list, link) {
		unsigned long populate = 0;
		struct file *file = spa_file(spa);
		unsigned long addr;
		unsigned long prot_spa = prot;

		if ((spa->flags & (SP_PROT_RO | SP_PROT_FOCUS)) == (SP_PROT_RO | SP_PROT_FOCUS))
			prot_spa &= ~PROT_WRITE;

		__sp_area_drop_locked(prev);
		prev = spa;

		atomic_inc(&spa->use_count);

		if (spa->is_dead == true)
			continue;

		spin_unlock(&sp_area_lock);

		if (spa->type == SPA_TYPE_K2SPG && spa->kva) {
			addr = sp_remap_kva_to_vma(spa, mm, prot_spa, NULL);
			if (IS_ERR_VALUE(addr))
				pr_warn("add group remap k2u failed %ld\n", addr);

			spin_lock(&sp_area_lock);
			continue;
		}

		down_write(&mm->mmap_lock);
		if (unlikely(mm->core_state)) {
			sp_munmap_task_areas(mm, spg, &spa->link);
			up_write(&mm->mmap_lock);
			ret = -EBUSY;
			pr_err("add group: encountered coredump, abort\n");
			spin_lock(&sp_area_lock);
			break;
		}

		addr = sp_mmap(mm, file, spa, &populate, prot_spa, NULL);
		if (IS_ERR_VALUE(addr)) {
			sp_munmap_task_areas(mm, spg, &spa->link);
			up_write(&mm->mmap_lock);
			ret = addr;
			pr_err("add group: sp mmap failed %d\n", ret);
			spin_lock(&sp_area_lock);
			break;
		}
		up_write(&mm->mmap_lock);

		if (populate) {
			ret = do_mm_populate(mm, spa->va_start, populate, 0);
			if (ret) {
				if (unlikely(fatal_signal_pending(current)))
					pr_warn_ratelimited("add group failed, current thread is killed\n");
				else
					pr_warn_ratelimited("add group failed, mm populate failed (potential no enough memory when -12): %d, spa type is %d\n",
					ret, spa->type);
				down_write(&mm->mmap_lock);
				sp_munmap_task_areas(mm, spg, spa->link.next);
				up_write(&mm->mmap_lock);
				spin_lock(&sp_area_lock);
				break;
			}
		}

		spin_lock(&sp_area_lock);
	}
	__sp_area_drop_locked(prev);
	spin_unlock(&sp_area_lock);

	if (unlikely(ret))
		delete_spg_node(spg, node);
	up_write(&spg->rw_lock);

out_drop_spg_node:
	if (unlikely(ret))
		free_spg_node(mm, spg, node);
	/*
	 * to simplify design, we don't release the resource of
	 * group_master and proc_stat, they will be freed when
	 * process is exiting.
	 */
out_drop_group:
	if (unlikely(ret)) {
		up_write(&sp_group_sem);
		sp_group_put(spg);
	} else
		up_write(&sp_group_sem);
out_put_mm:
	/* No need to put the mm if the sp group adds this mm successfully */
	if (unlikely(ret))
		mmput(mm);
out_put_task:
	put_task_struct(tsk);
out:
	return ret == 0 ? spg_id : ret;
}
EXPORT_SYMBOL_GPL(mg_sp_group_add_task);

/**
 * mg_sp_group_del_task() - delete a process from a sp group.
 * @tgid: the tgid of the task to be deleted
 * @spg_id: sharepool group id
 *
 * the group's spa list must be empty, or deletion will fail.
 *
 * Return:
 * * if success, return 0.
 * * -EINVAL, spg_id invalid or spa_list not empty or spg dead
 * * -ESRCH, the task group of tgid is not in group / process dead
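 *
 * A minimal usage sketch (hypothetical ids); the call fails with -EINVAL as
 * long as any sp_area is still allocated in the group:
 *
 *	ret = mg_sp_group_del_task(tgid, spg_id);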
 */
int mg_sp_group_del_task(int tgid, int spg_id)
{
	int ret = 0;
	struct sp_group *spg;
	struct sp_group_node *spg_node;
	struct task_struct *tsk = NULL;
	struct mm_struct *mm = NULL;
	bool is_alive = true;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	if (spg_id < SPG_ID_MIN || spg_id > SPG_ID_AUTO) {
		pr_err("del from group failed, invalid group id %d\n", spg_id);
		return -EINVAL;
	}

	spg = sp_group_get(tgid, spg_id);
	if (!spg) {
		pr_err("spg not found or get task failed, tgid:%d, spg_id:%d\n",
			tgid, spg_id);
		return -EINVAL;
	}
	down_write(&sp_group_sem);

	if (!spg_valid(spg)) {
		up_write(&sp_group_sem);
		pr_err("spg dead, spg_id:%d\n", spg_id);
		ret = -EINVAL;
		goto out;
	}

	ret = get_task(tgid, &tsk);
	if (ret) {
		up_write(&sp_group_sem);
		pr_err("task is not found, tgid:%d\n", tgid);
		goto out;
	}
	mm = get_task_mm(tsk->group_leader);
	if (!mm) {
		up_write(&sp_group_sem);
		pr_err("mm is not found, tgid:%d\n", tgid);
		ret = -ESRCH;
		goto out_put_task;
	}

	if (!mm->sp_group_master) {
		up_write(&sp_group_sem);
		pr_err("task(%d) is not in any group(%d)\n", tgid, spg_id);
		ret = -EINVAL;
		goto out_put_mm;
	}

	spg_node = find_spg_node_by_spg(mm, spg);
	if (!spg_node) {
		up_write(&sp_group_sem);
		pr_err("task(%d) not in group(%d)\n", tgid, spg_id);
		ret = -ESRCH;
		goto out_put_mm;
	}

	down_write(&spg->rw_lock);

	if (!list_empty(&spg->spa_list)) {
		up_write(&spg->rw_lock);
		up_write(&sp_group_sem);
		pr_err("spa is not empty, task:%d, spg_id:%d\n", tgid, spg_id);
		ret = -EINVAL;
		goto out_put_mm;
	}

	if (list_is_singular(&spg->procs))
		is_alive = spg->is_alive = false;
	spg->proc_num--;
	list_del(&spg_node->proc_node);
	sp_group_put(spg);
	up_write(&spg->rw_lock);
	if (!is_alive)
		blocking_notifier_call_chain(&sp_notifier_chain, 0, spg);

	list_del(&spg_node->group_node);
	mm->sp_group_master->count--;
	kfree(spg_node);
	atomic_dec(&mm->mm_users);

	up_write(&sp_group_sem);

out_put_mm:
	mmput(mm);
out_put_task:
	put_task_struct(tsk);
out:
	sp_group_put(spg); /* if spg dead, freed here */
	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_group_del_task);

int mg_sp_id_of_current(void)
{
	int ret, spg_id;
	struct sp_group_master *master;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	if ((current->flags & PF_KTHREAD) || !current->mm)
		return -EINVAL;

	down_read(&sp_group_sem);
	master = current->mm->sp_group_master;
	if (master) {
		spg_id = master->local->id;
		up_read(&sp_group_sem);
		return spg_id;
	}
	up_read(&sp_group_sem);

	down_write(&sp_group_sem);
	ret = sp_init_group_master_locked(current, current->mm);
	if (ret) {
		up_write(&sp_group_sem);
		return ret;
	}
	master = current->mm->sp_group_master;
	spg_id = master->local->id;
	up_write(&sp_group_sem);

	return spg_id;
}
EXPORT_SYMBOL_GPL(mg_sp_id_of_current);

/* the caller must hold sp_area_lock */
static void insert_sp_area(struct sp_mapping *spm, struct sp_area *spa)
{
	struct rb_node **p = &spm->area_root.rb_node;
	struct rb_node *parent = NULL;

	while (*p) {
		struct sp_area *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct sp_area, rb_node);
		if (spa->va_start < tmp->va_end)
			p = &(*p)->rb_left;
		else if (spa->va_end > tmp->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&spa->rb_node, parent, p);
1774
	rb_insert_color(&spa->rb_node, &spm->area_root);
1775 1776 1777 1778 1779 1780 1781 1782
}

/**
 * sp_alloc_area() - Allocate a region of VA from the share pool.
 * @size: the size of VA to allocate.
 * @flags: how to allocate the memory.
 * @spg: the share group that the memory is allocated to.
 * @type: the type of the region.
1783
 * @applier: the tgid of the task which allocates the region.
1784 1785 1786 1787 1788 1789 1790 1791 1792
 *
 * Return: a valid pointer for success, NULL on failure.
 */
static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags,
				     struct sp_group *spg, enum spa_type type,
				     pid_t applier)
{
	struct sp_area *spa, *first, *err;
	struct rb_node *n;
1793 1794
	unsigned long vstart;
	unsigned long vend;
1795 1796 1797
	unsigned long addr;
	unsigned long size_align = ALIGN(size, PMD_SIZE); /* va aligned to 2M */
	int device_id, node_id;
1798
	struct sp_mapping *mapping;
1799 1800 1801 1802 1803 1804 1805 1806 1807

	device_id = sp_flags_device_id(flags);
	node_id = flags & SP_SPEC_NODE_ID ? sp_flags_node_id(flags) : device_id;

	if (!is_online_node_id(node_id)) {
		pr_err_ratelimited("invalid numa node id %d\n", node_id);
		return ERR_PTR(-EINVAL);
	}

	if (flags & SP_PROT_FOCUS) {
		if ((flags & (SP_DVPP | SP_PROT_RO)) != SP_PROT_RO) {
			pr_err("invalid sp_flags [%lx]\n", flags);
			return ERR_PTR(-EINVAL);
		}
		mapping = spg->mapping[SP_MAPPING_RO];
1814
	} else if (flags & SP_DVPP) {
1815
		mapping = spg->mapping[SP_MAPPING_DVPP];
1816
	} else {
1817
		mapping = spg->mapping[SP_MAPPING_NORMAL];
1818
	}
1819

1820 1821 1822 1823 1824
	if (!mapping) {
		pr_err_ratelimited("non DVPP spg, id %d\n", spg->id);
		return ERR_PTR(-EINVAL);
	}

	vstart = mapping->start[device_id];
	vend = mapping->end[device_id];
	spa = __kmalloc_node(sizeof(struct sp_area), GFP_KERNEL, node_id);
	if (unlikely(!spa))
		return ERR_PTR(-ENOMEM);

	spin_lock(&sp_area_lock);

	/*
	 * Invalidate cache if we have more permissive parameters.
	 * cached_hole_size notes the largest hole noticed _below_
	 * the sp_area cached in free_area_cache: if size fits
	 * into that hole, we want to scan from vstart to reuse
	 * the hole instead of allocating above free_area_cache.
	 * Note that sp_free_area may update free_area_cache
	 * without updating cached_hole_size.
	 */
	if (!mapping->free_area_cache || size_align < mapping->cached_hole_size ||
	    vstart != mapping->cached_vstart) {
		mapping->cached_hole_size = 0;
		mapping->free_area_cache = NULL;
	}

	/* record if we encounter less permissive parameters */
	mapping->cached_vstart = vstart;

	/* find starting point for our search */
	if (mapping->free_area_cache) {
		first = rb_entry(mapping->free_area_cache, struct sp_area, rb_node);
		addr = first->va_end;
		if (addr + size_align < addr) {
			err = ERR_PTR(-EOVERFLOW);
			goto error;
		}
	} else {
		addr = vstart;
		if (addr + size_align < addr) {
			err = ERR_PTR(-EOVERFLOW);
			goto error;
		}

		n = mapping->area_root.rb_node;
		first = NULL;

		while (n) {
			struct sp_area *tmp;

			tmp = rb_entry(n, struct sp_area, rb_node);
			if (tmp->va_end >= addr) {
				first = tmp;
				if (tmp->va_start <= addr)
					break;
				n = n->rb_left;
			} else
				n = n->rb_right;
		}

		if (!first)
			goto found;
	}

	/* from the starting point, traverse areas until a suitable hole is found */
	while (addr + size_align > first->va_start && addr + size_align <= vend) {
		if (addr + mapping->cached_hole_size < first->va_start)
			mapping->cached_hole_size = first->va_start - addr;
		addr = first->va_end;
		if (addr + size_align < addr) {
			err = ERR_PTR(-EOVERFLOW);
			goto error;
		}

		n = rb_next(&first->rb_node);
		if (n)
			first = rb_entry(n, struct sp_area, rb_node);
		else
			goto found;
	}

found:
	if (addr + size_align > vend) {
		err = ERR_PTR(-EOVERFLOW);
		goto error;
	}

	spa->va_start = addr;
	spa->va_end = addr + size_align;
	spa->real_size = size;
	spa->region_vstart = vstart;
	spa->flags = flags;
	spa->is_hugepage = (flags & SP_HUGEPAGE);
	spa->is_dead = false;
	spa->spg = spg;
	atomic_set(&spa->use_count, 1);
	spa->type = type;
	spa->mm = NULL;
	spa->kva = 0;   /* NULL pointer */
	spa->applier = applier;
	spa->node_id = node_id;
	spa->device_id = device_id;

	spa_inc_usage(spa);
	insert_sp_area(mapping, spa);
	mapping->free_area_cache = &spa->rb_node;
	list_add_tail(&spa->link, &spg->spa_list);

	spin_unlock(&sp_area_lock);

	return spa;

error:
	spin_unlock(&sp_area_lock);
	kfree(spa);
	return err;
}

/* the caller should hold sp_area_lock */
static struct sp_area *find_sp_area_locked(struct sp_group *spg,
		unsigned long addr)
{
	struct sp_mapping *spm = sp_mapping_find(spg, addr);
	struct rb_node *n = spm->area_root.rb_node;
	while (n) {
		struct sp_area *spa;

		spa = rb_entry(n, struct sp_area, rb_node);
		if (addr < spa->va_start) {
			n = n->rb_left;
		} else if (addr > spa->va_start) {
			n = n->rb_right;
		} else {
			return spa;
		}
	}

	return NULL;
}

static struct sp_area *get_sp_area(struct sp_group *spg, unsigned long addr)
{
	struct sp_area *n;

	spin_lock(&sp_area_lock);
	n = find_sp_area_locked(spg, addr);
	if (n)
		atomic_inc(&n->use_count);
	spin_unlock(&sp_area_lock);
	return n;
}

static bool vmalloc_area_clr_flag(unsigned long kva, unsigned long flags)
{
	struct vm_struct *area;

	area = find_vm_area((void *)kva);
	if (area) {
		area->flags &= ~flags;
		return true;
	}

	return false;
}

/*
 * Free the VA region starting from addr to the share pool
 */
static void sp_free_area(struct sp_area *spa)
{
	unsigned long addr = spa->va_start;
	struct sp_mapping *spm;

	lockdep_assert_held(&sp_area_lock);

	spm = sp_mapping_find(spa->spg, addr);
	if (spm->free_area_cache) {
		struct sp_area *cache;

		cache = rb_entry(spm->free_area_cache, struct sp_area, rb_node);
		if (spa->va_start <= cache->va_start) {
			spm->free_area_cache = rb_prev(&spa->rb_node);
			/*
			 * the new cache node may be changed to another region,
			 * i.e. from DVPP region to normal region
			 */
			if (spm->free_area_cache) {
				cache = rb_entry(spm->free_area_cache,
						 struct sp_area, rb_node);
				spm->cached_vstart = cache->region_vstart;
			}
			/*
			 * We don't try to update cached_hole_size,
			 * but it won't go very wrong.
			 */
		}
	}

	if (spa->kva && !vmalloc_area_clr_flag(spa->kva, VM_SHAREPOOL))
		pr_debug("clear spa->kva %ld is not valid\n", spa->kva);

	spa_dec_usage(spa);
	list_del(&spa->link);

	rb_erase(&spa->rb_node, &spm->area_root);
	RB_CLEAR_NODE(&spa->rb_node);
	kfree(spa);
}

static void __sp_area_drop_locked(struct sp_area *spa)
{
	/*
	 * Considering a situation where task A and B are in the same spg.
	 * A is exiting and calling remove_vma(). Before A calls this func,
	 * B calls sp_free() to free the same spa. So spa maybe NULL when A
	 * calls this func later.
	 */
	if (!spa)
		return;

	if (atomic_dec_and_test(&spa->use_count))
		sp_free_area(spa);
}

static void __sp_area_drop(struct sp_area *spa)
{
	spin_lock(&sp_area_lock);
	__sp_area_drop_locked(spa);
	spin_unlock(&sp_area_lock);
}

void sp_area_drop(struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARE_POOL))
		return;

	/*
	 * Considering a situation where task A and B are in the same spg.
	 * A is exiting and calling remove_vma() -> ... -> sp_area_drop().
	 * Concurrently, B is calling sp_free() to free the same spa.
	 * find_sp_area_locked() and __sp_area_drop_locked() should be
	 * an atomic operation.
	 */
	spin_lock(&sp_area_lock);
	__sp_area_drop_locked(vma->vm_private_data);
	spin_unlock(&sp_area_lock);
}

/*
 * The function calls of do_munmap() won't change any non-atomic member
 * of struct sp_group. Please review the following chain:
 * do_munmap -> remove_vma_list -> remove_vma -> sp_area_drop ->
 * __sp_area_drop_locked -> sp_free_area
 */
static void sp_munmap(struct mm_struct *mm, unsigned long addr,
			   unsigned long size)
{
	int err;

	down_write(&mm->mmap_lock);
	if (unlikely(mm->core_state)) {
		up_write(&mm->mmap_lock);
		pr_info("munmap: encoutered coredump\n");
		return;
	}

	err = do_munmap(mm, addr, size, NULL);
	/* we are not supposed to fail */
	if (err)
		pr_err("failed to unmap VA %pK when sp munmap\n", (void *)addr);

	up_write(&mm->mmap_lock);
}

static void __sp_free(struct sp_group *spg, unsigned long addr,
		      unsigned long size, struct mm_struct *stop)
{
	struct mm_struct *mm;
	struct sp_group_node *spg_node = NULL;

	list_for_each_entry(spg_node, &spg->procs, proc_node) {
		mm = spg_node->master->mm;
		if (mm == stop)
			break;
		sp_munmap(mm, addr, size);
	}
}

/* Free the memory of the backing shmem or hugetlbfs */
static void sp_fallocate(struct sp_area *spa)
{
	int ret;
	unsigned long mode = FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE;
	unsigned long offset = addr_offset(spa);

	ret = vfs_fallocate(spa_file(spa), mode, offset, spa_size(spa));
	if (ret)
		WARN(1, "sp fallocate failed %d\n", ret);
}

static void sp_free_unmap_fallocate(struct sp_area *spa)
{
	down_read(&spa->spg->rw_lock);
	__sp_free(spa->spg, spa->va_start, spa_size(spa), NULL);
	sp_fallocate(spa);
	up_read(&spa->spg->rw_lock);
}

static int sp_check_caller_permission(struct sp_group *spg, struct mm_struct *mm)
{
	int ret = 0;

	down_read(&spg->rw_lock);
	if (!is_process_in_group(spg, mm))
		ret = -EPERM;
	up_read(&spg->rw_lock);

	return ret;
}

#define FREE_CONT	1
#define FREE_END	2

struct sp_free_context {
	unsigned long addr;
	struct sp_area *spa;
	int state;
	int spg_id;
};

/* when success, __sp_area_drop(spa) should be used */
static int sp_free_get_spa(struct sp_free_context *fc)
{
	int ret = 0;
	unsigned long addr = fc->addr;
	struct sp_area *spa;
	struct sp_group *spg;

	spg = sp_group_get(current->tgid, fc->spg_id);
	if (!spg) {
		pr_debug("sp free get group failed %d\n", fc->spg_id);
		return -EINVAL;
	}

	fc->state = FREE_CONT;

	spa = get_sp_area(spg, addr);
	sp_group_put(spg);
	if (!spa) {
		pr_debug("sp free invalid input addr %lx\n", addr);
		return -EINVAL;
	}

	if (spa->type != SPA_TYPE_ALLOC) {
		ret = -EINVAL;
		pr_debug("sp free failed, %lx is not sp alloc addr\n", addr);
		goto drop_spa;
	}
	fc->spa = spa;

	if (!current->mm)
		goto check_spa;

	ret = sp_check_caller_permission(spa->spg, current->mm);
	if (ret < 0)
		goto drop_spa;

check_spa:
	if (is_local_group(spa->spg->id) && (current->tgid != spa->applier)) {
		ret = -EPERM;
		goto drop_spa;
	}

	down_write(&spa->spg->rw_lock);
	if (!spg_valid(spa->spg)) {
		fc->state = FREE_END;
		up_write(&spa->spg->rw_lock);
		goto drop_spa;
		/* we must return success(0) in this situation */
	}
	/* the life cycle of spa has a direct relation with sp group */
	if (unlikely(spa->is_dead)) {
		up_write(&spa->spg->rw_lock);
		pr_err_ratelimited("unexpected double sp free\n");
		dump_stack();
		ret = -EINVAL;
		goto drop_spa;
	}
	spa->is_dead = true;
	up_write(&spa->spg->rw_lock);

	return 0;

drop_spa:
	__sp_area_drop(spa);
	return ret;
}

/**
 * mg_sp_free() - Free the memory allocated by mg_sp_alloc().
 * @addr: the starting VA of the memory.
 * @id: Address space identifier, which is used to distinguish the addr.
 *
 * Return:
 * * 0		- success.
 * * -EINVAL	- the memory can't be found or was not allocated by share pool.
 * * -EPERM	- the caller has no permission to free the memory.
 */
int mg_sp_free(unsigned long addr, int id)
{
	int ret = 0;
	struct sp_free_context fc = {
		.addr = addr,
		.spg_id = id,
	};

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	if (current->flags & PF_KTHREAD)
		return -EINVAL;

	ret = sp_free_get_spa(&fc);
	if (ret || fc.state == FREE_END)
		goto out;

	sp_free_unmap_fallocate(fc.spa);

	if (current->mm == NULL)
		atomic64_sub(fc.spa->real_size, &kthread_stat.alloc_size);
	else
		sp_update_process_stat(current, false, fc.spa);

	__sp_area_drop(fc.spa);  /* match get_sp_area in sp_free_get_spa */
out:
	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_free);

/* wrapper of __do_mmap() and the caller must hold down_write(&mm->mmap_lock). */
static unsigned long sp_mmap(struct mm_struct *mm, struct file *file,
			     struct sp_area *spa, unsigned long *populate,
			     unsigned long prot, struct vm_area_struct **pvma)
{
	unsigned long addr = spa->va_start;
	unsigned long size = spa_size(spa);
	unsigned long flags = MAP_FIXED | MAP_SHARED | MAP_POPULATE |
			      MAP_SHARE_POOL;
	unsigned long vm_flags = VM_NORESERVE | VM_SHARE_POOL | VM_DONTCOPY;
	unsigned long pgoff = addr_offset(spa) >> PAGE_SHIFT;
	struct vm_area_struct *vma;

	atomic_inc(&spa->use_count);
	addr = __do_mmap_mm(mm, file, addr, size, prot, flags, vm_flags, pgoff,
			 populate, NULL);
	if (IS_ERR_VALUE(addr)) {
		atomic_dec(&spa->use_count);
		pr_err("do_mmap fails %ld\n", addr);
	} else {
		BUG_ON(addr != spa->va_start);
		vma = find_vma(mm, addr);
		vma->vm_private_data = spa;
		if (pvma)
			*pvma = vma;
	}

	return addr;
}

#define ALLOC_NORMAL	1
#define ALLOC_RETRY	2
#define ALLOC_NOMEM	3
#define ALLOC_COREDUMP	4

struct sp_alloc_context {
	struct sp_group *spg;
	struct file *file;
	unsigned long size;
	unsigned long size_aligned;
	unsigned long sp_flags;
	unsigned long populate;
	int state;
	bool have_mbind;
	enum spa_type type;
};

static int sp_alloc_prepare(unsigned long size, unsigned long sp_flags,
	int spg_id, struct sp_alloc_context *ac)
{
	struct sp_group *spg;

	check_interrupt_context();

	if (current->flags & PF_KTHREAD) {
		pr_err_ratelimited("allocation failed, task is kthread\n");
		return -EINVAL;
	}

	if (unlikely(!size || (size >> PAGE_SHIFT) > totalram_pages())) {
		pr_err_ratelimited("allocation failed, invalid size %lu\n", size);
		return -EINVAL;
	}

	if (spg_id != SPG_ID_DEFAULT && (spg_id < SPG_ID_MIN || spg_id >= SPG_ID_AUTO)) {
		pr_err_ratelimited("allocation failed, invalid group id %d\n", spg_id);
		return -EINVAL;
	}

	if (sp_flags & (~SP_FLAG_MASK)) {
		pr_err_ratelimited("allocation failed, invalid flag %lx\n", sp_flags);
		return -EINVAL;
	}

	if (sp_flags & SP_HUGEPAGE_ONLY)
		sp_flags |= SP_HUGEPAGE;

	if (spg_id != SPG_ID_DEFAULT) {
		spg = sp_group_get(current->tgid, spg_id);
		if (!spg) {
			pr_err_ratelimited("allocation failed, can't find group\n");
			return -ENODEV;
		}

		/* up_read will be at the end of sp_alloc */
		down_read(&spg->rw_lock);
		if (!spg_valid(spg)) {
			up_read(&spg->rw_lock);
			sp_group_put(spg);
			pr_err_ratelimited("allocation failed, spg is dead\n");
			return -ENODEV;
		}

		if (!is_process_in_group(spg, current->mm)) {
			up_read(&spg->rw_lock);
			sp_group_put(spg);
			pr_err_ratelimited("allocation failed, task not in group\n");
			return -ENODEV;
		}
		ac->type = SPA_TYPE_ALLOC;
	} else {  /* allocation pass through scene */
		spg = sp_get_local_group(current, current->mm);
		if (IS_ERR(spg))
			return PTR_ERR(spg);
		down_read(&spg->rw_lock);
		ac->type = SPA_TYPE_ALLOC_PRIVATE;
	}

	if (sp_flags & SP_HUGEPAGE) {
		ac->file = spg->file_hugetlb;
		ac->size_aligned = ALIGN(size, PMD_SIZE);
	} else {
		ac->file = spg->file;
		ac->size_aligned = ALIGN(size, PAGE_SIZE);
	}

	ac->spg = spg;
	ac->size = size;
	ac->sp_flags = sp_flags;
	ac->state = ALLOC_NORMAL;
	ac->have_mbind = false;
	return 0;
}

static void sp_alloc_unmap(struct mm_struct *mm, struct sp_area *spa,
	struct sp_group_node *spg_node)
{
	__sp_free(spa->spg, spa->va_start, spa->real_size, mm);
}

static int sp_alloc_mmap(struct mm_struct *mm, struct sp_area *spa,
	struct sp_group_node *spg_node, struct sp_alloc_context *ac)
{
	int ret = 0;
	unsigned long mmap_addr;
	/* pass through default permission */
	unsigned long prot = PROT_READ | PROT_WRITE;
	unsigned long populate = 0;
	struct vm_area_struct *vma;

	down_write(&mm->mmap_lock);
	if (unlikely(mm->core_state)) {
		up_write(&mm->mmap_lock);
		ac->state = ALLOC_COREDUMP;
		pr_info("allocation encountered coredump\n");
		return -EFAULT;
	}

	if (spg_node)
		prot = spg_node->prot;

	if (ac->sp_flags & SP_PROT_RO)
		prot = PROT_READ;

	/* when success, mmap_addr == spa->va_start */
	mmap_addr = sp_mmap(mm, spa_file(spa), spa, &populate, prot, &vma);
	if (IS_ERR_VALUE(mmap_addr)) {
		up_write(&mm->mmap_lock);
		sp_alloc_unmap(mm, spa, spg_node);
		pr_err("sp mmap in allocation failed %ld\n", mmap_addr);
		return PTR_ERR((void *)mmap_addr);
	}

	if (unlikely(populate == 0)) {
		up_write(&mm->mmap_lock);
		pr_err("allocation sp mmap populate failed\n");
		ret = -EFAULT;
		goto unmap;
	}
	ac->populate = populate;

	if (ac->sp_flags & SP_PROT_RO)
		vma->vm_flags &= ~VM_MAYWRITE;

	/* clean PTE_RDONLY flags or trigger SMMU event */
	if (prot & PROT_WRITE)
		vma->vm_page_prot = __pgprot(((~PTE_RDONLY) & vma->vm_page_prot.pgprot) | PTE_DIRTY);
	up_write(&mm->mmap_lock);

	return ret;

unmap:
	sp_alloc_unmap(list_next_entry(spg_node, proc_node)->master->mm, spa, spg_node);
	return ret;
}

static void sp_alloc_fallback(struct sp_area *spa, struct sp_alloc_context *ac)
{
	if (ac->file == ac->spg->file) {
		ac->state = ALLOC_NOMEM;
		return;
	}

	if (!(ac->sp_flags & SP_HUGEPAGE_ONLY)) {
		ac->file = ac->spg->file;
		ac->size_aligned = ALIGN(ac->size, PAGE_SIZE);
		ac->sp_flags &= ~SP_HUGEPAGE;
		ac->state = ALLOC_RETRY;
		__sp_area_drop(spa);
		return;
	}
	ac->state = ALLOC_NOMEM;
}

static int sp_alloc_populate(struct mm_struct *mm, struct sp_area *spa,
			     struct sp_alloc_context *ac)
{
	/*
	 * We are not ignoring errors, so if we fail to allocate
	 * physical memory we just return failure, so we won't encounter
	 * page fault later on, and more importantly sp_make_share_u2k()
	 * depends on this feature (and MAP_LOCKED) to work correctly.
	 */
	return do_mm_populate(mm, spa->va_start, ac->populate, 0);
}

static long sp_mbind(struct mm_struct *mm, unsigned long start, unsigned long len,
		unsigned long node)
{
	nodemask_t nmask;

	nodes_clear(nmask);
	node_set(node, nmask);
	return __do_mbind(start, len, MPOL_BIND, MPOL_F_STATIC_NODES,
			&nmask, MPOL_MF_STRICT, mm);
}

static int __sp_alloc_mmap_populate(struct mm_struct *mm, struct sp_area *spa,
	struct sp_group_node *spg_node, struct sp_alloc_context *ac)
{
	int ret;

	ret = sp_alloc_mmap(mm, spa, spg_node, ac);
	if (ret < 0)
		return ret;

	if (!ac->have_mbind) {
		ret = sp_mbind(mm, spa->va_start, spa->real_size, spa->node_id);
		if (ret < 0) {
			pr_err("cannot bind the memory range to specified node:%d, err:%d\n",
				spa->node_id, ret);
			return ret;
		}
		ac->have_mbind = true;
	}

	ret = sp_alloc_populate(mm, spa, ac);
	if (ret) {
		if (unlikely(fatal_signal_pending(current)))
			pr_warn_ratelimited("allocation failed, current thread is killed\n");
		else
			pr_warn_ratelimited("allocation failed due to mm populate failed(potential no enough memory when -12): %d\n",
2517
					ret);
2518
	}
	return ret;
}

static int sp_alloc_mmap_populate(struct sp_area *spa,
				  struct sp_alloc_context *ac)
{
	int ret = -EINVAL;
	int mmap_ret = 0;
	struct mm_struct *mm, *end_mm = NULL;
	struct sp_group_node *spg_node;

	/* create mapping for each process in the group */
	list_for_each_entry(spg_node, &spa->spg->procs, proc_node) {
		mm = spg_node->master->mm;
		mmap_ret = __sp_alloc_mmap_populate(mm, spa, spg_node, ac);
		if (mmap_ret) {

			/*
			 * Goto fallback procedure upon ERR_VALUE,
			 * but skip the coredump situation,
			 * because we don't want one misbehaving process to affect others.
			 */
			if (ac->state != ALLOC_COREDUMP)
				goto unmap;

			/* Reset state and discard the coredump error. */
			ac->state = ALLOC_NORMAL;
			continue;
		}
		ret = mmap_ret;
	}

	return ret;

unmap:
	/* use the next mm in proc list as end mark */
	if (!list_is_last(&spg_node->proc_node, &spa->spg->procs))
		end_mm = list_next_entry(spg_node, proc_node)->master->mm;
	sp_alloc_unmap(end_mm, spa, spg_node);

	/*
	 * Sometimes do_mm_populate() allocates some memory and then failed to
	 * allocate more. (e.g. memory use reaches cgroup limit.)
	 * In this case, it will return enomem, but will not free the
	 * memory which has already been allocated.
	 *
	 * So if __sp_alloc_mmap_populate fails, always call sp_fallocate()
	 * to make sure the backing physical memory of the shared file is freed.
	 */
	sp_fallocate(spa);

	/*
	 * If hugepage allocation fails, fall back to normal pages and try
	 * again (only if SP_HUGEPAGE_ONLY is not flagged).
	 */
	sp_alloc_fallback(spa, ac);

	return mmap_ret;
}

/* spa maybe an error pointer, so introduce variable spg */
static void sp_alloc_finish(int result, struct sp_area *spa,
		struct sp_alloc_context *ac)
{
	struct sp_group *spg = ac->spg;

	/* match sp_alloc_prepare */
	up_read(&spg->rw_lock);

	if (!result)
		sp_update_process_stat(current, true, spa);

	/* this will free spa if mmap failed */
	if (spa && !IS_ERR(spa))
		__sp_area_drop(spa);

	sp_group_put(spg);
}

/**
 * mg_sp_alloc() - Allocate shared memory for all the processes in a sp_group.
 * @size: the size of memory to allocate.
 * @sp_flags: how to allocate the memory.
 * @spg_id: the share group that the memory is allocated to.
 *
 * Use pass through allocation if spg_id == SPG_ID_DEFAULT in multi-group mode.
 *
 * Return:
 * * if succeed, return the starting address of the shared memory.
 * * if fail, return the pointer of -errno.
 */
void *mg_sp_alloc(unsigned long size, unsigned long sp_flags, int spg_id)
{
	struct sp_area *spa = NULL;
	int ret = 0;
	struct sp_alloc_context ac;

	if (!sp_is_enabled())
		return ERR_PTR(-EOPNOTSUPP);

	ret = sp_alloc_prepare(size, sp_flags, spg_id, &ac);
	if (ret)
		return ERR_PTR(ret);

try_again:
	spa = sp_alloc_area(ac.size_aligned, ac.sp_flags, ac.spg,
			    ac.type, current->tgid);
	if (IS_ERR(spa)) {
		pr_err_ratelimited("alloc spa failed in allocation(potential no enough virtual memory when -75): %ld\n",
			PTR_ERR(spa));
		ret = PTR_ERR(spa);
		goto out;
	}

	ret = sp_alloc_mmap_populate(spa, &ac);
	if (ret && ac.state == ALLOC_RETRY) {
		/*
		 * The mempolicy for shared memory is located at backend file, which varies
		 * between normal pages and huge pages. So we should set the mbind policy again
		 * when we retry using normal pages.
		 */
		ac.have_mbind = false;
		goto try_again;
	}

out:
	sp_alloc_finish(ret, spa, &ac);
	if (ret)
		return ERR_PTR(ret);
	else
		return (void *)(spa->va_start);
}
EXPORT_SYMBOL_GPL(mg_sp_alloc);
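
/*
 * Minimal usage sketch (illustrative only, not part of the original source):
 * a process context that has already joined a share group, e.g. via
 * mg_sp_group_add_task(), could pair mg_sp_alloc() with mg_sp_free() as
 * below. The group id `spg_id` and the 2MB size are hypothetical values.
 *
 *	void *buf = mg_sp_alloc(2 * 1024 * 1024, SP_HUGEPAGE, spg_id);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...the same VA is now mapped in every process of the group...
 *	mg_sp_free((unsigned long)buf, spg_id);
 */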

/**
 * is_vmap_hugepage() - Check if a kernel address belongs to vmalloc family.
 * @addr: the kernel space address to be checked.
 *
 * Return:
 * * >0		- a vmalloc hugepage addr.
 * * =0		- a normal vmalloc addr.
 * * -errno	- failure.
 */
static int is_vmap_hugepage(unsigned long addr)
{
	struct vm_struct *area;

	if (unlikely(!addr)) {
		pr_err_ratelimited("null vmap addr pointer\n");
		return -EINVAL;
	}

	area = find_vm_area((void *)addr);
	if (unlikely(!area)) {
		pr_debug("can't find vm area(%lx)\n", addr);
		return -EINVAL;
	}

	if (area->flags & VM_HUGE_PAGES)
		return 1;
	else
		return 0;
}

static unsigned long __sp_remap_get_pfn(unsigned long kva)
{
	unsigned long pfn = -EINVAL;

	/* sp_make_share_k2u only supports vmalloc addresses */
	if (is_vmalloc_addr((void *)kva))
		pfn = vmalloc_to_pfn((void *)kva);

	return pfn;
}

/* when called by k2u to group, always make sure rw_lock of spg is down */
static unsigned long sp_remap_kva_to_vma(struct sp_area *spa, struct mm_struct *mm,
					unsigned long prot, struct sp_k2u_context *kc)
{
	struct vm_area_struct *vma;
	unsigned long ret_addr;
	unsigned long populate = 0;
	int ret = 0;
	unsigned long addr, buf, offset;
	unsigned long kva = spa->kva;

	down_write(&mm->mmap_lock);
	if (unlikely(mm->core_state)) {
		pr_err("k2u mmap: encountered coredump, abort\n");
		ret_addr = -EBUSY;
		if (kc)
			kc->state = K2U_COREDUMP;
		goto put_mm;
	}

	if (kc && (kc->sp_flags & SP_PROT_RO))
		prot = PROT_READ;

	ret_addr = sp_mmap(mm, spa_file(spa), spa, &populate, prot, &vma);
	if (IS_ERR_VALUE(ret_addr)) {
		pr_debug("k2u mmap failed %lx\n", ret_addr);
		goto put_mm;
	}

	if (prot & PROT_WRITE)
		vma->vm_page_prot = __pgprot(((~PTE_RDONLY) & vma->vm_page_prot.pgprot) | PTE_DIRTY);

	if (kc && (kc->sp_flags & SP_PROT_RO))
		vma->vm_flags &= ~VM_MAYWRITE;

	if (is_vm_hugetlb_page(vma)) {
		ret = remap_vmalloc_hugepage_range(vma, (void *)kva, 0);
		if (ret) {
			do_munmap(mm, ret_addr, spa_size(spa), NULL);
			pr_debug("remap vmalloc hugepage failed, ret %d, kva is %lx\n",
				 ret, (unsigned long)kva);
			ret_addr = ret;
			goto put_mm;
		}
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	} else {
		buf = ret_addr;
		addr = kva;
		offset = 0;
		do {
			ret = remap_pfn_range(vma, buf, __sp_remap_get_pfn(addr), PAGE_SIZE,
					__pgprot(vma->vm_page_prot.pgprot));
			if (ret) {
				do_munmap(mm, ret_addr, spa_size(spa), NULL);
				pr_err("remap_pfn_range failed %d\n", ret);
				ret_addr = ret;
				goto put_mm;
			}
			offset += PAGE_SIZE;
			buf += PAGE_SIZE;
			addr += PAGE_SIZE;
		} while (offset < spa_size(spa));
	}

put_mm:
	up_write(&mm->mmap_lock);

	return ret_addr;
}

/**
 * Share kernel memory to an spg; the current process must be in that group.
 * @kc: the context for k2u, including kva, size, flags...
 * @spg: the sp group to be shared with
 *
 * Return: the shared user address to start at
 */
static void *sp_make_share_kva_to_spg(struct sp_k2u_context *kc, struct sp_group *spg)
{
	struct sp_area *spa;
	struct mm_struct *mm;
	struct sp_group_node *spg_node;
	unsigned long ret_addr = -ENODEV;

	down_read(&spg->rw_lock);
	spa = sp_alloc_area(kc->size_aligned, kc->sp_flags, spg, kc->type, current->tgid);
	if (IS_ERR(spa)) {
		up_read(&spg->rw_lock);
		pr_err("alloc spa failed in k2u_spg (potentially not enough virtual memory when -75): %ld\n",
				PTR_ERR(spa));
		return spa;
	}

	spa->kva = kc->kva_aligned;
	list_for_each_entry(spg_node, &spg->procs, proc_node) {
		mm = spg_node->master->mm;
		kc->state = K2U_NORMAL;
		ret_addr = sp_remap_kva_to_vma(spa, mm, spg_node->prot, kc);
		if (IS_ERR_VALUE(ret_addr)) {
			if (kc->state == K2U_COREDUMP)
				continue;
			pr_err("remap k2u to spg failed %ld\n", ret_addr);
			__sp_free(spg, spa->va_start, spa_size(spa), mm);
			goto out;
		}
	}

out:
	up_read(&spg->rw_lock);
	if (!IS_ERR_VALUE(ret_addr))
		sp_update_process_stat(current, true, spa);
	__sp_area_drop(spa);

	return (void *)ret_addr;
}

static bool vmalloc_area_set_flag(unsigned long kva, unsigned long flags)
{
	struct vm_struct *area;

	area = find_vm_area((void *)kva);
	if (area) {
		area->flags |= flags;
		return true;
	}

	return false;
}

static int sp_k2u_prepare(unsigned long kva, unsigned long size,
	unsigned long sp_flags, int spg_id, struct sp_k2u_context *kc)
{
	int is_hugepage;
	unsigned int page_size = PAGE_SIZE;
	unsigned long kva_aligned, size_aligned;

	if (!size) {
		pr_err_ratelimited("k2u input size is 0.\n");
		return -EINVAL;
	}

	if (sp_flags & ~SP_FLAG_MASK) {
		pr_err_ratelimited("k2u sp_flags %lx error\n", sp_flags);
		return -EINVAL;
	}
	sp_flags &= ~SP_HUGEPAGE;

	if (!current->mm) {
		pr_err_ratelimited("k2u: kthread is not allowed\n");
		return -EPERM;
	}

	is_hugepage = is_vmap_hugepage(kva);
	if (is_hugepage > 0) {
		sp_flags |= SP_HUGEPAGE;
		page_size = PMD_SIZE;
	} else if (is_hugepage == 0) {
		/* do nothing */
	} else {
		pr_err_ratelimited("k2u kva is not vmalloc address\n");
		return is_hugepage;
	}

	/* aligned down kva is convenient for caller to start with any valid kva */
	kva_aligned = ALIGN_DOWN(kva, page_size);
	size_aligned = ALIGN(kva + size, page_size) - kva_aligned;

	if (!vmalloc_area_set_flag(kva_aligned, VM_SHAREPOOL)) {
		pr_debug("k2u_task kva %lx is not valid\n", kva_aligned);
		return -EINVAL;
	}

	kc->kva          = kva;
	kc->kva_aligned  = kva_aligned;
	kc->size         = size;
	kc->size_aligned = size_aligned;
	kc->sp_flags     = sp_flags;
	kc->type         = (spg_id == SPG_ID_DEFAULT || spg_id == SPG_ID_NONE)
				? SPA_TYPE_K2TASK : SPA_TYPE_K2SPG;

	return 0;
}

static void *sp_k2u_finish(void *uva, struct sp_k2u_context *kc)
{
	if (IS_ERR(uva))
		vmalloc_area_clr_flag(kc->kva_aligned, VM_SHAREPOOL);
	else
		uva = uva + (kc->kva - kc->kva_aligned);

	return uva;
}

/**
 * mg_sp_make_share_k2u() - Share kernel memory to current process or an sp_group.
 * @kva: the VA of shared kernel memory.
 * @size: the size of shared kernel memory.
 * @sp_flags: how to allocate the memory. We only support SP_DVPP.
 * @tgid:  the tgid of the specified process (Not currently in use).
 * @spg_id: the share group that the memory is shared to.
 *
 * Return: the shared target user address to start at
 *
 * Share kernel memory to current task if spg_id == SPG_ID_NONE
 * or SPG_ID_DEFAULT in multi-group mode.
 *
 * Return:
 * * if succeed, return the shared user address to start at.
 * * if fail, return the pointer of -errno.
 */
void *mg_sp_make_share_k2u(unsigned long kva, unsigned long size,
			unsigned long sp_flags, int tgid, int spg_id)
{
	void *uva;
	int ret;
	struct sp_k2u_context kc;
	struct sp_group *spg;

	if (!sp_is_enabled())
		return ERR_PTR(-EOPNOTSUPP);

	check_interrupt_context();

	ret = sp_k2u_prepare(kva, size, sp_flags, spg_id, &kc);
	if (ret)
		return ERR_PTR(ret);

	if (kc.type == SPA_TYPE_K2TASK) {
		down_write(&sp_group_sem);
		ret = sp_init_group_master_locked(current, current->mm);
		up_write(&sp_group_sem);
		if (ret) {
			pr_err("k2u_task init local mapping failed %d\n", ret);
			uva = ERR_PTR(ret);
			goto out;
		}
		/* the caller could use SPG_ID_NONE */
		spg_id = SPG_ID_DEFAULT;
	}

	spg = sp_group_get(current->tgid, spg_id);
	if (spg) {
		ret = sp_check_caller_permission(spg, current->mm);
		if (ret < 0) {
			sp_group_put(spg);
			uva = ERR_PTR(ret);
			goto out;
		}
		uva = sp_make_share_kva_to_spg(&kc, spg);
		sp_group_put(spg);
	} else {
		uva = ERR_PTR(-ENODEV);
	}

out:
	return sp_k2u_finish(uva, &kc);
}
EXPORT_SYMBOL_GPL(mg_sp_make_share_k2u);
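
/*
 * Minimal usage sketch (illustrative, with hypothetical sizes and ids):
 * share a vmalloc'ed buffer to the caller's local group and tear the user
 * mapping down again with mg_sp_unshare(). Only vmalloc addresses are
 * accepted by the k2u path.
 *
 *	void *kva = vmalloc(PAGE_SIZE);
 *	void *uva = mg_sp_make_share_k2u((unsigned long)kva, PAGE_SIZE,
 *					 0, 0, SPG_ID_NONE);
 *	if (!IS_ERR(uva)) {
 *		...hand uva to userspace...
 *		mg_sp_unshare((unsigned long)uva, PAGE_SIZE, SPG_ID_DEFAULT);
 *	}
 *	vfree(kva);
 */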

static int sp_pmd_entry(pmd_t *pmd, unsigned long addr,
			unsigned long next, struct mm_walk *walk)
{
	struct page *page;
	struct sp_walk_data *sp_walk_data = walk->private;

	/*
	 * There exists a scenario in DVPP where the page table uses a huge page
	 * but its vma doesn't record it, similar to THP.
	 * So we cannot tell whether it is a hugepage mapping until we access the
	 * pmd here. If mixed page sizes appear, just return an error.
	 */
	if (pmd_huge(*pmd)) {
		if (!sp_walk_data->is_page_type_set) {
			sp_walk_data->is_page_type_set = true;
			sp_walk_data->is_hugepage = true;
		} else if (!sp_walk_data->is_hugepage) {
			return -EFAULT;
		}

		/* To skip pte level walk */
		walk->action = ACTION_CONTINUE;

		page = pmd_page(*pmd);
		get_page(page);
		sp_walk_data->pages[sp_walk_data->page_count++] = page;

		return 0;
	}

	if (!sp_walk_data->is_page_type_set) {
		sp_walk_data->is_page_type_set = true;
		sp_walk_data->is_hugepage = false;
	} else if (sp_walk_data->is_hugepage)
		return -EFAULT;

	sp_walk_data->pmd = pmd;

	return 0;
}

static int sp_pte_entry(pte_t *pte, unsigned long addr,
			unsigned long next, struct mm_walk *walk)
{
	struct page *page;
	struct sp_walk_data *sp_walk_data = walk->private;
	pmd_t *pmd = sp_walk_data->pmd;

retry:
	if (unlikely(!pte_present(*pte))) {
		swp_entry_t entry;

		if (pte_none(*pte))
			goto no_page;
		entry = pte_to_swp_entry(*pte);
		if (!is_migration_entry(entry))
			goto no_page;
		migration_entry_wait(walk->mm, pmd, addr);
		goto retry;
	}

	page = pte_page(*pte);
	get_page(page);
	sp_walk_data->pages[sp_walk_data->page_count++] = page;
	return 0;

no_page:
	pr_debug("the page of addr %lx unexpectedly not in RAM\n",
		 (unsigned long)addr);
	return -EFAULT;
}

static int sp_test_walk(unsigned long addr, unsigned long next,
			struct mm_walk *walk)
{
	/*
	 * FIXME: The devmm driver uses remap_pfn_range() but actually there
	 * are associated struct pages, so they should use vm_map_pages() or
	 * similar APIs. Before the driver has been converted to correct APIs
	 * we use this test_walk() callback so we can treat VM_PFNMAP VMAs as
	 * normal VMAs.
	 */
	return 0;
}

static int sp_pte_hole(unsigned long start, unsigned long end,
		       int depth, struct mm_walk *walk)
{
	pr_debug("hole [%lx, %lx) appeared unexpectedly\n", (unsigned long)start, (unsigned long)end);
	return -EFAULT;
}

static int sp_hugetlb_entry(pte_t *ptep, unsigned long hmask,
			    unsigned long addr, unsigned long next,
			    struct mm_walk *walk)
{
	pte_t pte = huge_ptep_get(ptep);
	struct page *page = pte_page(pte);
	struct sp_walk_data *sp_walk_data;

	if (unlikely(!pte_present(pte))) {
		pr_debug("the page of addr %lx unexpectedly not in RAM\n", (unsigned long)addr);
		return -EFAULT;
	}

	sp_walk_data = walk->private;
	get_page(page);
	sp_walk_data->pages[sp_walk_data->page_count++] = page;
	return 0;
}

/*
 * __sp_walk_page_range() - Walk page table with caller specific callbacks.
 * @uva: the start VA of user memory.
 * @size: the size of user memory.
 * @mm: mm struct of the target task.
 * @sp_walk_data: a structure of a page pointer array.
 *
 * the caller must hold mm->mmap_lock
 *
 * Notes for parameter alignment:
 * When size == 0, let it be page_size, so that at least one page is walked.
 *
 * When size > 0, for convenience, usually the parameters of uva and
 * size are not page aligned. There are four different alignment scenarios and
 * we must handler all of them correctly.
 *
 * The basic idea is to align down uva and align up size so all the pages
 * in range [uva, uva + size) are walked. However, there are special cases.
 *
 * Considering a 2M-hugepage addr scenario. Assuming the caller wants to
 * traverse range [1001M, 1004.5M), so uva and size is 1001M and 3.5M
 * accordingly. The aligned-down uva is 1000M and the aligned-up size is 4M.
 * The traverse range will be [1000M, 1004M). Obviously, the final page for
 * [1004M, 1004.5M) is not covered.
 *
 * To fix this problem, we need to walk an additional page, size should be
 * ALIGN(uva+size) - uva_aligned
 */
static int __sp_walk_page_range(unsigned long uva, unsigned long size,
	struct mm_struct *mm, struct sp_walk_data *sp_walk_data)
{
	int ret = 0;
	struct vm_area_struct *vma;
	unsigned long page_nr;
	struct page **pages = NULL;
	bool is_hugepage = false;
	unsigned long uva_aligned;
	unsigned long size_aligned;
	unsigned int page_size = PAGE_SIZE;
	struct mm_walk_ops sp_walk = {};

	/*
	 * Here we also support non share pool memory in this interface
	 * because the caller can't distinguish whether a uva is from the
	 * share pool or not. It is not the best idea to do so, but currently
	 * it simplifies overall design.
	 *
	 * In this situation, the correctness of the parameters is mainly
	 * guaranteed by the caller.
	 */
	vma = find_vma(mm, uva);
	if (!vma) {
		pr_debug("u2k input uva %lx is invalid\n", (unsigned long)uva);
		return -EINVAL;
	}
	if (is_vm_hugetlb_page(vma))
		is_hugepage = true;

	sp_walk.pte_hole = sp_pte_hole;
	sp_walk.test_walk = sp_test_walk;
	if (is_hugepage) {
		sp_walk_data->is_hugepage = true;
		sp_walk.hugetlb_entry = sp_hugetlb_entry;
		page_size = PMD_SIZE;
	} else {
		sp_walk_data->is_hugepage = false;
		sp_walk.pte_entry = sp_pte_entry;
		sp_walk.pmd_entry = sp_pmd_entry;
	}

	sp_walk_data->is_page_type_set = false;
	sp_walk_data->page_count = 0;
	sp_walk_data->page_size = page_size;
	uva_aligned = ALIGN_DOWN(uva, page_size);
	sp_walk_data->uva_aligned = uva_aligned;
	if (size == 0)
		size_aligned = page_size;
	else
		/* special alignment handling */
		size_aligned = ALIGN(uva + size, page_size) - uva_aligned;

	if (uva_aligned + size_aligned < uva_aligned) {
		pr_err_ratelimited("overflow happened in walk page range\n");
		return -EINVAL;
	}

	page_nr = size_aligned / page_size;
	pages = kvmalloc(page_nr * sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		pr_err_ratelimited("alloc page array failed in walk page range\n");
		return -ENOMEM;
	}
	sp_walk_data->pages = pages;

	ret = walk_page_range(mm, uva_aligned, uva_aligned + size_aligned,
			      &sp_walk, sp_walk_data);
	if (ret) {
		while (sp_walk_data->page_count--)
			put_page(pages[sp_walk_data->page_count]);
		kvfree(pages);
		sp_walk_data->pages = NULL;
	}

	if (sp_walk_data->is_hugepage)
		sp_walk_data->uva_aligned = ALIGN_DOWN(uva, PMD_SIZE);

	return ret;
}

static void __sp_walk_page_free(struct sp_walk_data *data)
{
	int i = 0;
	struct page *page;

	while (i < data->page_count) {
		page = data->pages[i++];
		put_page(page);
	}

	kvfree(data->pages);
	/* prevent repeated release */
	data->page_count = 0;
	data->pages = NULL;
}

/**
 * mg_sp_make_share_u2k() - Share user memory of a specified process to kernel.
 * @uva: the VA of shared user memory
 * @size: the size of shared user memory
 * @tgid: the tgid of the specified process (Not currently in use)
 *
 * Return:
 * * if success, return the starting kernel address of the shared memory.
 * * if failed, return the pointer of -errno.
 */
void *mg_sp_make_share_u2k(unsigned long uva, unsigned long size, int tgid)
{
	int ret = 0;
	struct mm_struct *mm = current->mm;
	void *p = ERR_PTR(-ESRCH);
	struct sp_walk_data sp_walk_data;
	struct vm_struct *area;

	if (!sp_is_enabled())
		return ERR_PTR(-EOPNOTSUPP);

	check_interrupt_context();

	if (mm == NULL) {
		pr_err("u2k: kthread is not allowed\n");
		return ERR_PTR(-EPERM);
	}

	down_write(&mm->mmap_lock);
	if (unlikely(mm->core_state)) {
		up_write(&mm->mmap_lock);
		pr_err("u2k: encountered coredump, abort\n");
		return p;
	}

	ret = __sp_walk_page_range(uva, size, mm, &sp_walk_data);
	if (ret) {
		pr_err_ratelimited("walk page range failed %d\n", ret);
		up_write(&mm->mmap_lock);
		return ERR_PTR(ret);
	}

	if (sp_walk_data.is_hugepage)
		p = vmap_hugepage(sp_walk_data.pages, sp_walk_data.page_count,
				  VM_MAP, PAGE_KERNEL);
	else
		p = vmap(sp_walk_data.pages, sp_walk_data.page_count, VM_MAP,
			 PAGE_KERNEL);
	up_write(&mm->mmap_lock);

	if (!p) {
		pr_err("vmap(huge) in u2k failed\n");
		__sp_walk_page_free(&sp_walk_data);
		return ERR_PTR(-ENOMEM);
	}

	p = p + (uva - sp_walk_data.uva_aligned);

	/*
	 * kva p may be used later in k2u. Since p comes from uva originally,
	 * it's reasonable to add flag VM_USERMAP so that p can be remapped
	 * into userspace again.
	 */
	area = find_vm_area(p);
	area->flags |= VM_USERMAP;

	kvfree(sp_walk_data.pages);
	return p;
}
EXPORT_SYMBOL_GPL(mg_sp_make_share_u2k);
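
/*
 * Minimal usage sketch (illustrative): map a user buffer, for example one
 * previously returned by mg_sp_alloc(), into the kernel and release the
 * kernel mapping again with mg_sp_unshare(). `uva` and `len` are assumed
 * to come from the caller.
 *
 *	void *kva = mg_sp_make_share_u2k(uva, len, 0);
 *	if (IS_ERR(kva))
 *		return PTR_ERR(kva);
 *	...access the memory through kva...
 *	mg_sp_unshare((unsigned long)kva, len, SPG_ID_DEFAULT);
 */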

/*
 * sp_unshare_uva - unshare a uva from sp_make_share_k2u
 * @uva: the uva to be unshared
 * @size: not used actually and we just check it
 * @group_id: specify the spg of the uva; for local group, it can be SPG_ID_DEFAULT
 *            unless current process is exiting.
 *
 * Procedure of unshare uva must be compatible with:
 *
 * 1. DVPP channel destroy procedure:
 * do_exit() -> exit_mm() (mm no longer in spg and current->mm == NULL) ->
 * exit_task_work() -> task_work_run() -> __fput() -> ... -> vdec_close() ->
 * sp_unshare(uva, local_spg_id)
 */
static int sp_unshare_uva(unsigned long uva, unsigned long size, int group_id)
{
	int ret = 0;
	struct sp_area *spa;
	unsigned int page_size;
	struct sp_group *spg;

	spg = sp_group_get(current->tgid, group_id);
	if (!spg) {
		pr_err("sp unshare find group failed %d\n", group_id);
		return -EINVAL;
	}

	/* All the spa are aligned to 2M. */
	spa = get_sp_area(spg, ALIGN_DOWN(uva, PMD_SIZE));
	if (!spa) {
		ret = -EINVAL;
		pr_err("invalid input uva %lx in unshare uva\n", (unsigned long)uva);
		goto out;
	}

	if (spa->type != SPA_TYPE_K2TASK && spa->type != SPA_TYPE_K2SPG) {
		pr_err("unshare wrong type spa\n");
		ret = -EINVAL;
		goto out_drop_area;
	}
	/*
	 * 1. overflow actually won't happen due to an spa must be valid.
	 * 2. we must unshare [spa->va_start, spa->va_start + spa->real_size) completely
	 *    because an spa is in one-to-one correspondence with a vma.
	 *    Thus input parameter size is not necessarily needed.
	 */
	page_size = (spa->is_hugepage ? PMD_SIZE : PAGE_SIZE);

	if (spa->real_size < ALIGN(size, page_size)) {
		ret = -EINVAL;
		pr_err("unshare uva failed, invalid parameter size %lu\n", size);
		goto out_drop_area;
	}

	down_read(&spa->spg->rw_lock);
	/* always allow dvpp channel destroy procedure */
	if (current->mm && !is_process_in_group(spa->spg, current->mm)) {
		up_read(&spa->spg->rw_lock);
		pr_err("unshare uva failed, caller process doesn't belong to target group\n");
		ret = -EPERM;
		goto out_drop_area;
	}
	up_read(&spa->spg->rw_lock);

	down_write(&spa->spg->rw_lock);
	if (!spg_valid(spa->spg)) {
		up_write(&spa->spg->rw_lock);
		pr_info("no need to unshare uva, sp group of spa is dead\n");
		goto out_clr_flag;
	}
	/* the life cycle of spa has a direct relation with sp group */
	if (unlikely(spa->is_dead)) {
		up_write(&spa->spg->rw_lock);
		pr_err("unexpected double sp unshare\n");
		dump_stack();
		ret = -EINVAL;
		goto out_drop_area;
	}
	spa->is_dead = true;
	up_write(&spa->spg->rw_lock);

	down_read(&spa->spg->rw_lock);
	__sp_free(spa->spg, spa->va_start, spa->real_size, NULL);
	up_read(&spa->spg->rw_lock);

	if (current->mm == NULL)
		atomic64_sub(spa->real_size, &kthread_stat.k2u_size);
	else
		sp_update_process_stat(current, false, spa);

out_clr_flag:
	if (!vmalloc_area_clr_flag(spa->kva, VM_SHAREPOOL))
		pr_info("clear spa->kva %ld is not valid\n", spa->kva);
	spa->kva = 0;

out_drop_area:
	__sp_area_drop(spa);
out:
	sp_group_put(spg);
	return ret;
}

/* No possible concurrent protection, take care when use */
static int sp_unshare_kva(unsigned long kva, unsigned long size)
{
	unsigned long addr, kva_aligned;
	struct page *page;
	unsigned long size_aligned;
	unsigned long step;
	bool is_hugepage = true;
	int ret;

	ret = is_vmap_hugepage(kva);
	if (ret > 0) {
		kva_aligned = ALIGN_DOWN(kva, PMD_SIZE);
		size_aligned = ALIGN(kva + size, PMD_SIZE) - kva_aligned;
		step = PMD_SIZE;
	} else if (ret == 0) {
		kva_aligned = ALIGN_DOWN(kva, PAGE_SIZE);
		size_aligned = ALIGN(kva + size, PAGE_SIZE) - kva_aligned;
		step = PAGE_SIZE;
		is_hugepage = false;
	} else {
		pr_err_ratelimited("check vmap hugepage failed %d\n", ret);
		return -EINVAL;
	}

	if (kva_aligned + size_aligned < kva_aligned) {
		pr_err_ratelimited("overflow happened in unshare kva\n");
		return -EINVAL;
	}

	for (addr = kva_aligned; addr < (kva_aligned + size_aligned); addr += step) {
		page = vmalloc_to_page((void *)addr);
		if (page)
			put_page(page);
		else
			WARN(1, "vmalloc %pK to page/hugepage failed\n",
			       (void *)addr);
	}

	vunmap((void *)kva_aligned);

	return 0;
}

/**
 * mg_sp_unshare() - Unshare the kernel or user memory which was shared by calling
 *                sp_make_share_{k2u,u2k}().
 * @va: the specified virtual address of memory
 * @size: the size of unshared memory
 *
 * Use spg_id of current thread if spg_id == SPG_ID_DEFAULT.
 *
 * Return: 0 for success, -errno on failure.
 */
int mg_sp_unshare(unsigned long va, unsigned long size, int spg_id)
{
	int ret = 0;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	if (current->flags & PF_KTHREAD)
		return -EINVAL;

	if (va < TASK_SIZE) {
		/* user address */
		ret = sp_unshare_uva(va, size, spg_id);
	} else if (va >= PAGE_OFFSET) {
		/* kernel address */
		ret = sp_unshare_kva(va, size);
	} else {
		/* regard user and kernel address ranges as bad address */
		pr_debug("unshare addr %lx is not a user or kernel addr\n", (unsigned long)va);
		ret = -EFAULT;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_unshare);

/**
 * mg_sp_walk_page_range() - Walk page table with caller specific callbacks.
 * @uva: the start VA of user memory.
 * @size: the size of user memory.
 * @tsk: task struct of the target task.
 * @sp_walk_data: a structure of a page pointer array.
 *
 * Return: 0 for success, -errno on failure.
 *
 * When return 0, sp_walk_data describing [uva, uva+size) can be used.
 * When return -errno, information in sp_walk_data is useless.
 */
int mg_sp_walk_page_range(unsigned long uva, unsigned long size,
	struct task_struct *tsk, struct sp_walk_data *sp_walk_data)
{
	struct mm_struct *mm;
	int ret = 0;

	if (!sp_is_enabled())
		return -EOPNOTSUPP;

	check_interrupt_context();

	if (unlikely(!sp_walk_data)) {
		pr_err_ratelimited("null pointer when walk page range\n");
		return -EINVAL;
	}
	if (!tsk || (tsk->flags & PF_EXITING))
		return -ESRCH;

	get_task_struct(tsk);
	mm = get_task_mm(tsk);
	if (!mm) {
		put_task_struct(tsk);
		return -ESRCH;
	}

	down_write(&mm->mmap_lock);
	if (likely(!mm->core_state)) {
		ret = __sp_walk_page_range(uva, size, mm, sp_walk_data);
	} else {
		pr_err("walk page range: encountered coredump\n");
		ret = -ESRCH;
	}
	up_write(&mm->mmap_lock);

	mmput(mm);
	put_task_struct(tsk);

	return ret;
}
EXPORT_SYMBOL_GPL(mg_sp_walk_page_range);

/**
 * mg_sp_walk_page_free() - Free the sp_walk_data structure.
 * @sp_walk_data: a structure of a page pointer array to be freed.
 */
void mg_sp_walk_page_free(struct sp_walk_data *sp_walk_data)
{
	if (!sp_is_enabled())
		return;

	check_interrupt_context();

	if (!sp_walk_data)
		return;

	__sp_walk_page_free(sp_walk_data);
}
EXPORT_SYMBOL_GPL(mg_sp_walk_page_free);
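
/*
 * Minimal usage sketch (illustrative): pin the pages backing a user range
 * of the current task, then release them again. `uva` and `len` are
 * assumed inputs from the caller.
 *
 *	struct sp_walk_data wd = { 0 };
 *
 *	if (!mg_sp_walk_page_range(uva, len, current, &wd)) {
 *		...use wd.pages[0 .. wd.page_count - 1]...
 *		mg_sp_walk_page_free(&wd);
 *	}
 */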

int sp_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&sp_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(sp_register_notifier);

int sp_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&sp_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(sp_unregister_notifier);

static bool is_sp_dynamic_dvpp_addr(unsigned long addr);
/**
 * mg_sp_config_dvpp_range() - User can config the share pool start address
 *                          of each Da-vinci device.
 * @start: the value of share pool start
 * @size: the value of share pool
 * @device_id: the num of Da-vinci device
 * @tgid: the tgid of device process
 *
 * Return true for success.
 * Return false if a parameter is invalid or the range has already been set up.
 * This function has no concurrency problem.
 */
bool mg_sp_config_dvpp_range(size_t start, size_t size, int device_id, int tgid)
{
	int ret;
	bool err = false;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct sp_group *spg;
	struct sp_mapping *spm;
	unsigned long default_start;

	if (!sp_is_enabled())
		return false;

	/* NOTE: check the start address */
	if (tgid < 0 || size <= 0 || size > MMAP_SHARE_POOL_16G_SIZE ||
	    device_id < 0 || device_id >= MAX_DEVID || !is_online_node_id(device_id)
		|| !is_sp_dynamic_dvpp_addr(start) || !is_sp_dynamic_dvpp_addr(start + size - 1))
		return false;

	ret = get_task(tgid, &tsk);
	if (ret)
		return false;

	mm = get_task_mm(tsk->group_leader);
	if (!mm)
		goto put_task;

	spg = sp_get_local_group(tsk, mm);
	if (IS_ERR(spg))
		goto put_mm;

	spm = spg->mapping[SP_MAPPING_DVPP];
	default_start = MMAP_SHARE_POOL_DVPP_START + device_id * MMAP_SHARE_POOL_16G_SIZE;
	/* The dvpp range of each group can be configured only once */
	if (spm->start[device_id] != default_start)
		goto put_spg;

	spm->start[device_id] = start;
	spm->end[device_id] = start + size;

	err = true;

put_spg:
	sp_group_put(spg);
put_mm:
	mmput(mm);
put_task:
	put_task_struct(tsk);

	return err;
}
EXPORT_SYMBOL_GPL(mg_sp_config_dvpp_range);
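
/*
 * Minimal usage sketch (illustrative, hypothetical addresses): a device
 * driver could reserve a 4GB DVPP window for device 0 on behalf of process
 * `tgid`. The start address must lie inside that device's dynamic DVPP
 * region, and each group's range can only be configured once.
 *
 *	if (!mg_sp_config_dvpp_range(dvpp_start, SZ_4G, 0, tgid))
 *		pr_warn("dvpp range already configured or invalid\n");
 */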

static bool is_sp_reserve_addr(unsigned long addr)
{
	return addr >= MMAP_SHARE_POOL_START && addr < MMAP_SHARE_POOL_END;
}

/*
 *	| 16G host | 16G device | ... |     |
 *	^
 *	|
 *	MMAP_SHARE_POOL_DVPP_BASE + 16G * 64
 *	We only check the device regions.
 */
static bool is_sp_dynamic_dvpp_addr(unsigned long addr)
{
	if (addr < MMAP_SHARE_POOL_DYNAMIC_DVPP_BASE || addr >= MMAP_SHARE_POOL_DYNAMIC_DVPP_END)
		return false;

	return (addr - MMAP_SHARE_POOL_DYNAMIC_DVPP_BASE) & MMAP_SHARE_POOL_16G_SIZE;
}

/**
 * mg_is_sharepool_addr() - Check if a user memory address belongs to share pool.
 * @addr: the userspace address to be checked.
 *
 * Return true if addr belongs to share pool, or false vice versa.
 */
bool mg_is_sharepool_addr(unsigned long addr)
{
	return sp_is_enabled() &&
		((is_sp_reserve_addr(addr) || is_sp_dynamic_dvpp_addr(addr)));
}
EXPORT_SYMBOL_GPL(mg_is_sharepool_addr);

int sp_node_id(struct vm_area_struct *vma)
{
	struct sp_area *spa;
	int node_id = numa_node_id();

	if (!sp_is_enabled())
		return node_id;

	if (vma && (vma->vm_flags & VM_SHARE_POOL) && vma->vm_private_data) {
		spa = vma->vm_private_data;
		node_id = spa->node_id;
	}

	return node_id;
}

/*** Statistical and maintenance functions ***/

static void get_mm_rss_info(struct mm_struct *mm, unsigned long *anon,
	unsigned long *file, unsigned long *shmem, unsigned long *total_rss)
{
	*anon = get_mm_counter(mm, MM_ANONPAGES);
	*file = get_mm_counter(mm, MM_FILEPAGES);
	*shmem = get_mm_counter(mm, MM_SHMEMPAGES);
	*total_rss = *anon + *file + *shmem;
}

static long get_proc_k2u(struct sp_proc_stat *stat)
{
	return byte2kb(atomic64_read(&stat->k2u_size));
}

static long get_proc_alloc(struct sp_proc_stat *stat)
{
	return byte2kb(atomic64_read(&stat->alloc_nsize) +
			atomic64_read(&stat->alloc_hsize));
}

static void get_process_sp_res(struct sp_group_master *master,
		long *sp_res_out, long *sp_res_nsize_out)
{
	struct sp_group *spg;
	struct sp_group_node *spg_node;

	*sp_res_out = 0;
	*sp_res_nsize_out = 0;

	list_for_each_entry(spg_node, &master->node_list, group_node) {
		spg = spg_node->spg;
		*sp_res_out += byte2kb(atomic64_read(&spg->instat.alloc_nsize));
		*sp_res_out += byte2kb(atomic64_read(&spg->instat.alloc_hsize));
		*sp_res_nsize_out += byte2kb(atomic64_read(&spg->instat.alloc_nsize));
	}
}

static long get_sp_res_by_spg_proc(struct sp_group_node *spg_node)
{
	return byte2kb(atomic64_read(&spg_node->spg->instat.alloc_nsize) +
			atomic64_read(&spg_node->spg->instat.alloc_hsize));
}

/*
 *  Statistics of RSS has a maximum 64 pages deviation (256KB).
 *  Please check_sync_rss_stat().
 */
static void get_process_non_sp_res(unsigned long total_rss, unsigned long shmem,
	long sp_res_nsize, long *non_sp_res_out, long *non_sp_shm_out)
{
	long non_sp_res, non_sp_shm;

	non_sp_res = page2kb(total_rss) - sp_res_nsize;
	non_sp_res = non_sp_res < 0 ? 0 : non_sp_res;
	non_sp_shm = page2kb(shmem) - sp_res_nsize;
	non_sp_shm = non_sp_shm < 0 ? 0 : non_sp_shm;

	*non_sp_res_out = non_sp_res;
	*non_sp_shm_out = non_sp_shm;
}

static long get_spg_proc_alloc(struct sp_group_node *spg_node)
{
	return byte2kb(atomic64_read(&spg_node->instat.alloc_nsize) +
				atomic64_read(&spg_node->instat.alloc_hsize));
}

static long get_spg_proc_k2u(struct sp_group_node *spg_node)
{
	return byte2kb(atomic64_read(&spg_node->instat.k2u_size));
}

static void print_process_prot(struct seq_file *seq, unsigned long prot)
{
	if (prot == PROT_READ)
		seq_puts(seq, "R");
	else if (prot == (PROT_READ | PROT_WRITE))
		seq_puts(seq, "RW");
	else
		seq_puts(seq, "-");
}

int proc_sp_group_state(struct seq_file *m, struct pid_namespace *ns,
			struct pid *pid, struct task_struct *task)
{
	struct mm_struct *mm;
	struct sp_group_master *master;
	struct sp_proc_stat *proc_stat;
	struct sp_group_node *spg_node;
	unsigned long anon, file, shmem, total_rss;
	long sp_res, sp_res_nsize, non_sp_res, non_sp_shm;

	if (!sp_is_enabled())
		return 0;

	mm = get_task_mm(task);
	if (!mm)
		return 0;

	down_read(&sp_group_sem);
	down_read(&mm->mmap_lock);
	master = mm->sp_group_master;
	if (!master)
		goto out;

	get_mm_rss_info(mm, &anon, &file, &shmem, &total_rss);
	proc_stat = &master->instat;
	get_process_sp_res(master, &sp_res, &sp_res_nsize);
	get_process_non_sp_res(total_rss, shmem, sp_res_nsize,
			       &non_sp_res, &non_sp_shm);

	seq_puts(m, "Share Pool Aggregate Data of This Process\n\n");
	seq_printf(m, "%-8s %-16s %-9s %-9s %-9s %-10s %-10s %-8s\n",
		   "PID", "COMM", "SP_ALLOC", "SP_K2U", "SP_RES", "Non-SP_RES",
		   "Non-SP_Shm", "VIRT");
	seq_printf(m, "%-8d %-16s %-9ld %-9ld %-9ld %-10ld %-10ld %-8ld\n",
		   proc_stat->tgid, proc_stat->comm,
		   get_proc_alloc(proc_stat),
		   get_proc_k2u(proc_stat),
		   sp_res, non_sp_res, non_sp_shm,
		   page2kb(mm->total_vm));

	seq_puts(m, "\n\nProcess in Each SP Group\n\n");
	seq_printf(m, "%-8s %-9s %-9s %-9s %-4s\n",
			"Group_ID", "SP_ALLOC", "SP_K2U", "SP_RES", "PROT");

	list_for_each_entry(spg_node, &master->node_list, group_node) {
		seq_printf(m, "%-8d %-9ld %-9ld %-9ld ",
				spg_node->spg->id,
				get_spg_proc_alloc(spg_node),
				get_spg_proc_k2u(spg_node),
				get_sp_res_by_spg_proc(spg_node));
		print_process_prot(m, spg_node->prot);
		seq_putc(m, '\n');
	}

out:
	up_read(&mm->mmap_lock);
	up_read(&sp_group_sem);
	mmput(mm);
	return 0;
}
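
/*
 * Rough sketch of the output produced above (all values are made up):
 *
 *	Share Pool Aggregate Data of This Process
 *
 *	PID      COMM             SP_ALLOC  SP_K2U    SP_RES    Non-SP_RES Non-SP_Shm VIRT
 *	1234     my_app           2048      0         2048      10240      0          204800
 *
 *	Process in Each SP Group
 *
 *	Group_ID SP_ALLOC  SP_K2U    SP_RES    PROT
 *	1        2048      0         2048      RW
 */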

static void spa_stat_of_mapping_show(struct seq_file *seq, struct sp_mapping *spm)
{
	struct rb_node *node;
	struct sp_area *spa, *prev = NULL;

	spin_lock(&sp_area_lock);
	for (node = rb_first(&spm->area_root); node; node = rb_next(node)) {
		__sp_area_drop_locked(prev);

		spa = rb_entry(node, struct sp_area, rb_node);
		prev = spa;
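		/*
		 * Hold a reference so the spa cannot be freed while
		 * sp_area_lock is dropped around the seq_printf() calls
		 * below; the previous entry is released at the top of the
		 * next iteration via __sp_area_drop_locked().
		 */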
		atomic_inc(&spa->use_count);
		spin_unlock(&sp_area_lock);

		if (spg_valid(spa->spg))  /* k2u to group */
			seq_printf(seq, "%-10d ", spa->spg->id);
		else  /* spg is dead */
			seq_printf(seq, "%-10s ", "Dead");

		seq_printf(seq, "%2s%-14lx %2s%-14lx %-10ld ",
			   "0x", spa->va_start,
			   "0x", spa->va_end,
			   byte2kb(spa->real_size));

		switch (spa->type) {
		case SPA_TYPE_ALLOC:
			seq_printf(seq, "%-7s ", "ALLOC");
			break;
		case SPA_TYPE_K2TASK:
			seq_printf(seq, "%-7s ", "TASK");
			break;
		case SPA_TYPE_K2SPG:
			seq_printf(seq, "%-7s ", "SPG");
			break;
		default:
			/* usually impossible, perhaps a developer's mistake */
			break;
		}

		if (spa->is_hugepage)
			seq_printf(seq, "%-5s ", "Y");
		else
			seq_printf(seq, "%-5s ", "N");

		seq_printf(seq, "%-8d ",  spa->applier);
		seq_printf(seq, "%-8d\n", atomic_read(&spa->use_count));

		spin_lock(&sp_area_lock);
	}
	__sp_area_drop_locked(prev);
	spin_unlock(&sp_area_lock);
}

static void spa_ro_stat_show(struct seq_file *seq)
{
	spa_stat_of_mapping_show(seq, sp_mapping_ro);
}

static void spa_normal_stat_show(struct seq_file *seq)
{
	spa_stat_of_mapping_show(seq, sp_mapping_normal);
}

static void spa_dvpp_stat_show(struct seq_file *seq)
{
	struct sp_mapping *spm;

	mutex_lock(&spm_list_lock);
	list_for_each_entry(spm, &spm_dvpp_list, spm_node)
		spa_stat_of_mapping_show(seq, spm);
	mutex_unlock(&spm_list_lock);
}


void spa_overview_show(struct seq_file *seq)
{
	unsigned int total_num, alloc_num, k2u_task_num, k2u_spg_num;
	unsigned long total_size, alloc_size, k2u_task_size, k2u_spg_size;
	unsigned long dvpp_size, dvpp_va_size;

	if (!sp_is_enabled())
		return;

	spin_lock(&sp_area_lock);
	total_num     = spa_stat.total_num;
	alloc_num     = spa_stat.alloc_num;
	k2u_task_num  = spa_stat.k2u_task_num;
	k2u_spg_num   = spa_stat.k2u_spg_num;
	total_size    = spa_stat.total_size;
	alloc_size    = spa_stat.alloc_size;
	k2u_task_size = spa_stat.k2u_task_size;
	k2u_spg_size  = spa_stat.k2u_spg_size;
	dvpp_size     = spa_stat.dvpp_size;
	dvpp_va_size  = spa_stat.dvpp_va_size;
	spin_unlock(&sp_area_lock);

	SEQ_printf(seq, "Spa total num %u.\n", total_num);
	SEQ_printf(seq, "Spa alloc num %u, k2u(task) num %u, k2u(spg) num %u.\n",
		   alloc_num, k2u_task_num, k2u_spg_num);
	SEQ_printf(seq, "Spa total size:     %13lu KB\n", byte2kb(total_size));
	SEQ_printf(seq, "Spa alloc size:     %13lu KB\n", byte2kb(alloc_size));
	SEQ_printf(seq, "Spa k2u(task) size: %13lu KB\n", byte2kb(k2u_task_size));
	SEQ_printf(seq, "Spa k2u(spg) size:  %13lu KB\n", byte2kb(k2u_spg_size));
	SEQ_printf(seq, "Spa dvpp size:      %13lu KB\n", byte2kb(dvpp_size));
	SEQ_printf(seq, "Spa dvpp va size:   %13lu MB\n", byte2mb(dvpp_va_size));
	SEQ_printf(seq, "\n");
3892 3893
}

static int spg_info_show(int id, void *p, void *data)
{
	struct sp_group *spg = p;
	struct seq_file *seq = data;

	if (id >= SPG_ID_LOCAL_MIN && id <= SPG_ID_LOCAL_MAX)
		return 0;

	SEQ_printf(seq, "Group %6d ", id);

	down_read(&spg->rw_lock);
	SEQ_printf(seq, "size: %lld KB, spa num: %d, total alloc: %lld KB, normal alloc: %lld KB, huge alloc: %lld KB\n",
			byte2kb(atomic64_read(&spg->instat.size)),
			atomic_read(&spg->spa_num),
			byte2kb(atomic64_read(&spg->instat.alloc_size)),
			byte2kb(atomic64_read(&spg->instat.alloc_nsize)),
			byte2kb(atomic64_read(&spg->instat.alloc_hsize)));
	up_read(&spg->rw_lock);

	return 0;
}

void spg_overview_show(struct seq_file *seq)
{
	if (!sp_is_enabled())
		return;

	SEQ_printf(seq, "Share pool total size: %lld KB, spa total num: %d.\n",
			byte2kb(atomic64_read(&sp_overall_stat.spa_total_size)),
			atomic_read(&sp_overall_stat.spa_total_num));

	down_read(&sp_group_sem);
	idr_for_each(&sp_group_idr, spg_info_show, seq);
	up_read(&sp_group_sem);

	SEQ_printf(seq, "\n");
}

static bool should_show_statistics(void)
{
	if (!capable(CAP_SYS_ADMIN))
		return false;

	if (task_active_pid_ns(current) != &init_pid_ns)
		return false;

	return true;
}

static int spa_stat_show(struct seq_file *seq, void *offset)
{
	if (!should_show_statistics())
		return -EPERM;

	spg_overview_show(seq);
	spa_overview_show(seq);
	/* print the file header */
	seq_printf(seq, "%-10s %-16s %-16s %-10s %-7s %-5s %-8s %-8s\n",
			"Group ID", "va_start", "va_end", "Size(KB)", "Type", "Huge", "PID", "Ref");
	spa_ro_stat_show(seq);
	spa_normal_stat_show(seq);
	spa_dvpp_stat_show(seq);
	return 0;
}
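
/*
 * Illustrative /proc/sharepool/spa_stat body line matching the header
 * printed above (values are made up):
 *
 *	Group ID   va_start         va_end           Size(KB)   Type    Huge  PID      Ref
 *	1          0x100000000000   0x100000200000   2048       ALLOC   N     1234     1
 */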

static int proc_usage_by_group(int id, void *p, void *data)
{
	struct sp_group *spg = p;
	struct seq_file *seq = data;
	struct sp_group_node *spg_node;
	struct mm_struct *mm;
	struct sp_group_master *master;
	int tgid;
	unsigned long anon, file, shmem, total_rss;

	down_read(&spg->rw_lock);
	list_for_each_entry(spg_node, &spg->procs, proc_node) {
		master = spg_node->master;
		mm = master->mm;
		tgid = master->instat.tgid;

		get_mm_rss_info(mm, &anon, &file, &shmem, &total_rss);

		seq_printf(seq, "%-8d ", tgid);
		seq_printf(seq, "%-8d ", id);
		seq_printf(seq, "%-9ld %-9ld %-9ld %-8ld %-7ld %-7ld ",
				get_spg_proc_alloc(spg_node),
				get_spg_proc_k2u(spg_node),
				get_sp_res_by_spg_proc(spg_node),
				page2kb(mm->total_vm), page2kb(total_rss),
				page2kb(shmem));
		print_process_prot(seq, spg_node->prot);
		seq_putc(seq, '\n');
	}
	up_read(&spg->rw_lock);
	cond_resched();

	return 0;
}

static int proc_group_usage_show(struct seq_file *seq, void *offset)
{
	if (!should_show_statistics())
		return -EPERM;

	spg_overview_show(seq);
	spa_overview_show(seq);

	/* print the file header */
	seq_printf(seq, "%-8s %-8s %-9s %-9s %-9s %-8s %-7s %-7s %-4s\n",
			"PID", "Group_ID", "SP_ALLOC", "SP_K2U", "SP_RES",
			"VIRT", "RES", "Shm", "PROT");
	/* print kthread buff_module_guard_work */
	seq_printf(seq, "%-8s %-8s %-9lld %-9lld\n",
			"guard", "-",
			byte2kb(atomic64_read(&kthread_stat.alloc_size)),
			byte2kb(atomic64_read(&kthread_stat.k2u_size)));

	down_read(&sp_group_sem);
	idr_for_each(&sp_group_idr, proc_usage_by_group, seq);
	up_read(&sp_group_sem);

	return 0;
}

static int proc_usage_show(struct seq_file *seq, void *offset)
{
	struct sp_group_master *master = NULL;
	unsigned long anon, file, shmem, total_rss;
	long sp_res, sp_res_nsize, non_sp_res, non_sp_shm;
	struct sp_proc_stat *proc_stat;

	if (!should_show_statistics())
		return -EPERM;

	seq_printf(seq, "%-8s %-16s %-9s %-9s %-9s %-10s %-10s %-8s\n",
			"PID", "COMM", "SP_ALLOC", "SP_K2U", "SP_RES", "Non-SP_RES",
			"Non-SP_Shm", "VIRT");

	down_read(&sp_group_sem);
	mutex_lock(&master_list_lock);
	list_for_each_entry(master, &master_list, list_node) {
		proc_stat = &master->instat;
		get_mm_rss_info(master->mm, &anon, &file, &shmem, &total_rss);
		get_process_sp_res(master, &sp_res, &sp_res_nsize);
		get_process_non_sp_res(total_rss, shmem, sp_res_nsize,
				&non_sp_res, &non_sp_shm);
		seq_printf(seq, "%-8d %-16s %-9ld %-9ld %-9ld %-10ld %-10ld %-8ld\n",
				proc_stat->tgid, proc_stat->comm,
				get_proc_alloc(proc_stat),
				get_proc_k2u(proc_stat),
				sp_res, non_sp_res, non_sp_shm,
				page2kb(master->mm->total_vm));
	}
	mutex_unlock(&master_list_lock);
	up_read(&sp_group_sem);

	return 0;
}

static void __init proc_sharepool_init(void)
{
	if (!proc_mkdir("sharepool", NULL))
		return;

	proc_create_single_data("sharepool/spa_stat", 0400, NULL, spa_stat_show, NULL);
	proc_create_single_data("sharepool/proc_stat", 0400, NULL, proc_group_usage_show, NULL);
	proc_create_single_data("sharepool/proc_overview", 0400, NULL, proc_usage_show, NULL);
}
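
/*
 * The entries created above are root-readable files, e.g. (illustrative):
 *
 *	# cat /proc/sharepool/proc_overview
 *	# cat /proc/sharepool/proc_stat
 *	# cat /proc/sharepool/spa_stat
 *
 * In addition to the 0400 mode, reads return -EPERM unless the caller has
 * CAP_SYS_ADMIN and runs in the initial pid namespace, see
 * should_show_statistics().
 */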

/*** End of statistical and maintenance functions ***/

bool sp_check_addr(unsigned long addr)
{
	if (sp_is_enabled() && mg_is_sharepool_addr(addr) &&
	    !check_aoscore_process(current))
		return true;
	else
		return false;
}

bool sp_check_mmap_addr(unsigned long addr, unsigned long flags)
{
	if (sp_is_enabled() && mg_is_sharepool_addr(addr) &&
	    !check_aoscore_process(current) && !(flags & MAP_SHARE_POOL))
		return true;
	else
		return false;
}

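/*
 * Hugetlb fault handler for share pool mappings: look the page up in the
 * mapping's page cache and, if it is absent, allocate a huge page (falling
 * back to buddy-allocated huge pages on the spa's NUMA node), insert it
 * into the page cache so that every process mapping the spa shares it,
 * and finally install the huge PTE.
 */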
vm_fault_t sharepool_no_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			struct address_space *mapping, pgoff_t idx,
			unsigned long address, pte_t *ptep, unsigned int flags)
{
	struct hstate *h = hstate_vma(vma);
	vm_fault_t ret = VM_FAULT_SIGBUS;
	unsigned long size;
	struct page *page;
	pte_t new_pte;
	spinlock_t *ptl;
	unsigned long haddr = address & huge_page_mask(h);
	bool new_page = false;
	int err;
	int node_id;
	struct sp_area *spa;

	spa = vma->vm_private_data;
	if (!spa) {
		pr_err("share pool: vma is invalid, not from sp mmap\n");
		return ret;
	}
	node_id = spa->node_id;

retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		if (idx >= size)
			goto out;

		page = alloc_huge_page(vma, haddr, 0);
		if (IS_ERR(page)) {
			page = hugetlb_alloc_hugepage(node_id,
					HUGETLB_ALLOC_BUDDY | HUGETLB_ALLOC_NORECLAIM);
			if (!page)
				page = ERR_PTR(-ENOMEM);
		}
		if (IS_ERR(page)) {
			ptl = huge_pte_lock(h, mm, ptep);
			if (!huge_pte_none(huge_ptep_get(ptep))) {
				ret = 0;
				spin_unlock(ptl);
				goto out;
			}
			spin_unlock(ptl);
			ret = vmf_error(PTR_ERR(page));
			goto out;
		}
		__SetPageUptodate(page);
		new_page = true;

		/* sharepool pages are all shared */
		err = huge_add_to_page_cache(page, mapping, idx);
		if (err) {
			put_page(page);
			if (err == -EEXIST)
				goto retry;
			goto out;
		}
	}


	ptl = huge_pte_lock(h, mm, ptep);
	size = i_size_read(mapping->host) >> huge_page_shift(h);
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!huge_pte_none(huge_ptep_get(ptep)))
		goto backout;

	page_dup_rmap(page, true);
	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, haddr, ptep, new_pte);

	hugetlb_count_add(pages_per_huge_page(h), mm);

	spin_unlock(ptl);

	if (new_page)
		SetPagePrivate(&page[1]);

	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(ptl);
	unlock_page(page);
	put_page(page);
	goto out;
}

/*
 * The caller must ensure that this function is called
 * when the last thread in the thread group exits.
 */
int sp_group_exit(void)
{
	struct mm_struct *mm;
	struct sp_group *spg;
	struct sp_group_master *master;
	struct sp_group_node *spg_node, *tmp;
	bool is_alive = true;

	if (!sp_is_enabled())
		return 0;

	if (current->flags & PF_KTHREAD)
		return 0;

	mm = current->mm;
	down_write(&sp_group_sem);

	master = mm->sp_group_master;
	if (!master) {
		up_write(&sp_group_sem);
		return 0;
	}

	list_for_each_entry_safe(spg_node, tmp, &master->node_list, group_node) {
		spg = spg_node->spg;

		down_write(&spg->rw_lock);
		/* a dead group should NOT be reactive again */
		if (spg_valid(spg) && list_is_singular(&spg->procs))
			is_alive = spg->is_alive = false;
		spg->proc_num--;
		list_del(&spg_node->proc_node);
		up_write(&spg->rw_lock);

		if (!is_alive)
			blocking_notifier_call_chain(&sp_notifier_chain, 0,
						     spg);
	}

	/* match with get_task_mm() in sp_group_add_task() */
	if (atomic_sub_and_test(master->count, &mm->mm_users)) {
		up_write(&sp_group_sem);
		WARN(1, "Invalid user counting\n");
		return 1;
	}

	up_write(&sp_group_sem);
	return 0;
}

void sp_group_post_exit(struct mm_struct *mm)
{
	struct sp_proc_stat *stat;
	long alloc_size, k2u_size;
	/* lockless visit */
	struct sp_group_master *master = mm->sp_group_master;
	struct sp_group_node *spg_node, *tmp;
	struct sp_group *spg;

	if (!sp_is_enabled() || !master)
		return;

	/*
	 * There are two basic scenarios when a process in the share pool is
	 * exiting but its share pool memory usage is not 0.
	 * 1. Process A called sp_alloc(), but it terminates without calling
	 *    sp_free(). Then its share pool memory usage is a positive number.
	 * 2. Process A never called sp_alloc(), and process B in the same spg
	 *    called sp_alloc() to get an addr u. Then A gets u somehow and
	 *    called sp_free(u). Now A's share pool memory usage is a negative
	 *    number. Notice B's memory usage will be a positive number.
	 *
	 * We decide to print an info when seeing both of the scenarios.
	 *
	 * A process not in any sp group doesn't need to print anything,
	 * because there won't be any memory which is not freed.
	 */
	stat = &master->instat;
	alloc_size = atomic64_read(&stat->alloc_nsize) + atomic64_read(&stat->alloc_hsize);
	k2u_size = atomic64_read(&stat->k2u_size);

	if (alloc_size != 0 || k2u_size != 0)
		pr_info("process %s(%d) exits. It applied %ld aligned KB, k2u shared %ld aligned KB\n",
			stat->comm, stat->tgid,
			byte2kb(alloc_size), byte2kb(k2u_size));

	down_write(&sp_group_sem);
	list_for_each_entry_safe(spg_node, tmp, &master->node_list, group_node) {
		spg = spg_node->spg;
		/* match with refcount inc in sp_group_add_task */
		if (atomic_dec_and_test(&spg->use_count))
			free_sp_group_locked(spg);
		list_del(&spg_node->group_node);
		kfree(spg_node);
	}
	up_write(&sp_group_sem);

	sp_del_group_master(master);

	kfree(master);
}

DEFINE_STATIC_KEY_FALSE(share_pool_enabled_key);

static int __init enable_share_pool(char *s)
{
	static_branch_enable(&share_pool_enabled_key);
	pr_info("Ascend enable share pool features via bootargs\n");

	return 1;
}
__setup("enable_ascend_share_pool", enable_share_pool);
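
/*
 * The feature is disabled by default; it is switched on by adding the
 * token below to the kernel command line, e.g. (illustrative):
 *
 *	... enable_ascend_share_pool
 */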

static int __init share_pool_init(void)
{
	if (!sp_is_enabled())
		return 0;

	sp_mapping_normal = sp_mapping_create(SP_MAPPING_NORMAL);
	if (IS_ERR(sp_mapping_normal))
		goto fail;
	atomic_inc(&sp_mapping_normal->user);

	sp_mapping_ro = sp_mapping_create(SP_MAPPING_RO);
	if (IS_ERR(sp_mapping_ro))
		goto free_normal;
	atomic_inc(&sp_mapping_ro->user);

	proc_sharepool_init();

	return 0;

free_normal:
	kfree(sp_mapping_normal);
fail:
	pr_err("Ascend share pool initialization failed\n");
	static_branch_disable(&share_pool_enabled_key);
	return 1;
}
late_initcall(share_pool_init);