task_mmu.c 14.1 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
M
Mauricio Lin 已提交
5
#include <linux/highmem.h>
K
Kees Cook 已提交
6
#include <linux/ptrace.h>
7 8
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
M
Mauricio Lin 已提交
9

L
Linus Torvalds 已提交
10 11
#include <asm/elf.h>
#include <asm/uaccess.h>
M
Mauricio Lin 已提交
12
#include <asm/tlbflush.h>
L
Linus Torvalds 已提交
13 14 15 16 17
#include "internal.h"

/*
 * Emit the VmPeak..VmPTE lines of /proc/<pid>/status into @buffer.
 * All figures are reported in kB.  Returns the advanced buffer
 * pointer so the caller can keep appending.
 */
char *task_mem(struct mm_struct *mm, char *buffer)
{
	unsigned long data, text, lib;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	/* data: everything that is neither shared nor stack */
	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	/* text is computed directly in kB (byte count >> 10) */
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	/* lib: executable mappings beyond the main text segment, in kB */
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	buffer += sprintf(buffer,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		/* VmSize excludes the reserved_vm pages */
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
	return buffer;
}

/* Total virtual address-space size of @mm, in bytes (total_vm is pages). */
unsigned long task_vsize(struct mm_struct *mm)
{
	unsigned long npages = mm->total_vm;

	return npages * PAGE_SIZE;
}

int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
67
	*shared = get_mm_counter(mm, file_rss);
L
Linus Torvalds 已提交
68 69 70
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
71
	*resident = *shared + get_mm_counter(mm, anon_rss);
L
Linus Torvalds 已提交
72 73 74 75 76 77 78
	return mm->total_vm;
}

/*
 * Resolve /proc/<pid>/exe: locate the first executable file-backed vma
 * of the task behind @inode and return new references to its vfsmount
 * and dentry.  Returns 0 on success, -ENOENT when the task, mm, or
 * mapping cannot be found.  Callers own the returned references.
 */
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
	struct vm_area_struct * vma;
	int result = -ENOENT;
	struct task_struct *task = get_proc_task(inode);
	struct mm_struct * mm = NULL;

	if (task) {
		/* grab the mm, then drop the task ref we just took */
		mm = get_task_mm(task);
		put_task_struct(task);
	}
	if (!mm)
		goto out;
	down_read(&mm->mmap_sem);

	/* The executable is the first VM_EXECUTABLE file mapping. */
	vma = mm->mmap;
	while (vma) {
		if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
			break;
		vma = vma->vm_next;
	}

	if (vma) {
		/* take our own references before dropping mmap_sem */
		*mnt = mntget(vma->vm_file->f_path.mnt);
		*dentry = dget(vma->vm_file->f_path.dentry);
		result = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
out:
	return result;
}

/*
 * Pad a maps line out to a fixed column before the pathname field.
 * @len is how many characters the line has consumed so far (%n).
 */
static void pad_len_spaces(struct seq_file *m, int len)
{
	int pad = 25 + sizeof(void*) * 6 - len;

	if (pad < 1)
		pad = 1;
	seq_printf(m, "%*c", pad, ' ');
}

117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135
/*
 * Proportional Set Size(PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12

M
Mauricio Lin 已提交
136 137 138 139 140 141 142
/*
 * Per-vma accumulator for /proc/<pid>/smaps, filled by
 * smaps_pte_range().  All counters are byte counts.
 */
struct mem_size_stats
{
	unsigned long resident;		/* pages present in memory */
	unsigned long shared_clean;	/* mapcount >= 2, pte clean */
	unsigned long shared_dirty;	/* mapcount >= 2, pte dirty */
	unsigned long private_clean;	/* mapcount < 2, pte clean */
	unsigned long private_dirty;	/* mapcount < 2, pte dirty */
	unsigned long referenced;	/* young pte or PageReferenced */
	u64 pss;			/* proportional set size, scaled by PSS_SHIFT */
};

147 148 149 150 151 152 153
/*
 * State carried through the page-table walk helpers below: the vma
 * being walked, an opaque per-walk cookie, and the callback invoked
 * on every present bottom-level (pte) table.
 */
struct pmd_walker {
	struct vm_area_struct *vma;
	void *private;
	void (*action)(struct vm_area_struct *, pmd_t *, unsigned long,
		       unsigned long, void *);
};

M
Mauricio Lin 已提交
154
/*
 * Print one /proc/<pid>/maps line for the vma in @v; when @mss is
 * non-NULL, also append the /proc/<pid>/smaps size counters gathered
 * for that vma.  Returns 0, or -EACCES when maps_protect denies
 * access to the target task.
 */
static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	dev_t dev = 0;
	int len;

	if (maps_protect && !ptrace_may_attach(task))
		return -EACCES;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
	}

	/*
	 * NOTE(review): vm_pgoff << PAGE_SHIFT can overflow unsigned long
	 * for file offsets >= 4GB on 32-bit; a 64-bit cast and format may
	 * be needed — confirm against current callers.
	 */
	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			vma->vm_pgoff << PAGE_SHIFT,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n");
	} else {
		const char *name = arch_vma_name(vma);
		if (!name) {
			if (mm) {
				if (vma->vm_start <= mm->start_brk &&
						vma->vm_end >= mm->brk) {
					name = "[heap]";
				} else if (vma->vm_start <= mm->start_stack &&
					   vma->vm_end >= mm->start_stack) {
					name = "[stack]";
				}
			} else {
				/* no mm: presumably the gate/vdso vma — see m_start() */
				name = "[vdso]";
			}
		}
		if (name) {
			pad_len_spaces(m, len);
			seq_puts(m, name);
		}
	}
	seq_putc(m, '\n');

	if (mss)
		seq_printf(m,
			   "Size:           %8lu kB\n"
			   "Rss:            %8lu kB\n"
			   "Pss:            %8lu kB\n"
			   "Shared_Clean:   %8lu kB\n"
			   "Shared_Dirty:   %8lu kB\n"
			   "Private_Clean:  %8lu kB\n"
			   "Private_Dirty:  %8lu kB\n"
			   "Referenced:     %8lu kB\n",
			   (vma->vm_end - vma->vm_start) >> 10,
			   mss->resident >> 10,
			   /* pss is fixed-point: shift out PSS_SHIFT plus kB */
			   (unsigned long)(mss->pss >> (10 + PSS_SHIFT)),
			   mss->shared_clean  >> 10,
			   mss->shared_dirty  >> 10,
			   mss->private_clean >> 10,
			   mss->private_dirty >> 10,
			   mss->referenced >> 10);

	/* remember where to resume on the next read (m->version hint) */
	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
	return 0;
}

M
Mauricio Lin 已提交
238 239
/* seq_file ->show for /proc/<pid>/maps: one maps line, no size stats. */
static int show_map(struct seq_file *m, void *v)
{
	return show_map_internal(m, v, NULL);
}

243 244 245
/*
 * Accumulate smaps statistics for the ptes mapped by @pmd over
 * [addr, end).  Runs as the walk_page_range() callback; @private is
 * the struct mem_size_stats accumulator.  Takes the pte lock itself
 * (the caller holds mmap_sem for reading — see clear_refs_smap()/m_start()).
 */
static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
			    unsigned long addr, unsigned long end,
			    void *private)
{
	struct mem_size_stats *mss = private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;
	int mapcount;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		mss->resident += PAGE_SIZE;

		/* skip special mappings with no backing struct page */
		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Accumulate the size in pages that have been accessed. */
		if (pte_young(ptent) || PageReferenced(page))
			mss->referenced += PAGE_SIZE;
		mapcount = page_mapcount(page);
		if (mapcount >= 2) {
			if (pte_dirty(ptent))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
			/* PSS: charge each sharer 1/mapcount of the page,
			 * in PSS_SHIFT fixed point (see comment above). */
			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
		} else {
			if (pte_dirty(ptent))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT);
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
}

287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314
/*
 * walk_page_range() callback for /proc/<pid>/clear_refs: clear the
 * pte young bit and PageReferenced flag on every present page mapped
 * by @pmd in [addr, end), resetting the "Referenced" accounting that
 * smaps_pte_range() reports.  @private is unused.
 */
static void clear_refs_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				 unsigned long addr, unsigned long end,
				 void *private)
{
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
}

/*
 * Walk the pmd entries of @pud covering [addr, end), invoking
 * walker->action on each present bottom-level (pte) table.
 */
static inline void walk_pmd_range(struct pmd_walker *walker, pud_t *pud,
				  unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	for (pmd = pmd_offset(pud, addr); addr != end;
	     pmd++, addr = next) {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		walker->action(walker->vma, pmd, addr, next, walker->private);
	}
}

328 329
/*
 * Walk the pud entries of @pgd covering [addr, end), descending into
 * walk_pmd_range() for each present pud.
 */
static inline void walk_pud_range(struct pmd_walker *walker, pgd_t *pgd,
				  unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	for (pud = pud_offset(pgd, addr); addr != end;
	     pud++, addr = next) {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		walk_pmd_range(walker, pud, addr, next);
	}
}

343 344 345 346 347 348 349 350 351 352 353 354 355 356
/*
 * walk_page_range - walk the page tables of a VMA with a callback
 * @vma - VMA to walk
 * @action - callback invoked for every bottom-level (PTE) page table
 * @private - private data passed to the callback function
 *
 * Recursively walk the page table for the memory area in a VMA, calling
 * a callback for every bottom-level (PTE) page table.  The caller must
 * hold mmap_sem (see clear_refs_smap()/m_start() for the read-lock).
 */
static inline void walk_page_range(struct vm_area_struct *vma,
				   void (*action)(struct vm_area_struct *,
						  pmd_t *, unsigned long,
						  unsigned long, void *),
				   void *private)
{
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;
	/* bundle the walk state so the level helpers take one argument */
	struct pmd_walker walker = {
		.vma		= vma,
		.private	= private,
		.action		= action,
	};
	pgd_t *pgd;
	unsigned long next;

	for (pgd = pgd_offset(vma->vm_mm, addr); addr != end;
	     pgd++, addr = next) {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		walk_pud_range(&walker, pgd, addr, next);
	}
}

/*
 * seq_file ->show for /proc/<pid>/smaps: gather per-vma size counters
 * then print the maps line plus the stats block.
 */
static int show_smap(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;

	memset(&mss, 0, sizeof mss);
	/* hugetlb areas and vmas without an mm are not walked */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma, smaps_pte_range, &mss);
	return show_map_internal(m, v, &mss);
}

388 389 390 391 392 393 394 395 396 397 398 399
/*
 * /proc/<pid>/clear_refs: clear the accessed/referenced state on every
 * walkable vma of @mm, then flush the TLB so hardware-set young bits
 * are not carried over.
 */
void clear_refs_smap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		if (vma->vm_mm && !is_vm_hugetlb_page(vma))
			walk_page_range(vma, clear_refs_pte_range, NULL);
	flush_tlb_mm(mm);
	up_read(&mm->mmap_sem);
}

L
Linus Torvalds 已提交
400 401
/*
 * seq_file ->start for the maps/smaps/numa_maps files.  Takes a task
 * reference, an mm reference and mmap_sem (released by vma_stop()/
 * m_stop(), or right here once the walk is complete), then returns the
 * vma at position *pos, the gate (tail) vma at the end of the list, or
 * NULL when iteration is finished.
 */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	/* mmap_sem is held from here on (dropped below or in vma_stop()) */
	mm = mm_for_maps(priv->task);
	if (!mm)
		return NULL;

	priv->tail_vma = tail_vma = get_gate_vma(priv->task);

	/* Start with last addr hint */
	if (last_addr && (vma = find_vma(mm, last_addr))) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

464
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
L
Linus Torvalds 已提交
465
{
466
	if (vma && vma != priv->tail_vma) {
M
Mauricio Lin 已提交
467
		struct mm_struct *mm = vma->vm_mm;
L
Linus Torvalds 已提交
468 469 470 471 472 473 474
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

/*
 * seq_file ->next: advance to the next vma, then to the gate (tail)
 * vma, then stop.  Releases the mm via vma_stop() once the ordinary
 * vma list is exhausted.
 */
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *cur = v;

	++*pos;

	/* ordinary vma with a successor: just advance */
	if (cur && cur != priv->tail_vma && cur->vm_next)
		return cur->vm_next;

	/* list exhausted: drop the mm, then emit the tail vma once */
	vma_stop(priv, cur);
	if (cur == priv->tail_vma)
		return NULL;
	return priv->tail_vma;
}

486 487 488 489 490 491 492 493 494 495
/* seq_file ->stop: release vma/mm state and the task reference. */
static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

496
/* seq_file iterator for /proc/<pid>/maps */
static struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};
502

503
/* seq_file iterator for /proc/<pid>/smaps */
static struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

510 511 512
static int do_maps_open(struct inode *inode, struct file *file,
			struct seq_operations *ops)
{
513 514 515 516
	struct proc_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
517
		priv->pid = proc_pid(inode);
518 519 520 521 522 523 524
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
525 526 527 528 529 530 531 532 533
	}
	return ret;
}

/* open() for /proc/<pid>/maps */
static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

534
/* file_operations for /proc/<pid>/maps */
const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	/* seq_release_private also frees the proc_maps_private */
	.release	= seq_release_private,
};

541
#ifdef CONFIG_NUMA
542
extern int show_numa_map(struct seq_file *m, void *v);
543

K
Kees Cook 已提交
544 545 546 547 548 549 550 551 552 553 554
/*
 * Wrap show_numa_map() with the maps_protect ptrace access check —
 * the same gate applied in show_map_internal().
 */
static int show_numa_map_checked(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	if (maps_protect && !ptrace_may_attach(task))
		return -EACCES;

	return show_numa_map(m, v);
}

555
static struct seq_operations proc_pid_numa_maps_op = {
556 557 558
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
K
Kees Cook 已提交
559
        .show   = show_numa_map_checked
560
};
561 562 563 564 565 566

/* open() for /proc/<pid>/numa_maps */
static int numa_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

567
/* file_operations for /proc/<pid>/numa_maps */
const struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	/* seq_release_private also frees the proc_maps_private */
	.release	= seq_release_private,
};
573
#endif
574 575 576 577 578 579

/* open() for /proc/<pid>/smaps */
static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

580
/* file_operations for /proc/<pid>/smaps */
const struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	/* seq_release_private also frees the proc_maps_private */
	.release	= seq_release_private,
};