/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * This code is based in part on work published here:
 *
 *	https://github.com/IAIK/KAISER
 *
 * The original work was written and signed off for the Linux
 * kernel by:
 *
 *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 *
 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 *		       Andy Lutomirski <luto@amacapital.net>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>

#undef pr_fmt
#define pr_fmt(fmt)     "Kernel/User page tables isolation: " fmt

/* Backporting helper */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK	0
#endif

/*
 * Define the page-table levels we clone for user-space on 32
 * and 64 bit.
 */
#ifdef CONFIG_X86_64
#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PMD
#else
#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PTE
#endif

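/*
 * pti_print_if_insecure() reports a PTI-weakening choice only on CPUs
 * that are actually affected by Meltdown; pti_print_if_secure() below
 * is the converse.
 */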
static void __init pti_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static void __init pti_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

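/*
 * Boot-time PTI mode, established in pti_check_boottime_disable():
 * PTI_AUTO enables PTI only on CPUs with X86_BUG_CPU_MELTDOWN, while
 * the FORCE modes record an explicit override (command line, Xen PV,
 * or mitigations=off).
 */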
enum pti_mode {
	PTI_AUTO = 0,
	PTI_FORCE_OFF,
	PTI_FORCE_ON
} pti_mode;

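/*
 * Parse the PTI-related boot options.  The settings handled below:
 *
 *   pti=on   - force-enable PTI, even on CPUs without the Meltdown bug
 *   pti=off  - force-disable PTI
 *   pti=auto - enable PTI only on affected CPUs (the default)
 *   nopti    - same as pti=off (also implied by mitigations=off)
 *
 * PTI is always force-disabled when running as a Xen PV guest.
 */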
void __init pti_check_boottime_disable(void)
{
	char arg[5];
	int ret;

	/* Assume mode is auto unless overridden. */
	pti_mode = PTI_AUTO;

	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on XEN PV.");
		return;
	}

	ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
	if (ret > 0)  {
		if (ret == 3 && !strncmp(arg, "off", 3)) {
			pti_mode = PTI_FORCE_OFF;
			pti_print_if_insecure("disabled on command line.");
			return;
		}
		if (ret == 2 && !strncmp(arg, "on", 2)) {
			pti_mode = PTI_FORCE_ON;
			pti_print_if_secure("force enabled on command line.");
			goto enable;
		}
		if (ret == 4 && !strncmp(arg, "auto", 4)) {
			pti_mode = PTI_AUTO;
			goto autosel;
		}
	}

	if (cmdline_find_option_bool(boot_command_line, "nopti") ||
	    cpu_mitigations_off()) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on command line.");
		return;
	}

autosel:
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return;
enable:
	setup_force_cpu_cap(X86_FEATURE_PTI);
}

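/*
 * Mirror a PGD entry into the user page-table copy.  With PTI each mm
 * has two adjacent PGDs: the kernel-mode one, and one page later the
 * user-mode one reached via kernel_to_user_pgdp().  This hook keeps
 * the userspace-relevant entries of the two copies in sync.
 */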
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	/*
	 * Changes to the high (kernel) portion of the kernelmode page
	 * tables are not automatically propagated to the usermode tables.
	 *
	 * Users should keep in mind that, unlike the kernelmode tables,
	 * there is no vmalloc_fault equivalent for the usermode tables.
	 * Top-level entries added to init_mm's usermode pgd after boot
	 * will not be automatically propagated to other mms.
	 */
	if (!pgdp_maps_userspace(pgdp))
		return pgd;

	/*
	 * The user page tables get the full PGD, accessible from
	 * userspace:
	 */
	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

	/*
	 * If this is normal user memory, make it NX in the kernel
	 * pagetables so that, if we somehow screw up and return to
	 * usermode with the kernel CR3 loaded, we'll get a page fault
	 * instead of allowing user code to execute with the wrong CR3.
	 *
	 * As exceptions, we don't set NX if:
	 *  - _PAGE_USER is not set.  This could be an executable
	 *     EFI runtime mapping or something similar, and the kernel
	 *     may execute from it
	 *  - we don't have NX support
	 *  - we're clearing the PGD (i.e. the new pgd is not present).
	 */
	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
	    (__supported_pte_mask & _PAGE_NX))
		pgd.pgd |= _PAGE_NX;

	/* return the copy of the PGD we want the kernel to use: */
	return pgd;
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (address < PAGE_OFFSET) {
		WARN_ONCE(1, "attempt to walk user address\n");
		return NULL;
	}

	if (pgd_none(*pgd)) {
		unsigned long new_p4d_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_p4d_page))
			return NULL;

		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
	}
	BUILD_BUG_ON(pgd_large(*pgd) != 0);

	return p4d_offset(pgd, address);
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	p4d_t *p4d;
	pud_t *pud;

	p4d = pti_user_pagetable_walk_p4d(address);
	if (!p4d)
		return NULL;

	BUILD_BUG_ON(p4d_large(*p4d) != 0);
	if (p4d_none(*p4d)) {
		unsigned long new_pud_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pud_page))
			return NULL;

		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
	}

	pud = pud_offset(p4d, address);
	/* The user page tables do not use large mappings: */
	if (pud_large(*pud)) {
		WARN_ON(1);
		return NULL;
	}
	if (pud_none(*pud)) {
		unsigned long new_pmd_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pmd_page))
			return NULL;

		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
	}

	return pmd_offset(pud, address);
}

/*
 * Walk the shadow copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.  Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables.  It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pmd_t *pmd;
	pte_t *pte;

	pmd = pti_user_pagetable_walk_pmd(address);
	if (!pmd)
		return NULL;

	/* We can't do anything sensible if we hit a large mapping. */
	if (pmd_large(*pmd)) {
		WARN_ON(1);
		return NULL;
	}

	if (pmd_none(*pmd)) {
		unsigned long new_pte_page = __get_free_page(gfp);
		if (!new_pte_page)
			return NULL;

		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
	}

	pte = pte_offset_kernel(pmd, address);
	if (pte_flags(*pte) & _PAGE_USER) {
		WARN_ONCE(1, "attempt to walk to user pte\n");
		return NULL;
	}
	return pte;
}

#ifdef CONFIG_X86_VSYSCALL_EMULATION
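/*
 * The vsyscall page must stay reachable from userspace with PTI
 * enabled, so clone its kernel PTE into the user page tables and mark
 * the page-table levels leading to it as user-accessible.
 */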
static void __init pti_setup_vsyscall(void)
{
	pte_t *pte, *target_pte;
	unsigned int level;

	pte = lookup_address(VSYSCALL_ADDR, &level);
	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
		return;

	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
	if (WARN_ON(!target_pte))
		return;

	*target_pte = *pte;
	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif

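/*
 * Granularity at which pti_clone_pgtable() copies mappings: whole PMDs
 * (sharing the last-level page tables) or individual PTEs.
 */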
enum pti_clone_level {
	PTI_CLONE_PMD,
	PTI_CLONE_PTE,
};

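/*
 * Clone the kernel mappings covering [start, end) into the user page
 * tables, at PMD or PTE granularity depending on @level.
 */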
static void
pti_clone_pgtable(unsigned long start, unsigned long end,
		  enum pti_clone_level level)
{
	unsigned long addr;

	/*
	 * Clone the populated PMDs which cover start to end. These PMD areas
	 * can have holes.
	 */
	for (addr = start; addr < end;) {
		pte_t *pte, *target_pte;
		pmd_t *pmd, *target_pmd;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;

		/* Overflow check */
		if (addr < start)
			break;

		pgd = pgd_offset_k(addr);
		if (WARN_ON(pgd_none(*pgd)))
			return;
		p4d = p4d_offset(pgd, addr);
		if (WARN_ON(p4d_none(*p4d)))
			return;

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			addr += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			addr += PMD_SIZE;
			continue;
		}

		if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
			target_pmd = pti_user_pagetable_walk_pmd(addr);
			if (WARN_ON(!target_pmd))
				return;

			/*
			 * Only clone present PMDs.  This ensures only setting
			 * _PAGE_GLOBAL on present PMDs.  This should only be
			 * called on well-known addresses anyway, so a non-
			 * present PMD would be a surprise.
			 */
			if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
				return;

			/*
			 * Setting 'target_pmd' below creates a mapping in both
			 * the user and kernel page tables.  It is effectively
			 * global, so set it as global in both copies.  Note:
			 * the X86_FEATURE_PGE check is not _required_ because
			 * the CPU ignores _PAGE_GLOBAL when PGE is not
			 * supported.  The check keeps consistency with
			 * code that only sets this bit when supported.
			 */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);

			/*
			 * Copy the PMD.  That is, the kernelmode and usermode
			 * tables will share the last-level page tables of this
			 * address range.
			 */
			*target_pmd = *pmd;

			addr += PMD_SIZE;

		} else if (level == PTI_CLONE_PTE) {

			/* Walk the page-table down to the pte level */
			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte)) {
				addr += PAGE_SIZE;
				continue;
			}

			/* Only clone present PTEs */
			if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
				return;

			/* Allocate PTE in the user page-table */
			target_pte = pti_user_pagetable_walk_pte(addr);
			if (WARN_ON(!target_pte))
				return;

			/* Set GLOBAL bit in both PTEs */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pte = pte_set_flags(*pte, _PAGE_GLOBAL);

			/* Clone the PTE */
			*target_pte = *pte;

			addr += PAGE_SIZE;

		} else {
			BUG();
		}
	}
}

#ifdef CONFIG_X86_64
/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
	p4d_t *kernel_p4d, *user_p4d;
	pgd_t *kernel_pgd;

	user_p4d = pti_user_pagetable_walk_p4d(addr);
	if (!user_p4d)
		return;

	kernel_pgd = pgd_offset_k(addr);
	kernel_p4d = p4d_offset(kernel_pgd, addr);
	*user_p4d = *kernel_p4d;
}

/*
 * Clone the CPU_ENTRY_AREA into the user space visible page table.
 */
static void __init pti_clone_user_shared(void)
{
	pti_clone_p4d(CPU_ENTRY_AREA_BASE);
}

#else /* CONFIG_X86_64 */

/*
 * On 32 bit PAE systems with 1GB of kernel address space there is only
 * one pgd/p4d for the whole kernel. Cloning that would map the whole
 * address space into the user page-tables, making PTI useless. So clone
 * the page-table on the PMD level to prevent that.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned long start, end;

	start = CPU_ENTRY_AREA_BASE;
	end   = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);

	pti_clone_pgtable(start, end, PTI_CLONE_PMD);
}
#endif /* CONFIG_X86_64 */

/*
 * Clone the ESPFIX P4D into the user space visible page table
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
	pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}

/*
 * Clone the populated PMDs of the entry and irqentry text and force them RO.
 */
static void pti_clone_entry_text(void)
{
	pti_clone_pgtable((unsigned long) __entry_text_start,
			  (unsigned long) __irqentry_text_end,
			  PTI_CLONE_PMD);
}

/*
 * Global pages and PCIDs are both ways to make kernel TLB entries
 * live longer, reduce TLB misses and improve kernel performance.
 * But, leaving all kernel text Global makes it potentially accessible
 * to Meltdown-style attacks which make it trivial to find gadgets or
 * defeat KASLR.
 *
 * Only use global pages when it is really worth it.
 */
static inline bool pti_kernel_image_global_ok(void)
{
	/*
	 * Systems with PCIDs get little benefit from global
	 * kernel text, so it is not worth the downsides.
	 */
	if (cpu_feature_enabled(X86_FEATURE_PCID))
		return false;

	/*
	 * Only do global kernel image for pti=auto.  Do the most
	 * secure thing (not global) if pti=on specified.
	 */
	if (pti_mode != PTI_AUTO)
		return false;

	/*
	 * K8 may not tolerate the cleared _PAGE_RW on the userspace
	 * global kernel image pages.  Do the safe thing (disable
	 * global kernel image).  This is unlikely to ever be
	 * noticed because PTI is disabled by default on AMD CPUs.
	 */
	if (boot_cpu_has(X86_FEATURE_K8))
		return false;

	/*
	 * RANDSTRUCT derives its hardening benefits from the
	 * attacker's lack of knowledge about the layout of kernel
	 * data structures.  Keep the kernel image non-global in
	 * cases where RANDSTRUCT is in use to help keep the layout a
	 * secret.
	 */
	if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
		return false;

	return true;
}

/*
 * This is the only user for these and it is not arch-generic
 * like the other set_memory.h functions.  Just extern them.
 */
extern int set_memory_nonglobal(unsigned long addr, int numpages);
extern int set_memory_global(unsigned long addr, int numpages);

/*
 * For some configurations, map all of kernel text into the user page
 * tables.  This reduces TLB misses, especially on non-PCID systems.
 */
static void pti_clone_kernel_text(void)
{
	/*
	 * rodata is part of the kernel image and is normally
	 * readable on the filesystem or on the web.  But, do not
	 * clone the areas past rodata, they might contain secrets.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end_clone  = (unsigned long)__end_rodata_aligned;
	unsigned long end_global = PFN_ALIGN((unsigned long)__stop___ex_table);

	if (!pti_kernel_image_global_ok())
		return;

	pr_debug("mapping partial kernel image into user address space\n");

	/*
	 * Note that this will undo _some_ of the work that
	 * pti_set_kernel_image_nonglobal() did to clear the
	 * global bit.
	 */
	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);

	/*
	 * pti_clone_pgtable() will set the global bit in any PMDs
	 * that it clones, but we also need to get any PTEs in
	 * the last level for areas that are not huge-page-aligned.
	 */

	/* Set the global bit for normal non-__init kernel text: */
	set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}

void pti_set_kernel_image_nonglobal(void)
{
	/*
	 * The identity map is created with PMDs, regardless of the
	 * actual length of the kernel.  We need to clear
	 * _PAGE_GLOBAL up to a PMD boundary, not just to the end
	 * of the image.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);

	/*
	 * This clears _PAGE_GLOBAL from the entire kernel image.
	 * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
	 * areas that are mapped to userspace.
	 */
	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}

/*
 * Initialize kernel page table isolation
 */
void __init pti_init(void)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("enabled\n");

#ifdef CONFIG_X86_32
	/*
	 * We check for X86_FEATURE_PCID here. But the init-code will
	 * clear the feature flag on 32 bit because the feature is not
	 * supported on 32 bit anyway. To print the warning we need to
	 * check with cpuid directly again.
	 */
	if (cpuid_ecx(0x1) & BIT(17)) {
		/* Use printk to work around pr_fmt() */
		printk(KERN_WARNING "\n");
		printk(KERN_WARNING "************************************************************\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
		printk(KERN_WARNING "** Your performance will increase dramatically if you     **\n");
		printk(KERN_WARNING "** switch to a 64-bit kernel!                             **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "************************************************************\n");
	}
#endif

	pti_clone_user_shared();

	/* Undo all global bits from the init pagetables in head_64.S: */
	pti_set_kernel_image_nonglobal();
	/* Replace some of the global bits just for shared entry text: */
	pti_clone_entry_text();
	pti_setup_espfix64();
	pti_setup_vsyscall();
}

/*
 * Finalize the kernel mappings in the userspace page-table. Some of the
 * mappings for the kernel image might have changed since pti_init()
 * cloned them. This is because parts of the kernel image have been
 * mapped RO and/or NX.  These changes need to be cloned again to the
 * userspace page-table.
 */
void pti_finalize(void)
{
	/*
	 * We need to clone everything (again) that maps parts of the
	 * kernel image.
	 */
	pti_clone_entry_text();
	pti_clone_kernel_text();

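	/*
	 * With CONFIG_DEBUG_WX enabled, this reports any mapping left
	 * both writable and executable in the user page tables.
	 */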
	debug_checkwx_user();
}