/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * 
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/abs_addr.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

#define HPTE_LOCK_BIT 3

static DEFINE_SPINLOCK(native_tlbie_lock);

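/*
 * Global (broadcast) TLB invalidation of one page.  The page and segment
 * sizes are folded into the effective-address operand: bit 8 selects the
 * segment size, and for large pages the L bit (bit 0) is set with the
 * penc encoding placed at bit 12.  ASM_MMU_FTR_IFCLR selects the old
 * two-operand "tlbie" form unless the CPU advertises MMU_FTR_TLBIE_206,
 * in which case the newer PPC_TLBIE encoding is patched in.
 */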
static inline void __tlbie(unsigned long va, int psize, int ssize)
{
	unsigned int penc;

	/* clear top 16 bits, non SLS segment */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		va &= ~0xffful;
		va |= ssize << 8;
		asm volatile(ASM_MMU_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0),
					       %2)
			     : : "r" (va), "r"(0), "i" (MMU_FTR_TLBIE_206)
			     : "memory");
		break;
	default:
		penc = mmu_psize_defs[psize].penc;
		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		va |= 1; /* L */
		asm volatile(ASM_MMU_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0),
					       %2)
			     : : "r" (va), "r"(0), "i" (MMU_FTR_TLBIE_206)
			     : "memory");
		break;
	}
}

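/*
 * Local form of the above: tlbiel only invalidates the TLB of the
 * executing CPU, so no broadcast or tlbsync is needed.  The instruction
 * is emitted as a raw .long (0x7c000224 is the tlbiel opcode), presumably
 * so that older assemblers can still build this file; the value shifted
 * into bit 21 distinguishes the 4K and large-page variants.
 */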
static inline void __tlbiel(unsigned long va, int psize, int ssize)
{
	unsigned int penc;

	/* clear top 16 bits, non SLS segment */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		va &= ~0xffful;
		va |= ssize << 8;
		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r"(va) : "memory");
		break;
	default:
		penc = mmu_psize_defs[psize].penc;
		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		va |= 1; /* L */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");
		break;
	}

}

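/*
 * Flush one page from the TLB, choosing between the local and global
 * forms.  tlbiel is only used when the caller asked for a local flush,
 * the CPU has CPU_FTR_TLBIEL and the page size supports it; otherwise a
 * broadcast tlbie is issued, serialized by native_tlbie_lock on machines
 * without CPU_FTR_LOCKLESS_TLBIE (some hardware can deadlock if two CPUs
 * broadcast tlbie at the same time).
 */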
static inline void tlbie(unsigned long va, int psize, int ssize, int local)
{
	unsigned int use_local = local && cpu_has_feature(CPU_FTR_TLBIEL);
	int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(va, psize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(va, psize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		spin_unlock(&native_tlbie_lock);
}

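/*
 * Per-HPTE locking: bit 3 of the first HPTE doubleword (HPTE_LOCK_BIT
 * above) is one of the software-use bits of the entry, so it can be set
 * and cleared with atomic bitops on hptep->v without the MMU caring.
 */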
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	while (1) {
		if (!test_and_set_bit(HPTE_LOCK_BIT, word))
			break;
		while(test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

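/* The lwsync orders prior HPTE updates before the release of the lock bit. */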
static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	asm volatile("lwsync":::"memory");
	clear_bit(HPTE_LOCK_BIT, word);
}

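/*
 * Insert a new HPTE into the given group: scan the eight slots for an
 * invalid one, write the second doubleword first, then the first
 * doubleword with HPTE_V_VALID set (which also drops the per-HPTE lock).
 * Returns the slot index within the group, with bit 3 set when the entry
 * was placed via the secondary hash, or -1 if the group is full and the
 * caller needs to evict an entry (see native_hpte_remove() below).
 */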
static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, va=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, va, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (! (hptep->v & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (! (hptep->v & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	hptep->r = hpte_r;
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = hpte_v;

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

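/*
 * Make room in a full group: start at a pseudo-random slot (low timebase
 * bits) and invalidate the first valid, non-bolted entry found.  Returns
 * the freed slot index within the group, or -1 if every entry is bolted.
 */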
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = hptep->v;

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}

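/*
 * Update the protection bits of an existing HPTE.  The entry at 'slot'
 * must still match 'va' (it may have been evicted since the caller looked
 * it up); on a mismatch -1 is returned so the caller can insert a fresh
 * entry.  The stale translation is flushed from the TLB either way.
 */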
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long va, int psize, int ssize,
				 int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0;

	want_v = hpte_encode_v(va, psize, ssize);

	DBG_LOW("    update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%lx)",
		va, want_v & HPTE_V_AVPN, slot, newpp);

	native_lock_hpte(hptep);

	hpte_v = hptep->v;

	/* Even if we miss, we need to invalidate the TLB */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		DBG_LOW(" -> hit\n");
		/* Update the HPTE */
		hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
	}
	native_unlock_hpte(hptep);

	/* Ensure it is out of the tlb too. */
	tlbie(va, psize, ssize, local);

	return ret;
}

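/*
 * Find the slot of a bolted kernel mapping.  Only the primary group is
 * searched, since bolted entries are never placed via the secondary hash;
 * returns the global slot number, or -1 if no match was found.
 */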
static long native_hpte_find(unsigned long va, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_v(va, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = hptep->v;

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vsid, va;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	va = hpt_va(ea, vsid, ssize);

	slot = native_hpte_find(va, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
		(newpp & (HPTE_R_PP | HPTE_R_N));

	/* Ensure it is out of the tlb too. */
	tlbie(va, psize, ssize, 0);
}

static void native_hpte_invalidate(unsigned long slot, unsigned long va,
				   int psize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(va=%016lx, hash: %lx)\n", va, slot);

	want_v = hpte_encode_v(va, psize, ssize);
	native_lock_hpte(hptep);
	hpte_v = hptep->v;

	/* Even if we miss, we need to invalidate the TLB */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(va, psize, ssize, local);

	local_irq_restore(flags);
}

#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)

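/*
 * Work backwards from a raw HPTE to the (va, psize, ssize) it maps.  For
 * large pages the LP field in the second doubleword is matched against
 * the per-size penc values; the low-order VA bits not covered by the AVPN
 * are then recovered from the hash, using the slot number and the
 * secondary-hash bit.  Only used by native_hpte_clear() below, where no
 * Linux page tables are available to consult.
 */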
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *ssize, unsigned long *va)
{
	unsigned long hpte_r = hpte->r;
	unsigned long hpte_v = hpte->v;
	unsigned long avpn;
	int i, size, shift, penc;

	if (!(hpte_v & HPTE_V_LARGE))
		size = MMU_PAGE_4K;
	else {
		for (i = 0; i < LP_BITS; i++) {
			if ((hpte_r & LP_MASK(i+1)) == LP_MASK(i+1))
				break;
		}
		penc = LP_MASK(i+1) >> LP_SHIFT;
		for (size = 0; size < MMU_PAGE_COUNT; size++) {

			/* 4K pages are not represented by LP */
			if (size == MMU_PAGE_4K)
				continue;

			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			if (penc == mmu_psize_defs[size].penc)
				break;
		}
	}

	/* This works for all page sizes, and for 256M and 1T segments */
	shift = mmu_psize_defs[size].shift;
	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm) << 23;

	if (shift < 23) {
		unsigned long vpi, vsid, pteg;

		pteg = slot / HPTES_PER_GROUP;
		if (hpte_v & HPTE_V_SECONDARY)
			pteg = ~pteg;
		switch (hpte_v >> HPTE_V_SSIZE_SHIFT) {
		case MMU_SEGSIZE_256M:
			vpi = ((avpn >> 28) ^ pteg) & htab_hash_mask;
			break;
		case MMU_SEGSIZE_1T:
			vsid = avpn >> 40;
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			break;
		default:
			avpn = vpi = size = 0;
		}
		avpn |= (vpi << mmu_psize_defs[size].shift);
	}

	*va = avpn;
	*psize = size;
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
}

/*
 * clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long slot, slots, flags;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v, va;
	unsigned long pteg_count;
	int psize, ssize;

	pteg_count = htab_hash_mask + 1;

	local_irq_save(flags);

	/* we take the tlbie lock and hold it.  Some hardware will
	 * deadlock if we try to tlbie from two processors at once.
	 */
	spin_lock(&native_tlbie_lock);

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running,  right?  and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = hptep->v;

		/*
		 * Call __tlbie() here rather than tlbie() since we
		 * already hold the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &ssize, &va);
			hptep->v = 0;
			__tlbie(va, psize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
	spin_unlock(&native_tlbie_lock);
	local_irq_restore(flags);
}

/*
 * Batched hash table flush, we batch the tlbie's to avoid taking/releasing
 * the lock all the time
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long va, hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		va = batch->vaddr[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
			hash = hpt_hash(va, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_v(va, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (cpu_has_feature(CPU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			va = batch->vaddr[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize, va, index,
						    shift) {
				__tlbiel(va, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			va = batch->vaddr[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize, va, index,
						    shift) {
				__tlbie(va, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_PPC_PSERIES
/* Disable TLB batching on nighthawk */
static inline int tlb_batching_enabled(void)
{
	struct device_node *root = of_find_node_by_path("/");
	int enabled = 1;

	if (root) {
		const char *model = of_get_property(root, "model", NULL);
		if (model && !strcmp(model, "IBM,9076-N81"))
			enabled = 0;
		of_node_put(root);
	}

	return enabled;
}
#else
static inline int tlb_batching_enabled(void)
{
	return 1;
}
#endif

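/*
 * Install the native (bare-metal) HPTE management callbacks into ppc_md.
 * Platforms that run under a hypervisor (e.g. pSeries LPAR) install
 * hcall-based versions instead.
 */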
void __init hpte_init_native(void)
{
	ppc_md.hpte_invalidate	= native_hpte_invalidate;
	ppc_md.hpte_updatepp	= native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert	= native_hpte_insert;
	ppc_md.hpte_remove	= native_hpte_remove;
	ppc_md.hpte_clear_all	= native_hpte_clear;
	if (tlb_batching_enabled())
		ppc_md.flush_hash_range = native_flush_hash_range;
}