/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/hash.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE

/*
 * We use this value to determine the number of hash buckets from the max
 * cache size, the idea being that when the cache is at its maximum number
 * of entries, then this should be the average number of entries per bucket.
 */
#define TARGET_BUCKET_SIZE	64

struct nfsd_drc_bucket {
	struct list_head lru_head;
	spinlock_t cache_lock;
};

static struct nfsd_drc_bucket	*drc_hashtbl;
static struct kmem_cache	*drc_slab;

/* max number of entries allowed in the cache */
static unsigned int		max_drc_entries;

/* number of significant bits in the hash value */
static unsigned int		maskbits;
static unsigned int		drc_hashsize;

/*
 * Stats and other tracking of the duplicate reply cache. All of these and
 * the "rc" fields in nfsdstats are protected by the cache_lock.
 */

/* total number of entries */
static atomic_t			num_drc_entries;

/* cache misses due only to checksum comparison failures */
static unsigned int		payload_misses;

/* amount of memory (in bytes) currently consumed by the DRC */
static unsigned int		drc_mem_usage;

/* longest hash chain seen */
static unsigned int		longest_chain;

/* size of cache when we saw the longest hash chain */
static unsigned int		longest_chain_cachesize;

static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static void	cache_cleaner_func(struct work_struct *unused);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

static struct shrinker nfsd_reply_cache_shrinker = {
	.scan_objects = nfsd_reply_cache_scan,
	.count_objects = nfsd_reply_cache_count,
	.seeks	= 1,
};

/*
 * locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG
 * Otherwise, when accessing _prev or _next, the lock must be held.
 */
static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used, in kilobytes.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
	unsigned int limit;
	unsigned long low_pages = totalram_pages - totalhigh_pages;

	limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
	return min_t(unsigned int, limit, 256*1024);
}

/*
 * Compute the number of hash buckets we need. Divide the max cachesize by
 * the "target" max bucket size, and round up to next power of two.
 */
static unsigned int
nfsd_hashsize(unsigned int limit)
{
	return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}

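/* Map an XID onto a bucket index: a 32-bit hash truncated to maskbits bits */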
static u32
nfsd_cache_hash(__be32 xid)
{
	return hash_32(be32_to_cpu(xid), maskbits);
}

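/* Allocate a fresh, unhashed cache entry; returns NULL on allocation failure */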
static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
	struct svc_cacherep	*rp;

	rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
	if (rp) {
		rp->c_state = RC_UNUSED;
		rp->c_type = RC_NOCACHE;
		INIT_LIST_HEAD(&rp->c_lru);
	}
	return rp;
}

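/*
 * Release an entry: free any cached reply data, unlink it from the LRU,
 * and drop the entry/memory accounting.
 */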
static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
	if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
	}
	list_del(&rp->c_lru);
	atomic_dec(&num_drc_entries);
	drc_mem_usage -= sizeof(*rp);
	kmem_cache_free(drc_slab, rp);
}

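/* As above, but takes the bucket lock around the free */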
static void
nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	spin_lock(&b->cache_lock);
	nfsd_reply_cache_free_locked(rp);
	spin_unlock(&b->cache_lock);
}

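/*
 * Size the cache from available low memory, create the entry slab, allocate
 * the hash bucket table, and register the shrinker.
 */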
int nfsd_reply_cache_init(void)
{
	unsigned int hashsize;
	unsigned int i;

	max_drc_entries = nfsd_cache_size_limit();
	atomic_set(&num_drc_entries, 0);
	hashsize = nfsd_hashsize(max_drc_entries);
	maskbits = ilog2(hashsize);

	register_shrinker(&nfsd_reply_cache_shrinker);
	drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
					0, 0, NULL);
	if (!drc_slab)
		goto out_nomem;

	drc_hashtbl = kcalloc(hashsize, sizeof(*drc_hashtbl), GFP_KERNEL);
	if (!drc_hashtbl)
		goto out_nomem;
	for (i = 0; i < hashsize; i++) {
		INIT_LIST_HEAD(&drc_hashtbl[i].lru_head);
		spin_lock_init(&drc_hashtbl[i].cache_lock);
	}
	drc_hashsize = hashsize;

	return 0;
out_nomem:
	printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
	nfsd_reply_cache_shutdown();
	return -ENOMEM;
}

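/*
 * Tear everything down: unregister the shrinker, stop the cleaner, free all
 * remaining entries, then release the bucket table and the slab.
 */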
void nfsd_reply_cache_shutdown(void)
{
	struct svc_cacherep	*rp;
	unsigned int i;

	unregister_shrinker(&nfsd_reply_cache_shrinker);
	cancel_delayed_work_sync(&cache_cleaner);

	for (i = 0; i < drc_hashsize; i++) {
		struct list_head *head = &drc_hashtbl[i].lru_head;
		while (!list_empty(head)) {
			rp = list_first_entry(head, struct svc_cacherep, c_lru);
			nfsd_reply_cache_free_locked(rp);
		}
	}

	kfree(drc_hashtbl);
	drc_hashtbl = NULL;
	drc_hashsize = 0;

	if (drc_slab) {
		kmem_cache_destroy(drc_slab);
		drc_slab = NULL;
	}
}

/*
 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
 * not already scheduled.
 */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
{
	rp->c_timestamp = jiffies;
	list_move_tail(&rp->c_lru, &b->lru_head);
	schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}

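/*
 * Free expired and excess entries from one bucket's LRU, skipping entries
 * still in RC_INPROG. Called with the bucket lock held; returns the number
 * of entries freed.
 */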
static long
prune_bucket(struct nfsd_drc_bucket *b)
{
	struct svc_cacherep *rp, *tmp;
	long freed = 0;

	list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
		/*
		 * Don't free entries attached to calls that are still
		 * in-progress, but do keep scanning the list.
		 */
		if (rp->c_state == RC_INPROG)
			continue;
		if (atomic_read(&num_drc_entries) <= max_drc_entries &&
		    time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
			break;
		nfsd_reply_cache_free_locked(rp);
		freed++;
	}
	return freed;
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static long
prune_cache_entries(void)
{
	unsigned int i;
	long freed = 0;
	bool cancel = true;

	for (i = 0; i < drc_hashsize; i++) {
		struct nfsd_drc_bucket *b = &drc_hashtbl[i];

		if (list_empty(&b->lru_head))
			continue;
		spin_lock(&b->cache_lock);
		freed += prune_bucket(b);
		if (!list_empty(&b->lru_head))
			cancel = false;
		spin_unlock(&b->cache_lock);
	}

	/*
	 * Conditionally rearm the job to run in RC_EXPIRE since we just
	 * ran the pruner.
	 */
	if (!cancel)
		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
	return freed;
}

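/* Deferred-work callback: prune the whole cache */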
static void
cache_cleaner_func(struct work_struct *unused)
{
	prune_cache_entries();
}

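/* Shrinker ->count_objects: report the current number of cache entries */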
static unsigned long
nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return atomic_read(&num_drc_entries);
}

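/* Shrinker ->scan_objects: prune and report how many entries were freed */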
static unsigned long
nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	return prune_cache_entries();
}

/*
 * Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
	int idx;
	unsigned int base;
	__wsum csum;
	struct xdr_buf *buf = &rqstp->rq_arg;
	const unsigned char *p = buf->head[0].iov_base;
	size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
				RC_CSUMLEN);
	size_t len = min(buf->head[0].iov_len, csum_len);

	/* rq_arg.head first */
	csum = csum_partial(p, len, 0);
	csum_len -= len;

	/* Continue into page array */
	idx = buf->page_base / PAGE_SIZE;
	base = buf->page_base & ~PAGE_MASK;
	while (csum_len) {
		p = page_address(buf->pages[idx]) + base;
		len = min_t(size_t, PAGE_SIZE - base, csum_len);
		csum = csum_partial(p, len, csum);
		csum_len -= len;
		base = 0;
		++idx;
	}
	return csum;
}

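/*
 * Does this cached entry match the incoming request? Compare the RPC header
 * fields and the source address/port first, then the payload checksum.
 * Checksum-only mismatches are counted in payload_misses.
 */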
static bool
nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp)
{
	/* Check RPC header info first */
	if (rqstp->rq_xid != rp->c_xid || rqstp->rq_proc != rp->c_proc ||
	    rqstp->rq_prot != rp->c_prot || rqstp->rq_vers != rp->c_vers ||
	    rqstp->rq_arg.len != rp->c_len ||
	    !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) ||
	    rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr))
		return false;

	/* compare checksum of NFS data */
	if (csum != rp->c_csum) {
		++payload_misses;
		return false;
	}

	return true;
}

/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct nfsd_drc_bucket *b, struct svc_rqst *rqstp,
		__wsum csum)
{
	struct svc_cacherep	*rp, *ret = NULL;
	struct list_head	*rh = &b->lru_head;
	unsigned int		entries = 0;

	list_for_each_entry(rp, rh, c_lru) {
		++entries;
		if (nfsd_cache_match(rqstp, csum, rp)) {
			ret = rp;
			break;
		}
	}

	/* tally hash chain length stats */
	if (entries > longest_chain) {
		longest_chain = entries;
		longest_chain_cachesize = atomic_read(&num_drc_entries);
	} else if (entries == longest_chain) {
		/* prefer to keep the smallest cachesize possible here */
		longest_chain_cachesize = min_t(unsigned int,
				longest_chain_cachesize,
				atomic_read(&num_drc_entries));
	}

	return ret;
}

/*
 * Try to find an entry matching the current call in the cache. Since the
 * common case is a cache miss followed by an insert, preallocate an entry
 * before taking the bucket lock, then search; if a matching entry is found,
 * the preallocated one is freed again.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
	struct svc_cacherep	*rp, *found;
	__be32			xid = rqstp->rq_xid;
	u32			proto = rqstp->rq_prot,
				vers = rqstp->rq_vers,
				proc = rqstp->rq_proc;
	__wsum			csum;
	u32 hash = nfsd_cache_hash(xid);
	struct nfsd_drc_bucket *b = &drc_hashtbl[hash];
	unsigned long		age;
	int type = rqstp->rq_cachetype;
	int rtn = RC_DOIT;

	rqstp->rq_cacherep = NULL;
	if (type == RC_NOCACHE) {
		nfsdstats.rcnocache++;
		return rtn;
	}

	csum = nfsd_cache_csum(rqstp);

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry.
	 */
	rp = nfsd_reply_cache_alloc();
	spin_lock(&b->cache_lock);
	if (likely(rp)) {
		atomic_inc(&num_drc_entries);
		drc_mem_usage += sizeof(*rp);
	}

	/* go ahead and prune the cache */
	prune_bucket(b);

	found = nfsd_cache_search(b, rqstp, csum);
	if (found) {
		if (likely(rp))
			nfsd_reply_cache_free_locked(rp);
		rp = found;
		goto found_entry;
	}

	if (!rp) {
		dprintk("nfsd: unable to allocate DRC entry!\n");
		goto out;
	}

	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;
	rp->c_xid = xid;
	rp->c_proc = proc;
	rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
	rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
	rp->c_prot = proto;
	rp->c_vers = vers;
	rp->c_len = rqstp->rq_arg.len;
	rp->c_csum = csum;

	lru_put_end(b, rp);

	/* release any buffer */
	if (rp->c_type == RC_REPLBUFF) {
		drc_mem_usage -= rp->c_replvec.iov_len;
		kfree(rp->c_replvec.iov_base);
		rp->c_replvec.iov_base = NULL;
	}
	rp->c_type = RC_NOCACHE;
 out:
	spin_unlock(&b->cache_lock);
	return rtn;

found_entry:
	nfsdstats.rchits++;
	/* We found a matching entry which is either in progress or done. */
	age = jiffies - rp->c_timestamp;
	lru_put_end(b, rp);

	rtn = RC_DROPIT;
	/* Request being processed or excessive rexmits */
	if (rp->c_state == RC_INPROG || age < RC_DELAY)
		goto out;

	/* From the hall of fame of impractical attacks:
	 * Is this a user who tries to snoop on the cache? */
	rtn = RC_DOIT;
	if (!rqstp->rq_secure && rp->c_secure)
		goto out;

	/* Compose RPC reply header */
	switch (rp->c_type) {
	case RC_NOCACHE:
		break;
	case RC_REPLSTAT:
		svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
		rtn = RC_REPLY;
		break;
	case RC_REPLBUFF:
		if (!nfsd_cache_append(rqstp, &rp->c_replvec))
			goto out;	/* should not happen */
		rtn = RC_REPLY;
		break;
	default:
		printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
		nfsd_reply_cache_free_locked(rp);
	}

	goto out;
}

/*
 * Update a cache entry. This is called from nfsd_dispatch when
 * the procedure has been executed and the complete reply is in
 * rqstp->rq_res.
 *
 * We're copying around data here rather than swapping buffers because
 * the toplevel loop requires max-sized buffers, which would be a waste
 * of memory for a cache with a max reply size of 100 bytes (diropokres).
 *
 * If we should start to use different types of cache entries tailored
 * specifically for attrstat and fh's, we may save even more space.
 *
 * Also note that a cachetype of RC_NOCACHE can legally be passed when
 * nfsd failed to encode a reply that otherwise would have been cached.
 * In this case, nfsd_cache_update is called with statp == NULL.
 */
void
nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
	struct svc_cacherep *rp = rqstp->rq_cacherep;
	struct kvec	*resv = &rqstp->rq_res.head[0], *cachv;
	u32		hash;
	struct nfsd_drc_bucket *b;
	int		len;
	size_t		bufsize = 0;

	if (!rp)
		return;

	hash = nfsd_cache_hash(rp->c_xid);
	b = &drc_hashtbl[hash];

	len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
	len >>= 2;

	/* Don't cache excessive amounts of data and XDR failures */
	if (!statp || len > (256 >> 2)) {
		nfsd_reply_cache_free(b, rp);
		return;
	}

	switch (cachetype) {
	case RC_REPLSTAT:
		if (len != 1)
			printk("nfsd: RC_REPLSTAT/reply len %d!\n", len);
		rp->c_replstat = *statp;
		break;
	case RC_REPLBUFF:
		cachv = &rp->c_replvec;
		bufsize = len << 2;
		cachv->iov_base = kmalloc(bufsize, GFP_KERNEL);
		if (!cachv->iov_base) {
			nfsd_reply_cache_free(b, rp);
			return;
		}
		cachv->iov_len = bufsize;
		memcpy(cachv->iov_base, statp, bufsize);
		break;
	case RC_NOCACHE:
		nfsd_reply_cache_free(b, rp);
		return;
	}
	spin_lock(&b->cache_lock);
	drc_mem_usage += bufsize;
	lru_put_end(b, rp);
	rp->c_secure = rqstp->rq_secure;
	rp->c_type = cachetype;
	rp->c_state = RC_DONE;
	spin_unlock(&b->cache_lock);
	return;
}

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
	struct kvec	*vec = &rqstp->rq_res.head[0];

	if (vec->iov_len + data->iov_len > PAGE_SIZE) {
		printk(KERN_WARNING "nfsd: cached reply too large (%zd).\n",
				data->iov_len);
		return 0;
	}
	memcpy((char*)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
	vec->iov_len += data->iov_len;
	return 1;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "max entries:           %u\n", max_drc_entries);
	seq_printf(m, "num entries:           %u\n",
			atomic_read(&num_drc_entries));
	seq_printf(m, "hash buckets:          %u\n", 1 << maskbits);
	seq_printf(m, "mem usage:             %u\n", drc_mem_usage);
	seq_printf(m, "cache hits:            %u\n", nfsdstats.rchits);
	seq_printf(m, "cache misses:          %u\n", nfsdstats.rcmisses);
	seq_printf(m, "not cached:            %u\n", nfsdstats.rcnocache);
	seq_printf(m, "payload misses:        %u\n", payload_misses);
	seq_printf(m, "longest chain len:     %u\n", longest_chain);
	seq_printf(m, "cachesize at longest:  %u\n", longest_chain_cachesize);
	return 0;
}

int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nfsd_reply_cache_stats_show, NULL);
}