/*
 *  Device operations for the pnfs client.
 *
 *  Copyright (c) 2002
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *  Garth Goodson   <Garth.Goodson@netapp.com>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/export.h>
#include <linux/nfs_fs.h>
#include "nfs4session.h"
#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/*
 * Device ID RCU cache. A device ID is unique per server and layout type.
 */
#define NFS4_DEVICE_ID_HASH_BITS	5
#define NFS4_DEVICE_ID_HASH_SIZE	(1 << NFS4_DEVICE_ID_HASH_BITS)
#define NFS4_DEVICE_ID_HASH_MASK	(NFS4_DEVICE_ID_HASH_SIZE - 1)

#define PNFS_DEVICE_RETRY_TIMEOUT (120*HZ)

static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(nfs4_deviceid_lock);

#ifdef NFS_DEBUG
void
nfs4_print_deviceid(const struct nfs4_deviceid *id)
{
	u32 *p = (u32 *)id;

	dprintk("%s: device id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
}
EXPORT_SYMBOL_GPL(nfs4_print_deviceid);
#endif
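
/*
 * Hash a device ID into one of the NFS4_DEVICE_ID_HASH_SIZE cache buckets:
 * a simple multiply-by-37 accumulation over the raw NFS4_DEVICEID4_SIZE
 * bytes, masked down to the bucket index.
 */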
static inline u32
nfs4_deviceid_hash(const struct nfs4_deviceid *id)
{
	unsigned char *cptr = (unsigned char *)id->data;
	unsigned int nbytes = NFS4_DEVICEID4_SIZE;
	u32 x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x & NFS4_DEVICE_ID_HASH_MASK;
}

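/*
 * Look up a device ID in one hash bucket. The caller must hold
 * rcu_read_lock(). Entries whose reference count has already dropped to
 * zero are skipped; they are on their way out of the cache.
 */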
static struct nfs4_deviceid_node *
_lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
		 const struct nfs_client *clp, const struct nfs4_deviceid *id,
		 long hash)
{
	struct nfs4_deviceid_node *d;

	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
		if (d->ld == ld && d->nfs_client == clp &&
		    !memcmp(&d->deviceid, id, sizeof(*id))) {
			if (atomic_read(&d->ref))
				return d;
			else
				continue;
		}
	return NULL;
}

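/*
 * Ask the server for a device description with GETDEVICEINFO and let the
 * layout driver decode it into a new deviceid node. The reply buffer is
 * sized from the session's maximum response size (optionally capped by the
 * layout driver); the temporary pages are freed before returning.
 * Returns the new node, or NULL on failure.
 */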
static struct nfs4_deviceid_node *
nfs4_get_device_info(struct nfs_server *server,
		const struct nfs4_deviceid *dev_id,
		struct rpc_cred *cred, gfp_t gfp_flags)
{
	struct nfs4_deviceid_node *d = NULL;
	struct pnfs_device *pdev = NULL;
	struct page **pages = NULL;
	u32 max_resp_sz;
	int max_pages;
	int rc, i;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	if (server->pnfs_curr_ld->max_deviceinfo_size &&
	    server->pnfs_curr_ld->max_deviceinfo_size < max_resp_sz)
		max_resp_sz = server->pnfs_curr_ld->max_deviceinfo_size;
	max_pages = nfs_page_array_len(0, max_resp_sz);
	dprintk("%s: server %p max_resp_sz %u max_pages %d\n",
		__func__, server, max_resp_sz, max_pages);

	pdev = kzalloc(sizeof(*pdev), gfp_flags);
	if (!pdev)
		return NULL;

	pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
	if (!pages)
		goto out_free_pdev;

	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i])
			goto out_free_pages;
	}

	memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id));
	pdev->layout_type = server->pnfs_curr_ld->id;
	pdev->pages = pages;
	pdev->pgbase = 0;
	pdev->pglen = max_resp_sz;
	pdev->mincount = 0;
	pdev->maxcount = max_resp_sz - nfs41_maxgetdevinfo_overhead;

	rc = nfs4_proc_getdeviceinfo(server, pdev, cred);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc)
		goto out_free_pages;

	/*
	 * Found new device, need to decode it and then add it to the
	 * list of known devices for this mountpoint.
	 */
	d = server->pnfs_curr_ld->alloc_deviceid_node(server, pdev,
			gfp_flags);

out_free_pages:
	for (i = 0; i < max_pages; i++)
		__free_page(pages[i]);
	kfree(pages);
out_free_pdev:
	kfree(pdev);
	dprintk("<-- %s d %p\n", __func__, d);
	return d;
}

/*
 * Lookup a deviceid in cache and get a reference count on it if found
 *
 * @server nfs_server associated with the deviceid
 * @id deviceid to look up
 * @hash hash bucket computed from @id by nfs4_deviceid_hash()
 */
static struct nfs4_deviceid_node *
__nfs4_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, long hash)
{
	struct nfs4_deviceid_node *d;

	rcu_read_lock();
	d = _lookup_deviceid(server->pnfs_curr_ld, server->nfs_client, id,
			hash);
	if (d != NULL && !atomic_inc_not_zero(&d->ref))
		d = NULL;
	rcu_read_unlock();
	return d;
}

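/*
 * Find a deviceid in the cache and take a reference on it. If it is not
 * cached yet, fetch it from the server via nfs4_get_device_info() and
 * insert it into the cache.
 */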
struct nfs4_deviceid_node *
nfs4_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, struct rpc_cred *cred,
		gfp_t gfp_mask)
{
	long hash = nfs4_deviceid_hash(id);
	struct nfs4_deviceid_node *d, *new;

	d = __nfs4_find_get_deviceid(server, id, hash);
	if (d)
		return d;

	new = nfs4_get_device_info(server, id, cred, gfp_mask);
	if (!new)
		return new;

	spin_lock(&nfs4_deviceid_lock);
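	/*
	 * Re-check under the lock: another thread may have inserted this
	 * deviceid while we were fetching it from the server.
	 */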
	d = __nfs4_find_get_deviceid(server, id, hash);
	if (d) {
		spin_unlock(&nfs4_deviceid_lock);
		server->pnfs_curr_ld->free_deviceid_node(new);
		return d;
	}
	hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
	atomic_inc(&new->ref);
	spin_unlock(&nfs4_deviceid_lock);

	return new;
}
EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);

/*
 * Remove a deviceid from cache
 *
 * @ld layout driver the deviceid was registered with
 * @clp nfs_client associated with deviceid
 * @id the deviceid to unhash
 *
 * The node is unhashed from the cache and freed once the last reference
 * to it has been dropped.
 */
void
nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
			 const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	struct nfs4_deviceid_node *d;

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
	rcu_read_unlock();
	if (!d) {
		spin_unlock(&nfs4_deviceid_lock);
		return;
	}
	hlist_del_init_rcu(&d->node);
	spin_unlock(&nfs4_deviceid_lock);

	/* balance the initial ref taken in nfs4_init_deviceid_node */
	if (atomic_dec_and_test(&d->ref))
		d->ld->free_deviceid_node(d);
}
EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);
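
/*
 * Initialize a freshly allocated deviceid node. The node starts with a
 * single reference; that is the cache's own reference, dropped again by
 * nfs4_delete_deviceid() or _deviceid_purge_client() when the node is
 * removed from the cache.
 */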
void
nfs4_init_deviceid_node(struct nfs4_deviceid_node *d, struct nfs_server *server,
			const struct nfs4_deviceid *id)
{
	INIT_HLIST_NODE(&d->node);
	INIT_HLIST_NODE(&d->tmpnode);
	d->ld = server->pnfs_curr_ld;
	d->nfs_client = server->nfs_client;
	d->flags = 0;
	d->deviceid = *id;
	atomic_set(&d->ref, 1);
}
EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);

/*
 * Dereference a deviceid node and delete it when its reference count drops
 * to zero.
 *
 * @d deviceid node to put
 *
 * return true iff the node was deleted
 * Note that the test for d->ref == 0 is sufficient to establish that the
 * node is no longer hashed in the global device id cache.
 */
bool
nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
{
	if (!atomic_dec_and_test(&d->ref))
		return false;
	d->ld->free_deviceid_node(d);
	return true;
}
EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);

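/*
 * Mark a deviceid as unavailable. nfs4_test_deviceid_unavailable() keeps
 * reporting it as unavailable until PNFS_DEVICE_RETRY_TIMEOUT (120 seconds)
 * has elapsed since the mark, then clears the flag so the device can be
 * retried.
 */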
void
nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	node->timestamp_unavailable = jiffies;
	set_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
}
EXPORT_SYMBOL_GPL(nfs4_mark_deviceid_unavailable);

bool
nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node)
{
	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags)) {
		unsigned long start, end;

		end = jiffies;
		start = end - PNFS_DEVICE_RETRY_TIMEOUT;
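		/* Still inside the retry window: keep the device unavailable. */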
		if (time_in_range(node->timestamp_unavailable, start, end))
			return true;
		clear_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags);
	}
	return false;
}
EXPORT_SYMBOL_GPL(nfs4_test_deviceid_unavailable);

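/*
 * Purge all deviceids belonging to @clp from one hash bucket: unhash the
 * matching entries under the lock, collect them on a private list, then
 * drop the cache's reference outside the lock, freeing any node whose
 * reference count reaches zero.
 */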
static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
	struct nfs4_deviceid_node *d;
	HLIST_HEAD(tmp);

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
		if (d->nfs_client == clp && atomic_read(&d->ref)) {
			hlist_del_init_rcu(&d->node);
			hlist_add_head(&d->tmpnode, &tmp);
		}
	rcu_read_unlock();
	spin_unlock(&nfs4_deviceid_lock);

	if (hlist_empty(&tmp))
		return;

	while (!hlist_empty(&tmp)) {
		d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
		hlist_del(&d->tmpnode);
		if (atomic_dec_and_test(&d->ref))
			d->ld->free_deviceid_node(d);
	}
}

void
nfs4_deviceid_purge_client(const struct nfs_client *clp)
{
	long h;

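	/* Nothing to purge unless the client negotiated pNFS use with the server. */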
	if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
		return;
	for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
		_deviceid_purge_client(clp, h);
}

/*
 * Stop use of all deviceids associated with an nfs_client
 */
void
nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
{
	struct nfs4_deviceid_node *d;
	int i;

	rcu_read_lock();
	for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[i], node)
			if (d->nfs_client == clp)
				set_bit(NFS_DEVICEID_INVALID, &d->flags);
	}
	rcu_read_unlock();
}