/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

/*
 * if we currently have a spinning reader or writer lock
 * (indicated by the rw flag) this will bump the count
 * of blocking holders and drop the spinlock.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	if (rw == BTRFS_WRITE_LOCK) {
		if (atomic_read(&eb->blocking_writers) == 0) {
			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
			atomic_dec(&eb->spinning_writers);
			btrfs_assert_tree_locked(eb);
			atomic_inc(&eb->blocking_writers);
			write_unlock(&eb->lock);
		}
	} else if (rw == BTRFS_READ_LOCK) {
		btrfs_assert_tree_read_locked(eb);
		atomic_inc(&eb->blocking_readers);
		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
		atomic_dec(&eb->spinning_readers);
		read_unlock(&eb->lock);
	}
}
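
/*
 * Hypothetical caller sketch (not part of this file): a writer that is
 * about to do something that can sleep trades its spinning write lock
 * for a blocking one, then trades back with the clearing helper below:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *	...sleepable work, e.g. waiting for IO...
 *	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
 *	btrfs_tree_unlock(eb);
 */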

/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
		write_lock(&eb->lock);
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_inc(&eb->spinning_writers);
		if (atomic_dec_and_test(&eb->blocking_writers) &&
		    waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
		read_lock(&eb->lock);
		atomic_inc(&eb->spinning_readers);
		if (atomic_dec_and_test(&eb->blocking_readers) &&
		    waitqueue_active(&eb->read_lock_wq))
			wake_up(&eb->read_lock_wq);
	}
}
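
/*
 * The read side follows the same shape; a hypothetical sketch:
 *
 *	btrfs_tree_read_lock(eb);
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
 *	...sleepable work...
 *	btrfs_clear_lock_blocking_rw(eb, BTRFS_READ_LOCK_BLOCKING);
 *	btrfs_tree_read_unlock(eb);
 */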

/*
 * take a spinning read lock.  This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	read_unlock(&eb->lock);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}
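
/*
 * Hypothetical usage sketch: a plain spinning read-side critical
 * section pairs this with btrfs_tree_read_unlock():
 *
 *	btrfs_tree_read_lock(eb);
 *	...examine eb without sleeping...
 *	btrfs_tree_read_unlock(eb);
 */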

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}
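
/*
 * Hypothetical usage sketch: callers that cannot afford to wait try
 * the lock and fall back on failure:
 *
 *	if (btrfs_try_tree_read_lock(eb)) {
 *		...fast path, spinning read lock held...
 *		btrfs_tree_read_unlock(eb);
 *	} else {
 *		...back off or take the blocking path...
 *	}
 */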

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}
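
/*
 * Hypothetical usage sketch, mirroring the read-side trylock:
 *
 *	if (btrfs_try_tree_write_lock(eb)) {
 *		...modify eb...
 *		btrfs_tree_unlock(eb);
 *	} else {
 *		...retry later or fall back to btrfs_tree_lock()...
 *	}
 */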

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			eb->lock_nested = 0;
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			eb->lock_nested = 0;
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	if (atomic_dec_and_test(&eb->blocking_readers) &&
	    waitqueue_active(&eb->read_lock_wq))
		wake_up(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}
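
/*
 * Hypothetical sketch: a reader that went blocking and has no more
 * spinning work can drop the lock straight from the blocking state:
 *
 *	btrfs_tree_read_lock(eb);
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
 *	...sleepable work...
 *	btrfs_tree_read_unlock_blocking(eb);
 */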

/*
 * take a spinning write lock.  This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		wait_event(eb->read_lock_wq,
			   atomic_read(&eb->blocking_readers) == 0);
		goto again;
	}
	if (atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	atomic_dec(&eb->write_locks);

	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
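		/*
		 * Assumed intent of the barrier below: make sure the
		 * decrement of blocking_writers is visible before we
		 * test waitqueue_active(), so a concurrent waiter
		 * cannot miss its wakeup.
		 */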
		smp_mb();
		if (waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
}
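
/*
 * Hypothetical sketch: btrfs_tree_unlock() copes with either state, so
 * a writer that went blocking may drop the lock without clearing first:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *	...sleepable work...
 *	btrfs_tree_unlock(eb);
 */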

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}