Commit 54761697 authored by Andreas Gruenbacher, committed by Philipp Reisner

drbd: Rename struct drbd_conf -> struct drbd_device

sed -i -e 's:\<drbd_conf\>:drbd_device:g'
Signed-off-by: Andreas Gruenbacher <agruen@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Parent a3603a6e
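The rename was done mechanically with the sed one-liner from the commit message. As a sketch only (the source paths below are assumed, not taken from the commit), the same rename can be re-applied and checked for leftover occurrences of the old identifier:

    # assumed paths; adjust to the tree being edited
    sed -i -e 's:\<drbd_conf\>:drbd_device:g' drivers/block/drbd/*.[ch]
    # word-match search for any stale use of the old struct name
    grep -rnw 'drbd_conf' drivers/block/drbd/ && echo "stale references remain" || echo "rename complete"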
......@@ -105,7 +105,7 @@ struct update_al_work {
};
void *drbd_md_get_buffer(struct drbd_conf *mdev)
void *drbd_md_get_buffer(struct drbd_device *mdev)
{
int r;
......@@ -116,13 +116,13 @@ void *drbd_md_get_buffer(struct drbd_conf *mdev)
return r ? NULL : page_address(mdev->md_io_page);
}
void drbd_md_put_buffer(struct drbd_conf *mdev)
void drbd_md_put_buffer(struct drbd_device *mdev)
{
if (atomic_dec_and_test(&mdev->md_io_in_use))
wake_up(&mdev->misc_wait);
}
void wait_until_done_or_force_detached(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
void wait_until_done_or_force_detached(struct drbd_device *mdev, struct drbd_backing_dev *bdev,
unsigned int *done)
{
long dt;
......@@ -142,7 +142,7 @@ void wait_until_done_or_force_detached(struct drbd_conf *mdev, struct drbd_backi
}
}
static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
static int _drbd_md_sync_page_io(struct drbd_device *mdev,
struct drbd_backing_dev *bdev,
struct page *page, sector_t sector,
int rw, int size)
......@@ -192,7 +192,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
return err;
}
int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
int drbd_md_sync_page_io(struct drbd_device *mdev, struct drbd_backing_dev *bdev,
sector_t sector, int rw)
{
int err;
......@@ -222,7 +222,7 @@ int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
return err;
}
static struct bm_extent *find_active_resync_extent(struct drbd_conf *mdev, unsigned int enr)
static struct bm_extent *find_active_resync_extent(struct drbd_device *mdev, unsigned int enr)
{
struct lc_element *tmp;
tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
......@@ -234,7 +234,7 @@ static struct bm_extent *find_active_resync_extent(struct drbd_conf *mdev, unsig
return NULL;
}
static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr, bool nonblock)
static struct lc_element *_al_get(struct drbd_device *mdev, unsigned int enr, bool nonblock)
{
struct lc_element *al_ext;
struct bm_extent *bm_ext;
......@@ -257,7 +257,7 @@ static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr, bool
return al_ext;
}
bool drbd_al_begin_io_fastpath(struct drbd_conf *mdev, struct drbd_interval *i)
bool drbd_al_begin_io_fastpath(struct drbd_device *mdev, struct drbd_interval *i)
{
/* for bios crossing activity log extent boundaries,
* we may need to activate two extents in one go */
......@@ -275,7 +275,7 @@ bool drbd_al_begin_io_fastpath(struct drbd_conf *mdev, struct drbd_interval *i)
}
static
bool drbd_al_begin_io_prepare(struct drbd_conf *mdev, struct drbd_interval *i)
bool drbd_al_begin_io_prepare(struct drbd_device *mdev, struct drbd_interval *i)
{
/* for bios crossing activity log extent boundaries,
* we may need to activate two extents in one go */
......@@ -297,7 +297,7 @@ bool drbd_al_begin_io_prepare(struct drbd_conf *mdev, struct drbd_interval *i)
return need_transaction;
}
static int al_write_transaction(struct drbd_conf *mdev, bool delegate);
static int al_write_transaction(struct drbd_device *mdev, bool delegate);
/* When called through generic_make_request(), we must delegate
* activity log I/O to the worker thread: a further request
......@@ -311,7 +311,7 @@ static int al_write_transaction(struct drbd_conf *mdev, bool delegate);
/*
* @delegate: delegate activity log I/O to the worker thread
*/
void drbd_al_begin_io_commit(struct drbd_conf *mdev, bool delegate)
void drbd_al_begin_io_commit(struct drbd_device *mdev, bool delegate)
{
bool locked = false;
......@@ -352,7 +352,7 @@ void drbd_al_begin_io_commit(struct drbd_conf *mdev, bool delegate)
/*
* @delegate: delegate activity log I/O to the worker thread
*/
void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i, bool delegate)
void drbd_al_begin_io(struct drbd_device *mdev, struct drbd_interval *i, bool delegate)
{
BUG_ON(delegate && current == mdev->tconn->worker.task);
......@@ -360,7 +360,7 @@ void drbd_al_begin_io(struct drbd_conf *mdev, struct drbd_interval *i, bool dele
drbd_al_begin_io_commit(mdev, delegate);
}
int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i)
int drbd_al_begin_io_nonblock(struct drbd_device *mdev, struct drbd_interval *i)
{
struct lru_cache *al = mdev->act_log;
/* for bios crossing activity log extent boundaries,
......@@ -409,7 +409,7 @@ int drbd_al_begin_io_nonblock(struct drbd_conf *mdev, struct drbd_interval *i)
return 0;
}
void drbd_al_complete_io(struct drbd_conf *mdev, struct drbd_interval *i)
void drbd_al_complete_io(struct drbd_device *mdev, struct drbd_interval *i)
{
/* for bios crossing activity log extent boundaries,
* we may need to activate two extents in one go */
......@@ -461,7 +461,7 @@ static unsigned int rs_extent_to_bm_page(unsigned int rs_enr)
(BM_EXT_SHIFT - BM_BLOCK_SHIFT));
}
static sector_t al_tr_number_to_on_disk_sector(struct drbd_conf *mdev)
static sector_t al_tr_number_to_on_disk_sector(struct drbd_device *mdev)
{
const unsigned int stripes = mdev->ldev->md.al_stripes;
const unsigned int stripe_size_4kB = mdev->ldev->md.al_stripe_size_4k;
......@@ -480,7 +480,7 @@ static sector_t al_tr_number_to_on_disk_sector(struct drbd_conf *mdev)
}
static int
_al_write_transaction(struct drbd_conf *mdev)
_al_write_transaction(struct drbd_device *mdev)
{
struct al_transaction_on_disk *buffer;
struct lc_element *e;
......@@ -594,7 +594,7 @@ _al_write_transaction(struct drbd_conf *mdev)
static int w_al_write_transaction(struct drbd_work *w, int unused)
{
struct update_al_work *aw = container_of(w, struct update_al_work, w);
struct drbd_conf *mdev = w->mdev;
struct drbd_device *mdev = w->mdev;
int err;
err = _al_write_transaction(mdev);
......@@ -607,7 +607,7 @@ static int w_al_write_transaction(struct drbd_work *w, int unused)
/* Calls from worker context (see w_restart_disk_io()) need to write the
transaction directly. Others came through generic_make_request(),
those need to delegate it to the worker. */
static int al_write_transaction(struct drbd_conf *mdev, bool delegate)
static int al_write_transaction(struct drbd_device *mdev, bool delegate)
{
if (delegate) {
struct update_al_work al_work;
......@@ -621,7 +621,7 @@ static int al_write_transaction(struct drbd_conf *mdev, bool delegate)
return _al_write_transaction(mdev);
}
static int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
static int _try_lc_del(struct drbd_device *mdev, struct lc_element *al_ext)
{
int rv;
......@@ -643,7 +643,7 @@ static int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
*
* You need to lock mdev->act_log with lc_try_lock() / lc_unlock()
*/
void drbd_al_shrink(struct drbd_conf *mdev)
void drbd_al_shrink(struct drbd_device *mdev)
{
struct lc_element *al_ext;
int i;
......@@ -660,7 +660,7 @@ void drbd_al_shrink(struct drbd_conf *mdev)
wake_up(&mdev->al_wait);
}
int drbd_initialize_al(struct drbd_conf *mdev, void *buffer)
int drbd_initialize_al(struct drbd_device *mdev, void *buffer)
{
struct al_transaction_on_disk *al = buffer;
struct drbd_md *md = &mdev->ldev->md;
......@@ -684,7 +684,7 @@ int drbd_initialize_al(struct drbd_conf *mdev, void *buffer)
static int w_update_odbm(struct drbd_work *w, int unused)
{
struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);
struct drbd_conf *mdev = w->mdev;
struct drbd_device *mdev = w->mdev;
struct sib_info sib = { .sib_reason = SIB_SYNC_PROGRESS, };
if (!get_ldev(mdev)) {
......@@ -721,7 +721,7 @@ static int w_update_odbm(struct drbd_work *w, int unused)
*
* TODO will be obsoleted once we have a caching lru of the on disk bitmap
*/
static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
static void drbd_try_clear_on_disk_bm(struct drbd_device *mdev, sector_t sector,
int count, int success)
{
struct lc_element *e;
......@@ -809,7 +809,7 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
}
}
void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go)
void drbd_advance_rs_marks(struct drbd_device *mdev, unsigned long still_to_go)
{
unsigned long now = jiffies;
unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark];
......@@ -832,7 +832,7 @@ void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go)
* called by worker on C_SYNC_TARGET and receiver on SyncSource.
*
*/
void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
void __drbd_set_in_sync(struct drbd_device *mdev, sector_t sector, int size,
const char *file, const unsigned int line)
{
/* Is called from worker and receiver context _only_ */
......@@ -904,7 +904,7 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
* called by tl_clear and drbd_send_dblock (==drbd_make_request).
* so this can be _any_ process.
*/
int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
int __drbd_set_out_of_sync(struct drbd_device *mdev, sector_t sector, int size,
const char *file, const unsigned int line)
{
unsigned long sbnr, ebnr, flags;
......@@ -956,7 +956,7 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
}
static
struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
struct bm_extent *_bme_get(struct drbd_device *mdev, unsigned int enr)
{
struct lc_element *e;
struct bm_extent *bm_ext;
......@@ -996,7 +996,7 @@ struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
return bm_ext;
}
static int _is_in_al(struct drbd_conf *mdev, unsigned int enr)
static int _is_in_al(struct drbd_device *mdev, unsigned int enr)
{
int rv;
......@@ -1014,7 +1014,7 @@ static int _is_in_al(struct drbd_conf *mdev, unsigned int enr)
*
* This functions sleeps on al_wait. Returns 0 on success, -EINTR if interrupted.
*/
int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
int drbd_rs_begin_io(struct drbd_device *mdev, sector_t sector)
{
unsigned int enr = BM_SECT_TO_EXT(sector);
struct bm_extent *bm_ext;
......@@ -1067,7 +1067,7 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
* tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
* if there is still application IO going on in this area.
*/
int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
int drbd_try_rs_begin_io(struct drbd_device *mdev, sector_t sector)
{
unsigned int enr = BM_SECT_TO_EXT(sector);
const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT;
......@@ -1166,7 +1166,7 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
return -EAGAIN;
}
void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
void drbd_rs_complete_io(struct drbd_device *mdev, sector_t sector)
{
unsigned int enr = BM_SECT_TO_EXT(sector);
struct lc_element *e;
......@@ -1204,7 +1204,7 @@ void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
* drbd_rs_cancel_all() - Removes all extents from the resync LRU (even BME_LOCKED)
* @mdev: DRBD device.
*/
void drbd_rs_cancel_all(struct drbd_conf *mdev)
void drbd_rs_cancel_all(struct drbd_device *mdev)
{
spin_lock_irq(&mdev->al_lock);
......@@ -1225,7 +1225,7 @@ void drbd_rs_cancel_all(struct drbd_conf *mdev)
* Returns 0 upon success, -EAGAIN if at least one reference count was
* not zero.
*/
int drbd_rs_del_all(struct drbd_conf *mdev)
int drbd_rs_del_all(struct drbd_device *mdev)
{
struct lc_element *e;
struct bm_extent *bm_ext;
......@@ -1276,7 +1276,7 @@ int drbd_rs_del_all(struct drbd_conf *mdev)
* @sector: The sector number.
* @size: Size of failed IO operation, in byte.
*/
void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
void drbd_rs_failed_io(struct drbd_device *mdev, sector_t sector, int size)
{
/* Is called from worker and receiver context _only_ */
unsigned long sbnr, ebnr, lbnr;
......
......@@ -113,7 +113,7 @@ struct drbd_bitmap {
};
#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
static void __bm_print_lock_info(struct drbd_device *mdev, const char *func)
{
struct drbd_bitmap *b = mdev->bitmap;
if (!__ratelimit(&drbd_ratelimit_state))
......@@ -124,7 +124,7 @@ static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
drbd_task_to_thread_name(mdev->tconn, b->bm_task));
}
void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags)
void drbd_bm_lock(struct drbd_device *mdev, char *why, enum bm_flag flags)
{
struct drbd_bitmap *b = mdev->bitmap;
int trylock_failed;
......@@ -151,7 +151,7 @@ void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags)
b->bm_task = current;
}
void drbd_bm_unlock(struct drbd_conf *mdev)
void drbd_bm_unlock(struct drbd_device *mdev)
{
struct drbd_bitmap *b = mdev->bitmap;
if (!b) {
......@@ -211,14 +211,14 @@ static unsigned long bm_page_to_idx(struct page *page)
/* As is very unlikely that the same page is under IO from more than one
* context, we can get away with a bit per page and one wait queue per bitmap.
*/
static void bm_page_lock_io(struct drbd_conf *mdev, int page_nr)
static void bm_page_lock_io(struct drbd_device *mdev, int page_nr)
{
struct drbd_bitmap *b = mdev->bitmap;
void *addr = &page_private(b->bm_pages[page_nr]);
wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
}
static void bm_page_unlock_io(struct drbd_conf *mdev, int page_nr)
static void bm_page_unlock_io(struct drbd_device *mdev, int page_nr)
{
struct drbd_bitmap *b = mdev->bitmap;
void *addr = &page_private(b->bm_pages[page_nr]);
......@@ -249,7 +249,7 @@ static void bm_set_page_need_writeout(struct page *page)
* hints, then call drbd_bm_write_hinted(), which will only write out changed
* pages which are flagged with this mark.
*/
void drbd_bm_mark_for_writeout(struct drbd_conf *mdev, int page_nr)
void drbd_bm_mark_for_writeout(struct drbd_device *mdev, int page_nr)
{
struct page *page;
if (page_nr >= mdev->bitmap->bm_number_of_pages) {
......@@ -340,7 +340,7 @@ static void bm_unmap(unsigned long *p_addr)
/*
* actually most functions herein should take a struct drbd_bitmap*, not a
* struct drbd_conf*, but for the debug macros I like to have the mdev around
* struct drbd_device*, but for the debug macros I like to have the mdev around
* to be able to report device specific.
*/
......@@ -438,7 +438,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
* called on driver init only. TODO call when a device is created.
* allocates the drbd_bitmap, and stores it in mdev->bitmap.
*/
int drbd_bm_init(struct drbd_conf *mdev)
int drbd_bm_init(struct drbd_device *mdev)
{
struct drbd_bitmap *b = mdev->bitmap;
WARN_ON(b != NULL);
......@@ -454,7 +454,7 @@ int drbd_bm_init(struct drbd_conf *mdev)
return 0;
}
sector_t drbd_bm_capacity(struct drbd_conf *mdev)
sector_t drbd_bm_capacity(struct drbd_device *mdev)
{
if (!expect(mdev->bitmap))
return 0;
......@@ -463,7 +463,7 @@ sector_t drbd_bm_capacity(struct drbd_conf *mdev)
/* called on driver unload. TODO: call when a device is destroyed.
*/
void drbd_bm_cleanup(struct drbd_conf *mdev)
void drbd_bm_cleanup(struct drbd_device *mdev)
{
if (!expect(mdev->bitmap))
return;
......@@ -631,7 +631,7 @@ static u64 drbd_md_on_disk_bits(struct drbd_backing_dev *ldev)
* In case this is actually a resize, we copy the old bitmap into the new one.
* Otherwise, the bitmap is initialized to all bits set.
*/
int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
int drbd_bm_resize(struct drbd_device *mdev, sector_t capacity, int set_new_bits)
{
struct drbd_bitmap *b = mdev->bitmap;
unsigned long bits, words, owords, obits;
......@@ -757,7 +757,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
*
* maybe bm_set should be atomic_t ?
*/
unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
unsigned long _drbd_bm_total_weight(struct drbd_device *mdev)
{
struct drbd_bitmap *b = mdev->bitmap;
unsigned long s;
......@@ -775,7 +775,7 @@ unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
return s;
}
unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
unsigned long drbd_bm_total_weight(struct drbd_device *mdev)
{
unsigned long s;
/* if I don't have a disk, I don't know about out-of-sync status */
......@@ -786,7 +786,7 @@ unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
return s;
}
size_t drbd_bm_words(struct drbd_conf *mdev)
size_t drbd_bm_words(struct drbd_device *mdev)
{
struct drbd_bitmap *b = mdev->bitmap;
if (!expect(b))
......@@ -797,7 +797,7 @@ size_t drbd_bm_words(struct drbd_conf *mdev)
return b->bm_words;
}
unsigned long drbd_bm_bits(struct drbd_conf *mdev)
unsigned long drbd_bm_bits(struct drbd_device *mdev)
{
struct drbd_bitmap *b = mdev->bitmap;
if (!expect(b))
......@@ -811,7 +811,7 @@ unsigned long drbd_bm_bits(struct drbd_conf *mdev)
* bitmap must be locked by drbd_bm_lock.
* currently only used from receive_bitmap.
*/
void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
void drbd_bm_merge_lel(struct drbd_device *mdev, size_t offset, size_t number,
unsigned long *buffer)
{
struct drbd_bitmap *b = mdev->bitmap;
......@@ -860,7 +860,7 @@ void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
/* copy number words from the bitmap starting at offset into the buffer.
* buffer[i] will be little endian unsigned long.
*/
void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
void drbd_bm_get_lel(struct drbd_device *mdev, size_t offset, size_t number,
unsigned long *buffer)
{
struct drbd_bitmap *b = mdev->bitmap;
......@@ -897,7 +897,7 @@ void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
}
/* set all bits in the bitmap */
void drbd_bm_set_all(struct drbd_conf *mdev)
void drbd_bm_set_all(struct drbd_device *mdev)
{
struct drbd_bitmap *b = mdev->bitmap;
if (!expect(b))
......@@ -913,7 +913,7 @@ void drbd_bm_set_all(struct drbd_conf *mdev)
}
/* clear all bits in the bitmap */
void drbd_bm_clear_all(struct drbd_conf *mdev)
void drbd_bm_clear_all(struct drbd_device *mdev)
{
struct drbd_bitmap *b = mdev->bitmap;
if (!expect(b))
......@@ -928,7 +928,7 @@ void drbd_bm_clear_all(struct drbd_conf *mdev)
}
struct bm_aio_ctx {
struct drbd_conf *mdev;
struct drbd_device *mdev;
atomic_t in_flight;
unsigned int done;
unsigned flags;
......@@ -951,7 +951,7 @@ static void bm_aio_ctx_destroy(struct kref *kref)
static void bm_async_io_complete(struct bio *bio, int error)
{
struct bm_aio_ctx *ctx = bio->bi_private;
struct drbd_conf *mdev = ctx->mdev;
struct drbd_device *mdev = ctx->mdev;
struct drbd_bitmap *b = mdev->bitmap;
unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
int uptodate = bio_flagged(bio, BIO_UPTODATE);
......@@ -1000,7 +1000,7 @@ static void bm_async_io_complete(struct bio *bio, int error)
static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
{
struct bio *bio = bio_alloc_drbd(GFP_NOIO);
struct drbd_conf *mdev = ctx->mdev;
struct drbd_device *mdev = ctx->mdev;
struct drbd_bitmap *b = mdev->bitmap;
struct page *page;
unsigned int len;
......@@ -1049,7 +1049,7 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
/*
* bm_rw: read/write the whole bitmap from/to its on disk location.
*/
static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
static int bm_rw(struct drbd_device *mdev, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
struct bm_aio_ctx *ctx;
struct drbd_bitmap *b = mdev->bitmap;
......@@ -1173,7 +1173,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
* drbd_bm_read() - Read the whole bitmap from its on disk location.
* @mdev: DRBD device.
*/
int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
int drbd_bm_read(struct drbd_device *mdev) __must_hold(local)
{
return bm_rw(mdev, READ, 0, 0);
}
......@@ -1184,7 +1184,7 @@ int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
*
* Will only write pages that have changed since last IO.
*/
int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
int drbd_bm_write(struct drbd_device *mdev) __must_hold(local)
{
return bm_rw(mdev, WRITE, 0, 0);
}
......@@ -1195,7 +1195,7 @@ int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
*
* Will write all pages.
*/
int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local)
int drbd_bm_write_all(struct drbd_device *mdev) __must_hold(local)
{
return bm_rw(mdev, WRITE, BM_WRITE_ALL_PAGES, 0);
}
......@@ -1211,7 +1211,7 @@ int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local)
* verify is aborted due to a failed peer disk, while local IO continues, or
* pending resync acks are still being processed.
*/
int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local)
int drbd_bm_write_copy_pages(struct drbd_device *mdev) __must_hold(local)
{
return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, 0);
}
......@@ -1220,7 +1220,7 @@ int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local)
* drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed.
* @mdev: DRBD device.
*/
int drbd_bm_write_hinted(struct drbd_conf *mdev) __must_hold(local)
int drbd_bm_write_hinted(struct drbd_device *mdev) __must_hold(local)
{
return bm_rw(mdev, WRITE, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
}
......@@ -1237,7 +1237,7 @@ int drbd_bm_write_hinted(struct drbd_conf *mdev) __must_hold(local)
* In case this becomes an issue on systems with larger PAGE_SIZE,
* we may want to change this again to write 4k aligned 4k pieces.
*/
int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local)
int drbd_bm_write_page(struct drbd_device *mdev, unsigned int idx) __must_hold(local)
{
struct bm_aio_ctx *ctx;
int err;
......@@ -1288,7 +1288,7 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
*
* this returns a bit number, NOT a sector!
*/
static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
static unsigned long __bm_find_next(struct drbd_device *mdev, unsigned long bm_fo,
const int find_zero_bit)
{
struct drbd_bitmap *b = mdev->bitmap;
......@@ -1328,7 +1328,7 @@ static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
return bm_fo;
}
static unsigned long bm_find_next(struct drbd_conf *mdev,
static unsigned long bm_find_next(struct drbd_device *mdev,
unsigned long bm_fo, const int find_zero_bit)
{
struct drbd_bitmap *b = mdev->bitmap;
......@@ -1349,14 +1349,14 @@ static unsigned long bm_find_next(struct drbd_conf *mdev,
return i;
}
unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
unsigned long drbd_bm_find_next(struct drbd_device *mdev, unsigned long bm_fo)
{
return bm_find_next(mdev, bm_fo, 0);
}
#if 0
/* not yet needed for anything. */
unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
unsigned long drbd_bm_find_next_zero(struct drbd_device *mdev, unsigned long bm_fo)
{
return bm_find_next(mdev, bm_fo, 1);
}
......@@ -1364,13 +1364,13 @@ unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo
/* does not spin_lock_irqsave.
* you must take drbd_bm_lock() first */
unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
unsigned long _drbd_bm_find_next(struct drbd_device *mdev, unsigned long bm_fo)
{
/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
return __bm_find_next(mdev, bm_fo, 0);
}
unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
unsigned long _drbd_bm_find_next_zero(struct drbd_device *mdev, unsigned long bm_fo)
{
/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
return __bm_find_next(mdev, bm_fo, 1);
......@@ -1382,7 +1382,7 @@ unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_f
* wants bitnr, not sector.
* expected to be called for only a few bits (e - s about BITS_PER_LONG).
* Must hold bitmap lock already. */
static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
static int __bm_change_bits_to(struct drbd_device *mdev, const unsigned long s,
unsigned long e, int val)
{
struct drbd_bitmap *b = mdev->bitmap;
......@@ -1431,7 +1431,7 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
* for val != 0, we change 0 -> 1, return code positive
* for val == 0, we change 1 -> 0, return code negative
* wants bitnr, not sector */
static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
static int bm_change_bits_to(struct drbd_device *mdev, const unsigned long s,
const unsigned long e, int val)
{
unsigned long flags;
......@@ -1454,13 +1454,13 @@ static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
}
/* returns number of bits changed 0 -> 1 */
int drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
int drbd_bm_set_bits(struct drbd_device *mdev, const unsigned long s, const unsigned long e)
{
return bm_change_bits_to(mdev, s, e, 1);
}
/* returns number of bits changed 1 -> 0 */
int drbd_bm_clear_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
int drbd_bm_clear_bits(struct drbd_device *mdev, const unsigned long s, const unsigned long e)
{
return -bm_change_bits_to(mdev, s, e, 0);
}
......@@ -1494,7 +1494,7 @@ static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
* You must first drbd_bm_lock().
* Can be called to set the whole bitmap in one go.
* Sets bits from s to e _inclusive_. */
void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
void _drbd_bm_set_bits(struct drbd_device *mdev, const unsigned long s, const unsigned long e)
{
/* First set_bit from the first bit (s)
* up to the next long boundary (sl),
......@@ -1574,7 +1574,7 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
* 0 ... bit not set
* -1 ... first out of bounds access, stop testing for bits!
*/
int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
int drbd_bm_test_bit(struct drbd_device *mdev, const unsigned long bitnr)
{
unsigned long flags;
struct drbd_bitmap *b = mdev->bitmap;
......@@ -1605,7 +1605,7 @@ int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
}
/* returns number of bits set in the range [s, e] */
int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
int drbd_bm_count_bits(struct drbd_device *mdev, const unsigned long s, const unsigned long e)
{
unsigned long flags;
struct drbd_bitmap *b = mdev->bitmap;
......@@ -1660,7 +1660,7 @@ int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
* reference count of some bitmap extent element from some lru instead...
*
*/
int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
int drbd_bm_e_weight(struct drbd_device *mdev, unsigned long enr)
{
struct drbd_bitmap *b = mdev->bitmap;
int count, s, e;
......
This diff is collapsed.
This diff is collapsed.
......@@ -103,7 +103,7 @@ static struct drbd_config_context {
/* pointer into reply buffer */
struct drbd_genlmsghdr *reply_dh;
/* resolved from attributes, if possible */
struct drbd_conf *mdev;
struct drbd_device *mdev;
struct drbd_tconn *tconn;
} adm_ctx;
......@@ -313,7 +313,7 @@ static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
int drbd_khelper(struct drbd_device *mdev, char *cmd)
{
char *envp[] = { "HOME=/",
"TERM=linux",
......@@ -400,7 +400,7 @@ static int conn_khelper(struct drbd_tconn *tconn, char *cmd)
static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
enum drbd_fencing_p fp = FP_NOT_AVAIL;
struct drbd_conf *mdev;
struct drbd_device *mdev;
int vnr;
rcu_read_lock();
......@@ -534,7 +534,7 @@ void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
}
enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
drbd_set_role(struct drbd_device *mdev, enum drbd_role new_role, int force)
{
const int max_tries = 4;
enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
......@@ -729,7 +729,7 @@ int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
* Activity log size used to be fixed 32kB,
* but is about to become configurable.
*/
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
static void drbd_md_set_sector_offsets(struct drbd_device *mdev,
struct drbd_backing_dev *bdev)
{
sector_t md_size_sect = 0;
......@@ -807,7 +807,7 @@ char *ppsize(char *buf, unsigned long long size)
* and can be long lived.
* This changes an mdev->flag, is triggered by drbd internals,
* and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
void drbd_suspend_io(struct drbd_device *mdev)
{
set_bit(SUSPEND_IO, &mdev->flags);
if (drbd_suspended(mdev))
......@@ -815,7 +815,7 @@ void drbd_suspend_io(struct drbd_conf *mdev)
wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}
void drbd_resume_io(struct drbd_conf *mdev)
void drbd_resume_io(struct drbd_device *mdev)
{
clear_bit(SUSPEND_IO, &mdev->flags);
wake_up(&mdev->misc_wait);
......@@ -829,7 +829,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
* You should call drbd_md_sync() after calling this function.
*/
enum determine_dev_size
drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
drbd_determine_dev_size(struct drbd_device *mdev, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
{
sector_t prev_first_sect, prev_size; /* previous meta location */
sector_t la_size_sect, u_size;
......@@ -979,7 +979,7 @@ drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct res
}
sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
drbd_new_dev_size(struct drbd_device *mdev, struct drbd_backing_dev *bdev,
sector_t u_size, int assume_peer_has_space)
{
sector_t p_size = mdev->p_size; /* partner's disk size. */
......@@ -1033,7 +1033,7 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
* failed, and 0 on success. You should call drbd_md_sync() after you called
* this function.
*/
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
static int drbd_check_al_size(struct drbd_device *mdev, struct disk_conf *dc)
{
struct lru_cache *n, *t;
struct lc_element *e;
......@@ -1078,7 +1078,7 @@ static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
return 0;
}
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
static void drbd_setup_queue_param(struct drbd_device *mdev, unsigned int max_bio_size)
{
struct request_queue * const q = mdev->rq_queue;
unsigned int max_hw_sectors = max_bio_size >> 9;
......@@ -1115,7 +1115,7 @@ static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_
}
}
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
void drbd_reconsider_max_bio_size(struct drbd_device *mdev)
{
unsigned int now, new, local, peer;
......@@ -1180,7 +1180,7 @@ static void conn_reconfig_done(struct drbd_tconn *tconn)
}
/* Make sure IO is suspended before calling this function(). */
static void drbd_suspend_al(struct drbd_conf *mdev)
static void drbd_suspend_al(struct drbd_device *mdev)
{
int s = 0;
......@@ -1238,7 +1238,7 @@ static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
enum drbd_ret_code retcode;
struct drbd_conf *mdev;
struct drbd_device *mdev;
struct disk_conf *new_disk_conf, *old_disk_conf;
struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
int err, fifo_size;
......@@ -1366,7 +1366,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_conf *mdev;
struct drbd_device *mdev;
int err;
enum drbd_ret_code retcode;
enum determine_dev_size dd;
......@@ -1800,7 +1800,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
return 0;
}
static int adm_detach(struct drbd_conf *mdev, int force)
static int adm_detach(struct drbd_device *mdev, int force)
{
enum drbd_state_rv retcode;
int ret;
......@@ -1862,7 +1862,7 @@ int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
static bool conn_resync_running(struct drbd_tconn *tconn)
{
struct drbd_conf *mdev;
struct drbd_device *mdev;
bool rv = false;
int vnr;
......@@ -1883,7 +1883,7 @@ static bool conn_resync_running(struct drbd_tconn *tconn)
static bool conn_ov_running(struct drbd_tconn *tconn)
{
struct drbd_conf *mdev;
struct drbd_device *mdev;
bool rv = false;
int vnr;
......@@ -1903,7 +1903,7 @@ static bool conn_ov_running(struct drbd_tconn *tconn)
static enum drbd_ret_code
_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
{
struct drbd_conf *mdev;
struct drbd_device *mdev;
int i;
if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
......@@ -1947,7 +1947,7 @@ static enum drbd_ret_code
check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
{
static enum drbd_ret_code rv;
struct drbd_conf *mdev;
struct drbd_device *mdev;
int i;
rcu_read_lock();
......@@ -2139,7 +2139,7 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_conf *mdev;
struct drbd_device *mdev;
struct net_conf *old_conf, *new_conf = NULL;
struct crypto crypto = { };
struct drbd_tconn *tconn;
......@@ -2349,7 +2349,7 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
return 0;
}
void resync_after_online_grow(struct drbd_conf *mdev)
void resync_after_online_grow(struct drbd_device *mdev)
{
int iass; /* I am sync source */
......@@ -2369,7 +2369,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
struct resize_parms rs;
struct drbd_conf *mdev;
struct drbd_device *mdev;
enum drbd_ret_code retcode;
enum determine_dev_size dd;
bool change_al_layout = false;
......@@ -2535,7 +2535,7 @@ int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_conf *mdev;
struct drbd_device *mdev;
int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
......@@ -2590,7 +2590,7 @@ static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *
return 0;
}
static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
static int drbd_bmio_set_susp_al(struct drbd_device *mdev)
{
int rv;
......@@ -2602,7 +2602,7 @@ static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
int retcode; /* drbd_ret_code, drbd_state_rv */
struct drbd_conf *mdev;
struct drbd_device *mdev;
retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
......@@ -2692,7 +2692,7 @@ int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_conf *mdev;
struct drbd_device *mdev;
int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
......@@ -2753,7 +2753,7 @@ static int nla_put_drbd_cfg_context(struct sk_buff *skb,
return -EMSGSIZE;
}
static int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *mdev,
const struct sib_info *sib)
{
struct state_info *si = NULL; /* for sizeof(si->member); */
......@@ -2897,7 +2897,7 @@ int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
struct drbd_conf *mdev;
struct drbd_device *mdev;
struct drbd_genlmsghdr *dh;
struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
struct drbd_tconn *tconn = NULL;
......@@ -3097,7 +3097,7 @@ int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_conf *mdev;
struct drbd_device *mdev;
enum drbd_ret_code retcode;
struct start_ov_parms parms;
......@@ -3138,7 +3138,7 @@ int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
struct drbd_conf *mdev;
struct drbd_device *mdev;
enum drbd_ret_code retcode;
int skip_initial_sync = 0;
int err;
......@@ -3302,7 +3302,7 @@ int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
return 0;
}
static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
static enum drbd_ret_code adm_delete_minor(struct drbd_device *mdev)
{
if (mdev->state.disk == D_DISKLESS &&
/* no need to be mdev->state.conn == C_STANDALONE &&
......@@ -3341,7 +3341,7 @@ int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
struct drbd_conf *mdev;
struct drbd_device *mdev;
unsigned i;
retcode = drbd_adm_prepare(skb, info, 0);
......@@ -3441,7 +3441,7 @@ int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
return 0;
}
void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
void drbd_bcast_event(struct drbd_device *mdev, const struct sib_info *sib)
{
static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
struct sk_buff *msg;
......
......@@ -66,7 +66,7 @@ static void seq_printf_with_thousands_grouping(struct seq_file *seq, long v)
* [=====>..............] 33.5% (23456/123456)
* finish: 2:20:20 speed: 6,345 (6,456) K/sec
*/
static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
static void drbd_syncer_progress(struct drbd_device *mdev, struct seq_file *seq)
{
unsigned long db, dt, dbdt, rt, rs_left;
unsigned int res;
......@@ -202,7 +202,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
{
int i, prev_i = -1;
const char *sn;
struct drbd_conf *mdev;
struct drbd_device *mdev;
struct net_conf *nc;
char wp;
......
This diff is collapsed.
......@@ -31,10 +31,10 @@
#include "drbd_req.h"
static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size);
static bool drbd_may_do_local_read(struct drbd_device *mdev, sector_t sector, int size);
/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
static void _drbd_start_io_acct(struct drbd_device *mdev, struct drbd_request *req)
{
const int rw = bio_data_dir(req->master_bio);
int cpu;
......@@ -49,7 +49,7 @@ static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req
}
/* Update disk stats when completing request upwards */
static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
static void _drbd_end_io_acct(struct drbd_device *mdev, struct drbd_request *req)
{
int rw = bio_data_dir(req->master_bio);
unsigned long duration = jiffies - req->start_time;
......@@ -61,7 +61,7 @@ static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req)
part_stat_unlock();
}
static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
static struct drbd_request *drbd_req_new(struct drbd_device *mdev,
struct bio *bio_src)
{
struct drbd_request *req;
......@@ -95,7 +95,7 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
void drbd_req_destroy(struct kref *kref)
{
struct drbd_request *req = container_of(kref, struct drbd_request, kref);
struct drbd_conf *mdev = req->w.mdev;
struct drbd_device *mdev = req->w.mdev;
const unsigned s = req->rq_state;
if ((req->master_bio && !(s & RQ_POSTPONED)) ||
......@@ -179,7 +179,7 @@ void start_new_tl_epoch(struct drbd_tconn *tconn)
wake_all_senders(tconn);
}
void complete_master_bio(struct drbd_conf *mdev,
void complete_master_bio(struct drbd_device *mdev,
struct bio_and_error *m)
{
bio_endio(m->bio, m->error);
......@@ -190,7 +190,7 @@ void complete_master_bio(struct drbd_conf *mdev,
static void drbd_remove_request_interval(struct rb_root *root,
struct drbd_request *req)
{
struct drbd_conf *mdev = req->w.mdev;
struct drbd_device *mdev = req->w.mdev;
struct drbd_interval *i = &req->i;
drbd_remove_interval(root, i);
......@@ -210,7 +210,7 @@ static
void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{
const unsigned s = req->rq_state;
struct drbd_conf *mdev = req->w.mdev;
struct drbd_device *mdev = req->w.mdev;
int rw;
int error, ok;
......@@ -305,7 +305,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
{
struct drbd_conf *mdev = req->w.mdev;
struct drbd_device *mdev = req->w.mdev;
D_ASSERT(m || (req->rq_state & RQ_POSTPONED));
if (!atomic_sub_and_test(put, &req->completion_ref))
......@@ -328,7 +328,7 @@ static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_
static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
int clear, int set)
{
struct drbd_conf *mdev = req->w.mdev;
struct drbd_device *mdev = req->w.mdev;
unsigned s = req->rq_state;
int c_put = 0;
int k_put = 0;
......@@ -424,7 +424,7 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
kref_sub(&req->kref, k_put, drbd_req_destroy);
}
static void drbd_report_io_error(struct drbd_conf *mdev, struct drbd_request *req)
static void drbd_report_io_error(struct drbd_device *mdev, struct drbd_request *req)
{
char b[BDEVNAME_SIZE];
......@@ -453,7 +453,7 @@ static void drbd_report_io_error(struct drbd_conf *mdev, struct drbd_request *re
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
struct bio_and_error *m)
{
struct drbd_conf *mdev = req->w.mdev;
struct drbd_device *mdev = req->w.mdev;
struct net_conf *nc;
int p, rv = 0;
......@@ -771,7 +771,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
* since size may be bigger than BM_BLOCK_SIZE,
* we may need to check several bits.
*/
static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size)
static bool drbd_may_do_local_read(struct drbd_device *mdev, sector_t sector, int size)
{
unsigned long sbnr, ebnr;
sector_t esector, nr_sectors;
......@@ -791,7 +791,7 @@ static bool drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int
return drbd_bm_count_bits(mdev, sbnr, ebnr) == 0;
}
static bool remote_due_to_read_balancing(struct drbd_conf *mdev, sector_t sector,
static bool remote_due_to_read_balancing(struct drbd_device *mdev, sector_t sector,
enum drbd_read_balancing rbm)
{
struct backing_dev_info *bdi;
......@@ -834,7 +834,7 @@ static bool remote_due_to_read_balancing(struct drbd_conf *mdev, sector_t sector
static void complete_conflicting_writes(struct drbd_request *req)
{
DEFINE_WAIT(wait);
struct drbd_conf *mdev = req->w.mdev;
struct drbd_device *mdev = req->w.mdev;
struct drbd_interval *i;
sector_t sector = req->i.sector;
int size = req->i.size;
......@@ -858,7 +858,7 @@ static void complete_conflicting_writes(struct drbd_request *req)
}
/* called within req_lock and rcu_read_lock() */
static void maybe_pull_ahead(struct drbd_conf *mdev)
static void maybe_pull_ahead(struct drbd_device *mdev)
{
struct drbd_tconn *tconn = mdev->tconn;
struct net_conf *nc;
......@@ -914,7 +914,7 @@ static void maybe_pull_ahead(struct drbd_conf *mdev)
*/
static bool do_remote_read(struct drbd_request *req)
{
struct drbd_conf *mdev = req->w.mdev;
struct drbd_device *mdev = req->w.mdev;
enum drbd_read_balancing rbm;
if (req->private_bio) {
......@@ -959,7 +959,7 @@ static bool do_remote_read(struct drbd_request *req)
* which does NOT include those that we are L_AHEAD for. */
static int drbd_process_write_request(struct drbd_request *req)
{
struct drbd_conf *mdev = req->w.mdev;
struct drbd_device *mdev = req->w.mdev;
int remote, send_oos;
remote = drbd_should_do_remote(mdev->state);
......@@ -996,7 +996,7 @@ static int drbd_process_write_request(struct drbd_request *req)
static void
drbd_submit_req_private_bio(struct drbd_request *req)
{
struct drbd_conf *mdev = req->w.mdev;
struct drbd_device *mdev = req->w.mdev;
struct bio *bio = req->private_bio;
const int rw = bio_rw(bio);
......@@ -1020,7 +1020,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)
bio_endio(bio, -EIO);
}
static void drbd_queue_write(struct drbd_conf *mdev, struct drbd_request *req)
static void drbd_queue_write(struct drbd_device *mdev, struct drbd_request *req)
{
spin_lock(&mdev->submit.lock);
list_add_tail(&req->tl_requests, &mdev->submit.writes);
......@@ -1034,7 +1034,7 @@ static void drbd_queue_write(struct drbd_conf *mdev, struct drbd_request *req)
* Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
*/
static struct drbd_request *
drbd_request_prepare(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
drbd_request_prepare(struct drbd_device *mdev, struct bio *bio, unsigned long start_time)
{
const int rw = bio_data_dir(bio);
struct drbd_request *req;
......@@ -1071,7 +1071,7 @@ drbd_request_prepare(struct drbd_conf *mdev, struct bio *bio, unsigned long star
return req;
}
static void drbd_send_and_submit(struct drbd_conf *mdev, struct drbd_request *req)
static void drbd_send_and_submit(struct drbd_device *mdev, struct drbd_request *req)
{
const int rw = bio_rw(req->master_bio);
struct bio_and_error m = { NULL, };
......@@ -1160,7 +1160,7 @@ static void drbd_send_and_submit(struct drbd_conf *mdev, struct drbd_request *re
complete_master_bio(mdev, &m);
}
void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
void __drbd_make_request(struct drbd_device *mdev, struct bio *bio, unsigned long start_time)
{
struct drbd_request *req = drbd_request_prepare(mdev, bio, start_time);
if (IS_ERR_OR_NULL(req))
......@@ -1168,7 +1168,7 @@ void __drbd_make_request(struct drbd_conf *mdev, struct bio *bio, unsigned long
drbd_send_and_submit(mdev, req);
}
static void submit_fast_path(struct drbd_conf *mdev, struct list_head *incoming)
static void submit_fast_path(struct drbd_device *mdev, struct list_head *incoming)
{
struct drbd_request *req, *tmp;
list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
......@@ -1188,7 +1188,7 @@ static void submit_fast_path(struct drbd_conf *mdev, struct list_head *incoming)
}
}
static bool prepare_al_transaction_nonblock(struct drbd_conf *mdev,
static bool prepare_al_transaction_nonblock(struct drbd_device *mdev,
struct list_head *incoming,
struct list_head *pending)
{
......@@ -1215,7 +1215,7 @@ static bool prepare_al_transaction_nonblock(struct drbd_conf *mdev,
void do_submit(struct work_struct *ws)
{
struct drbd_conf *mdev = container_of(ws, struct drbd_conf, submit.worker);
struct drbd_device *mdev = container_of(ws, struct drbd_device, submit.worker);
LIST_HEAD(incoming);
LIST_HEAD(pending);
struct drbd_request *req, *tmp;
......@@ -1272,7 +1272,7 @@ void do_submit(struct work_struct *ws)
void drbd_make_request(struct request_queue *q, struct bio *bio)
{
struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
struct drbd_device *mdev = (struct drbd_device *) q->queuedata;
unsigned long start_time;
start_time = jiffies;
......@@ -1300,7 +1300,7 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
*/
int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
{
struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
struct drbd_device *mdev = (struct drbd_device *) q->queuedata;
unsigned int bio_size = bvm->bi_size;
int limit = DRBD_MAX_BIO_SIZE;
int backing_limit;
......@@ -1334,7 +1334,7 @@ static struct drbd_request *find_oldest_request(struct drbd_tconn *tconn)
void request_timer_fn(unsigned long data)
{
struct drbd_conf *mdev = (struct drbd_conf *) data;
struct drbd_device *mdev = (struct drbd_device *) data;
struct drbd_tconn *tconn = mdev->tconn;
struct drbd_request *req; /* oldest request */
struct net_conf *nc;
......
......@@ -281,7 +281,7 @@ extern void _req_may_be_done(struct drbd_request *req,
struct bio_and_error *m);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
struct bio_and_error *m);
extern void complete_master_bio(struct drbd_conf *mdev,
extern void complete_master_bio(struct drbd_device *mdev,
struct bio_and_error *m);
extern void request_timer_fn(unsigned long data);
extern void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what);
......@@ -294,7 +294,7 @@ extern void drbd_restart_request(struct drbd_request *req);
* outside the spinlock, e.g. when walking some list on cleanup. */
static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
{
struct drbd_conf *mdev = req->w.mdev;
struct drbd_device *mdev = req->w.mdev;
struct bio_and_error m;
int rv;
......@@ -314,7 +314,7 @@ static inline int req_mod(struct drbd_request *req,
enum drbd_req_event what)
{
unsigned long flags;
struct drbd_conf *mdev = req->w.mdev;
struct drbd_device *mdev = req->w.mdev;
struct bio_and_error m;
int rv;
......
......@@ -48,12 +48,12 @@ enum sanitize_state_warnings {
};
static int w_after_state_ch(struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
static void after_state_ch(struct drbd_device *mdev, union drbd_state os,
union drbd_state ns, enum chg_state_flags flags);
static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
static enum drbd_state_rv is_valid_state(struct drbd_device *, union drbd_state);
static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_tconn *);
static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
static union drbd_state sanitize_state(struct drbd_device *mdev, union drbd_state ns,
enum sanitize_state_warnings *warn);
static inline bool is_susp(union drbd_state s)
......@@ -63,7 +63,7 @@ static inline bool is_susp(union drbd_state s)
bool conn_all_vols_unconf(struct drbd_tconn *tconn)
{
struct drbd_conf *mdev;
struct drbd_device *mdev;
bool rv = true;
int vnr;
......@@ -103,7 +103,7 @@ static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
enum drbd_role conn_highest_role(struct drbd_tconn *tconn)
{
enum drbd_role role = R_UNKNOWN;
struct drbd_conf *mdev;
struct drbd_device *mdev;
int vnr;
rcu_read_lock();
......@@ -117,7 +117,7 @@ enum drbd_role conn_highest_role(struct drbd_tconn *tconn)
enum drbd_role conn_highest_peer(struct drbd_tconn *tconn)
{
enum drbd_role peer = R_UNKNOWN;
struct drbd_conf *mdev;
struct drbd_device *mdev;
int vnr;
rcu_read_lock();
......@@ -131,7 +131,7 @@ enum drbd_role conn_highest_peer(struct drbd_tconn *tconn)
enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn)
{
enum drbd_disk_state ds = D_DISKLESS;
struct drbd_conf *mdev;
struct drbd_device *mdev;
int vnr;
rcu_read_lock();
......@@ -145,7 +145,7 @@ enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn)
enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn)
{
enum drbd_disk_state ds = D_MASK;
struct drbd_conf *mdev;
struct drbd_device *mdev;
int vnr;
rcu_read_lock();
......@@ -159,7 +159,7 @@ enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn)
enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn)
{
enum drbd_disk_state ds = D_DISKLESS;
struct drbd_conf *mdev;
struct drbd_device *mdev;
int vnr;
rcu_read_lock();
......@@ -173,7 +173,7 @@ enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn)
enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn)
{
enum drbd_conns conn = C_MASK;
struct drbd_conf *mdev;
struct drbd_device *mdev;
int vnr;
rcu_read_lock();
......@@ -186,7 +186,7 @@ enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn)
static bool no_peer_wf_report_params(struct drbd_tconn *tconn)
{
struct drbd_conf *mdev;
struct drbd_device *mdev;
int vnr;
bool rv = true;
......@@ -208,7 +208,7 @@ static bool no_peer_wf_report_params(struct drbd_tconn *tconn)
* @os: old (current) state.
* @ns: new (wanted) state.
*/
static int cl_wide_st_chg(struct drbd_conf *mdev,
static int cl_wide_st_chg(struct drbd_device *mdev,
union drbd_state os, union drbd_state ns)
{
return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
......@@ -230,7 +230,7 @@ apply_mask_val(union drbd_state os, union drbd_state mask, union drbd_state val)
}
enum drbd_state_rv
drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
drbd_change_state(struct drbd_device *mdev, enum chg_state_flags f,
union drbd_state mask, union drbd_state val)
{
unsigned long flags;
......@@ -251,14 +251,14 @@ drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
* @mask: mask of state bits to change.
* @val: value of new state bits.
*/
void drbd_force_state(struct drbd_conf *mdev,
void drbd_force_state(struct drbd_device *mdev,
union drbd_state mask, union drbd_state val)
{
drbd_change_state(mdev, CS_HARD, mask, val);
}
static enum drbd_state_rv
_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
_req_st_cond(struct drbd_device *mdev, union drbd_state mask,
union drbd_state val)
{
union drbd_state os, ns;
......@@ -304,7 +304,7 @@ _req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
* _drbd_request_state().
*/
static enum drbd_state_rv
drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
drbd_req_state(struct drbd_device *mdev, union drbd_state mask,
union drbd_state val, enum chg_state_flags f)
{
struct completion done;
......@@ -385,7 +385,7 @@ drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
* flag, or when logging of failed state change requests is not desired.
*/
enum drbd_state_rv
_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
_drbd_request_state(struct drbd_device *mdev, union drbd_state mask,
union drbd_state val, enum chg_state_flags f)
{
enum drbd_state_rv rv;
......@@ -396,7 +396,7 @@ _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
return rv;
}
static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
static void print_st(struct drbd_device *mdev, char *name, union drbd_state ns)
{
dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
name,
......@@ -414,7 +414,7 @@ static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
);
}
void print_st_err(struct drbd_conf *mdev, union drbd_state os,
void print_st_err(struct drbd_device *mdev, union drbd_state os,
union drbd_state ns, enum drbd_state_rv err)
{
if (err == SS_IN_TRANSIENT_STATE)
......@@ -455,7 +455,7 @@ static long print_state_change(char *pb, union drbd_state os, union drbd_state n
return pbp - pb;
}
static void drbd_pr_state_change(struct drbd_conf *mdev, union drbd_state os, union drbd_state ns,
static void drbd_pr_state_change(struct drbd_device *mdev, union drbd_state os, union drbd_state ns,
enum chg_state_flags flags)
{
char pb[300];
......@@ -504,7 +504,7 @@ static void conn_pr_state_change(struct drbd_tconn *tconn, union drbd_state os,
* @ns: State to consider.
*/
static enum drbd_state_rv
is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
is_valid_state(struct drbd_device *mdev, union drbd_state ns)
{
/* See drbd_state_sw_errors in drbd_strings.c */
......@@ -701,7 +701,7 @@ is_valid_transition(union drbd_state os, union drbd_state ns)
return rv;
}
static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn)
static void print_sanitize_warnings(struct drbd_device *mdev, enum sanitize_state_warnings warn)
{
static const char *msg_table[] = {
[NO_WARNING] = "",
......@@ -726,7 +726,7 @@ static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_
* When we loose connection, we have to set the state of the peers disk (pdsk)
* to D_UNKNOWN. This rule and many more along those lines are in this function.
*/
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
static union drbd_state sanitize_state(struct drbd_device *mdev, union drbd_state ns,
enum sanitize_state_warnings *warn)
{
enum drbd_fencing_p fp;
......@@ -890,14 +890,14 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
return ns;
}
void drbd_resume_al(struct drbd_conf *mdev)
void drbd_resume_al(struct drbd_device *mdev)
{
if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
dev_info(DEV, "Resumed AL updates\n");
}
/* helper for __drbd_set_state */
static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
static void set_ov_position(struct drbd_device *mdev, enum drbd_conns cs)
{
if (mdev->tconn->agreed_pro_version < 90)
mdev->ov_start_sector = 0;
......@@ -933,7 +933,7 @@ static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
* Caller needs to hold req_lock, and global_state_lock. Do not call directly.
*/
enum drbd_state_rv
__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
__drbd_set_state(struct drbd_device *mdev, union drbd_state ns,
enum chg_state_flags flags, struct completion *done)
{
union drbd_state os;
......@@ -1145,7 +1145,7 @@ static int w_after_state_ch(struct drbd_work *w, int unused)
{
struct after_state_chg_work *ascw =
container_of(w, struct after_state_chg_work, w);
struct drbd_conf *mdev = w->mdev;
struct drbd_device *mdev = w->mdev;
after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
if (ascw->flags & CS_WAIT_COMPLETE) {
......@@ -1157,7 +1157,7 @@ static int w_after_state_ch(struct drbd_work *w, int unused)
return 0;
}
static void abw_start_sync(struct drbd_conf *mdev, int rv)
static void abw_start_sync(struct drbd_device *mdev, int rv)
{
if (rv) {
dev_err(DEV, "Writing the bitmap failed not starting resync.\n");
......@@ -1175,8 +1175,8 @@ static void abw_start_sync(struct drbd_conf *mdev, int rv)
}
}
int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
int (*io_fn)(struct drbd_conf *),
int drbd_bitmap_io_from_worker(struct drbd_device *mdev,
int (*io_fn)(struct drbd_device *),
char *why, enum bm_flag flags)
{
int rv;
......@@ -1202,7 +1202,7 @@ int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
* @ns: new state.
* @flags: Flags
*/
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
static void after_state_ch(struct drbd_device *mdev, union drbd_state os,
union drbd_state ns, enum chg_state_flags flags)
{
struct sib_info sib;
......@@ -1255,7 +1255,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
spin_lock_irq(&tconn->req_lock);
if (tconn->susp_fen && conn_lowest_conn(tconn) >= C_CONNECTED) {
/* case2: The connection was established again: */
struct drbd_conf *odev;
struct drbd_device *odev;
int vnr;
rcu_read_lock();
......@@ -1529,7 +1529,7 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused)
struct drbd_tconn *tconn = w->tconn;
enum drbd_conns oc = acscw->oc;
union drbd_state ns_max = acscw->ns_max;
struct drbd_conf *mdev;
struct drbd_device *mdev;
int vnr;
kfree(acscw);
......@@ -1583,7 +1583,7 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused)
void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum chg_state_flags *pf)
{
enum chg_state_flags flags = ~0;
struct drbd_conf *mdev;
struct drbd_device *mdev;
int vnr, first_vol = 1;
union drbd_dev_state os, cs = {
{ .role = R_SECONDARY,
......@@ -1631,7 +1631,7 @@ conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union
{
enum drbd_state_rv rv = SS_SUCCESS;
union drbd_state ns, os;
struct drbd_conf *mdev;
struct drbd_device *mdev;
int vnr;
rcu_read_lock();
......@@ -1680,7 +1680,7 @@ conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state
.disk = D_MASK,
.pdsk = D_MASK
} };
struct drbd_conf *mdev;
struct drbd_device *mdev;
enum drbd_state_rv rv;
int vnr, number_of_volumes = 0;
......
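
Editor's note: to make the rule quoted in the sanitize_state() comment above concrete, here is a minimal, hedged sketch of just that one adjustment. The helper name is made up for illustration; the real sanitize_state() in this file applies many more rules of this kind.

/* Editor's sketch (not part of the patch): once the connection is gone we
 * can no longer know the peer's disk state, so it is forced to D_UNKNOWN. */
static union drbd_state example_sanitize_pdsk(union drbd_state ns)
{
	if (ns.conn < C_CONNECTED)
		ns.pdsk = D_UNKNOWN;
	return ns;
}
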
#ifndef DRBD_STATE_H
#define DRBD_STATE_H
struct drbd_conf;
struct drbd_device;
struct drbd_tconn;
/**
......@@ -107,20 +107,20 @@ union drbd_dev_state {
unsigned int i;
};
extern enum drbd_state_rv drbd_change_state(struct drbd_conf *mdev,
extern enum drbd_state_rv drbd_change_state(struct drbd_device *mdev,
enum chg_state_flags f,
union drbd_state mask,
union drbd_state val);
extern void drbd_force_state(struct drbd_conf *, union drbd_state,
extern void drbd_force_state(struct drbd_device *, union drbd_state,
union drbd_state);
extern enum drbd_state_rv _drbd_request_state(struct drbd_conf *,
extern enum drbd_state_rv _drbd_request_state(struct drbd_device *,
union drbd_state,
union drbd_state,
enum chg_state_flags);
extern enum drbd_state_rv __drbd_set_state(struct drbd_conf *, union drbd_state,
extern enum drbd_state_rv __drbd_set_state(struct drbd_device *, union drbd_state,
enum chg_state_flags,
struct completion *done);
extern void print_st_err(struct drbd_conf *, union drbd_state,
extern void print_st_err(struct drbd_device *, union drbd_state,
union drbd_state, int);
enum drbd_state_rv
......@@ -131,7 +131,7 @@ enum drbd_state_rv
conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
enum chg_state_flags flags);
extern void drbd_resume_al(struct drbd_conf *mdev);
extern void drbd_resume_al(struct drbd_device *mdev);
extern bool conn_all_vols_unconf(struct drbd_tconn *tconn);
/**
......@@ -144,7 +144,7 @@ extern bool conn_all_vols_unconf(struct drbd_tconn *tconn);
* quite verbose in case the state change is not possible, and all those
* state changes are globally serialized.
*/
static inline int drbd_request_state(struct drbd_conf *mdev,
static inline int drbd_request_state(struct drbd_device *mdev,
union drbd_state mask,
union drbd_state val)
{
......
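
Editor's note: the drbd_request_state() wrapper declared above takes a (mask, val) pair of drbd_state unions: mask marks which state fields may change, val carries their new values. Below is a hedged sketch of a caller after the rename; the helper function name is hypothetical, and D_FAILED is simply one of the existing drbd disk states used for illustration.

/* Editor's sketch (not part of the patch): request that only the local
 * disk state change, and that it become D_FAILED. */
static void example_fail_local_disk(struct drbd_device *mdev)
{
	union drbd_state mask = { .i = 0 };
	union drbd_state val  = { .i = 0 };

	mask.disk = D_MASK;     /* only the local disk state may change */
	val.disk  = D_FAILED;   /* ...and it should become D_FAILED     */

	if (drbd_request_state(mdev, mask, val) < SS_SUCCESS)
		dev_err(DEV, "could not move the local disk to D_FAILED\n");
}
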
......@@ -68,10 +68,10 @@ rwlock_t global_state_lock;
void drbd_md_io_complete(struct bio *bio, int error)
{
struct drbd_md_io *md_io;
struct drbd_conf *mdev;
struct drbd_device *mdev;
md_io = (struct drbd_md_io *)bio->bi_private;
mdev = container_of(md_io, struct drbd_conf, md_io);
mdev = container_of(md_io, struct drbd_device, md_io);
md_io->error = error;
......@@ -100,7 +100,7 @@ void drbd_md_io_complete(struct bio *bio, int error)
static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
unsigned long flags = 0;
struct drbd_conf *mdev = peer_req->w.mdev;
struct drbd_device *mdev = peer_req->w.mdev;
spin_lock_irqsave(&mdev->tconn->req_lock, flags);
mdev->read_cnt += peer_req->i.size >> 9;
......@@ -120,7 +120,7 @@ static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __rele
static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local)
{
unsigned long flags = 0;
struct drbd_conf *mdev = peer_req->w.mdev;
struct drbd_device *mdev = peer_req->w.mdev;
struct drbd_interval i;
int do_wake;
u64 block_id;
......@@ -171,7 +171,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
void drbd_peer_request_endio(struct bio *bio, int error)
{
struct drbd_peer_request *peer_req = bio->bi_private;
struct drbd_conf *mdev = peer_req->w.mdev;
struct drbd_device *mdev = peer_req->w.mdev;
int uptodate = bio_flagged(bio, BIO_UPTODATE);
int is_write = bio_data_dir(bio) == WRITE;
......@@ -208,7 +208,7 @@ void drbd_request_endio(struct bio *bio, int error)
{
unsigned long flags;
struct drbd_request *req = bio->bi_private;
struct drbd_conf *mdev = req->w.mdev;
struct drbd_device *mdev = req->w.mdev;
struct bio_and_error m;
enum drbd_req_event what;
int uptodate = bio_flagged(bio, BIO_UPTODATE);
......@@ -282,7 +282,7 @@ void drbd_request_endio(struct bio *bio, int error)
complete_master_bio(mdev, &m);
}
void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
void drbd_csum_ee(struct drbd_device *mdev, struct crypto_hash *tfm,
struct drbd_peer_request *peer_req, void *digest)
{
struct hash_desc desc;
......@@ -310,7 +310,7 @@ void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
crypto_hash_final(&desc, digest);
}
void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
void drbd_csum_bio(struct drbd_device *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
{
struct hash_desc desc;
struct scatterlist sg;
......@@ -334,7 +334,7 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
static int w_e_send_csum(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
struct drbd_conf *mdev = w->mdev;
struct drbd_device *mdev = w->mdev;
int digest_size;
void *digest;
int err = 0;
......@@ -379,7 +379,7 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
static int read_for_csum(struct drbd_device *mdev, sector_t sector, int size)
{
struct drbd_peer_request *peer_req;
......@@ -421,7 +421,7 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
int w_resync_timer(struct drbd_work *w, int cancel)
{
struct drbd_conf *mdev = w->mdev;
struct drbd_device *mdev = w->mdev;
switch (mdev->state.conn) {
case C_VERIFY_S:
w_make_ov_request(w, cancel);
......@@ -436,7 +436,7 @@ int w_resync_timer(struct drbd_work *w, int cancel)
void resync_timer_fn(unsigned long data)
{
struct drbd_conf *mdev = (struct drbd_conf *) data;
struct drbd_device *mdev = (struct drbd_device *) data;
if (list_empty(&mdev->resync_work.list))
drbd_queue_work(&mdev->tconn->sender_work, &mdev->resync_work);
......@@ -486,7 +486,7 @@ struct fifo_buffer *fifo_alloc(int fifo_size)
return fb;
}
static int drbd_rs_controller(struct drbd_conf *mdev)
static int drbd_rs_controller(struct drbd_device *mdev)
{
struct disk_conf *dc;
unsigned int sect_in; /* Number of sectors that came in since the last turn */
......@@ -542,7 +542,7 @@ static int drbd_rs_controller(struct drbd_conf *mdev)
return req_sect;
}
static int drbd_rs_number_requests(struct drbd_conf *mdev)
static int drbd_rs_number_requests(struct drbd_device *mdev)
{
int number;
......@@ -563,7 +563,7 @@ static int drbd_rs_number_requests(struct drbd_conf *mdev)
int w_make_resync_request(struct drbd_work *w, int cancel)
{
struct drbd_conf *mdev = w->mdev;
struct drbd_device *mdev = w->mdev;
unsigned long bit;
sector_t sector;
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
......@@ -726,7 +726,7 @@ int w_make_resync_request(struct drbd_work *w, int cancel)
static int w_make_ov_request(struct drbd_work *w, int cancel)
{
struct drbd_conf *mdev = w->mdev;
struct drbd_device *mdev = w->mdev;
int number, i, size;
sector_t sector;
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
......@@ -780,7 +780,7 @@ static int w_make_ov_request(struct drbd_work *w, int cancel)
int w_ov_finished(struct drbd_work *w, int cancel)
{
struct drbd_conf *mdev = w->mdev;
struct drbd_device *mdev = w->mdev;
kfree(w);
ov_out_of_sync_print(mdev);
drbd_resync_finished(mdev);
......@@ -790,7 +790,7 @@ int w_ov_finished(struct drbd_work *w, int cancel)
static int w_resync_finished(struct drbd_work *w, int cancel)
{
struct drbd_conf *mdev = w->mdev;
struct drbd_device *mdev = w->mdev;
kfree(w);
drbd_resync_finished(mdev);
......@@ -798,7 +798,7 @@ static int w_resync_finished(struct drbd_work *w, int cancel)
return 0;
}
static void ping_peer(struct drbd_conf *mdev)
static void ping_peer(struct drbd_device *mdev)
{
struct drbd_tconn *tconn = mdev->tconn;
......@@ -808,7 +808,7 @@ static void ping_peer(struct drbd_conf *mdev)
test_bit(GOT_PING_ACK, &tconn->flags) || mdev->state.conn < C_CONNECTED);
}
int drbd_resync_finished(struct drbd_conf *mdev)
int drbd_resync_finished(struct drbd_device *mdev)
{
unsigned long db, dt, dbdt;
unsigned long n_oos;
......@@ -963,7 +963,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
}
/* helper */
static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
static void move_to_net_ee_or_free(struct drbd_device *mdev, struct drbd_peer_request *peer_req)
{
if (drbd_peer_req_has_active_page(peer_req)) {
/* This might happen if sendpage() has not finished */
......@@ -987,7 +987,7 @@ static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_requ
int w_e_end_data_req(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
struct drbd_conf *mdev = w->mdev;
struct drbd_device *mdev = w->mdev;
int err;
if (unlikely(cancel)) {
......@@ -1024,7 +1024,7 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
struct drbd_conf *mdev = w->mdev;
struct drbd_device *mdev = w->mdev;
int err;
if (unlikely(cancel)) {
......@@ -1073,7 +1073,7 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
struct drbd_conf *mdev = w->mdev;
struct drbd_device *mdev = w->mdev;
struct digest_info *di;
int digest_size;
void *digest = NULL;
......@@ -1136,7 +1136,7 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
int w_e_end_ov_req(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
struct drbd_conf *mdev = w->mdev;
struct drbd_device *mdev = w->mdev;
sector_t sector = peer_req->i.sector;
unsigned int size = peer_req->i.size;
int digest_size;
......@@ -1178,7 +1178,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
return err;
}
void drbd_ov_out_of_sync_found(struct drbd_conf *mdev, sector_t sector, int size)
void drbd_ov_out_of_sync_found(struct drbd_device *mdev, sector_t sector, int size)
{
if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) {
mdev->ov_last_oos_size += size>>9;
......@@ -1192,7 +1192,7 @@ void drbd_ov_out_of_sync_found(struct drbd_conf *mdev, sector_t sector, int size
int w_e_end_ov_reply(struct drbd_work *w, int cancel)
{
struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w);
struct drbd_conf *mdev = w->mdev;
struct drbd_device *mdev = w->mdev;
struct digest_info *di;
void *digest;
sector_t sector = peer_req->i.sector;
......@@ -1292,7 +1292,7 @@ static int drbd_send_barrier(struct drbd_tconn *tconn)
int w_send_write_hint(struct drbd_work *w, int cancel)
{
struct drbd_conf *mdev = w->mdev;
struct drbd_device *mdev = w->mdev;
struct drbd_socket *sock;
if (cancel)
......@@ -1327,7 +1327,7 @@ static void maybe_send_barrier(struct drbd_tconn *tconn, unsigned int epoch)
int w_send_out_of_sync(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
struct drbd_conf *mdev = w->mdev;
struct drbd_device *mdev = w->mdev;
struct drbd_tconn *tconn = mdev->tconn;
int err;
......@@ -1357,7 +1357,7 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel)
int w_send_dblock(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
struct drbd_conf *mdev = w->mdev;
struct drbd_device *mdev = w->mdev;
struct drbd_tconn *tconn = mdev->tconn;
int err;
......@@ -1385,7 +1385,7 @@ int w_send_dblock(struct drbd_work *w, int cancel)
int w_send_read_req(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
struct drbd_conf *mdev = w->mdev;
struct drbd_device *mdev = w->mdev;
struct drbd_tconn *tconn = mdev->tconn;
int err;
......@@ -1409,7 +1409,7 @@ int w_send_read_req(struct drbd_work *w, int cancel)
int w_restart_disk_io(struct drbd_work *w, int cancel)
{
struct drbd_request *req = container_of(w, struct drbd_request, w);
struct drbd_conf *mdev = w->mdev;
struct drbd_device *mdev = w->mdev;
if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
drbd_al_begin_io(mdev, &req->i, false);
......@@ -1421,9 +1421,9 @@ int w_restart_disk_io(struct drbd_work *w, int cancel)
return 0;
}
static int _drbd_may_sync_now(struct drbd_conf *mdev)
static int _drbd_may_sync_now(struct drbd_device *mdev)
{
struct drbd_conf *odev = mdev;
struct drbd_device *odev = mdev;
int resync_after;
while (1) {
......@@ -1451,9 +1451,9 @@ static int _drbd_may_sync_now(struct drbd_conf *mdev)
*
* Called from process context only (admin command and after_state_ch).
*/
static int _drbd_pause_after(struct drbd_conf *mdev)
static int _drbd_pause_after(struct drbd_device *mdev)
{
struct drbd_conf *odev;
struct drbd_device *odev;
int i, rv = 0;
rcu_read_lock();
......@@ -1475,9 +1475,9 @@ static int _drbd_pause_after(struct drbd_conf *mdev)
*
* Called from process context only (admin command and worker).
*/
static int _drbd_resume_next(struct drbd_conf *mdev)
static int _drbd_resume_next(struct drbd_device *mdev)
{
struct drbd_conf *odev;
struct drbd_device *odev;
int i, rv = 0;
rcu_read_lock();
......@@ -1495,14 +1495,14 @@ static int _drbd_resume_next(struct drbd_conf *mdev)
return rv;
}
void resume_next_sg(struct drbd_conf *mdev)
void resume_next_sg(struct drbd_device *mdev)
{
write_lock_irq(&global_state_lock);
_drbd_resume_next(mdev);
write_unlock_irq(&global_state_lock);
}
void suspend_other_sg(struct drbd_conf *mdev)
void suspend_other_sg(struct drbd_device *mdev)
{
write_lock_irq(&global_state_lock);
_drbd_pause_after(mdev);
......@@ -1510,9 +1510,9 @@ void suspend_other_sg(struct drbd_conf *mdev)
}
/* caller must hold global_state_lock */
enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor)
enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *mdev, int o_minor)
{
struct drbd_conf *odev;
struct drbd_device *odev;
int resync_after;
if (o_minor == -1)
......@@ -1548,7 +1548,7 @@ enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev, int o_minor)
}
/* caller must hold global_state_lock */
void drbd_resync_after_changed(struct drbd_conf *mdev)
void drbd_resync_after_changed(struct drbd_device *mdev)
{
int changes;
......@@ -1558,7 +1558,7 @@ void drbd_resync_after_changed(struct drbd_conf *mdev)
} while (changes);
}
void drbd_rs_controller_reset(struct drbd_conf *mdev)
void drbd_rs_controller_reset(struct drbd_device *mdev)
{
struct fifo_buffer *plan;
......@@ -1579,14 +1579,14 @@ void drbd_rs_controller_reset(struct drbd_conf *mdev)
void start_resync_timer_fn(unsigned long data)
{
struct drbd_conf *mdev = (struct drbd_conf *) data;
struct drbd_device *mdev = (struct drbd_device *) data;
drbd_queue_work(&mdev->tconn->sender_work, &mdev->start_resync_work);
}
int w_start_resync(struct drbd_work *w, int cancel)
{
struct drbd_conf *mdev = w->mdev;
struct drbd_device *mdev = w->mdev;
if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) {
dev_warn(DEV, "w_start_resync later...\n");
......@@ -1608,7 +1608,7 @@ int w_start_resync(struct drbd_work *w, int cancel)
* This function might bring you directly into one of the
* C_PAUSED_SYNC_* states.
*/
void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
void drbd_start_resync(struct drbd_device *mdev, enum drbd_conns side)
{
union drbd_state ns;
int r;
......@@ -1886,7 +1886,7 @@ int drbd_worker(struct drbd_thread *thi)
{
struct drbd_tconn *tconn = thi->tconn;
struct drbd_work *w = NULL;
struct drbd_conf *mdev;
struct drbd_device *mdev;
LIST_HEAD(work_list);
int vnr;
......
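
Editor's note: the drbd_worker.c hunks above also show why a purely textual rename has to reach into expressions, not just declarations: container_of() names the enclosing struct type literally, as in the drbd_md_io_complete() hunk. The following is a self-contained userspace sketch of that pattern; the struct layouts are reduced stand-ins, not the real drbd definitions.

#include <stddef.h>

/* Simplified container_of(), equivalent in effect to the kernel macro:
 * recover a pointer to the enclosing struct from a pointer to one member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drbd_md_io { int error; };          /* reduced stand-in */

struct drbd_device {                       /* reduced stand-in */
	struct drbd_md_io md_io;
};

/* The type name appears textually in the expression, so a whole-tree
 * rename must rewrite these call sites as well as the declarations. */
static struct drbd_device *device_of_md_io(struct drbd_md_io *md_io)
{
	return container_of(md_io, struct drbd_device, md_io);
}

int main(void)
{
	struct drbd_device dev = { .md_io = { .error = 0 } };

	return device_of_md_io(&dev.md_io) == &dev ? 0 : 1;
}
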
......@@ -9,7 +9,7 @@
extern char *drbd_sec_holder;
/* sets the number of 512 byte sectors of our virtual device */
static inline void drbd_set_my_capacity(struct drbd_conf *mdev,
static inline void drbd_set_my_capacity(struct drbd_device *mdev,
sector_t size)
{
/* set_capacity(mdev->this_bdev->bd_disk, size); */
......@@ -27,7 +27,7 @@ extern void drbd_request_endio(struct bio *bio, int error);
/*
* used to submit our private bio
*/
static inline void drbd_generic_make_request(struct drbd_conf *mdev,
static inline void drbd_generic_make_request(struct drbd_device *mdev,
int fault_type, struct bio *bio)
{
__release(local);
......