Commit 6a0afdf5 authored by Jens Axboe

drbd: remove tracing bits

They should be reimplemented in the current scheme.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Parent ab8fafc2
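The "current scheme" referred to in the commit message is the kernel's generic TRACE_EVENT()/ftrace event infrastructure, which supersedes the hand-rolled DECLARE_TRACE/DEFINE_TRACE plumbing this commit deletes. Purely as a hedged sketch (not part of this commit; the header path, event name, and fields are assumptions modeled on the removed trace_drbd_actlog() hook), a re-implemented DRBD tracepoint could look roughly like this:

```c
/* include/trace/events/drbd.h -- hypothetical header, not from this commit.
 * Exactly one compilation unit would #define CREATE_TRACE_POINTS before
 * including it so that the event bodies get emitted. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM drbd

#if !defined(_TRACE_DRBD_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DRBD_H

#include <linux/tracepoint.h>

TRACE_EVENT(drbd_actlog,
	/* mirror of the old trace_drbd_actlog(mdev, sector, msg) arguments */
	TP_PROTO(unsigned int minor, sector_t sector, const char *msg),
	TP_ARGS(minor, sector, msg),

	TP_STRUCT__entry(
		__field(unsigned int, minor)
		__field(sector_t, sector)
		__string(msg, msg)
	),

	TP_fast_assign(
		__entry->minor  = minor;
		__entry->sector = sector;
		__assign_str(msg, msg);
	),

	TP_printk("drbd%u: al %s sector=%llu",
		  __entry->minor, __get_str(msg),
		  (unsigned long long)__entry->sector)
);

#endif /* _TRACE_DRBD_H */

/* This part must be outside the multi-read protection. */
#include <trace/define_trace.h>
```

With an event defined this way, call sites simply invoke trace_drbd_actlog(...) and the event shows up under /sys/kernel/debug/tracing/events/drbd/ with no DRBD-specific probe module needed.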
......@@ -38,17 +38,6 @@ config BLK_DEV_DRBD
If unsure, say N.
config DRBD_TRACE
tristate "DRBD tracing"
depends on BLK_DEV_DRBD
select TRACEPOINTS
default n
help
Say Y here if you want to be able to trace various events in DRBD.
If unsure, say N.
config DRBD_FAULT_INJECTION
bool "DRBD fault injection"
depends on BLK_DEV_DRBD
......
......@@ -2,7 +2,4 @@ drbd-y := drbd_bitmap.o drbd_proc.o
drbd-y += drbd_worker.o drbd_receiver.o drbd_req.o drbd_actlog.o
drbd-y += drbd_main.o drbd_strings.o drbd_nl.o
drbd_trace-y := drbd_tracing.o
obj-$(CONFIG_BLK_DEV_DRBD) += drbd.o
obj-$(CONFIG_DRBD_TRACE) += drbd_trace.o
......@@ -26,7 +26,6 @@
#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_tracing.h"
#include "drbd_wrappers.h"
/* We maintain a trivial check sum in our on disk activity log.
......@@ -66,17 +65,6 @@ struct drbd_atodb_wait {
int w_al_write_transaction(struct drbd_conf *, struct drbd_work *, int);
/* The actual tracepoint needs to have constant number of known arguments...
*/
void trace_drbd_resync(struct drbd_conf *mdev, int level, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
trace__drbd_resync(mdev, level, fmt, ap);
va_end(ap);
}
static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
struct drbd_backing_dev *bdev,
struct page *page, sector_t sector,
......@@ -105,8 +93,6 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
bio->bi_end_io = drbd_md_io_complete;
bio->bi_rw = rw;
trace_drbd_bio(mdev, "Md", bio, 0, NULL);
if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
bio_endio(bio, -EIO);
else
......@@ -236,8 +222,6 @@ void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
trace_drbd_actlog(mdev, sector, "al_begin_io");
wait_event(mdev->al_wait, (al_ext = _al_get(mdev, enr)));
if (al_ext->lc_number != enr) {
......@@ -270,8 +254,6 @@ void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector)
struct lc_element *extent;
unsigned long flags;
trace_drbd_actlog(mdev, sector, "al_complete_io");
spin_lock_irqsave(&mdev->al_lock, flags);
extent = lc_find(mdev->act_log, enr);
......@@ -967,10 +949,6 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);
trace_drbd_resync(mdev, TRACE_LVL_METRICS,
"drbd_set_in_sync: sector=%llus size=%u sbnr=%lu ebnr=%lu\n",
(unsigned long long)sector, size, sbnr, ebnr);
if (sbnr > ebnr)
return;
......@@ -1045,10 +1023,6 @@ void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
sbnr = BM_SECT_TO_BIT(sector);
ebnr = BM_SECT_TO_BIT(esector);
trace_drbd_resync(mdev, TRACE_LVL_METRICS,
"drbd_set_out_of_sync: sector=%llus size=%u sbnr=%lu ebnr=%lu\n",
(unsigned long long)sector, size, sbnr, ebnr);
/* ok, (capacity & 7) != 0 sometimes, but who cares...
* we count rs_{total,left} in bits, not sectors. */
spin_lock_irqsave(&mdev->al_lock, flags);
......@@ -1143,10 +1117,6 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
struct bm_extent *bm_ext;
int i, sig;
trace_drbd_resync(mdev, TRACE_LVL_ALL,
"drbd_rs_begin_io: sector=%llus (rs_end=%d)\n",
(unsigned long long)sector, enr);
sig = wait_event_interruptible(mdev->al_wait,
(bm_ext = _bme_get(mdev, enr)));
if (sig)
......@@ -1192,9 +1162,6 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
struct bm_extent *bm_ext;
int i;
trace_drbd_resync(mdev, TRACE_LVL_ALL, "drbd_try_rs_begin_io: sector=%llus\n",
(unsigned long long)sector);
spin_lock_irq(&mdev->al_lock);
if (mdev->resync_wenr != LC_FREE && mdev->resync_wenr != enr) {
/* in case you have very heavy scattered io, it may
......@@ -1210,11 +1177,6 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
* the lc_put here...
* we also have to wake_up
*/
trace_drbd_resync(mdev, TRACE_LVL_ALL,
"dropping %u, apparently got 'synced' by application io\n",
mdev->resync_wenr);
e = lc_find(mdev->resync, mdev->resync_wenr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
if (bm_ext) {
......@@ -1242,21 +1204,14 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
* but then could not set BME_LOCKED,
* so we tried again.
* drop the extra reference. */
trace_drbd_resync(mdev, TRACE_LVL_ALL,
"dropping extra reference on %u\n", enr);
bm_ext->lce.refcnt--;
D_ASSERT(bm_ext->lce.refcnt > 0);
}
goto check_al;
} else {
/* do we rather want to try later? */
if (mdev->resync_locked > mdev->resync->nr_elements-3) {
trace_drbd_resync(mdev, TRACE_LVL_ALL,
"resync_locked = %u!\n", mdev->resync_locked);
if (mdev->resync_locked > mdev->resync->nr_elements-3)
goto try_again;
}
/* Do or do not. There is no try. -- Yoda */
e = lc_get(mdev->resync, enr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
......@@ -1281,8 +1236,6 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
goto check_al;
}
check_al:
trace_drbd_resync(mdev, TRACE_LVL_ALL, "checking al for %u\n", enr);
for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
if (unlikely(al_enr+i == mdev->act_log->new_number))
goto try_again;
......@@ -1296,7 +1249,6 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
return 0;
try_again:
trace_drbd_resync(mdev, TRACE_LVL_ALL, "need to try again for %u\n", enr);
if (bm_ext)
mdev->resync_wenr = enr;
spin_unlock_irq(&mdev->al_lock);
......@@ -1310,10 +1262,6 @@ void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
struct bm_extent *bm_ext;
unsigned long flags;
trace_drbd_resync(mdev, TRACE_LVL_ALL,
"drbd_rs_complete_io: sector=%llus (rs_enr=%d)\n",
(long long)sector, enr);
spin_lock_irqsave(&mdev->al_lock, flags);
e = lc_find(mdev->resync, enr);
bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
......@@ -1348,8 +1296,6 @@ void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
*/
void drbd_rs_cancel_all(struct drbd_conf *mdev)
{
trace_drbd_resync(mdev, TRACE_LVL_METRICS, "drbd_rs_cancel_all\n");
spin_lock_irq(&mdev->al_lock);
if (get_ldev_if_state(mdev, D_FAILED)) { /* Makes sure ->resync is there. */
......@@ -1375,8 +1321,6 @@ int drbd_rs_del_all(struct drbd_conf *mdev)
struct bm_extent *bm_ext;
int i;
trace_drbd_resync(mdev, TRACE_LVL_METRICS, "drbd_rs_del_all\n");
spin_lock_irq(&mdev->al_lock);
if (get_ldev_if_state(mdev, D_FAILED)) {
......@@ -1429,10 +1373,6 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
sector_t esector, nr_sectors;
int wake_up = 0;
trace_drbd_resync(mdev, TRACE_LVL_SUMMARY,
"drbd_rs_failed_io: sector=%llus, size=%u\n",
(unsigned long long)sector, size);
if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
(unsigned long long)sector, size);
......
......@@ -135,8 +135,6 @@ enum {
DRBD_FAULT_MAX,
};
extern void trace_drbd_resync(struct drbd_conf *mdev, int level, const char *fmt, ...);
#ifdef CONFIG_DRBD_FAULT_INJECTION
extern unsigned int
_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type);
......@@ -712,11 +710,6 @@ enum epoch_event {
EV_GOT_BARRIER_NR,
EV_BARRIER_DONE,
EV_BECAME_LAST,
EV_TRACE_FLUSH, /* TRACE_ are not real events, only used for tracing */
EV_TRACE_ADD_BARRIER, /* Doing the first write as a barrier write */
EV_TRACE_SETTING_BI, /* Barrier is expressed with the first write of the next epoch */
EV_TRACE_ALLOC,
EV_TRACE_FREE,
EV_CLEANUP = 32, /* used as flag */
};
......
......@@ -53,7 +53,6 @@
#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_tracing.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
#include "drbd_vli.h"
......@@ -80,18 +79,6 @@ static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
DEFINE_TRACE(drbd_unplug);
DEFINE_TRACE(drbd_uuid);
DEFINE_TRACE(drbd_ee);
DEFINE_TRACE(drbd_packet);
DEFINE_TRACE(drbd_md_io);
DEFINE_TRACE(drbd_epoch);
DEFINE_TRACE(drbd_netlink);
DEFINE_TRACE(drbd_actlog);
DEFINE_TRACE(drbd_bio);
DEFINE_TRACE(_drbd_resync);
DEFINE_TRACE(drbd_req);
MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
"Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
......@@ -1576,7 +1563,6 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
h->command = cpu_to_be16(cmd);
h->length = cpu_to_be16(size-sizeof(struct p_header));
trace_drbd_packet(mdev, sock, 0, (void *)h, __FILE__, __LINE__);
sent = drbd_send(mdev, sock, h, size, msg_flags);
ok = (sent == size);
......@@ -1628,8 +1614,6 @@ int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
if (!drbd_get_data_sock(mdev))
return 0;
trace_drbd_packet(mdev, mdev->data.socket, 0, (void *)&h, __FILE__, __LINE__);
ok = (sizeof(h) ==
drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
ok = ok && (size ==
......@@ -2359,7 +2343,6 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
dp_flags |= DP_MAY_SET_IN_SYNC;
p.dp_flags = cpu_to_be32(dp_flags);
trace_drbd_packet(mdev, mdev->data.socket, 0, (void *)&p, __FILE__, __LINE__);
set_bit(UNPLUG_REMOTE, &mdev->flags);
ok = (sizeof(p) ==
drbd_send(mdev, mdev->data.socket, &p, sizeof(p), MSG_MORE));
......@@ -2410,7 +2393,6 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
if (!drbd_get_data_sock(mdev))
return 0;
trace_drbd_packet(mdev, mdev->data.socket, 0, (void *)&p, __FILE__, __LINE__);
ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p,
sizeof(p), MSG_MORE);
if (ok && dgs) {
......@@ -2546,8 +2528,6 @@ static void drbd_unplug_fn(struct request_queue *q)
{
struct drbd_conf *mdev = q->queuedata;
trace_drbd_unplug(mdev, "got unplugged");
/* unplug FIRST */
spin_lock_irq(q->queue_lock);
blk_remove_plug(q);
......@@ -3252,8 +3232,6 @@ void drbd_md_sync(struct drbd_conf *mdev)
if (!get_ldev_if_state(mdev, D_FAILED))
return;
trace_drbd_md_io(mdev, WRITE, mdev->ldev);
mutex_lock(&mdev->md_io_mutex);
buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
memset(buffer, 0, 512);
......@@ -3308,8 +3286,6 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
if (!get_ldev_if_state(mdev, D_ATTACHING))
return ERR_IO_MD_DISK;
trace_drbd_md_io(mdev, READ, bdev);
mutex_lock(&mdev->md_io_mutex);
buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
......@@ -3388,11 +3364,8 @@ static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
{
int i;
for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) {
for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
trace_drbd_uuid(mdev, i+1);
}
}
void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
......@@ -3407,7 +3380,6 @@ void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
}
mdev->ldev->md.uuid[idx] = val;
trace_drbd_uuid(mdev, idx);
drbd_md_mark_dirty(mdev);
}
......@@ -3417,7 +3389,6 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
if (mdev->ldev->md.uuid[idx]) {
drbd_uuid_move_history(mdev);
mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
trace_drbd_uuid(mdev, UI_HISTORY_START);
}
_drbd_uuid_set(mdev, idx, val);
}
......@@ -3436,7 +3407,6 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
dev_info(DEV, "Creating new current UUID\n");
D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0);
mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
trace_drbd_uuid(mdev, UI_BITMAP);
get_random_bytes(&val, sizeof(u64));
_drbd_uuid_set(mdev, UI_CURRENT, val);
......@@ -3451,8 +3421,6 @@ void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
drbd_uuid_move_history(mdev);
mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
mdev->ldev->md.uuid[UI_BITMAP] = 0;
trace_drbd_uuid(mdev, UI_HISTORY_START);
trace_drbd_uuid(mdev, UI_BITMAP);
} else {
if (mdev->ldev->md.uuid[UI_BITMAP])
dev_warn(DEV, "bm UUID already set");
......@@ -3460,7 +3428,6 @@ void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
mdev->ldev->md.uuid[UI_BITMAP] = val;
mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1);
trace_drbd_uuid(mdev, UI_BITMAP);
}
drbd_md_mark_dirty(mdev);
}
......@@ -3727,7 +3694,6 @@ const char *drbd_buildtag(void)
module_init(drbd_init)
module_exit(drbd_cleanup)
/* For drbd_tracing: */
EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
......
......@@ -33,7 +33,6 @@
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_tracing.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_tag_magic.h>
......@@ -2024,8 +2023,6 @@ static void drbd_connector_callback(struct cn_msg *req)
goto fail;
}
trace_drbd_netlink(req, 1);
if (nlp->packet_type >= P_nl_after_last_packet) {
retcode = ERR_PACKET_NR;
goto fail;
......@@ -2063,7 +2060,6 @@ static void drbd_connector_callback(struct cn_msg *req)
cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
cn_reply->flags = 0;
trace_drbd_netlink(cn_reply, 0);
rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
if (rr && rr != -ESRCH)
printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
......@@ -2157,7 +2153,6 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
reply->minor = mdev_to_minor(mdev);
reply->ret_code = NO_ERROR;
trace_drbd_netlink(cn_reply, 0);
cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}
......@@ -2190,7 +2185,6 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
reply->minor = mdev_to_minor(mdev);
reply->ret_code = NO_ERROR;
trace_drbd_netlink(cn_reply, 0);
cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}
......@@ -2262,7 +2256,6 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
reply->minor = mdev_to_minor(mdev);
reply->ret_code = NO_ERROR;
trace_drbd_netlink(cn_reply, 0);
cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
kfree(cn_reply);
}
......@@ -2302,7 +2295,6 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
reply->minor = mdev_to_minor(mdev);
reply->ret_code = NO_ERROR;
trace_drbd_netlink(cn_reply, 0);
cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}
......@@ -2356,7 +2348,6 @@ void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
reply->ret_code = ret_code;
trace_drbd_netlink(cn_reply, 0);
rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
if (rr && rr != -ESRCH)
printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
......
......@@ -47,7 +47,6 @@
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_tracing.h"
#include "drbd_req.h"
#include "drbd_vli.h"
......@@ -350,8 +349,6 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
e->epoch = NULL;
e->flags = 0;
trace_drbd_ee(mdev, e, "allocated");
return e;
fail2:
......@@ -366,7 +363,6 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
{
struct bio *bio = e->private_bio;
trace_drbd_ee(mdev, e, "freed");
drbd_pp_free_bio_pages(mdev, bio);
bio_put(bio);
D_ASSERT(hlist_unhashed(&e->colision));
......@@ -420,7 +416,6 @@ static int drbd_process_done_ee(struct drbd_conf *mdev)
* all ignore the last argument.
*/
list_for_each_entry_safe(e, t, &work_list, w.list) {
trace_drbd_ee(mdev, e, "process_done_ee");
/* list_del not necessary, next/prev members not touched */
ok = e->w.cb(mdev, &e->w, !ok) && ok;
drbd_free_ee(mdev, e);
......@@ -1021,8 +1016,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
break;
}
trace_drbd_epoch(mdev, epoch, ev);
if (epoch_size != 0 &&
atomic_read(&epoch->active) == 0 &&
test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
......@@ -1054,7 +1047,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
list_del(&epoch->list);
ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
mdev->epochs--;
trace_drbd_epoch(mdev, epoch, EV_TRACE_FREE);
kfree(epoch);
if (rv == FE_STILL_LIVE)
......@@ -1080,7 +1072,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
struct flush_work *fw;
fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
if (fw) {
trace_drbd_epoch(mdev, epoch, EV_TRACE_FLUSH);
fw->w.cb = w_flush;
fw->epoch = epoch;
drbd_queue_work(&mdev->data.work, &fw->w);
......@@ -1251,7 +1242,6 @@ static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
list_add(&epoch->list, &mdev->current_epoch->list);
mdev->current_epoch = epoch;
mdev->epochs++;
trace_drbd_epoch(mdev, epoch, EV_TRACE_ALLOC);
} else {
/* The current_epoch got recycled while we allocated this one... */
kfree(epoch);
......@@ -1458,8 +1448,6 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si
list_add(&e->w.list, &mdev->sync_ee);
spin_unlock_irq(&mdev->req_lock);
trace_drbd_ee(mdev, e, "submitting for (rs)write");
trace_drbd_bio(mdev, "Sec", e->private_bio, 0, NULL);
drbd_generic_make_request(mdev, DRBD_FAULT_RS_WR, e->private_bio);
/* accounting done in endio */
......@@ -1721,16 +1709,13 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
if (epoch == e->epoch) {
set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
trace_drbd_epoch(mdev, e->epoch, EV_TRACE_ADD_BARRIER);
rw |= (1<<BIO_RW_BARRIER);
e->flags |= EE_IS_BARRIER;
} else {
if (atomic_read(&epoch->epoch_size) > 1 ||
!test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
trace_drbd_epoch(mdev, epoch, EV_TRACE_SETTING_BI);
set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
trace_drbd_epoch(mdev, e->epoch, EV_TRACE_ADD_BARRIER);
rw |= (1<<BIO_RW_BARRIER);
e->flags |= EE_IS_BARRIER;
}
......@@ -1905,8 +1890,6 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
}
e->private_bio->bi_rw = rw;
trace_drbd_ee(mdev, e, "submitting for (data)write");
trace_drbd_bio(mdev, "Sec", e->private_bio, 0, NULL);
drbd_generic_make_request(mdev, DRBD_FAULT_DT_WR, e->private_bio);
/* accounting done in endio */
......@@ -2065,8 +2048,6 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
inc_unacked(mdev);
trace_drbd_ee(mdev, e, "submitting for read");
trace_drbd_bio(mdev, "Sec", e->private_bio, 0, NULL);
drbd_generic_make_request(mdev, fault_type, e->private_bio);
maybe_kick_lo(mdev);
......@@ -3543,9 +3524,6 @@ static void drbdd(struct drbd_conf *mdev)
drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
break;
}
trace_drbd_packet(mdev, mdev->data.socket, 2, &mdev->data.rbuf,
__FILE__, __LINE__);
}
}
......@@ -3825,9 +3803,6 @@ static int drbd_do_handshake(struct drbd_conf *mdev)
return 0;
}
trace_drbd_packet(mdev, mdev->data.socket, 2, &mdev->data.rbuf,
__FILE__, __LINE__);
p->protocol_min = be32_to_cpu(p->protocol_min);
p->protocol_max = be32_to_cpu(p->protocol_max);
if (p->protocol_max == 0)
......@@ -4420,14 +4395,11 @@ int drbd_asender(struct drbd_thread *thi)
goto disconnect;
}
expect = cmd->pkt_size;
ERR_IF(len != expect-sizeof(struct p_header)) {
trace_drbd_packet(mdev, mdev->meta.socket, 1, (void *)h, __FILE__, __LINE__);
ERR_IF(len != expect-sizeof(struct p_header))
goto reconnect;
}
}
if (received == expect) {
D_ASSERT(cmd != NULL);
trace_drbd_packet(mdev, mdev->meta.socket, 1, (void *)h, __FILE__, __LINE__);
if (!cmd->process(mdev, h))
goto reconnect;
......
......@@ -28,7 +28,6 @@
#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_tracing.h"
#include "drbd_req.h"
......@@ -218,7 +217,6 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev,
void complete_master_bio(struct drbd_conf *mdev,
struct bio_and_error *m)
{
trace_drbd_bio(mdev, "Rq", m->bio, 1, NULL);
bio_endio(m->bio, m->error);
dec_ap_bio(mdev);
}
......@@ -236,8 +234,6 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
/* only WRITES may end up here without a master bio (on barrier ack) */
int rw = req->master_bio ? bio_data_dir(req->master_bio) : WRITE;
trace_drbd_req(req, nothing, "_req_may_be_done");
/* we must not complete the master bio, while it is
* still being processed by _drbd_send_zc_bio (drbd_send_dblock)
* not yet acknowledged by the peer
......@@ -415,8 +411,6 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
struct drbd_conf *mdev = req->mdev;
m->bio = NULL;
trace_drbd_req(req, what, NULL);
switch (what) {
default:
dev_err(DEV, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
......@@ -666,7 +660,6 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
* this is bad, because if the connection is lost now,
* we won't be able to clean them up... */
dev_err(DEV, "FIXME (barrier_acked but pending)\n");
trace_drbd_req(req, nothing, "FIXME (barrier_acked but pending)");
list_move(&req->tl_requests, &mdev->out_of_sequence_requests);
}
D_ASSERT(req->rq_state & RQ_NET_SENT);
......@@ -736,8 +729,6 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
return 0;
}
trace_drbd_bio(mdev, "Rq", bio, 0, req);
local = get_ldev(mdev);
if (!local) {
bio_put(req->private_bio); /* or we get a bio leak */
......@@ -928,8 +919,6 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
if (local) {
req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
trace_drbd_bio(mdev, "Pri", req->private_bio, 0, NULL);
if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
: rw == READ ? DRBD_FAULT_DT_RD
: DRBD_FAULT_DT_RA))
......
(This diff has been collapsed.)
/*
drbd_tracing.h
This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
drbd is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
drbd is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with drbd; see the file COPYING. If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef DRBD_TRACING_H
#define DRBD_TRACING_H
#include <linux/tracepoint.h>
#include "drbd_int.h"
#include "drbd_req.h"
enum {
TRACE_LVL_ALWAYS = 0,
TRACE_LVL_SUMMARY,
TRACE_LVL_METRICS,
TRACE_LVL_ALL,
TRACE_LVL_MAX
};
DECLARE_TRACE(drbd_unplug,
TP_PROTO(struct drbd_conf *mdev, char* msg),
TP_ARGS(mdev, msg));
DECLARE_TRACE(drbd_uuid,
TP_PROTO(struct drbd_conf *mdev, enum drbd_uuid_index index),
TP_ARGS(mdev, index));
DECLARE_TRACE(drbd_ee,
TP_PROTO(struct drbd_conf *mdev, struct drbd_epoch_entry *e, char* msg),
TP_ARGS(mdev, e, msg));
DECLARE_TRACE(drbd_md_io,
TP_PROTO(struct drbd_conf *mdev, int rw, struct drbd_backing_dev *bdev),
TP_ARGS(mdev, rw, bdev));
DECLARE_TRACE(drbd_epoch,
TP_PROTO(struct drbd_conf *mdev, struct drbd_epoch *epoch, enum epoch_event ev),
TP_ARGS(mdev, epoch, ev));
DECLARE_TRACE(drbd_netlink,
TP_PROTO(void *data, int is_req),
TP_ARGS(data, is_req));
DECLARE_TRACE(drbd_actlog,
TP_PROTO(struct drbd_conf *mdev, sector_t sector, char* msg),
TP_ARGS(mdev, sector, msg));
DECLARE_TRACE(drbd_bio,
TP_PROTO(struct drbd_conf *mdev, const char *pfx, struct bio *bio, int complete,
struct drbd_request *r),
TP_ARGS(mdev, pfx, bio, complete, r));
DECLARE_TRACE(drbd_req,
TP_PROTO(struct drbd_request *req, enum drbd_req_event what, char *msg),
TP_ARGS(req, what, msg));
DECLARE_TRACE(drbd_packet,
TP_PROTO(struct drbd_conf *mdev, struct socket *sock,
int recv, union p_polymorph *p, char *file, int line),
TP_ARGS(mdev, sock, recv, p, file, line));
DECLARE_TRACE(_drbd_resync,
TP_PROTO(struct drbd_conf *mdev, int level, const char *fmt, va_list args),
TP_ARGS(mdev, level, fmt, args));
#endif
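Before that switch, DECLARE_TRACE() in this header only declared the hooks; drbd_main.c carried the matching DEFINE_TRACE() stanzas (removed above), and the optional drbd_trace module built from drbd_tracing.c (its deleted body is collapsed above) registered probes against them. The following is a hedged sketch of how such a probe module attaches to one of these tracepoints on kernels of this vintage; the probe body and names are illustrative assumptions, not the removed file's actual contents:

```c
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/tracepoint.h>
#include "drbd_int.h"
#include "drbd_tracing.h"

/* The probe signature must match the tracepoint's TP_PROTO(). */
static void probe_drbd_unplug(struct drbd_conf *mdev, char *msg)
{
	printk(KERN_INFO "drbd%u: unplug: %s\n", mdev_to_minor(mdev), msg);
}

static int __init drbd_trace_sketch_init(void)
{
	/* DECLARE_TRACE(drbd_unplug, ...) generates this registration helper;
	 * once attached, every trace_drbd_unplug() call in drbd.ko fires it. */
	return register_trace_drbd_unplug(probe_drbd_unplug);
}

static void __exit drbd_trace_sketch_exit(void)
{
	unregister_trace_drbd_unplug(probe_drbd_unplug);
	/* Wait for in-flight probe calls before the module text goes away. */
	tracepoint_synchronize_unregister();
}

module_init(drbd_trace_sketch_init);
module_exit(drbd_trace_sketch_exit);
MODULE_LICENSE("GPL");
```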
......@@ -40,7 +40,6 @@
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_tracing.h"
#define SLEEP_TIME (HZ/10)
......@@ -82,8 +81,6 @@ void drbd_md_io_complete(struct bio *bio, int error)
md_io = (struct drbd_md_io *)bio->bi_private;
md_io->error = error;
trace_drbd_bio(md_io->mdev, "Md", bio, 1, NULL);
complete(&md_io->event);
}
......@@ -114,8 +111,6 @@ void drbd_endio_read_sec(struct bio *bio, int error) __releases(local)
D_ASSERT(e->block_id != ID_VACANT);
trace_drbd_bio(mdev, "Sec", bio, 1, NULL);
spin_lock_irqsave(&mdev->req_lock, flags);
mdev->read_cnt += e->size >> 9;
list_del(&e->w.list);
......@@ -126,8 +121,6 @@ void drbd_endio_read_sec(struct bio *bio, int error) __releases(local)
drbd_chk_io_error(mdev, error, FALSE);
drbd_queue_work(&mdev->data.work, &e->w);
put_ldev(mdev);
trace_drbd_ee(mdev, e, "read completed");
}
/* writes on behalf of the partner, or resync writes,
......@@ -176,8 +169,6 @@ void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
D_ASSERT(e->block_id != ID_VACANT);
trace_drbd_bio(mdev, "Sec", bio, 1, NULL);
spin_lock_irqsave(&mdev->req_lock, flags);
mdev->writ_cnt += e->size >> 9;
is_syncer_req = is_syncer_block_id(e->block_id);
......@@ -192,8 +183,6 @@ void drbd_endio_write_sec(struct bio *bio, int error) __releases(local)
list_del(&e->w.list); /* has been on active_ee or sync_ee */
list_add_tail(&e->w.list, &mdev->done_ee);
trace_drbd_ee(mdev, e, "write completed");
/* No hlist_del_init(&e->colision) here, we did not send the Ack yet,
* neither did we wake possibly waiting conflicting requests.
* done from "drbd_process_done_ee" within the appropriate w.cb
......@@ -244,8 +233,6 @@ void drbd_endio_pri(struct bio *bio, int error)
error = -EIO;
}
trace_drbd_bio(mdev, "Pri", bio, 1, NULL);
/* to avoid recursion in __req_mod */
if (unlikely(error)) {
what = (bio_data_dir(bio) == WRITE)
......@@ -1321,9 +1308,6 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
return;
}
trace_drbd_resync(mdev, TRACE_LVL_SUMMARY, "Resync starting: side=%s\n",
side == C_SYNC_TARGET ? "SyncTarget" : "SyncSource");
/* In case a previous resync run was aborted by an IO error/detach on the peer. */
drbd_rs_cancel_all(mdev);
......