Commit f32c9e03 authored by Martin Peschke, committed by Martin K. Petersen

scsi: zfcp: early returns for traces disabled via level

This patch adds early checks to avoid burning CPU cycles on
the assembly of trace entries which would be skipped anyway.

Introduce a static const variable to keep the trace level to check with
debug_level_enabled() in sync with the actual trace emit with
debug_event(). In order not to refactor the SAN tracing too much,
simply use a define instead.
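As a rough, hypothetical sketch of the pattern outside of zfcp (struct my_dbf, struct my_rec and my_dbf_trace_foo() are invented for illustration; only debug_level_enabled() and debug_event() are the real s390 debug feature calls, declared in asm/debug.h):

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <asm/debug.h>

struct my_rec {
        char data[64];
};

struct my_dbf {
        debug_info_t *area;     /* registered earlier via debug_register() */
        spinlock_t lock;        /* protects the shared record buffer */
        struct my_rec rec;
};

static void my_dbf_trace_foo(struct my_dbf *dbf, const void *data, size_t len)
{
        static int const level = 1;     /* one constant for check and emit */
        unsigned long flags;

        /* early return: skip assembling a record the level would drop anyway */
        if (unlikely(!debug_level_enabled(dbf->area, level)))
                return;

        spin_lock_irqsave(&dbf->lock, flags);
        memset(&dbf->rec, 0, sizeof(dbf->rec));
        memcpy(dbf->rec.data, data, min_t(size_t, len, sizeof(dbf->rec.data)));
        debug_event(dbf->area, level, &dbf->rec, sizeof(dbf->rec));
        spin_unlock_irqrestore(&dbf->lock, flags);
}

With this shape, a trace area whose level is configured below 1 only pays for the debug_level_enabled() test instead of the lock round trip plus memset() and memcpy().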

This change is only for the non / semi hot paths,
while the actual (I/O) hot path was already improved earlier:
zfcp_dbf_scsi() is already guarded by its only caller _zfcp_dbf_scsi()
since commit dcd20e23 ("[SCSI] zfcp: Only collect SCSI debug data for
matching trace levels").
zfcp_dbf_hba_fsf_res() is already guarded by its only caller
zfcp_dbf_hba_fsf_response() since commit 2e261af8 ("[SCSI] zfcp: Only
collect FSF/HBA debug data for matching trace levels").
Signed-off-by: Martin Peschke <mpeschke@linux.vnet.ibm.com>
[maier@linux.vnet.ibm.com: rebase, reword, default level 3 branch prediction]
Signed-off-by: Steffen Maier <maier@linux.vnet.ibm.com>
Reviewed-by: Benjamin Block <bblock@linux.vnet.ibm.com>
Signed-off-by: Benjamin Block <bblock@linux.vnet.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Parent b096ef86
@@ -113,8 +113,12 @@ void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
 	struct zfcp_dbf *dbf = req->adapter->dbf;
 	struct fsf_status_read_buffer *srb = req->data;
 	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+	static int const level = 2;
 	unsigned long flags;
 
+	if (unlikely(!debug_level_enabled(dbf->hba, level)))
+		return;
+
 	spin_lock_irqsave(&dbf->hba_lock, flags);
 	memset(rec, 0, sizeof(*rec));
@@ -142,7 +146,7 @@ void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
 		zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
 				  "fsf_uss", req->req_id);
 log:
-	debug_event(dbf->hba, 2, rec, sizeof(*rec));
+	debug_event(dbf->hba, level, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&dbf->hba_lock, flags);
 }
@@ -156,8 +160,12 @@ void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
 	struct zfcp_dbf *dbf = req->adapter->dbf;
 	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
 	struct fsf_status_read_buffer *sr_buf = req->data;
+	static int const level = 1;
 	unsigned long flags;
 
+	if (unlikely(!debug_level_enabled(dbf->hba, level)))
+		return;
+
 	spin_lock_irqsave(&dbf->hba_lock, flags);
 	memset(rec, 0, sizeof(*rec));
@@ -169,7 +177,7 @@ void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
 	memcpy(&rec->u.be, &sr_buf->payload.bit_error,
 	       sizeof(struct fsf_bit_error_payload));
 
-	debug_event(dbf->hba, 1, rec, sizeof(*rec));
+	debug_event(dbf->hba, level, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&dbf->hba_lock, flags);
 }
@@ -186,8 +194,12 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
 	struct zfcp_dbf *dbf = adapter->dbf;
 	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
 	unsigned long flags;
+	static int const level = 1;
 	u16 length;
 
+	if (unlikely(!debug_level_enabled(dbf->pay, level)))
+		return;
+
 	if (!pl)
 		return;
@@ -202,7 +214,7 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
 	while (payload->counter < scount && (char *)pl[payload->counter]) {
 		memcpy(payload->data, (char *)pl[payload->counter], length);
-		debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
+		debug_event(dbf->pay, level, payload, zfcp_dbf_plen(length));
 		payload->counter++;
 	}
@@ -217,15 +229,19 @@ void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
 {
 	struct zfcp_dbf *dbf = adapter->dbf;
 	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+	static int const level = 1;
 	unsigned long flags;
 
+	if (unlikely(!debug_level_enabled(dbf->hba, level)))
+		return;
+
 	spin_lock_irqsave(&dbf->hba_lock, flags);
 	memset(rec, 0, sizeof(*rec));
 
 	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
 	rec->id = ZFCP_DBF_HBA_BASIC;
 
-	debug_event(dbf->hba, 1, rec, sizeof(*rec));
+	debug_event(dbf->hba, level, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&dbf->hba_lock, flags);
 }
@@ -264,9 +280,13 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
 {
 	struct zfcp_dbf *dbf = adapter->dbf;
 	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
+	static int const level = 1;
 	struct list_head *entry;
 	unsigned long flags;
 
+	if (unlikely(!debug_level_enabled(dbf->rec, level)))
+		return;
+
 	spin_lock_irqsave(&dbf->rec_lock, flags);
 	memset(rec, 0, sizeof(*rec));
@@ -283,7 +303,7 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
 	rec->u.trig.want = want;
 	rec->u.trig.need = need;
 
-	debug_event(dbf->rec, 1, rec, sizeof(*rec));
+	debug_event(dbf->rec, level, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
@@ -300,6 +320,9 @@ void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
 	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
 	unsigned long flags;
 
+	if (!debug_level_enabled(dbf->rec, level))
+		return;
+
 	spin_lock_irqsave(&dbf->rec_lock, flags);
 	memset(rec, 0, sizeof(*rec));
@@ -345,8 +368,12 @@ void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
 {
 	struct zfcp_dbf *dbf = wka_port->adapter->dbf;
 	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
+	static int const level = 1;
 	unsigned long flags;
 
+	if (unlikely(!debug_level_enabled(dbf->rec, level)))
+		return;
+
 	spin_lock_irqsave(&dbf->rec_lock, flags);
 	memset(rec, 0, sizeof(*rec));
@@ -362,10 +389,12 @@ void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
 	rec->u.run.rec_action = ~0;
 	rec->u.run.rec_count = ~0;
 
-	debug_event(dbf->rec, 1, rec, sizeof(*rec));
+	debug_event(dbf->rec, level, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
 
+#define ZFCP_DBF_SAN_LEVEL 1
+
 static inline
 void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
 		  char *paytag, struct scatterlist *sg, u8 id, u16 len,
@@ -408,7 +437,7 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
 					    (u16)(sg->length - offset));
 			/* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
 			memcpy(payload->data, sg_virt(sg) + offset, pay_len);
-			debug_event(dbf->pay, 1, payload,
+			debug_event(dbf->pay, ZFCP_DBF_SAN_LEVEL, payload,
 				    zfcp_dbf_plen(pay_len));
 			payload->counter++;
 			offset += pay_len;
@@ -418,7 +447,7 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
 		spin_unlock(&dbf->pay_lock);
 	}
 out:
-	debug_event(dbf->san, 1, rec, sizeof(*rec));
+	debug_event(dbf->san, ZFCP_DBF_SAN_LEVEL, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&dbf->san_lock, flags);
 }
@@ -434,6 +463,9 @@ void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
 	struct zfcp_fsf_ct_els *ct_els = fsf->data;
 	u16 length;
 
+	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
+		return;
+
 	length = (u16)zfcp_qdio_real_bytes(ct_els->req);
 	zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
 		     length, fsf->req_id, d_id, length);
@@ -512,6 +544,9 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
 	struct zfcp_fsf_ct_els *ct_els = fsf->data;
 	u16 length;
 
+	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
+		return;
+
 	length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
 	zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
 		     length, fsf->req_id, ct_els->d_id,
@@ -531,6 +566,9 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
 	u16 length;
 	struct scatterlist sg;
 
+	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
+		return;
+
 	length = (u16)(srb->length -
 		       offsetof(struct fsf_status_read_buffer, payload));
 	sg_init_one(&sg, srb->payload.data, length);