Commit 0997f1c5 authored by Stefan Raspl, committed by Jens Axboe

blktrace: pass zfcp driver data

This patch writes the channel and fabric latency, in nanoseconds per
request, via blktrace for later analysis. The utilization of the inbound
and outbound adapter queues is also reported.
Signed-off-by: Stefan Raspl <raspl@linux.vnet.ibm.com>
Signed-off-by: Martin Peschke <mp3@de.ibm.com>
Signed-off-by: Christof Schmitt <christof.schmitt@de.ibm.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Parent 756f8243
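
The payload handed to blk_add_driver_data() in this patch ends up verbatim in the blktrace stream, so a userspace tool can recover the queue utilization and latency values per request. Below is a minimal, hypothetical C sketch of such a consumer-side decoder: the struct layout and the magic/flag values mirror the zfcp_blk_drv_data definition added to zfcp_fsf.h in this commit, while decode_zfcp_payload(), the sample in main(), and the assumption that no byte swapping is needed (decoding on the same s390 host) are illustrative only and not part of the patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ZFCP_BLK_DRV_DATA_MAGIC 0x1
#define ZFCP_BLK_LAT_VALID      0x1
#define ZFCP_BLK_REQ_ERROR      0x2

/* Mirror of the packed trace payload added in zfcp_fsf.h by this commit. */
struct zfcp_blk_drv_data {
        uint32_t magic;
        uint16_t flags;
        uint8_t  inb_usage;     /* inbound adapter queue utilization  */
        uint8_t  outb_usage;    /* outbound adapter queue utilization */
        uint64_t channel_lat;   /* channel latency in ns */
        uint64_t fabric_lat;    /* fabric latency in ns  */
} __attribute__((packed));

/*
 * Decode one driver-data payload (illustrative helper, not part of the
 * kernel patch). Assumes the bytes were captured and decoded on the same
 * host, so no endianness conversion is performed.
 */
static int decode_zfcp_payload(const void *buf, size_t len)
{
        struct zfcp_blk_drv_data d;

        if (len < sizeof(d))
                return -1;                      /* truncated payload  */
        memcpy(&d, buf, sizeof(d));
        if (d.magic != ZFCP_BLK_DRV_DATA_MAGIC)
                return -1;                      /* not a zfcp payload */

        printf("inb=%u outb=%u", d.inb_usage, d.outb_usage);
        if (d.flags & ZFCP_BLK_LAT_VALID)       /* latencies are optional */
                printf(" channel=%lluns fabric=%lluns",
                       (unsigned long long)d.channel_lat,
                       (unsigned long long)d.fabric_lat);
        if (d.flags & ZFCP_BLK_REQ_ERROR)
                printf(" [request failed]");
        printf("\n");
        return 0;
}

int main(void)
{
        /* Hypothetical sample payload for demonstration purposes only. */
        struct zfcp_blk_drv_data sample = {
                .magic = ZFCP_BLK_DRV_DATA_MAGIC,
                .flags = ZFCP_BLK_LAT_VALID,
                .inb_usage = 3,
                .outb_usage = 7,
                .channel_lat = 12000,
                .fabric_lat = 150000,
        };

        return decode_zfcp_payload(&sample, sizeof(sample));
}

Note that, as the patch itself does, ZFCP_BLK_LAT_VALID is only set when the adapter reports FSF_FEATURE_MEASUREMENT_DATA, so a consumer must check that flag before trusting channel_lat and fabric_lat.
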
@@ -583,6 +583,8 @@ struct zfcp_fsf_req {
 	unsigned long long issued;	/* request sent time (STCK) */
 	struct zfcp_unit *unit;
 	void (*handler)(struct zfcp_fsf_req *);
+	u16 qdio_outb_usage;	/* usage of outbound queue */
+	u16 qdio_inb_usage;	/* usage of inbound queue */
 };

 /* driver data */
@@ -6,6 +6,7 @@
  * Copyright IBM Corporation 2002, 2008
  */

+#include <linux/blktrace_api.h>
 #include "zfcp_ext.h"

 static void zfcp_fsf_request_timeout_handler(unsigned long data)
@@ -777,6 +778,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
 	list_add_tail(&req->list, &adapter->req_list[idx]);
 	spin_unlock(&adapter->req_list_lock);

+	req->qdio_outb_usage = atomic_read(&req_q->count);
 	req->issued = get_clock();
 	if (zfcp_qdio_send(req)) {
 		/* Queues are down..... */
@@ -2082,6 +2084,36 @@ static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req)
 	spin_unlock_irqrestore(&unit->latencies.lock, flags);
 }

+#ifdef CONFIG_BLK_DEV_IO_TRACE
+static void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
+{
+	struct fsf_qual_latency_info *lat_inf;
+	struct scsi_cmnd *scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
+	struct request *req = scsi_cmnd->request;
+	struct zfcp_blk_drv_data trace;
+	int ticks = fsf_req->adapter->timer_ticks;
+
+	trace.flags = 0;
+	trace.magic = ZFCP_BLK_DRV_DATA_MAGIC;
+	if (fsf_req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
+		trace.flags |= ZFCP_BLK_LAT_VALID;
+		lat_inf = &fsf_req->qtcb->prefix.prot_status_qual.latency_info;
+		trace.channel_lat = lat_inf->channel_lat * ticks;
+		trace.fabric_lat = lat_inf->fabric_lat * ticks;
+	}
+	if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		trace.flags |= ZFCP_BLK_REQ_ERROR;
+	trace.inb_usage = fsf_req->qdio_inb_usage;
+	trace.outb_usage = fsf_req->qdio_outb_usage;
+	blk_add_driver_data(req->q, req, &trace, sizeof(trace));
+}
+#else
+static inline void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
+{
+}
+#endif
+
 static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
 {
 	struct scsi_cmnd *scpnt = req->data;
@@ -2114,6 +2146,8 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
 	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA)
 		zfcp_fsf_req_latency(req);

+	zfcp_fsf_trace_latency(req);
+
 	if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) {
 		if (fcp_rsp_info[3] == RSP_CODE_GOOD)
 			set_host_byte(scpnt, DID_OK);
@@ -439,4 +439,16 @@ struct fsf_qtcb {
 	u8 log[FSF_QTCB_LOG_SIZE];
 } __attribute__ ((packed));

+struct zfcp_blk_drv_data {
+#define ZFCP_BLK_DRV_DATA_MAGIC	0x1
+	u32 magic;
+#define ZFCP_BLK_LAT_VALID	0x1
+#define ZFCP_BLK_REQ_ERROR	0x2
+	u16 flags;
+	u8 inb_usage;
+	u8 outb_usage;
+	u64 channel_lat;
+	u64 fabric_lat;
+} __attribute__ ((packed));
+
 #endif				/* FSF_H */
@@ -115,6 +115,7 @@ static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
 	spin_unlock_irqrestore(&adapter->req_list_lock, flags);

 	fsf_req->sbal_response = sbal_idx;
+	fsf_req->qdio_inb_usage = atomic_read(&adapter->resp_q.count);
 	zfcp_fsf_req_complete(fsf_req);
 }