Commit 994a9303 authored by Dan Williams

isci: cleanup/optimize queue increment macros

Every single i/o or event completion incurs a test and branch to see if
the cycle bit changed.  For power-of-2 queue sizes the cycle bit can be
read directly from the rollover of the queue pointer.

Likely premature optimization, but the hidden if() and hidden
assignments / side-effects in the macros were already asking to be
cleaned up.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Parent dd047c8e
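For reference, a minimal sketch of the branch-free cycle-bit update this patch open-codes (the constant and function names below are illustrative stand-ins for the driver's SCU_*_SHIFT / SCU_MAX_*_ENTRIES / SMU_*_CYCLE_BIT_SHIFT values; only the power-of-2 queue size is assumed):

#include <linux/types.h>

/* Illustrative constants, not the driver's real values. */
#define QUEUE_SHIFT      7
#define QUEUE_ENTRIES    (1 << QUEUE_SHIFT)   /* queue size must be a power of 2 */
#define CYCLE_BIT_SHIFT  31                   /* bit position of the cycle bit */

static void queue_get_advance(u32 *index, u32 *cycle)
{
	/*
	 * ((*index + 1) & QUEUE_ENTRIES) is non-zero only on the increment
	 * that wraps the index back to 0, so the shifted XOR toggles the
	 * cycle bit exactly once per rollover, with no test and branch and
	 * no hidden assignments as in the removed INCREMENT_QUEUE_GET().
	 */
	*cycle ^= ((*index + 1) & QUEUE_ENTRIES) <<
		  (CYCLE_BIT_SHIFT - QUEUE_SHIFT);
	*index = (*index + 1) & (QUEUE_ENTRIES - 1);
}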
@@ -122,34 +122,6 @@
>> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
)
/**
* INCREMENT_COMPLETION_QUEUE_GET() -
*
* This macro will increment the controllers completion queue index value and
* possibly toggle the cycle bit if the completion queue index wraps back to 0.
*/
#define INCREMENT_COMPLETION_QUEUE_GET(controller, index, cycle) \
INCREMENT_QUEUE_GET(\
(index), \
(cycle), \
SCU_MAX_COMPLETION_QUEUE_ENTRIES, \
SMU_CQGR_CYCLE_BIT)
/**
* INCREMENT_EVENT_QUEUE_GET() -
*
* This macro will increment the controllers event queue index value and
* possibly toggle the event cycle bit if the event queue index wraps back to 0.
*/
#define INCREMENT_EVENT_QUEUE_GET(controller, index, cycle) \
INCREMENT_QUEUE_GET(\
(index), \
(cycle), \
SCU_MAX_EVENTS, \
SMU_CQGR_EVENT_CYCLE_BIT \
)
/**
* NORMALIZE_GET_POINTER() -
*
@@ -528,15 +500,13 @@ static void scic_sds_controller_event_completion(struct scic_sds_controller *sci
}
}
static void scic_sds_controller_process_completions(struct scic_sds_controller *scic)
{
u32 completion_count = 0;
u32 completion_entry;
u32 get_index;
u32 get_cycle;
u32 event_index;
u32 event_get;
u32 event_cycle;
dev_dbg(scic_to_dev(scic),
@@ -548,7 +518,7 @@ static void scic_sds_controller_process_completions(struct scic_sds_controller *
get_index = NORMALIZE_GET_POINTER(scic->completion_queue_get);
get_cycle = SMU_CQGR_CYCLE_BIT & scic->completion_queue_get;
event_index = NORMALIZE_EVENT_POINTER(scic->completion_queue_get);
event_get = NORMALIZE_EVENT_POINTER(scic->completion_queue_get);
event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & scic->completion_queue_get;
while (
@@ -558,7 +528,11 @@ static void scic_sds_controller_process_completions(struct scic_sds_controller *
completion_count++;
completion_entry = scic->completion_queue[get_index];
INCREMENT_COMPLETION_QUEUE_GET(scic, get_index, get_cycle);
/* increment the get pointer and check for rollover to toggle the cycle bit */
get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
(SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);
dev_dbg(scic_to_dev(scic),
"%s: completion queue entry:0x%08x\n",
@@ -579,18 +553,14 @@ static void scic_sds_controller_process_completions(struct scic_sds_controller *
break;
case SCU_COMPLETION_TYPE_EVENT:
INCREMENT_EVENT_QUEUE_GET(scic, event_index, event_cycle);
scic_sds_controller_event_completion(scic, completion_entry);
break;
case SCU_COMPLETION_TYPE_NOTIFY: {
event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
(SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
event_get = (event_get+1) & (SCU_MAX_EVENTS-1);
case SCU_COMPLETION_TYPE_NOTIFY:
/*
* Presently we do the same thing with a notify event that we do with the
* other event codes. */
INCREMENT_EVENT_QUEUE_GET(scic, event_index, event_cycle);
scic_sds_controller_event_completion(scic, completion_entry);
break;
}
default:
dev_warn(scic_to_dev(scic),
"%s: SCIC Controller received unknown "
@@ -607,7 +577,7 @@ static void scic_sds_controller_process_completions(struct scic_sds_controller *
SMU_CQGR_GEN_BIT(ENABLE) |
SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
event_cycle |
SMU_CQGR_GEN_VAL(EVENT_POINTER, event_index) |
SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
get_cycle |
SMU_CQGR_GEN_VAL(POINTER, get_index);
@@ -523,22 +523,6 @@ static inline struct isci_host *scic_to_ihost(struct scic_sds_controller *scic)
return ihost;
}
/**
* INCREMENT_QUEUE_GET() -
*
* This macro will increment the specified index to and if the index wraps to 0
* it will toggel the cycle bit.
*/
#define INCREMENT_QUEUE_GET(index, cycle, entry_count, bit_toggle) \
{ \
if ((index) + 1 == entry_count) { \
(index) = 0; \
(cycle) = (cycle) ^ (bit_toggle); \
} else { \
index = index + 1; \
} \
}
/**
* scic_sds_controller_get_protocol_engine_group() -
*
@@ -90,7 +90,8 @@ enum sci_controller_mode {
#define SCI_MAX_DOMAINS SCI_MAX_PORTS
#define SCU_MAX_CRITICAL_NOTIFICATIONS (384)
#define SCU_MAX_EVENTS (128)
#define SCU_MAX_EVENTS_SHIFT (7)
#define SCU_MAX_EVENTS (1 << SCU_MAX_EVENTS_SHIFT)
#define SCU_MAX_UNSOLICITED_FRAMES (128)
#define SCU_MAX_COMPLETION_QUEUE_SCRATCH (128)
#define SCU_MAX_COMPLETION_QUEUE_ENTRIES (SCU_MAX_CRITICAL_NOTIFICATIONS \
@@ -98,6 +99,7 @@ enum sci_controller_mode {
+ SCU_MAX_UNSOLICITED_FRAMES \
+ SCI_MAX_IO_REQUESTS \
+ SCU_MAX_COMPLETION_QUEUE_SCRATCH)
#define SCU_MAX_COMPLETION_QUEUE_SHIFT (ilog2(SCU_MAX_COMPLETION_QUEUE_ENTRIES))
#define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES (4096)
#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE (1024)
@@ -209,7 +209,8 @@ bool scic_sds_unsolicited_frame_control_release_frame(
/*
* In the event there are NULL entries in the UF table, we need to
* advance the get pointer in order to find out if this frame should
* be released (i.e. update the get pointer). */
* be released (i.e. update the get pointer)
*/
while (lower_32_bits(uf_control->address_table.array[frame_get]) == 0 &&
upper_32_bits(uf_control->address_table.array[frame_get]) == 0 &&
frame_get < SCU_MAX_UNSOLICITED_FRAMES)
@@ -217,40 +218,37 @@ bool scic_sds_unsolicited_frame_control_release_frame(
/*
* The table has a NULL entry as it's last element. This is
* illegal. */
* illegal.
*/
BUG_ON(frame_get >= SCU_MAX_UNSOLICITED_FRAMES);
if (frame_index >= SCU_MAX_UNSOLICITED_FRAMES)
return false;
if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
uf_control->buffers.array[frame_index].state = UNSOLICITED_FRAME_RELEASED;
if (frame_get != frame_index) {
/*
* Frames remain in use until we advance the get pointer
* so there is nothing we can do here
*/
return false;
}
/*
* The frame index is equal to the current get pointer so we
* can now free up all of the frame entries that */
if (frame_get == frame_index) {
while (
uf_control->buffers.array[frame_get].state
== UNSOLICITED_FRAME_RELEASED
) {
* can now free up all of the frame entries that
*/
while (uf_control->buffers.array[frame_get].state == UNSOLICITED_FRAME_RELEASED) {
uf_control->buffers.array[frame_get].state = UNSOLICITED_FRAME_EMPTY;
INCREMENT_QUEUE_GET(
frame_get,
frame_cycle,
SCU_MAX_UNSOLICITED_FRAMES - 1,
SCU_MAX_UNSOLICITED_FRAMES);
if (frame_get+1 == SCU_MAX_UNSOLICITED_FRAMES-1) {
frame_cycle ^= SCU_MAX_UNSOLICITED_FRAMES;
frame_get = 0;
} else
frame_get++;
}
uf_control->get =
(SCU_UFQGP_GEN_BIT(ENABLE_BIT) | frame_cycle | frame_get);
uf_control->get = SCU_UFQGP_GEN_BIT(ENABLE_BIT) | frame_cycle | frame_get;
return true;
} else {
/*
* Frames remain in use until we advance the get pointer
* so there is nothing we can do here */
}
}
return false;
}