提交 5b37717a 编写于 作者: S Stefano Panella 提交者: David Vrabel

uwb: improved MAS allocator and reservation conflict handling

Greatly enhance the MAS allocator:
  - Handle row and column reservations.
  - Permit all the available MAS to be allocated.
  - Follows the WiMedia rules on MAS selection.

Take appropriate action when reservation conflicts are detected.
  - Correctly identify which reservation wins the conflict.
  - Protect alien BP reservations.
  - If an owned reservation loses, resize/move it.
  - Follow the backoff procedure before requesting additional MAS.

When reservations are terminated, move the remaining reservations (if
necessary) so they keep following the MAS allocation rules.
Signed-off-by: Stefano Panella <stefano.panella@csr.com>
Signed-off-by: David Vrabel <david.vrabel@csr.com>
上级 c35fa3ea
......@@ -48,13 +48,15 @@ static void wusbhc_rsv_complete_cb(struct uwb_rsv *rsv)
{
struct wusbhc *wusbhc = rsv->pal_priv;
struct device *dev = wusbhc->dev;
struct uwb_mas_bm mas;
char buf[72];
switch (rsv->state) {
case UWB_RSV_STATE_O_ESTABLISHED:
bitmap_scnprintf(buf, sizeof(buf), rsv->mas.bm, UWB_NUM_MAS);
uwb_rsv_get_usable_mas(rsv, &mas);
bitmap_scnprintf(buf, sizeof(buf), mas.bm, UWB_NUM_MAS);
dev_dbg(dev, "established reservation: %s\n", buf);
wusbhc_bwa_set(wusbhc, rsv->stream, &rsv->mas);
wusbhc_bwa_set(wusbhc, rsv->stream, &mas);
break;
case UWB_RSV_STATE_NONE:
dev_dbg(dev, "removed reservation\n");
......@@ -85,13 +87,12 @@ int wusbhc_rsv_establish(struct wusbhc *wusbhc)
bcid.data[0] = wusbhc->cluster_id;
bcid.data[1] = 0;
rsv->owner = &rc->uwb_dev;
rsv->target.type = UWB_RSV_TARGET_DEVADDR;
rsv->target.devaddr = bcid;
rsv->type = UWB_DRP_TYPE_PRIVATE;
rsv->max_mas = 256;
rsv->min_mas = 16; /* one MAS per zone? */
rsv->sparsity = 16; /* at least one MAS in each zone? */
rsv->max_mas = 256; /* try to get as much as possible */
rsv->min_mas = 15; /* one MAS per zone */
rsv->max_interval = 1; /* max latency is one zone */
rsv->is_multicast = true;
ret = uwb_rsv_establish(rsv);
......
......@@ -6,6 +6,7 @@ obj-$(CONFIG_UWB_I1480U) += i1480/
uwb-objs := \
address.o \
allocator.o \
beacon.o \
driver.o \
drp.o \
......
/*
* UWB reservation management.
*
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/uwb.h>
#include "uwb-internal.h"
/*
 * Mark the free MAS in every column belonging to the chosen column
 * set as allocated: up to csi.safe_mas_per_col of them as safe,
 * then up to csi.unsafe_mas_per_col as unsafe.
 */
static void uwb_rsv_fill_column_alloc(struct uwb_rsv_alloc_info *ai)
{
	struct uwb_rsv_col_info *ci = ai->ci;
	unsigned char *bm = ai->bm;
	int zone, slot;

	for (zone = ci->csi.start_col; zone < UWB_NUM_ZONES;
	     zone += ci->csi.interval) {
		int safe_left = ci->csi.safe_mas_per_col;
		int unsafe_left = ci->csi.unsafe_mas_per_col;

		for (slot = 0; slot < UWB_MAS_PER_ZONE; slot++) {
			int idx = zone * UWB_MAS_PER_ZONE + slot;

			if (bm[idx] != 0)
				continue; /* not free, leave untouched */
			if (safe_left > 0) {
				safe_left--;
				bm[idx] = UWB_RSV_MAS_SAFE;
			} else if (unsafe_left > 0) {
				unsafe_left--;
				bm[idx] = UWB_RSV_MAS_UNSAFE;
			} else {
				break; /* this column's quota is met */
			}
		}
	}
}
/*
 * Allocate whole rows (one MAS index across every zone) for a row
 * reservation.
 *
 * Rows are taken from the highest MAS index downwards until
 * ri->used_rows rows have been allocated.  The first 7 rows taken are
 * marked safe, further ones unsafe.  Updates the safe/unsafe/total
 * allocation counters in @ai.
 *
 * Fix: index the bitmap with UWB_MAS_PER_ZONE, not UWB_NUM_ZONES --
 * bm is laid out as UWB_NUM_ZONES rows of UWB_MAS_PER_ZONE entries
 * (as uwb_rsv_fill_column_alloc() does).  The two constants are both
 * 16 today, so the old code only worked by coincidence.
 */
static void uwb_rsv_fill_row_alloc(struct uwb_rsv_alloc_info *ai)
{
	int mas, col, rows;
	unsigned char *bm = ai->bm;
	struct uwb_rsv_row_info *ri = &ai->ri;
	unsigned char c;

	rows = 1;
	c = UWB_RSV_MAS_SAFE;
	for (mas = UWB_MAS_PER_ZONE - 1; mas >= 0; mas--) {
		if (ri->avail[mas] != 1)
			continue;
		if (rows > ri->used_rows)
			break;
		if (rows > 7)
			c = UWB_RSV_MAS_UNSAFE;
		for (col = 0; col < UWB_NUM_ZONES; col++) {
			/* zone 0 cells may be unavailable; skip those */
			if (bm[col * UWB_MAS_PER_ZONE + mas] != UWB_RSV_MAS_NOT_AVAIL) {
				bm[col * UWB_MAS_PER_ZONE + mas] = c;
				if (c == UWB_RSV_MAS_SAFE)
					ai->safe_allocated_mases++;
				else
					ai->unsafe_allocated_mases++;
			}
		}
		rows++;
	}
	ai->total_allocated_mases = ai->safe_allocated_mases
		+ ai->unsafe_allocated_mases;
}
/*
 * Find the best column set for a given availability, interval, num safe mas and
 * num unsafe mas.
 *
 * The different sets are tried in order as shown below, depending on the interval.
 *
 * interval = 16
 *	deep = 0
 *		set 1 ->  { 8 }
 *	deep = 1
 *		set 1 ->  { 4 }
 *		set 2 ->  { 12 }
 *	deep = 2
 *		set 1 ->  { 2 }
 *		set 2 ->  { 6 }
 *		set 3 ->  { 10 }
 *		set 4 ->  { 14 }
 *	deep = 3
 *		set 1 ->  { 1 }
 *		set 2 ->  { 3 }
 *		set 3 ->  { 5 }
 *		set 4 ->  { 7 }
 *		set 5 ->  { 9 }
 *		set 6 ->  { 11 }
 *		set 7 ->  { 13 }
 *		set 8 ->  { 15 }
 *
 * interval = 8
 *	deep = 0
 *		set 1 ->  { 4 12 }
 *	deep = 1
 *		set 1 ->  { 2 10 }
 *		set 2 ->  { 6 14 }
 *	deep = 2
 *		set 1 ->  { 1 9 }
 *		set 2 ->  { 3 11 }
 *		set 3 ->  { 5 13 }
 *		set 4 ->  { 7 15 }
 *
 * interval = 4
 *	deep = 0
 *		set 1 ->  { 2 6 10 14 }
 *	deep = 1
 *		set 1 ->  { 1 5 9 13 }
 *		set 2 ->  { 3 7 11 15 }
 *
 * interval = 2
 *	deep = 0
 *		set 1 ->  { 1 3 5 7 9 11 13 15 }
 *
 * Returns UWB_RSV_ALLOC_FOUND and records the winning set in
 * ci[0].csi (and the totals in @ai) if some set can hold
 * num_safe_mas + num_unsafe_mas MAS in every one of its columns;
 * UWB_RSV_ALLOC_NOT_FOUND otherwise.
 */
static int uwb_rsv_find_best_column_set(struct uwb_rsv_alloc_info *ai, int interval,
					int num_safe_mas, int num_unsafe_mas)
{
	struct uwb_rsv_col_info *ci = ai->ci;
	struct uwb_rsv_col_set_info *csi = &ci->csi;
	struct uwb_rsv_col_set_info tmp_csi;
	int deep, set, col, start_col_deep, col_start_set;
	int start_col, max_mas_in_set, lowest_max_mas_in_deep;
	int n_mas;
	int found = UWB_RSV_ALLOC_NOT_FOUND;

	tmp_csi.start_col = 0;
	start_col_deep = interval;
	n_mas = num_unsafe_mas + num_safe_mas;

	/* each deeper level halves the offset of the first column tried */
	for (deep = 0; ((interval >> deep) & 0x1) == 0; deep++) {
		start_col_deep /= 2;
		col_start_set = 0;
		lowest_max_mas_in_deep = UWB_MAS_PER_ZONE;

		for (set = 1; set <= (1 << deep); set++) {
			/* max_mas_in_set: worst (highest) MAS index this set
			 * would reach; 0 means some column cannot fit n_mas. */
			max_mas_in_set = 0;
			start_col = start_col_deep + col_start_set;
			for (col = start_col; col < UWB_NUM_ZONES; col += interval) {
				/* NOTE(review): highest_mas[] has
				 * UWB_MAS_PER_ZONE entries but n_mas can reach
				 * UWB_MAS_PER_ZONE when a column is entirely
				 * free -- that read is one past the array.
				 * Confirm and widen the array if so. */
				if (ci[col].max_avail_safe >= num_safe_mas &&
				    ci[col].max_avail_unsafe >= n_mas) {
					if (ci[col].highest_mas[n_mas] > max_mas_in_set)
						max_mas_in_set = ci[col].highest_mas[n_mas];
				} else {
					max_mas_in_set = 0;
					break;
				}
			}
			/* keep the set whose allocation stays lowest in the zone */
			if ((lowest_max_mas_in_deep > max_mas_in_set) && max_mas_in_set) {
				lowest_max_mas_in_deep = max_mas_in_set;
				tmp_csi.start_col = start_col;
			}
			col_start_set += (interval >> deep);
		}

		/* an allocation entirely in the safe upper half wins outright;
		 * one that spills lower is remembered but deeper levels are
		 * still tried for a better fit */
		if (lowest_max_mas_in_deep < 8) {
			csi->start_col = tmp_csi.start_col;
			found = UWB_RSV_ALLOC_FOUND;
			break;
		} else if ((lowest_max_mas_in_deep > 8) &&
			   (lowest_max_mas_in_deep != UWB_MAS_PER_ZONE) &&
			   (found == UWB_RSV_ALLOC_NOT_FOUND)) {
			csi->start_col = tmp_csi.start_col;
			found = UWB_RSV_ALLOC_FOUND;
		}
	}

	if (found == UWB_RSV_ALLOC_FOUND) {
		csi->interval = interval;
		csi->safe_mas_per_col = num_safe_mas;
		csi->unsafe_mas_per_col = num_unsafe_mas;

		ai->safe_allocated_mases = (UWB_NUM_ZONES / interval) * num_safe_mas;
		ai->unsafe_allocated_mases = (UWB_NUM_ZONES / interval) * num_unsafe_mas;
		ai->total_allocated_mases = ai->safe_allocated_mases + ai->unsafe_allocated_mases;
		ai->interval = interval;
	}
	return found;
}
/*
 * Compute the row availability: a row (a given MAS index across all
 * zones) is available when it is free in every zone from 1 to 15
 * (zone 0 is skipped -- presumably it holds the beacon period and is
 * never usable; TODO confirm).
 *
 * Fixes: index the bitmap with UWB_MAS_PER_ZONE (bm rows are
 * UWB_MAS_PER_ZONE wide; the old UWB_NUM_ZONES only worked because
 * both constants are 16), and replace the magic 16 with the constant.
 */
static void get_row_descriptors(struct uwb_rsv_alloc_info *ai)
{
	unsigned char *bm = ai->bm;
	struct uwb_rsv_row_info *ri = &ai->ri;
	int col, mas;

	ri->free_rows = UWB_MAS_PER_ZONE;
	for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) {
		ri->avail[mas] = 1;
		for (col = 1; col < UWB_NUM_ZONES; col++) {
			if (bm[col * UWB_MAS_PER_ZONE + mas] == UWB_RSV_MAS_NOT_AVAIL) {
				ri->free_rows--;
				ri->avail[mas] = 0;
				break;
			}
		}
	}
}
/*
 * Compute the availability descriptor of one column (zone): how many
 * MAS can be taken from it safely (max_avail_safe) and at all
 * (max_avail_unsafe), and, for each allocation size, the highest MAS
 * index that allocation would reach (highest_mas[]).
 *
 * A contiguous free run is "safe" only while it does not exceed
 * safe_mas_in_row[] for the row it started in -- NOTE(review):
 * presumably so that column allocations do not starve row
 * reservations; confirm against the WiMedia MAS selection rules.
 *
 * Fixes: the per-run limit table is now static const (it was rebuilt
 * on the stack on every call); bitmap indexing uses UWB_MAS_PER_ZONE
 * (bm rows are UWB_MAS_PER_ZONE wide; UWB_NUM_ZONES only worked
 * because both are 16); max_avail_unsafe is explicitly reset so the
 * function is correct even on a reused descriptor.
 */
static void uwb_rsv_fill_column_info(unsigned char *bm, int column, struct uwb_rsv_col_info *rci)
{
	int mas;
	int block_count = 0, start_block = 0;
	int previous_avail = 0;
	int available = 0;
	/* longest safe free run starting at each row (MAS index) */
	static const int safe_mas_in_row[UWB_MAS_PER_ZONE] = {
		8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1,
	};

	rci->max_avail_safe = 0;
	rci->max_avail_unsafe = 0;

	for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++) {
		if (bm[column * UWB_MAS_PER_ZONE + mas]) {
			previous_avail = 0;
			continue;
		}
		available++;
		rci->max_avail_unsafe = available;
		/* NOTE(review): highest_mas[] has UWB_MAS_PER_ZONE entries;
		 * a fully free column makes available == UWB_MAS_PER_ZONE,
		 * which writes one past the array -- confirm and widen. */
		rci->highest_mas[available] = mas;
		if (previous_avail) {
			block_count++;
			if ((block_count > safe_mas_in_row[start_block]) &&
			    (!rci->max_avail_safe))
				rci->max_avail_safe = available - 1;
		} else {
			previous_avail = 1;
			start_block = mas;
			block_count = 1;
		}
	}
	/* no run ever exceeded its safe limit: everything free is safe */
	if (!rci->max_avail_safe)
		rci->max_avail_safe = rci->max_avail_unsafe;
}
/*
 * Fill the per-zone availability descriptors.  Zone 0 is skipped --
 * presumably it is never usable for reservations (beacon period);
 * TODO confirm.
 */
static void get_column_descriptors(struct uwb_rsv_alloc_info *ai)
{
	int zone;

	for (zone = 1; zone < UWB_NUM_ZONES; zone++)
		uwb_rsv_fill_column_info(ai->bm, zone, &ai->ci[zone]);
}
/*
 * Try to satisfy the reservation with whole rows: take the largest
 * row count that fits within max_mas and is still free, down to the
 * minimum needed for min_mas.
 *
 * Returns UWB_RSV_ALLOC_FOUND (and fills the allocation via
 * uwb_rsv_fill_row_alloc()) or UWB_RSV_ALLOC_NOT_FOUND.
 */
static int uwb_rsv_find_best_row_alloc(struct uwb_rsv_alloc_info *ai)
{
	int max_rows = ai->max_mas / UWB_USABLE_MAS_PER_ROW;
	int min_rows = DIV_ROUND_UP(ai->min_mas, UWB_USABLE_MAS_PER_ROW);
	int n_rows;

	for (n_rows = max_rows; n_rows >= min_rows; n_rows--) {
		if (n_rows > ai->ri.free_rows)
			continue;
		ai->ri.used_rows = n_rows;
		ai->interval = 1; /* row reservation */
		uwb_rsv_fill_row_alloc(ai);
		return UWB_RSV_ALLOC_FOUND;
	}
	return UWB_RSV_ALLOC_NOT_FOUND;
}
/*
 * Try to satisfy the reservation with a column set of the given zone
 * interval, preferring larger per-zone allocations and, within a
 * size, as many safe MAS as possible.
 */
static int uwb_rsv_find_best_col_alloc(struct uwb_rsv_alloc_info *ai, int interval)
{
	int n_column = UWB_NUM_ZONES / interval;
	int max_per_zone = ai->max_mas / n_column;
	int min_per_zone = DIV_ROUND_UP(ai->min_mas, n_column);
	int n_mas, n_safe;

	if (min_per_zone > UWB_MAS_PER_ZONE)
		return UWB_RSV_ALLOC_NOT_FOUND;
	if (max_per_zone > UWB_MAS_PER_ZONE)
		max_per_zone = UWB_MAS_PER_ZONE;

	for (n_mas = max_per_zone; n_mas >= min_per_zone; n_mas--) {
		/* feasibility check first: can n_mas fit at all (all unsafe)? */
		if (uwb_rsv_find_best_column_set(ai, interval, 0, n_mas)
		    == UWB_RSV_ALLOC_NOT_FOUND)
			continue;
		/* then maximize the safe share of those n_mas */
		for (n_safe = n_mas; n_safe >= 0; n_safe--) {
			if (uwb_rsv_find_best_column_set(ai, interval, n_safe,
					n_mas - n_safe) == UWB_RSV_ALLOC_FOUND) {
				uwb_rsv_fill_column_alloc(ai);
				return UWB_RSV_ALLOC_FOUND;
			}
		}
	}
	return UWB_RSV_ALLOC_NOT_FOUND;
}
int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *available,
struct uwb_mas_bm *result)
{
struct uwb_rsv_alloc_info *ai;
int interval;
int bit_index;
ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL);
ai->min_mas = rsv->min_mas;
ai->max_mas = rsv->max_mas;
ai->max_interval = rsv->max_interval;
/* fill the not available vector from the available bm */
for (bit_index = 0; bit_index < UWB_NUM_MAS; bit_index++) {
if (!test_bit(bit_index, available->bm))
ai->bm[bit_index] = UWB_RSV_MAS_NOT_AVAIL;
}
if (ai->max_interval == 1) {
get_row_descriptors(ai);
if (uwb_rsv_find_best_row_alloc(ai) == UWB_RSV_ALLOC_FOUND)
goto alloc_found;
else
goto alloc_not_found;
}
get_column_descriptors(ai);
for (interval = 16; interval >= 2; interval>>=1) {
if (interval > ai->max_interval)
continue;
if (uwb_rsv_find_best_col_alloc(ai, interval) == UWB_RSV_ALLOC_FOUND)
goto alloc_found;
}
/* try row reservation if no column is found */
get_row_descriptors(ai);
if (uwb_rsv_find_best_row_alloc(ai) == UWB_RSV_ALLOC_FOUND)
goto alloc_found;
else
goto alloc_not_found;
alloc_found:
bitmap_zero(result->bm, UWB_NUM_MAS);
bitmap_zero(result->unsafe_bm, UWB_NUM_MAS);
/* fill the safe and unsafe bitmaps */
for (bit_index = 0; bit_index < UWB_NUM_MAS; bit_index++) {
if (ai->bm[bit_index] == UWB_RSV_MAS_SAFE)
set_bit(bit_index, result->bm);
else if (ai->bm[bit_index] == UWB_RSV_MAS_UNSAFE)
set_bit(bit_index, result->unsafe_bm);
}
bitmap_or(result->bm, result->bm, result->unsafe_bm, UWB_NUM_MAS);
result->safe = ai->safe_allocated_mases;
result->unsafe = ai->unsafe_allocated_mases;
kfree(ai);
return UWB_RSV_ALLOC_FOUND;
alloc_not_found:
kfree(ai);
return UWB_RSV_ALLOC_NOT_FOUND;
}
......@@ -58,7 +58,7 @@ void uwb_drp_avail_init(struct uwb_rc *rc)
*
* avail = global & local & pending
*/
static void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail)
void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail)
{
bitmap_and(avail->bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS);
bitmap_and(avail->bm, avail->bm, rc->drp_avail.pending, UWB_NUM_MAS);
......@@ -105,6 +105,7 @@ void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas)
bitmap_or(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS);
bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
rc->drp_avail.ie_valid = false;
uwb_rsv_handle_drp_avail_change(rc);
}
/**
......@@ -280,6 +281,7 @@ int uwbd_evt_handle_rc_drp_avail(struct uwb_event *evt)
mutex_lock(&rc->rsvs_mutex);
bitmap_copy(rc->drp_avail.global, bmp, UWB_NUM_MAS);
rc->drp_avail.ie_valid = false;
uwb_rsv_handle_drp_avail_change(rc);
mutex_unlock(&rc->rsvs_mutex);
uwb_rsv_sched_update(rc);
......
......@@ -22,6 +22,96 @@
#include "uwb-internal.h"
/*
 * Return the reason code for a reservation's DRP IE.
 */
int uwb_rsv_reason_code(struct uwb_rsv *rsv)
{
	switch (rsv->state) {
	case UWB_RSV_STATE_O_INITIATED:
	case UWB_RSV_STATE_O_PENDING:
	case UWB_RSV_STATE_O_ESTABLISHED:
	case UWB_RSV_STATE_O_TO_BE_MOVED:
	case UWB_RSV_STATE_O_MOVE_EXPANDING:
	case UWB_RSV_STATE_T_ACCEPTED:
	case UWB_RSV_STATE_T_RESIZED:
	case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
		return UWB_DRP_REASON_ACCEPTED;
	case UWB_RSV_STATE_O_MODIFIED:
	case UWB_RSV_STATE_O_MOVE_COMBINING:
	case UWB_RSV_STATE_O_MOVE_REDUCING:
		return UWB_DRP_REASON_MODIFIED;
	case UWB_RSV_STATE_T_CONFLICT:
	case UWB_RSV_STATE_T_EXPANDING_CONFLICT:
		return UWB_DRP_REASON_CONFLICT;
	case UWB_RSV_STATE_T_PENDING:
	case UWB_RSV_STATE_T_EXPANDING_PENDING:
		return UWB_DRP_REASON_PENDING;
	case UWB_RSV_STATE_T_DENIED:
	case UWB_RSV_STATE_T_EXPANDING_DENIED:
		return UWB_DRP_REASON_DENIED;
	default:
		/* states not listed in the original lookup table fell into
		 * its zero-initialized gaps; keep returning 0 for them */
		return 0;
	}
}
/*
 * Return the reason code for a reservation's companion DRP IE.
 *
 * A companion IE only exists while a reservation is being moved or
 * expanded, so only those states appear in the table.
 *
 * NOTE(review): the table is sparse -- indexing it with a state past
 * UWB_RSV_STATE_T_EXPANDING_DENIED (e.g. UWB_RSV_STATE_T_RESIZED)
 * would read beyond the end of the array, and states in the
 * zero-filled gaps silently return 0.  Callers are presumably gated
 * on uwb_rsv_has_two_drp_ies() -- confirm.
 */
int uwb_rsv_companion_reason_code(struct uwb_rsv *rsv)
{
	static const int companion_reason_codes[] = {
		[UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED,
		[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED,
		[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT,
		[UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING,
		[UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED,
	};
	return companion_reason_codes[rsv->state];
}
/*
 * Return the status bit for a reservation's DRP IE.
 */
int uwb_rsv_status(struct uwb_rsv *rsv)
{
	switch (rsv->state) {
	case UWB_RSV_STATE_O_MODIFIED:
	case UWB_RSV_STATE_O_ESTABLISHED:
	case UWB_RSV_STATE_O_MOVE_COMBINING:
	case UWB_RSV_STATE_O_MOVE_REDUCING:
	case UWB_RSV_STATE_O_MOVE_EXPANDING:
	case UWB_RSV_STATE_T_ACCEPTED:
	case UWB_RSV_STATE_T_RESIZED:
	case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
	case UWB_RSV_STATE_T_EXPANDING_CONFLICT:
	case UWB_RSV_STATE_T_EXPANDING_PENDING:
	case UWB_RSV_STATE_T_EXPANDING_DENIED:
		return 1;
	default:
		/* O_INITIATED, O_PENDING, O_TO_BE_MOVED, T_CONFLICT,
		 * T_PENDING, T_DENIED and any unlisted state: status 0,
		 * exactly as the original lookup table */
		return 0;
	}
}
/*
 * Return the status bit for a reservation's companion DRP IE.
 *
 * NOTE(review): sparse table -- only the move/expand states below are
 * meaningful; a state past UWB_RSV_STATE_T_EXPANDING_DENIED would
 * read beyond the end of the array.  Callers are presumably gated on
 * uwb_rsv_has_two_drp_ies() -- confirm.
 */
int uwb_rsv_companion_status(struct uwb_rsv *rsv)
{
	static const int companion_statuses[] = {
		[UWB_RSV_STATE_O_MOVE_EXPANDING] = 0,
		[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1,
		[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 0,
		[UWB_RSV_STATE_T_EXPANDING_PENDING] = 0,
		[UWB_RSV_STATE_T_EXPANDING_DENIED] = 0,
	};
	return companion_statuses[rsv->state];
}
/*
* Allocate a DRP IE.
*
......@@ -33,16 +123,12 @@
/*
 * Allocate and minimally initialize a DRP IE (header element id set;
 * the body holds room for one allocation field per zone).
 *
 * NOTE(review): this span is a diff region with the +/- markers
 * stripped; the tiebreaker lines below appear to be the pre-patch
 * code (the patch moves the tiebreaker into struct uwb_rsv) --
 * confirm against the applied tree before relying on them.
 *
 * Returns the new IE or NULL on allocation failure.
 */
static struct uwb_ie_drp *uwb_drp_ie_alloc(void)
{
	struct uwb_ie_drp *drp_ie;
	unsigned tiebreaker;

	drp_ie = kzalloc(sizeof(struct uwb_ie_drp) +
			UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc),
			GFP_KERNEL);
	if (drp_ie) {
		drp_ie->hdr.element_id = UWB_IE_DRP;

		/* random 1-bit tiebreaker used to resolve DRP conflicts */
		get_random_bytes(&tiebreaker, sizeof(unsigned));
		uwb_ie_drp_set_tiebreaker(drp_ie, tiebreaker & 1);
	}
	return drp_ie;
}
......@@ -103,43 +189,17 @@ static void uwb_drp_ie_from_bm(struct uwb_ie_drp *drp_ie,
*/
int uwb_drp_ie_update(struct uwb_rsv *rsv)
{
struct device *dev = &rsv->rc->uwb_dev.dev;
struct uwb_ie_drp *drp_ie;
int reason_code, status;
struct uwb_rsv_move *mv;
int unsafe;
switch (rsv->state) {
case UWB_RSV_STATE_NONE:
if (rsv->state == UWB_RSV_STATE_NONE) {
kfree(rsv->drp_ie);
rsv->drp_ie = NULL;
return 0;
case UWB_RSV_STATE_O_INITIATED:
reason_code = UWB_DRP_REASON_ACCEPTED;
status = 0;
break;
case UWB_RSV_STATE_O_PENDING:
reason_code = UWB_DRP_REASON_ACCEPTED;
status = 0;
break;
case UWB_RSV_STATE_O_MODIFIED:
reason_code = UWB_DRP_REASON_MODIFIED;
status = 1;
break;
case UWB_RSV_STATE_O_ESTABLISHED:
reason_code = UWB_DRP_REASON_ACCEPTED;
status = 1;
break;
case UWB_RSV_STATE_T_ACCEPTED:
reason_code = UWB_DRP_REASON_ACCEPTED;
status = 1;
break;
case UWB_RSV_STATE_T_DENIED:
reason_code = UWB_DRP_REASON_DENIED;
status = 0;
break;
default:
dev_dbg(dev, "rsv with unhandled state (%d)\n", rsv->state);
return -EINVAL;
}
unsafe = rsv->mas.unsafe ? 1 : 0;
if (rsv->drp_ie == NULL) {
rsv->drp_ie = uwb_drp_ie_alloc();
......@@ -148,9 +208,11 @@ int uwb_drp_ie_update(struct uwb_rsv *rsv)
}
drp_ie = rsv->drp_ie;
uwb_ie_drp_set_unsafe(drp_ie, unsafe);
uwb_ie_drp_set_tiebreaker(drp_ie, rsv->tiebreaker);
uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv));
uwb_ie_drp_set_status(drp_ie, status);
uwb_ie_drp_set_reason_code(drp_ie, reason_code);
uwb_ie_drp_set_status(drp_ie, uwb_rsv_status(rsv));
uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_reason_code(rsv));
uwb_ie_drp_set_stream_index(drp_ie, rsv->stream);
uwb_ie_drp_set_type(drp_ie, rsv->type);
......@@ -168,6 +230,27 @@ int uwb_drp_ie_update(struct uwb_rsv *rsv)
uwb_drp_ie_from_bm(drp_ie, &rsv->mas);
if (uwb_rsv_has_two_drp_ies(rsv)) {
mv = &rsv->mv;
if (mv->companion_drp_ie == NULL) {
mv->companion_drp_ie = uwb_drp_ie_alloc();
if (mv->companion_drp_ie == NULL)
return -ENOMEM;
}
drp_ie = mv->companion_drp_ie;
/* keep all the same configuration of the main drp_ie */
memcpy(drp_ie, rsv->drp_ie, sizeof(struct uwb_ie_drp));
/* FIXME: handle properly the unsafe bit */
uwb_ie_drp_set_unsafe(drp_ie, 1);
uwb_ie_drp_set_status(drp_ie, uwb_rsv_companion_status(rsv));
uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_companion_reason_code(rsv));
uwb_drp_ie_from_bm(drp_ie, &mv->companion_mas);
}
rsv->ie_valid = true;
return 0;
}
......@@ -218,6 +301,8 @@ void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie)
u8 zone;
u16 zone_mask;
bitmap_zero(bm->bm, UWB_NUM_MAS);
for (cnt = 0; cnt < numallocs; cnt++) {
alloc = &drp_ie->allocs[cnt];
zone_bm = le16_to_cpu(alloc->zone_bm);
......@@ -229,3 +314,4 @@ void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie)
}
}
}
此差异已折叠。
此差异已折叠。
......@@ -82,29 +82,21 @@ struct uwb_dbg {
struct dentry *reservations_f;
struct dentry *accept_f;
struct dentry *drp_avail_f;
spinlock_t list_lock;
};
static struct dentry *root_dir;
static void uwb_dbg_rsv_cb(struct uwb_rsv *rsv)
{
struct uwb_rc *rc = rsv->rc;
struct device *dev = &rc->uwb_dev.dev;
struct uwb_dev_addr devaddr;
char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];
uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
if (rsv->target.type == UWB_RSV_TARGET_DEV)
devaddr = rsv->target.dev->dev_addr;
else
devaddr = rsv->target.devaddr;
uwb_dev_addr_print(target, sizeof(target), &devaddr);
struct uwb_dbg *dbg = rsv->pal_priv;
dev_dbg(dev, "debug: rsv %s -> %s: %s\n",
owner, target, uwb_rsv_state_str(rsv->state));
uwb_rsv_dump("debug", rsv);
if (rsv->state == UWB_RSV_STATE_NONE) {
spin_lock(&dbg->list_lock);
list_del(&rsv->pal_node);
spin_unlock(&dbg->list_lock);
uwb_rsv_destroy(rsv);
}
}
......@@ -128,20 +120,21 @@ static int cmd_rsv_establish(struct uwb_rc *rc,
return -ENOMEM;
}
rsv->owner = &rc->uwb_dev;
rsv->target.type = UWB_RSV_TARGET_DEV;
rsv->target.dev = target;
rsv->type = cmd->type;
rsv->max_mas = cmd->max_mas;
rsv->min_mas = cmd->min_mas;
rsv->sparsity = cmd->sparsity;
rsv->target.type = UWB_RSV_TARGET_DEV;
rsv->target.dev = target;
rsv->type = cmd->type;
rsv->max_mas = cmd->max_mas;
rsv->min_mas = cmd->min_mas;
rsv->max_interval = cmd->max_interval;
ret = uwb_rsv_establish(rsv);
if (ret)
uwb_rsv_destroy(rsv);
else
else {
spin_lock(&(rc->dbg)->list_lock);
list_add_tail(&rsv->pal_node, &rc->dbg->rsvs);
spin_unlock(&(rc->dbg)->list_lock);
}
return ret;
}
......@@ -151,17 +144,24 @@ static int cmd_rsv_terminate(struct uwb_rc *rc,
struct uwb_rsv *rsv, *found = NULL;
int i = 0;
spin_lock(&(rc->dbg)->list_lock);
list_for_each_entry(rsv, &rc->dbg->rsvs, pal_node) {
if (i == cmd->index) {
found = rsv;
uwb_rsv_get(found);
break;
}
i++;
}
spin_unlock(&(rc->dbg)->list_lock);
if (!found)
return -EINVAL;
uwb_rsv_terminate(found);
uwb_rsv_put(found);
return 0;
}
......@@ -191,7 +191,7 @@ static ssize_t command_write(struct file *file, const char __user *buf,
struct uwb_rc *rc = file->private_data;
struct uwb_dbg_cmd cmd;
int ret = 0;
if (len != sizeof(struct uwb_dbg_cmd))
return -EINVAL;
......@@ -325,7 +325,9 @@ static void uwb_dbg_new_rsv(struct uwb_pal *pal, struct uwb_rsv *rsv)
struct uwb_dbg *dbg = container_of(pal, struct uwb_dbg, pal);
if (dbg->accept) {
spin_lock(&dbg->list_lock);
list_add_tail(&rsv->pal_node, &dbg->rsvs);
spin_unlock(&dbg->list_lock);
uwb_rsv_accept(rsv, uwb_dbg_rsv_cb, dbg);
}
}
......@@ -341,6 +343,7 @@ void uwb_dbg_add_rc(struct uwb_rc *rc)
return;
INIT_LIST_HEAD(&rc->dbg->rsvs);
spin_lock_init(&(rc->dbg)->list_lock);
uwb_pal_init(&rc->dbg->pal);
rc->dbg->pal.rc = rc;
......
......@@ -92,6 +92,12 @@ extern const char *uwb_rc_strerror(unsigned code);
struct uwb_rc_neh;
extern int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name,
struct uwb_rccb *cmd, size_t cmd_size,
u8 expected_type, u16 expected_event,
uwb_rc_cmd_cb_f cb, void *arg);
void uwb_rc_neh_create(struct uwb_rc *rc);
void uwb_rc_neh_destroy(struct uwb_rc *rc);
......@@ -106,7 +112,69 @@ void uwb_rc_neh_put(struct uwb_rc_neh *neh);
extern int uwb_est_create(void);
extern void uwb_est_destroy(void);
/*
* UWB conflicting alien reservations
*/
struct uwb_cnflt_alien {
struct uwb_rc *rc;
struct list_head rc_node;
struct uwb_mas_bm mas;
struct timer_list timer;
struct work_struct cnflt_update_work;
};
enum uwb_uwb_rsv_alloc_result {
UWB_RSV_ALLOC_FOUND = 0,
UWB_RSV_ALLOC_NOT_FOUND,
};
enum uwb_rsv_mas_status {
UWB_RSV_MAS_NOT_AVAIL = 1,
UWB_RSV_MAS_SAFE,
UWB_RSV_MAS_UNSAFE,
};
struct uwb_rsv_col_set_info {
unsigned char start_col;
unsigned char interval;
unsigned char safe_mas_per_col;
unsigned char unsafe_mas_per_col;
};
struct uwb_rsv_col_info {
unsigned char max_avail_safe;
unsigned char max_avail_unsafe;
unsigned char highest_mas[UWB_MAS_PER_ZONE];
struct uwb_rsv_col_set_info csi;
};
struct uwb_rsv_row_info {
unsigned char avail[UWB_MAS_PER_ZONE];
unsigned char free_rows;
unsigned char used_rows;
};
/*
* UWB find allocation
*/
struct uwb_rsv_alloc_info {
unsigned char bm[UWB_MAS_PER_ZONE * UWB_NUM_ZONES];
struct uwb_rsv_col_info ci[UWB_NUM_ZONES];
struct uwb_rsv_row_info ri;
struct uwb_mas_bm *not_available;
struct uwb_mas_bm *result;
int min_mas;
int max_mas;
int max_interval;
int total_allocated_mases;
int safe_allocated_mases;
int unsafe_allocated_mases;
int interval;
};
int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *available,
struct uwb_mas_bm *result);
void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc);
/*
* UWB Events & management daemon
*/
......@@ -254,18 +322,28 @@ void uwb_rsv_init(struct uwb_rc *rc);
int uwb_rsv_setup(struct uwb_rc *rc);
void uwb_rsv_cleanup(struct uwb_rc *rc);
void uwb_rsv_remove_all(struct uwb_rc *rc);
void uwb_rsv_get(struct uwb_rsv *rsv);
void uwb_rsv_put(struct uwb_rsv *rsv);
bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv);
void uwb_rsv_dump(char *text, struct uwb_rsv *rsv);
int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available);
void uwb_rsv_backoff_win_timer(unsigned long arg);
void uwb_rsv_backoff_win_increment(struct uwb_rc *rc);
int uwb_rsv_status(struct uwb_rsv *rsv);
int uwb_rsv_companion_status(struct uwb_rsv *rsv);
void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state);
void uwb_rsv_remove(struct uwb_rsv *rsv);
struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src,
struct uwb_ie_drp *drp_ie);
void uwb_rsv_sched_update(struct uwb_rc *rc);
void uwb_rsv_queue_update(struct uwb_rc *rc);
void uwb_drp_handle_timeout(struct uwb_rsv *rsv);
int uwb_drp_ie_update(struct uwb_rsv *rsv);
void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie);
void uwb_drp_avail_init(struct uwb_rc *rc);
void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail);
int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas);
void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas);
void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas);
......
......@@ -67,6 +67,7 @@ struct uwb_dev {
struct uwb_dev_addr dev_addr;
int beacon_slot;
DECLARE_BITMAP(streams, UWB_NUM_STREAMS);
DECLARE_BITMAP(last_availability_bm, UWB_NUM_MAS);
};
#define to_uwb_dev(d) container_of(d, struct uwb_dev, dev)
......@@ -109,6 +110,9 @@ struct uwbd {
*/
struct uwb_mas_bm {
DECLARE_BITMAP(bm, UWB_NUM_MAS);
DECLARE_BITMAP(unsafe_bm, UWB_NUM_MAS);
int safe;
int unsafe;
};
/**
......@@ -134,14 +138,24 @@ struct uwb_mas_bm {
* FIXME: further target states TBD.
*/
enum uwb_rsv_state {
UWB_RSV_STATE_NONE,
UWB_RSV_STATE_NONE = 0,
UWB_RSV_STATE_O_INITIATED,
UWB_RSV_STATE_O_PENDING,
UWB_RSV_STATE_O_MODIFIED,
UWB_RSV_STATE_O_ESTABLISHED,
UWB_RSV_STATE_O_TO_BE_MOVED,
UWB_RSV_STATE_O_MOVE_EXPANDING,
UWB_RSV_STATE_O_MOVE_COMBINING,
UWB_RSV_STATE_O_MOVE_REDUCING,
UWB_RSV_STATE_T_ACCEPTED,
UWB_RSV_STATE_T_DENIED,
UWB_RSV_STATE_T_CONFLICT,
UWB_RSV_STATE_T_PENDING,
UWB_RSV_STATE_T_EXPANDING_ACCEPTED,
UWB_RSV_STATE_T_EXPANDING_CONFLICT,
UWB_RSV_STATE_T_EXPANDING_PENDING,
UWB_RSV_STATE_T_EXPANDING_DENIED,
UWB_RSV_STATE_T_RESIZED,
UWB_RSV_STATE_LAST,
};
......@@ -166,6 +180,12 @@ struct uwb_rsv_target {
};
};
struct uwb_rsv_move {
struct uwb_mas_bm final_mas;
struct uwb_ie_drp *companion_drp_ie;
struct uwb_mas_bm companion_mas;
};
/*
* Number of streams reserved for reservations targeted at DevAddrs.
*/
......@@ -203,6 +223,7 @@ typedef void (*uwb_rsv_cb_f)(struct uwb_rsv *rsv);
*
* @status: negotiation status
* @stream: stream index allocated for this reservation
* @tiebreaker: conflict tiebreaker for this reservation
* @mas: reserved MAS
* @drp_ie: the DRP IE
* @ie_valid: true iff the DRP IE matches the reservation parameters
......@@ -225,19 +246,22 @@ struct uwb_rsv {
enum uwb_drp_type type;
int max_mas;
int min_mas;
int sparsity;
int max_interval;
bool is_multicast;
uwb_rsv_cb_f callback;
void *pal_priv;
enum uwb_rsv_state state;
bool needs_release_companion_mas;
u8 stream;
u8 tiebreaker;
struct uwb_mas_bm mas;
struct uwb_ie_drp *drp_ie;
struct uwb_rsv_move mv;
bool ie_valid;
struct timer_list timer;
bool expired;
struct work_struct handle_timeout_work;
};
static const
......@@ -279,6 +303,13 @@ struct uwb_drp_avail {
bool ie_valid;
};
struct uwb_drp_backoff_win {
u8 window;
u8 n;
int total_expired;
struct timer_list timer;
bool can_reserve_extra_mases;
};
const char *uwb_rsv_state_str(enum uwb_rsv_state state);
const char *uwb_rsv_type_str(enum uwb_drp_type type);
......@@ -294,6 +325,8 @@ void uwb_rsv_terminate(struct uwb_rsv *rsv);
void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv);
void uwb_rsv_get_usable_mas(struct uwb_rsv *orig_rsv, struct uwb_mas_bm *mas);
/**
* Radio Control Interface instance
*
......@@ -364,12 +397,18 @@ struct uwb_rc {
struct uwbd uwbd;
struct uwb_drp_backoff_win bow;
struct uwb_drp_avail drp_avail;
struct list_head reservations;
struct list_head cnflt_alien_list;
struct uwb_mas_bm cnflt_alien_bitmap;
struct mutex rsvs_mutex;
spinlock_t rsvs_lock;
struct workqueue_struct *rsv_workq;
struct work_struct rsv_update_work;
struct delayed_work rsv_update_work;
struct delayed_work rsv_alien_bp_work;
int set_drp_ie_pending;
struct mutex ies_mutex;
struct uwb_rc_cmd_set_ie *ies;
size_t ies_capacity;
......
......@@ -43,7 +43,7 @@ struct uwb_dbg_cmd_rsv_establish {
__u8 type;
__u16 max_mas;
__u16 min_mas;
__u8 sparsity;
__u8 max_interval;
};
struct uwb_dbg_cmd_rsv_terminate {
......
......@@ -58,6 +58,11 @@ enum { UWB_NUM_ZONES = 16 };
*/
#define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES)
/*
* Number of MAS required before a row can be considered available.
*/
#define UWB_USABLE_MAS_PER_ROW (UWB_NUM_ZONES - 1)
/*
* Number of streams per DRP reservation between a pair of devices.
*
......@@ -93,6 +98,26 @@ enum { UWB_BEACON_SLOT_LENGTH_US = 85 };
*/
enum { UWB_MAX_LOST_BEACONS = 3 };
/*
* mDRPBackOffWinMin
*
* The minimum number of superframes to wait before trying to reserve
* extra MAS.
*
* [ECMA-368] section 17.16
*/
enum { UWB_DRP_BACKOFF_WIN_MIN = 2 };
/*
* mDRPBackOffWinMax
*
* The maximum number of superframes to wait before trying to reserve
* extra MAS.
*
* [ECMA-368] section 17.16
*/
enum { UWB_DRP_BACKOFF_WIN_MAX = 16 };
/*
* Length of a superframe in microseconds.
*/
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册