Commit 2d7a8926 authored by David S. Miller

Merge branch 'mlxse-resource-query'

Jiri Pirko says:

====================
mlxsw: Replace Hw related const with resource query results

Nogah says:

Many of the ASIC's properties can be read from the HW with a resource query.
This patchset adds new resources to the resource query and implements their
use in place of the constants that we currently use.
Those resources are LAG, KVD and router related.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
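
To illustrate the pattern this series introduces, the sketch below shows how a consumer can now size a table from limits queried out of the HW instead of from compile-time constants. It is a minimal, illustrative example only: mlxsw_core_resources_get() and the struct mlxsw_resources fields are taken from the diff below, while the helper name and its error handling are hypothetical.

/* Hypothetical consumer: size the LAG mapping from queried limits
 * rather than from MLXSW_SP_LAG_MAX-style constants.
 */
static int example_alloc_lag_mapping(struct mlxsw_core *mlxsw_core, u8 **mapping)
{
	struct mlxsw_resources *res = mlxsw_core_resources_get(mlxsw_core);

	/* Every queried resource carries a valid bit; fail if the HW
	 * did not report the limits we rely on.
	 */
	if (!res->max_lag_valid || !res->max_ports_in_lag_valid)
		return -EIO;

	*mapping = kzalloc(sizeof(u8) * res->max_lag * res->max_ports_in_lag,
			   GFP_KERNEL);
	if (!*mapping)
		return -ENOMEM;
	return 0;
}
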
@@ -1100,10 +1100,15 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_alloc_stats;
}
if (mlxsw_driver->profile->used_max_lag &&
mlxsw_driver->profile->used_max_port_per_lag) {
alloc_size = sizeof(u8) * mlxsw_driver->profile->max_lag *
mlxsw_driver->profile->max_port_per_lag;
err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
&mlxsw_core->resources);
if (err)
goto err_bus_init;
if (mlxsw_core->resources.max_lag_valid &&
mlxsw_core->resources.max_ports_in_lag_valid) {
alloc_size = sizeof(u8) * mlxsw_core->resources.max_lag *
mlxsw_core->resources.max_ports_in_lag;
mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
if (!mlxsw_core->lag.mapping) {
err = -ENOMEM;
@@ -1111,11 +1116,6 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
}
}
err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
&mlxsw_core->resources);
if (err)
goto err_bus_init;
err = mlxsw_emad_init(mlxsw_core);
if (err)
goto err_emad_init;
@@ -1146,10 +1146,10 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
err_devlink_register:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
mlxsw_bus->fini(bus_priv);
err_bus_init:
kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
mlxsw_bus->fini(bus_priv);
err_bus_init:
free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
devlink_free(devlink);
@@ -1615,7 +1615,7 @@ EXPORT_SYMBOL(mlxsw_core_skb_receive);
static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
u16 lag_id, u8 port_index)
{
return mlxsw_core->driver->profile->max_port_per_lag * lag_id +
return mlxsw_core->resources.max_ports_in_lag * lag_id +
port_index;
}
@@ -1644,7 +1644,7 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
{
int i;
for (i = 0; i < mlxsw_core->driver->profile->max_port_per_lag; i++) {
for (i = 0; i < mlxsw_core->resources.max_ports_in_lag; i++) {
int index = mlxsw_core_lag_mapping_index(mlxsw_core,
lag_id, i);
......
@@ -179,8 +179,6 @@ struct mlxsw_swid_config {
struct mlxsw_config_profile {
u16 used_max_vepa_channels:1,
used_max_lag:1,
used_max_port_per_lag:1,
used_max_mid:1,
used_max_pgt:1,
used_max_system_port:1,
@@ -192,10 +190,9 @@ struct mlxsw_config_profile {
used_max_pkey:1,
used_ar_sec:1,
used_adaptive_routing_group_cap:1,
used_kvd_sizes:1;
used_kvd_split_data:1; /* indicate for the kvd's values */
u8 max_vepa_channels;
u16 max_lag;
u16 max_port_per_lag;
u16 max_mid;
u16 max_pgt;
u16 max_system_port;
@@ -214,8 +211,9 @@ struct mlxsw_config_profile {
u16 adaptive_routing_group_cap;
u8 arn;
u32 kvd_linear_size;
u32 kvd_hash_single_size;
u32 kvd_hash_double_size;
u16 kvd_hash_granularity;
u8 kvd_hash_single_parts;
u8 kvd_hash_double_parts;
u8 resource_query_enable;
struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
};
@@ -269,8 +267,35 @@ struct mlxsw_driver {
};
struct mlxsw_resources {
u8 max_span_valid:1;
u32 max_span_valid:1,
max_lag_valid:1,
max_ports_in_lag_valid:1,
kvd_size_valid:1,
kvd_single_min_size_valid:1,
kvd_double_min_size_valid:1,
max_virtual_routers_valid:1,
max_system_ports_valid:1,
max_vlan_groups_valid:1,
max_regions_valid:1,
max_rif_valid:1;
u8 max_span;
u8 max_lag;
u8 max_ports_in_lag;
u32 kvd_size;
u32 kvd_single_min_size;
u32 kvd_double_min_size;
u16 max_virtual_routers;
u16 max_system_ports;
u16 max_vlan_groups;
u16 max_regions;
u16 max_rif;
/* Internal resources.
* Determined by the SW, not queried from the HW.
*/
u32 kvd_single_size;
u32 kvd_double_size;
u32 kvd_linear_size;
};
struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core);
......
@@ -1156,6 +1156,16 @@ mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
#define MLXSW_RESOURCES_TABLE_END_ID 0xffff
#define MLXSW_MAX_SPAN_ID 0x2420
#define MLXSW_MAX_LAG_ID 0x2520
#define MLXSW_MAX_PORTS_IN_LAG_ID 0x2521
#define MLXSW_KVD_SIZE_ID 0x1001
#define MLXSW_KVD_SINGLE_MIN_SIZE_ID 0x1002
#define MLXSW_KVD_DOUBLE_MIN_SIZE_ID 0x1003
#define MLXSW_MAX_VIRTUAL_ROUTERS_ID 0x2C01
#define MLXSW_MAX_SYSTEM_PORT_ID 0x2502
#define MLXSW_MAX_VLAN_GROUPS_ID 0x2906
#define MLXSW_MAX_REGIONS_ID 0x2901
#define MLXSW_MAX_RIF_ID 0x2C02
#define MLXSW_RESOURCES_QUERY_MAX_QUERIES 100
#define MLXSW_RESOURCES_PER_QUERY 32
@@ -1167,6 +1177,46 @@ static void mlxsw_pci_resources_query_parse(int id, u64 val,
resources->max_span = val;
resources->max_span_valid = 1;
break;
case MLXSW_MAX_LAG_ID:
resources->max_lag = val;
resources->max_lag_valid = 1;
break;
case MLXSW_MAX_PORTS_IN_LAG_ID:
resources->max_ports_in_lag = val;
resources->max_ports_in_lag_valid = 1;
break;
case MLXSW_KVD_SIZE_ID:
resources->kvd_size = val;
resources->kvd_size_valid = 1;
break;
case MLXSW_KVD_SINGLE_MIN_SIZE_ID:
resources->kvd_single_min_size = val;
resources->kvd_single_min_size_valid = 1;
break;
case MLXSW_KVD_DOUBLE_MIN_SIZE_ID:
resources->kvd_double_min_size = val;
resources->kvd_double_min_size_valid = 1;
break;
case MLXSW_MAX_VIRTUAL_ROUTERS_ID:
resources->max_virtual_routers = val;
resources->max_virtual_routers_valid = 1;
break;
case MLXSW_MAX_SYSTEM_PORT_ID:
resources->max_system_ports = val;
resources->max_system_ports_valid = 1;
break;
case MLXSW_MAX_VLAN_GROUPS_ID:
resources->max_vlan_groups = val;
resources->max_vlan_groups_valid = 1;
break;
case MLXSW_MAX_REGIONS_ID:
resources->max_regions = val;
resources->max_regions_valid = 1;
break;
case MLXSW_MAX_RIF_ID:
resources->max_rif = val;
resources->max_rif_valid = 1;
break;
default:
break;
}
@@ -1209,10 +1259,52 @@ static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
return -EIO;
}
static int mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile,
struct mlxsw_resources *resources)
{
u32 singles_size, doubles_size, linear_size;
if (!resources->kvd_single_min_size_valid ||
!resources->kvd_double_min_size_valid ||
!profile->used_kvd_split_data)
return -EIO;
linear_size = profile->kvd_linear_size;
/* The hash part is what left of the kvd without the
* linear part. It is split to the single size and
* double size by the parts ratio from the profile.
* Both sizes must be a multiplications of the
* granularity from the profile.
*/
doubles_size = (resources->kvd_size - linear_size);
doubles_size *= profile->kvd_hash_double_parts;
doubles_size /= (profile->kvd_hash_double_parts +
profile->kvd_hash_single_parts);
doubles_size /= profile->kvd_hash_granularity;
doubles_size *= profile->kvd_hash_granularity;
singles_size = resources->kvd_size - doubles_size -
linear_size;
/* Check results are legal. */
if (singles_size < resources->kvd_single_min_size ||
doubles_size < resources->kvd_double_min_size ||
resources->kvd_size < linear_size)
return -EIO;
resources->kvd_single_size = singles_size;
resources->kvd_double_size = doubles_size;
resources->kvd_linear_size = linear_size;
return 0;
}
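
For concreteness, here is a worked instance of the split computed by mlxsw_pci_profile_get_kvd_sizes() above, with an assumed queried kvd_size of 245760 entries; the linear size (65536), the 2:1 single:double parts ratio and the granularity of 128 are the Spectrum profile values from this patch.

/* Worked example (kvd_size = 245760 is an assumed value):
 * hash area    = 245760 - 65536                     = 180224
 * doubles_size = 180224 * 1 / (1 + 2)               = 60074
 *                rounded down to a multiple of 128  = 60032
 * singles_size = 245760 - 60032 - 65536             = 120192
 */
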
static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
const struct mlxsw_config_profile *profile)
const struct mlxsw_config_profile *profile,
struct mlxsw_resources *resources)
{
int i;
int err;
mlxsw_cmd_mbox_zero(mbox);
@@ -1222,18 +1314,6 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
mbox, profile->max_vepa_channels);
}
if (profile->used_max_lag) {
mlxsw_cmd_mbox_config_profile_set_max_lag_set(
mbox, 1);
mlxsw_cmd_mbox_config_profile_max_lag_set(
mbox, profile->max_lag);
}
if (profile->used_max_port_per_lag) {
mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(
mbox, 1);
mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(
mbox, profile->max_port_per_lag);
}
if (profile->used_max_mid) {
mlxsw_cmd_mbox_config_profile_set_max_mid_set(
mbox, 1);
@@ -1310,19 +1390,22 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
mbox, profile->adaptive_routing_group_cap);
}
if (profile->used_kvd_sizes) {
mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(
mbox, 1);
mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(
mbox, profile->kvd_linear_size);
mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(
mbox, 1);
mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(
mbox, profile->kvd_hash_single_size);
if (resources->kvd_size_valid) {
err = mlxsw_pci_profile_get_kvd_sizes(profile, resources);
if (err)
return err;
mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
resources->kvd_linear_size);
mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
1);
mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
resources->kvd_single_size);
mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
mbox, 1);
mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(
mbox, profile->kvd_hash_double_size);
mbox, 1);
mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
resources->kvd_double_size);
}
for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
@@ -1524,7 +1607,7 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_query_resources;
err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, resources);
if (err)
goto err_config_profile;
......
@@ -2887,7 +2887,9 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_resources *resources;
char slcr_pl[MLXSW_REG_SLCR_LEN];
int err;
mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
MLXSW_REG_SLCR_LAG_HASH_DMAC |
@@ -2898,7 +2900,26 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
MLXSW_REG_SLCR_LAG_HASH_SPORT |
MLXSW_REG_SLCR_LAG_HASH_DPORT |
MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
if (err)
return err;
resources = mlxsw_core_resources_get(mlxsw_sp->core);
if (!(resources->max_lag_valid && resources->max_ports_in_lag_valid))
return -EIO;
mlxsw_sp->lags = kcalloc(resources->max_lag,
sizeof(struct mlxsw_sp_upper),
GFP_KERNEL);
if (!mlxsw_sp->lags)
return -ENOMEM;
return 0;
}
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
kfree(mlxsw_sp->lags);
}
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
@@ -2982,6 +3003,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
err_router_init:
mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
@@ -2995,38 +3017,26 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
int i;
mlxsw_sp_ports_remove(mlxsw_sp);
mlxsw_sp_span_fini(mlxsw_sp);
mlxsw_sp_router_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
mlxsw_sp_lag_fini(mlxsw_sp);
mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
WARN_ON(!list_empty(&mlxsw_sp->fids));
for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
WARN_ON_ONCE(mlxsw_sp->rifs[i]);
}
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
.used_max_vepa_channels = 1,
.max_vepa_channels = 0,
.used_max_lag = 1,
.max_lag = MLXSW_SP_LAG_MAX,
.used_max_port_per_lag = 1,
.max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX,
.used_max_mid = 1,
.max_mid = MLXSW_SP_MID_MAX,
.used_max_pgt = 1,
.max_pgt = 0,
.used_max_system_port = 1,
.max_system_port = 64,
.used_max_vlan_groups = 1,
.max_vlan_groups = 127,
.used_max_regions = 1,
.max_regions = 400,
.used_flood_tables = 1,
.used_flood_mode = 1,
.flood_mode = 3,
@@ -3038,10 +3048,11 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
.used_kvd_sizes = 1,
.used_kvd_split_data = 1,
.kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
.kvd_hash_single_parts = 2,
.kvd_hash_double_parts = 1,
.kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
.kvd_hash_single_size = MLXSW_SP_KVD_HASH_SINGLE_SIZE,
.kvd_hash_double_size = MLXSW_SP_KVD_HASH_DOUBLE_SIZE,
.swid_config = {
{
.used_type = 1,
@@ -3158,13 +3169,15 @@ static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_resources *resources;
int i;
for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
resources = mlxsw_core_resources_get(mlxsw_sp->core);
for (i = 0; i < resources->max_rif; i++)
if (!mlxsw_sp->rifs[i])
return i;
return MLXSW_SP_RIF_MAX;
return MLXSW_SP_INVALID_RIF;
}
static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
@@ -3244,7 +3257,7 @@ mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
int err;
rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
if (rif == MLXSW_SP_RIF_MAX)
if (rif == MLXSW_SP_INVALID_RIF)
return ERR_PTR(-ERANGE);
err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
@@ -3476,7 +3489,7 @@ static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
int err;
rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
if (rif == MLXSW_SP_RIF_MAX)
if (rif == MLXSW_SP_INVALID_RIF)
return -ERANGE;
err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
@@ -3683,12 +3696,14 @@ static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
u16 lag_id = mlxsw_sp_port->lag_id;
struct mlxsw_resources *resources;
int i, count = 0;
if (!mlxsw_sp_port->lagged)
return true;
for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
resources = mlxsw_core_resources_get(mlxsw_sp->core);
for (i = 0; i < resources->max_ports_in_lag; i++) {
struct mlxsw_sp_port *lag_port;
lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
@@ -3894,11 +3909,13 @@ static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
struct net_device *lag_dev,
u16 *p_lag_id)
{
struct mlxsw_resources *resources;
struct mlxsw_sp_upper *lag;
int free_lag_id = -1;
int i;
for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
resources = mlxsw_core_resources_get(mlxsw_sp->core);
for (i = 0; i < resources->max_lag; i++) {
lag = mlxsw_sp_lag_get(mlxsw_sp, i);
if (lag->ref_count) {
if (lag->dev == lag_dev) {
@@ -3932,9 +3949,11 @@ mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
u16 lag_id, u8 *p_port_index)
{
struct mlxsw_resources *resources;
int i;
for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
resources = mlxsw_core_resources_get(mlxsw_sp->core);
for (i = 0; i < resources->max_ports_in_lag; i++) {
if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
*p_port_index = i;
return 0;
......
@@ -54,10 +54,7 @@
#define MLXSW_SP_VFID_MAX 6656 /* Bridged VLAN interfaces */
#define MLXSW_SP_RFID_BASE 15360
#define MLXSW_SP_RIF_MAX 800
#define MLXSW_SP_LAG_MAX 64
#define MLXSW_SP_PORT_PER_LAG_MAX 16
#define MLXSW_SP_INVALID_RIF 0xffff
#define MLXSW_SP_MID_MAX 7000
@@ -67,8 +64,6 @@
#define MLXSW_SP_LPM_TREE_MAX 22
#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN)
#define MLXSW_SP_VIRTUAL_ROUTER_MAX 256
#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
#define MLXSW_SP_BYTES_PER_CELL 96
@@ -77,8 +72,7 @@
#define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL)
#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
#define MLXSW_SP_KVD_HASH_SINGLE_SIZE 163840 /* entries */
#define MLXSW_SP_KVD_HASH_DOUBLE_SIZE 32768 /* entries */
#define MLXSW_SP_KVD_GRANULARITY 128
/* Maximum delay buffer needed in case of PAUSE frames, in cells.
* Assumes 100m cable and maximum MTU.
@@ -253,7 +247,7 @@ struct mlxsw_sp_port_mall_tc_entry {
struct mlxsw_sp_router {
struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
struct mlxsw_sp_vr vrs[MLXSW_SP_VIRTUAL_ROUTER_MAX];
struct mlxsw_sp_vr *vrs;
struct rhashtable neigh_ht;
struct {
struct delayed_work dw;
@@ -275,7 +269,7 @@ struct mlxsw_sp {
DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
} br_mids;
struct list_head fids; /* VLAN-aware bridge FIDs */
struct mlxsw_sp_rif *rifs[MLXSW_SP_RIF_MAX];
struct mlxsw_sp_rif **rifs;
struct mlxsw_sp_port **ports;
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
@@ -290,7 +284,7 @@ struct mlxsw_sp {
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
u32 ageing_time;
struct mlxsw_sp_upper master_bridge;
struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
struct mlxsw_sp_upper *lags;
u8 port_to_module[MLXSW_PORT_MAX_PORTS];
struct mlxsw_sp_sb sb;
struct mlxsw_sp_router router;
@@ -483,9 +477,12 @@ static inline struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev)
{
struct mlxsw_resources *resources;
int i;
for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
resources = mlxsw_core_resources_get(mlxsw_sp->core);
for (i = 0; i < resources->max_rif; i++)
if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
return mlxsw_sp->rifs[i];
......
@@ -372,10 +372,12 @@ static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_resources *resources;
struct mlxsw_sp_vr *vr;
int i;
for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
resources = mlxsw_core_resources_get(mlxsw_sp->core);
for (i = 0; i < resources->max_virtual_routers; i++) {
vr = &mlxsw_sp->router.vrs[i];
if (!vr->used)
return vr;
@@ -417,11 +419,14 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
u32 tb_id,
enum mlxsw_sp_l3proto proto)
{
struct mlxsw_resources *resources;
struct mlxsw_sp_vr *vr;
int i;
tb_id = mlxsw_sp_fix_tb_id(tb_id);
for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
resources = mlxsw_core_resources_get(mlxsw_sp->core);
for (i = 0; i < resources->max_virtual_routers; i++) {
vr = &mlxsw_sp->router.vrs[i];
if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
return vr;
@@ -555,15 +560,33 @@ static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
&vr->fib->prefix_usage);
}
static void mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_resources *resources;
struct mlxsw_sp_vr *vr;
int i;
for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
resources = mlxsw_core_resources_get(mlxsw_sp->core);
if (!resources->max_virtual_routers_valid)
return -EIO;
mlxsw_sp->router.vrs = kcalloc(resources->max_virtual_routers,
sizeof(struct mlxsw_sp_vr),
GFP_KERNEL);
if (!mlxsw_sp->router.vrs)
return -ENOMEM;
for (i = 0; i < resources->max_virtual_routers; i++) {
vr = &mlxsw_sp->router.vrs[i];
vr->id = i;
}
return 0;
}
static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
kfree(mlxsw_sp->router.vrs);
}
struct mlxsw_sp_neigh_key {
@@ -1499,19 +1522,46 @@ static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_resources *resources;
char rgcr_pl[MLXSW_REG_RGCR_LEN];
int err;
resources = mlxsw_core_resources_get(mlxsw_sp->core);
if (!resources->max_rif_valid)
return -EIO;
mlxsw_sp->rifs = kcalloc(resources->max_rif,
sizeof(struct mlxsw_sp_rif *), GFP_KERNEL);
if (!mlxsw_sp->rifs)
return -ENOMEM;
mlxsw_reg_rgcr_pack(rgcr_pl, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, MLXSW_SP_RIF_MAX);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, resources->max_rif);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
if (err)
goto err_rgcr_fail;
return 0;
err_rgcr_fail:
kfree(mlxsw_sp->rifs);
return err;
}
static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_resources *resources;
char rgcr_pl[MLXSW_REG_RGCR_LEN];
int i;
mlxsw_reg_rgcr_pack(rgcr_pl, false);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
resources = mlxsw_core_resources_get(mlxsw_sp->core);
for (i = 0; i < resources->max_rif; i++)
WARN_ON_ONCE(mlxsw_sp->rifs[i]);
kfree(mlxsw_sp->rifs);
}
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
@@ -1523,14 +1573,21 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
return err;
mlxsw_sp_lpm_init(mlxsw_sp);
mlxsw_sp_vrs_init(mlxsw_sp);
err = mlxsw_sp_neigh_init(mlxsw_sp);
err = mlxsw_sp_vrs_init(mlxsw_sp);
if (err)
goto err_vrs_init;
err = mlxsw_sp_neigh_init(mlxsw_sp);
if (err)
goto err_neigh_init;
return 0;
err_neigh_init:
mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
__mlxsw_sp_router_fini(mlxsw_sp);
return err;
}
@@ -1538,6 +1595,7 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
}
......
@@ -1205,9 +1205,11 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
u16 lag_id)
{
struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_resources *resources;
int i;
for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
resources = mlxsw_core_resources_get(mlxsw_sp->core);
for (i = 0; i < resources->max_ports_in_lag; i++) {
mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
if (mlxsw_sp_port)
return mlxsw_sp_port;
......
@@ -1512,10 +1512,6 @@ static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
static struct mlxsw_config_profile mlxsw_sx_config_profile = {
.used_max_vepa_channels = 1,
.max_vepa_channels = 0,
.used_max_lag = 1,
.max_lag = 64,
.used_max_port_per_lag = 1,
.max_port_per_lag = 16,
.used_max_mid = 1,
.max_mid = 7000,
.used_max_pgt = 1,
......