Commit 0ca37375, authored by David S. Miller

Merge branch 'mlxsw-Improve-extensibility'

Jiri Pirko says:

====================
mlxsw: Improve extensibility

Ido says:

Since the initial introduction of the bridge offload in commit
56ade8fe ("mlxsw: spectrum: Add initial support for Spectrum ASIC"),
the per-port struct has been used to store both the physical properties
of the port and logical bridge properties such as learning state and the
active VLANs in the VLAN-aware bridge.

The above resulted in a bloated struct and code that is becoming
increasingly difficult to extend once stacked devices and more advanced
use cases such as IGMP snooping are taken into account.

Due to the incremental development of this driver and the complexity of
the underlying hardware, subsequent design decisions failed to generalize
the FID and RIF resources. Both could have benefited from a more generic
design, yielding consolidated code paths and better extensibility with
regard to future ASICs and use cases.

This patchset tries to solve both of these design problems, as they're
tightly coupled. To ease the code review, the changes are done in a
bottom-up manner, in which the port struct is the first to be patched,
then the FIDs the ports are mapped to and finally the RIFs configured on
top.

The first half of the patchset gradually moves away from the previous
design to one that is more in sync with the underlying hardware and that
clearly separates hardware-specific structs from logical ones such as the
bridge port.

All the bridge-specific information is removed from the port struct, as
well as the list of VLAN devices ("vPorts") configured on top of it.
Instead, a linked list of VLANs is introduced, which allows each VLAN
to hold state, such as its mapping to a particular FID and its membership
in a bridge. The data structures are depicted in the following figure:

                                  mlxsw_sp_bridge_device
                                       +----------+
                                       |          |
                                  +----+          |
                                  |    |          |
                                  |    +----------+
                                  |
             mlxsw_sp_bridge_port |
                 +----------+     |
                 |          |     |
              +-->          +-----+--> ..
              |  |          |
              |  +----+-----+
              |       |
              |       v
              | mlxsw_sp_bridge_vlan
              |  +----------+
              |  | vid X    |
              |  |          +--> ..
              |  |          |
              |  +----+-----+
              |       |
              +--+----v-----+
                 | vid X    |
              +--+          +--> ..
              |  |          |
mlxsw_sp_port |  +----------+
+----------+  | mlxsw_sp_port_vlan
|          |  |
|          +--+
|          |
+----------+

This model allows us to consolidate many of the code paths relating to
VLAN-aware and VLAN-unaware bridges, as the latter is simply represented
using a bridge port with a VLAN list size of one. Another advantage of
the model is that it's easy to extend it with future per-VLAN
attributes - such as mrouter indication - by merely pushing these down
from the bridge port struct to the bridge VLAN one.
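
To make the figure concrete, here is a simplified sketch of how the new objects
reference one another. The bridge structs are private to spectrum_switchdev.c
(whose diff is collapsed below), so their fields here are assumptions trimmed to
what the figure shows; mlxsw_sp_port_vlan mirrors the declaration added to
spectrum.h in this series.

/* Simplified, illustrative sketch of the new object model. */
struct mlxsw_sp_bridge_device {                 /* one per bridge netdev */
        struct net_device *dev;
        struct list_head ports_list;            /* mlxsw_sp_bridge_port entries */
        bool vlan_enabled;
};

struct mlxsw_sp_bridge_port {                   /* one per bridge port netdev */
        struct mlxsw_sp_bridge_device *bridge_device;
        struct list_head list;                  /* node in ports_list */
        struct list_head vlans_list;            /* mlxsw_sp_bridge_vlan entries */
};

struct mlxsw_sp_bridge_vlan {                   /* one per VID on a bridge port */
        struct list_head list;                  /* node in vlans_list */
        struct list_head port_vlan_list;        /* mlxsw_sp_port_vlan entries */
        u16 vid;
};

struct mlxsw_sp_port_vlan {                     /* one per {port, VID} pair */
        struct list_head list;                  /* node in port->vlans_list */
        struct mlxsw_sp_port *mlxsw_sp_port;
        struct mlxsw_sp_fid *fid;               /* NULL when not mapped to a FID */
        u16 vid;
        struct mlxsw_sp_bridge_port *bridge_port;
        struct list_head bridge_vlan_node;      /* node in port_vlan_list */
};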

The second half of the patchset builds on top of the previous work and
prepares the driver for the common FID and RIF cores, which are finally
implemented in the last two patches. These exploit the fact that, despite
the different kinds of FIDs and RIFs, they all share a common object on
which the core operations can operate.

By hiding both objects from the rest of the driver and modeling their
operations using a virtual function table (VFT), it will be easier to
extend the driver for future use cases such as VXLAN.
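
As an illustration of the VFT approach, below is a hypothetical, trimmed-down
sketch of a per-FID-family operations table. The real table lives in the new
spectrum_fid.c, which is not part of the excerpt shown on this page, so the
names and fields here are assumptions.

/* Hypothetical sketch: callers only see the opaque struct mlxsw_sp_fid,
 * and the FID core dispatches to the family-specific implementation.
 */
struct mlxsw_sp_fid_ops {
        void (*setup)(struct mlxsw_sp_fid *fid, const void *arg);
        int (*configure)(struct mlxsw_sp_fid *fid);
        void (*deconfigure)(struct mlxsw_sp_fid *fid);
        int (*port_vid_map)(struct mlxsw_sp_fid *fid,
                            struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
        void (*port_vid_unmap)(struct mlxsw_sp_fid *fid,
                               struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
};

A new FID family (e.g. for VXLAN) can then be added by providing another ops
instance instead of touching every caller.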

Tested using the following LNST recipes:
https://github.com/jpirko/lnst/tree/master/recipes/switchdev
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -16,7 +16,8 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
 			   spectrum_switchdev.o spectrum_router.o \
 			   spectrum_kvdl.o spectrum_acl_tcam.o \
 			   spectrum_acl.o spectrum_flower.o \
-			   spectrum_cnt.o spectrum_dpipe.o
+			   spectrum_cnt.o spectrum_dpipe.o \
+			   spectrum_fid.o
 mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
 obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
 mlxsw_minimal-objs := minimal.o
@@ -54,12 +54,7 @@
 #include "core_acl_flex_keys.h"
 #include "core_acl_flex_actions.h"
-#define MLXSW_SP_VFID_BASE VLAN_N_VID
-#define MLXSW_SP_VFID_MAX 1024 /* Bridged VLAN interfaces */
-
-#define MLXSW_SP_DUMMY_FID 15359
-#define MLXSW_SP_RFID_BASE 15360
+#define MLXSW_SP_FID_8021D_MAX 1024
 #define MLXSW_SP_MID_MAX 7000
@@ -78,13 +73,19 @@ struct mlxsw_sp_upper {
        unsigned int ref_count;
 };
-struct mlxsw_sp_fid {
-       void (*leave)(struct mlxsw_sp_port *mlxsw_sp_vport);
-       struct list_head list;
-       unsigned int ref_count;
-       struct net_device *dev;
-       struct mlxsw_sp_rif *rif;
-       u16 fid;
+enum mlxsw_sp_rif_type {
+       MLXSW_SP_RIF_TYPE_SUBPORT,
+       MLXSW_SP_RIF_TYPE_VLAN,
+       MLXSW_SP_RIF_TYPE_FID,
+       MLXSW_SP_RIF_TYPE_MAX,
+};
+
+enum mlxsw_sp_fid_type {
+       MLXSW_SP_FID_TYPE_8021Q,
+       MLXSW_SP_FID_TYPE_8021D,
+       MLXSW_SP_FID_TYPE_RFID,
+       MLXSW_SP_FID_TYPE_DUMMY,
+       MLXSW_SP_FID_TYPE_MAX,
 };
 struct mlxsw_sp_mid {
@@ -95,21 +96,6 @@ struct mlxsw_sp_mid {
        unsigned int ref_count;
 };
-static inline u16 mlxsw_sp_vfid_to_fid(u16 vfid)
-{
-       return MLXSW_SP_VFID_BASE + vfid;
-}
-
-static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
-{
-       return fid - MLXSW_SP_VFID_BASE;
-}
-
-static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
-{
-       return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_DUMMY_FID;
-}
-
 enum mlxsw_sp_span_type {
        MLXSW_SP_SPAN_EGRESS,
        MLXSW_SP_SPAN_INGRESS
@@ -153,13 +139,9 @@ struct mlxsw_sp_bridge;
 struct mlxsw_sp_router;
 struct mlxsw_sp_acl;
 struct mlxsw_sp_counter_pool;
+struct mlxsw_sp_fid_core;
 struct mlxsw_sp {
-       struct {
-               struct list_head list;
-               DECLARE_BITMAP(mapped, MLXSW_SP_VFID_MAX);
-       } vfids;
-       struct list_head fids; /* VLAN-aware bridge FIDs */
        struct mlxsw_sp_port **ports;
        struct mlxsw_core *core;
        const struct mlxsw_bus_info *bus_info;
@@ -170,6 +152,7 @@ struct mlxsw_sp {
        struct mlxsw_sp_bridge *bridge;
        struct mlxsw_sp_router *router;
        struct mlxsw_sp_acl *acl;
+       struct mlxsw_sp_fid_core *fid_core;
        struct {
                DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
        } kvdl;
@@ -203,28 +186,27 @@ struct mlxsw_sp_port_sample {
        bool truncate;
 };
+struct mlxsw_sp_bridge_port;
+struct mlxsw_sp_fid;
+
+struct mlxsw_sp_port_vlan {
+       struct list_head list;
+       struct mlxsw_sp_port *mlxsw_sp_port;
+       struct mlxsw_sp_fid *fid;
+       u16 vid;
+       struct mlxsw_sp_bridge_port *bridge_port;
+       struct list_head bridge_vlan_node;
+};
+
 struct mlxsw_sp_port {
        struct net_device *dev;
        struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
        struct mlxsw_sp *mlxsw_sp;
        u8 local_port;
-       u8 stp_state;
-       u16 learning:1,
-          learning_sync:1,
-          uc_flood:1,
-          mc_flood:1,
-          mc_router:1,
-          mc_disabled:1,
-          bridged:1,
-          lagged:1,
+       u8 lagged:1,
           split:1;
        u16 pvid;
        u16 lag_id;
-       struct {
-               struct list_head list;
-               struct mlxsw_sp_fid *f;
-               u16 vid;
-       } vport;
        struct {
                u8 tx_pause:1,
                   rx_pause:1,
@@ -240,11 +222,6 @@ struct mlxsw_sp_port {
                u8 width;
                u8 lane;
        } mapping;
-       /* 802.1Q bridge VLANs */
-       unsigned long *active_vlans;
-       unsigned long *untagged_vlans;
-       /* VLAN interfaces */
-       struct list_head vports_list;
        /* TC handles */
        struct list_head mall_tc_list;
        struct {
@@ -253,10 +230,12 @@ struct mlxsw_sp_port {
                struct delayed_work update_dw;
        } hw_stats;
        struct mlxsw_sp_port_sample *sample;
+       struct list_head vlans_list;
 };
 bool mlxsw_sp_port_dev_check(const struct net_device *dev);
 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
+struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
@@ -278,100 +257,25 @@ mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
        return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL;
 }
-static inline u16
-mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
-{
-       return mlxsw_sp_vport->vport.vid;
-}
-
-static inline bool
-mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port)
-{
-       u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
-
-       return vid != 0;
-}
-
-static inline void mlxsw_sp_vport_fid_set(struct mlxsw_sp_port *mlxsw_sp_vport,
-                                          struct mlxsw_sp_fid *f)
-{
-       mlxsw_sp_vport->vport.f = f;
-}
-
-static inline struct mlxsw_sp_fid *
-mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
-{
-       return mlxsw_sp_vport->vport.f;
-}
-
-static inline struct net_device *
-mlxsw_sp_vport_dev_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
-{
-       struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
-
-       return f ? f->dev : NULL;
-}
-
-static inline struct mlxsw_sp_port *
-mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
-{
-       struct mlxsw_sp_port *mlxsw_sp_vport;
-
-       list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
-                           vport.list) {
-               if (mlxsw_sp_vport_vid_get(mlxsw_sp_vport) == vid)
-                       return mlxsw_sp_vport;
-       }
-
-       return NULL;
-}
-
-static inline struct mlxsw_sp_port *
-mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
-                                u16 fid)
-{
-       struct mlxsw_sp_port *mlxsw_sp_vport;
-
-       list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
-                           vport.list) {
-               struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
-
-               if (f && f->fid == fid)
-                       return mlxsw_sp_vport;
-       }
-
-       return NULL;
-}
-
-static inline struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
-                                                     u16 fid)
-{
-       struct mlxsw_sp_fid *f;
-
-       list_for_each_entry(f, &mlxsw_sp->fids, list)
-               if (f->fid == fid)
-                       return f;
-
-       return NULL;
-}
-
-static inline struct mlxsw_sp_fid *
-mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
-                   const struct net_device *br_dev)
-{
-       struct mlxsw_sp_fid *f;
-
-       list_for_each_entry(f, &mlxsw_sp->vfids.list, list)
-               if (f->dev == br_dev)
-                       return f;
-
-       return NULL;
-}
-
-enum mlxsw_sp_flood_table {
-       MLXSW_SP_FLOOD_TABLE_UC,
-       MLXSW_SP_FLOOD_TABLE_BC,
-       MLXSW_SP_FLOOD_TABLE_MC,
+static inline struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_find_by_vid(const struct mlxsw_sp_port *mlxsw_sp_port,
+                               u16 vid)
+{
+       struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+       list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
+                           list) {
+               if (mlxsw_sp_port_vlan->vid == vid)
+                       return mlxsw_sp_port_vlan;
+       }
+
+       return NULL;
+}
+
+enum mlxsw_sp_flood_type {
+       MLXSW_SP_FLOOD_TYPE_UC,
+       MLXSW_SP_FLOOD_TYPE_BC,
+       MLXSW_SP_FLOOD_TYPE_MC,
 };
 int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
@@ -411,25 +315,23 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
 u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells);
 u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes);
-struct mlxsw_sp_upper *mlxsw_sp_master_bridge(const struct mlxsw_sp *mlxsw_sp);
 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
 void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
 void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
-int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
-                                 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
-                                 u16 vid);
 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
                            u16 vid_end, bool is_member, bool untagged);
-int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
-                             bool set);
-void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
-int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid);
 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
                         bool adding);
-struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
-void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
+void
+mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
+int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
+                              struct net_device *brport_dev,
+                              struct net_device *br_dev);
+void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+                                struct net_device *brport_dev,
+                                struct net_device *br_dev);
 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
                           enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
                           bool dwrr, u8 dwrr_weight);
@@ -443,9 +345,13 @@ int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                   u8 next_index, u32 maxrate);
 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
                               u8 state);
+int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable);
 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
                                    bool learn_enable);
 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+struct mlxsw_sp_port_vlan *
+mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
 #ifdef CONFIG_MLXSW_SPECTRUM_DCB
@@ -471,10 +377,11 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
 int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
 int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
                             unsigned long event, void *ptr);
-void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
-                                 struct mlxsw_sp_rif *rif);
 int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
                                  struct netdev_notifier_changeupper_info *info);
+void
+mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
+void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
 int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
                         u32 *p_entry_index);
@@ -580,6 +487,8 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_rule *rule,
                                 u64 *packets, u64 *bytes, u64 *last_use);
+struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp);
+
 int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
 void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
@@ -599,4 +508,31 @@ int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
                                 unsigned int counter_index);
+int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
+                           enum mlxsw_sp_flood_type packet_type, u8 local_port,
+                           bool member);
+int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid,
+                              struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid,
+                                 struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+enum mlxsw_sp_rif_type mlxsw_sp_fid_rif_type(const struct mlxsw_sp_fid *fid);
+u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid);
+enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid);
+void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif);
+enum mlxsw_sp_rif_type
+mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp,
+                           enum mlxsw_sp_fid_type type);
+u16 mlxsw_sp_fid_8021q_vid(const struct mlxsw_sp_fid *fid);
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_get(struct mlxsw_sp *mlxsw_sp, u16 vid);
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_get(struct mlxsw_sp *mlxsw_sp,
+                                            int br_ifindex);
+struct mlxsw_sp_fid *mlxsw_sp_fid_rfid_get(struct mlxsw_sp *mlxsw_sp,
+                                           u16 rif_index);
+struct mlxsw_sp_fid *mlxsw_sp_fid_dummy_get(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid);
+int mlxsw_sp_port_fids_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_fids_fini(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp);
+
 #endif
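
For orientation, the FID API declared above is intended to be consumed roughly
as follows. This is an illustrative sketch only: example_port_vlan_fid_join()
is a made-up name, and the real call sites live in spectrum_switchdev.c and
spectrum_router.c, whose diffs are not shown on this page.

/* Illustrative only: joining a {port, VID} pair to the 802.1Q FID that
 * backs its VLAN-aware bridge, using the new FID core.
 */
static int example_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
        struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 vid = mlxsw_sp_port_vlan->vid;
        struct mlxsw_sp_fid *fid;
        int err;

        /* Look up (or create) the 802.1Q FID backing this VID. */
        fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
        if (IS_ERR(fid))
                return PTR_ERR(fid);

        /* Classify traffic from this {port, VID} pair into the FID. */
        err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
        if (err)
                goto err_port_vid_map;

        mlxsw_sp_port_vlan->fid = fid;
        return 0;

err_port_vid_map:
        mlxsw_sp_fid_put(fid);
        return err;
}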
@@ -53,6 +53,7 @@ struct mlxsw_sp_acl {
        struct mlxsw_sp *mlxsw_sp;
        struct mlxsw_afk *afk;
        struct mlxsw_afa *afa;
+       struct mlxsw_sp_fid *dummy_fid;
        const struct mlxsw_sp_acl_ops *ops;
        struct rhashtable ruleset_ht;
        struct list_head rules;
@@ -112,6 +113,11 @@ static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
        .automatic_shrinking = true,
 };
+struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
+{
+       return mlxsw_sp->acl->dummy_fid;
+}
+
 static struct mlxsw_sp_acl_ruleset *
 mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
                             const struct mlxsw_sp_acl_profile_ops *ops)
@@ -676,6 +682,7 @@ static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
 int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
 {
        const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
+       struct mlxsw_sp_fid *fid;
        struct mlxsw_sp_acl *acl;
        int err;
@@ -706,6 +713,13 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
        if (err)
                goto err_rhashtable_init;
+       fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
+       if (IS_ERR(fid)) {
+               err = PTR_ERR(fid);
+               goto err_fid_get;
+       }
+       acl->dummy_fid = fid;
+
        INIT_LIST_HEAD(&acl->rules);
        err = acl_ops->init(mlxsw_sp, acl->priv);
        if (err)
@@ -721,6 +735,8 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
        return 0;
 err_acl_ops_init:
+       mlxsw_sp_fid_put(fid);
+err_fid_get:
        rhashtable_destroy(&acl->ruleset_ht);
 err_rhashtable_init:
        mlxsw_afa_destroy(acl->afa);
@@ -739,6 +755,7 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
        cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
        acl_ops->fini(mlxsw_sp, acl->priv);
        WARN_ON(!list_empty(&acl->rules));
+       mlxsw_sp_fid_put(acl->dummy_fid);
        rhashtable_destroy(&acl->ruleset_ht);
        mlxsw_afa_destroy(acl->afa);
        mlxsw_afk_destroy(acl->afk);
...
(The diff for this file is collapsed and not shown.)
@@ -70,9 +70,13 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
                } else if (is_tcf_mirred_egress_redirect(a)) {
                        int ifindex = tcf_mirred_ifindex(a);
                        struct net_device *out_dev;
+                       struct mlxsw_sp_fid *fid;
+                       u16 fid_index;
+                       fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
+                       fid_index = mlxsw_sp_fid_index(fid);
                        err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
-                                                            MLXSW_SP_DUMMY_FID);
+                                                            fid_index);
                        if (err)
                                return err;
...
@@ -62,6 +62,7 @@ int br_multicast_list_adjacent(struct net_device *dev,
                               struct list_head *br_ip_list);
 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto);
 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto);
+bool br_multicast_enabled(const struct net_device *dev);
 #else
 static inline int br_multicast_list_adjacent(struct net_device *dev,
                                              struct list_head *br_ip_list)
@@ -78,6 +79,19 @@ static inline bool br_multicast_has_querier_adjacent(struct net_device *dev,
 {
        return false;
 }
+
+static inline bool br_multicast_enabled(const struct net_device *dev)
+{
+       return false;
+}
 #endif
+
+#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
+bool br_vlan_enabled(const struct net_device *dev);
+#else
+static inline bool br_vlan_enabled(const struct net_device *dev)
+{
+       return false;
+}
+#endif
 #endif
@@ -138,7 +138,7 @@ void br_manage_promisc(struct net_bridge *br)
        /* If vlan filtering is disabled or bridge interface is placed
         * into promiscuous mode, place all ports in promiscuous mode.
         */
-       if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br))
+       if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br->dev))
                set_all = true;
        list_for_each_entry(p, &br->port_list, list) {
...
@@ -599,7 +599,7 @@ static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
                return -EINVAL;
        vg = nbp_vlan_group(p);
-       if (br_vlan_enabled(br) && vg && entry->vid == 0) {
+       if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
                list_for_each_entry(v, &vg->vlan_list, vlist) {
                        entry->vid = v->vid;
                        err = __br_mdb_add(net, br, entry);
@@ -694,7 +694,7 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
                return -EINVAL;
        vg = nbp_vlan_group(p);
-       if (br_vlan_enabled(br) && vg && entry->vid == 0) {
+       if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
                list_for_each_entry(v, &vg->vlan_list, vlist) {
                        entry->vid = v->vid;
                        err = __br_mdb_del(br, entry);
...
@@ -2176,6 +2176,14 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
        return err;
 }
+bool br_multicast_enabled(const struct net_device *dev)
+{
+       struct net_bridge *br = netdev_priv(dev);
+
+       return !br->multicast_disabled;
+}
+EXPORT_SYMBOL_GPL(br_multicast_enabled);
+
 int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
 {
        unsigned long max_delay;
...
@@ -1251,7 +1251,7 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
        u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
        u32 stp_enabled = br->stp_enabled;
        u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
-       u8 vlan_enabled = br_vlan_enabled(br);
+       u8 vlan_enabled = br_vlan_enabled(br->dev);
        u64 clockval;
        clockval = br_timer_value(&br->hello_timer);
...
@@ -854,10 +854,6 @@ static inline u16 br_get_pvid(const struct net_bridge_vlan_group *vg)
        return vg->pvid;
 }
-static inline int br_vlan_enabled(struct net_bridge *br)
-{
-       return br->vlan_enabled;
-}
 #else
 static inline bool br_allowed_ingress(const struct net_bridge *br,
                                       struct net_bridge_vlan_group *vg,
@@ -945,11 +941,6 @@ static inline u16 br_get_pvid(const struct net_bridge_vlan_group *vg)
        return 0;
 }
-static inline int br_vlan_enabled(struct net_bridge *br)
-{
-       return 0;
-}
-
 static inline int __br_vlan_filter_toggle(struct net_bridge *br,
                                           unsigned long val)
 {
...
@@ -706,6 +706,14 @@ int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
        return __br_vlan_filter_toggle(br, val);
 }
+bool br_vlan_enabled(const struct net_device *dev)
+{
+       struct net_bridge *br = netdev_priv(dev);
+
+       return !!br->vlan_enabled;
+}
+EXPORT_SYMBOL_GPL(br_vlan_enabled);
+
 int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
 {
        int err = 0;
...
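
The two helpers exported above, br_vlan_enabled() and br_multicast_enabled(),
let switchdev drivers query bridge configuration without reaching into
struct net_bridge. A minimal, hypothetical usage sketch (the function name and
the offload policy are assumptions, not taken from this patchset):

/* Illustrative only: a driver deciding whether a bridge's configuration
 * can be offloaded, based on the newly exported bridge helpers.
 */
static int example_bridge_offload_check(const struct net_device *br_dev)
{
        /* e.g. only offload VLAN-aware bridges */
        if (!br_vlan_enabled(br_dev))
                return -EOPNOTSUPP;

        /* e.g. require IGMP snooping before offloading multicast */
        if (!br_multicast_enabled(br_dev))
                return -EOPNOTSUPP;

        return 0;
}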