diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 0a66d27174425213abf79b1e96879d98940f96e9..3bd38ed6d68bd6652884b8298c6d645fd6c8f815 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -22,6 +22,7 @@
 #define MAX_CGX 3
 #define MAX_LMAC_PER_CGX 4
+#define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */
 #define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
 
 /* Registers */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index d39ada404c8fd839cb2bd7c43a588d49df0e8c7a..9bddb032dd7ecb25e29ebb0d4ebba6157a1c5e6b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -143,6 +143,11 @@ enum nix_scheduler {
 	NIX_TXSCH_LVL_CNT = 0x5,
 };
 
+/* Min/Max packet sizes, excluding FCS */
+#define NIC_HW_MIN_FRS 40
+#define NIC_HW_MAX_FRS 9212
+#define SDP_HW_MAX_FRS 65535
+
 /* NIX RX action operation*/
 #define NIX_RX_ACTIONOP_DROP (0x0ull)
 #define NIX_RX_ACTIONOP_UCAST (0x1ull)
@@ -169,7 +174,9 @@ enum nix_scheduler {
 #define MAX_LMAC_PKIND 12
 #define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b))
+#define NIX_LINK_LBK(a) (12 + (a))
 #define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c))
+#define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b))
 
 /* NIX LSO format indices.
  * As of now TSO is the only one using, so statically assigning indices.
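[Reviewer note] Quick arithmetic check on the new loopback macros, computed purely from the definitions above: LBK links continue right after the 12 CGX LMAC links, and LBK channel numbering starts at base 0 rather than 0x800.

	/* Illustrative values only; each follows from the macros above */
	static const int lbk_examples[] = {
		NIX_LINK_LBK(0),		/* 12 + 0 = 12: first link past the CGX LMACs */
		NIX_CHAN_LBK_CHX(0, 1),		/* 0 + 0x100 * 0 + 1 = 1 */
		NIX_CHAN_CGX_LMAC_CHX(1, 2, 0),	/* 0x800 + 0x100 + 0x20 = 0x920, for contrast */
	};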
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 85ba24a057745ed102c8e5a48da8b6ff52d4450f..d6f9ed8ea966ce7fb035c47c8c0d4007830d878a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -290,7 +290,7 @@ EXPORT_SYMBOL(otx2_mbox_nonempty);
 const char *otx2_mbox_id2name(u16 id)
 {
 	switch (id) {
-#define M(_name, _id, _1, _2) case _id: return # _name;
+#define M(_name, _id, _1, _2, _3) case _id: return # _name;
 	MBOX_MESSAGES
 #undef M
 	default:
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index a15a59c9a2397ac5176b63eb73249065ad6868a3..292c13a901df5a149029c12ffe793364fcbd2619 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -120,54 +120,93 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
 
 #define MBOX_MESSAGES \
 /* Generic mbox IDs (range 0x000 - 0x1FF) */ \
-M(READY, 0x001, msg_req, ready_msg_rsp) \
-M(ATTACH_RESOURCES, 0x002, rsrc_attach, msg_rsp) \
-M(DETACH_RESOURCES, 0x003, rsrc_detach, msg_rsp) \
-M(MSIX_OFFSET, 0x004, msg_req, msix_offset_rsp) \
+M(READY, 0x001, ready, msg_req, ready_msg_rsp) \
+M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
+M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
+M(MSIX_OFFSET, 0x004, msix_offset, msg_req, msix_offset_rsp) \
+M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
 /* CGX mbox IDs (range 0x200 - 0x3FF) */ \
-M(CGX_START_RXTX, 0x200, msg_req, msg_rsp) \
-M(CGX_STOP_RXTX, 0x201, msg_req, msg_rsp) \
-M(CGX_STATS, 0x202, msg_req, cgx_stats_rsp) \
-M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set_or_get, \
+M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
+M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
+M(CGX_STATS, 0x202, cgx_stats, msg_req, cgx_stats_rsp) \
+M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set, cgx_mac_addr_set_or_get, \
 		cgx_mac_addr_set_or_get) \
-M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_set_or_get, \
+M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_get, cgx_mac_addr_set_or_get, \
 		cgx_mac_addr_set_or_get) \
-M(CGX_PROMISC_ENABLE, 0x205, msg_req, msg_rsp) \
-M(CGX_PROMISC_DISABLE, 0x206, msg_req, msg_rsp) \
-M(CGX_START_LINKEVENTS, 0x207, msg_req, msg_rsp) \
-M(CGX_STOP_LINKEVENTS, 0x208, msg_req, msg_rsp) \
-M(CGX_GET_LINKINFO, 0x209, msg_req, cgx_link_info_msg) \
-M(CGX_INTLBK_ENABLE, 0x20A, msg_req, msg_rsp) \
-M(CGX_INTLBK_DISABLE, 0x20B, msg_req, msg_rsp) \
+M(CGX_PROMISC_ENABLE, 0x205, cgx_promisc_enable, msg_req, msg_rsp) \
+M(CGX_PROMISC_DISABLE, 0x206, cgx_promisc_disable, msg_req, msg_rsp) \
+M(CGX_START_LINKEVENTS, 0x207, cgx_start_linkevents, msg_req, msg_rsp) \
+M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp) \
+M(CGX_GET_LINKINFO, 0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \
+M(CGX_INTLBK_ENABLE, 0x20A, cgx_intlbk_enable, msg_req, msg_rsp) \
+M(CGX_INTLBK_DISABLE, 0x20B, cgx_intlbk_disable, msg_req, msg_rsp) \
 /* NPA mbox IDs (range 0x400 - 0x5FF) */ \
-M(NPA_LF_ALLOC, 0x400, npa_lf_alloc_req, npa_lf_alloc_rsp) \
-M(NPA_LF_FREE, 0x401, msg_req, msg_rsp) \
-M(NPA_AQ_ENQ, 0x402, npa_aq_enq_req, npa_aq_enq_rsp) \
-M(NPA_HWCTX_DISABLE, 0x403, hwctx_disable_req, msg_rsp) \
+M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
+		npa_lf_alloc_req, npa_lf_alloc_rsp) \
+M(NPA_LF_FREE, 0x401, npa_lf_free, msg_req, msg_rsp) \
+M(NPA_AQ_ENQ, 0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp) \
+M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\
 /* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
 /* TIM mbox IDs (range 0x800 - 0x9FF) */ \
 /* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
 /* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
+M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\
+		npc_mcam_alloc_entry_rsp) \
+M(NPC_MCAM_FREE_ENTRY, 0x6001, npc_mcam_free_entry, \
+		npc_mcam_free_entry_req, msg_rsp) \
+M(NPC_MCAM_WRITE_ENTRY, 0x6002, npc_mcam_write_entry, \
+		npc_mcam_write_entry_req, msg_rsp) \
+M(NPC_MCAM_ENA_ENTRY, 0x6003, npc_mcam_ena_entry, \
+		npc_mcam_ena_dis_entry_req, msg_rsp) \
+M(NPC_MCAM_DIS_ENTRY, 0x6004, npc_mcam_dis_entry, \
+		npc_mcam_ena_dis_entry_req, msg_rsp) \
+M(NPC_MCAM_SHIFT_ENTRY, 0x6005, npc_mcam_shift_entry, npc_mcam_shift_entry_req,\
+		npc_mcam_shift_entry_rsp) \
+M(NPC_MCAM_ALLOC_COUNTER, 0x6006, npc_mcam_alloc_counter, \
+		npc_mcam_alloc_counter_req, \
+		npc_mcam_alloc_counter_rsp) \
+M(NPC_MCAM_FREE_COUNTER, 0x6007, npc_mcam_free_counter, \
+		npc_mcam_oper_counter_req, msg_rsp) \
+M(NPC_MCAM_UNMAP_COUNTER, 0x6008, npc_mcam_unmap_counter, \
+		npc_mcam_unmap_counter_req, msg_rsp) \
+M(NPC_MCAM_CLEAR_COUNTER, 0x6009, npc_mcam_clear_counter, \
+		npc_mcam_oper_counter_req, msg_rsp) \
+M(NPC_MCAM_COUNTER_STATS, 0x600a, npc_mcam_counter_stats, \
+		npc_mcam_oper_counter_req, \
+		npc_mcam_oper_counter_rsp) \
+M(NPC_MCAM_ALLOC_AND_WRITE_ENTRY, 0x600b, npc_mcam_alloc_and_write_entry, \
+		npc_mcam_alloc_and_write_entry_req, \
+		npc_mcam_alloc_and_write_entry_rsp) \
+M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, \
+		msg_req, npc_get_kex_cfg_rsp) \
 /* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
-M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc_req, nix_lf_alloc_rsp) \
-M(NIX_LF_FREE, 0x8001, msg_req, msg_rsp) \
-M(NIX_AQ_ENQ, 0x8002, nix_aq_enq_req, nix_aq_enq_rsp) \
-M(NIX_HWCTX_DISABLE, 0x8003, hwctx_disable_req, msg_rsp) \
-M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc_req, nix_txsch_alloc_rsp) \
-M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free_req, msg_rsp) \
-M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_config, msg_rsp) \
-M(NIX_STATS_RST, 0x8007, msg_req, msg_rsp) \
-M(NIX_VTAG_CFG, 0x8008, nix_vtag_config, msg_rsp) \
-M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, msg_rsp) \
-M(NIX_SET_MAC_ADDR, 0x800a, nix_set_mac_addr, msg_rsp) \
-M(NIX_SET_RX_MODE, 0x800b, nix_rx_mode, msg_rsp)
+M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \
+		nix_lf_alloc_req, nix_lf_alloc_rsp) \
+M(NIX_LF_FREE, 0x8001, nix_lf_free, msg_req, msg_rsp) \
+M(NIX_AQ_ENQ, 0x8002, nix_aq_enq, nix_aq_enq_req, nix_aq_enq_rsp) \
+M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, \
+		hwctx_disable_req, msg_rsp) \
+M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, \
+		nix_txsch_alloc_req, nix_txsch_alloc_rsp) \
+M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \
+M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp) \
+M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp) \
+M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, msg_rsp) \
+M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, \
+		nix_rss_flowkey_cfg, msg_rsp) \
+M(NIX_SET_MAC_ADDR, 0x800a, nix_set_mac_addr, nix_set_mac_addr, msg_rsp) \
+M(NIX_SET_RX_MODE, 0x800b, nix_set_rx_mode, nix_rx_mode, msg_rsp) \
+M(NIX_SET_HW_FRS, 0x800c, nix_set_hw_frs, nix_frs_cfg, msg_rsp) \
+M(NIX_LF_START_RX, 0x800d, nix_lf_start_rx, msg_req, msg_rsp) \
+M(NIX_LF_STOP_RX, 0x800e, nix_lf_stop_rx, msg_req, msg_rsp) \
+M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp)
 
 /* Messages initiated by AF (range 0xC00 - 0xDFF) */
 #define MBOX_UP_CGX_MESSAGES \
-M(CGX_LINK_EVENT, 0xC00, cgx_link_info_msg, msg_rsp)
+M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
 
 enum {
-#define M(_name, _id, _1, _2) MBOX_MSG_ ## _name = _id,
+#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
 MBOX_MESSAGES
 MBOX_UP_CGX_MESSAGES
 #undef M
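[Reviewer note] The extra third argument threads a lower-case function name through every expansion site, so one list now drives the ID enum, otx2_mbox_id2name(), the handler dispatch and the per-message alloc helpers. A standalone sketch of the pattern (not driver code; list shortened to one entry):

	#define MSGS M(READY, 0x001, ready, msg_req, ready_msg_rsp)

	#define M(_name, _id, _fn_name, _req, _rsp) MBOX_MSG_ ## _name = _id,
	enum mbox_msg_id { MSGS };		/* -> MBOX_MSG_READY = 0x001, */
	#undef M

	#define M(_name, _id, _fn_name, _req, _rsp) "rvu_mbox_handler_" #_fn_name,
	static const char * const handler_names[] = { MSGS };	/* "...ready" */
	#undef M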
@@ -191,6 +230,13 @@ struct msg_rsp {
 	struct mbox_msghdr hdr;
 };
 
+/* RVU mailbox error codes
+ * Range 256 - 300.
+ */
+enum rvu_af_status {
+	RVU_INVALID_VF_ID = -256,
+};
+
 struct ready_msg_rsp {
 	struct mbox_msghdr hdr;
 	u16 sclk_feq; /* SCLK frequency */
@@ -347,6 +393,8 @@ struct hwctx_disable_req {
 	u8 ctype;
 };
 
+/* NIX mbox message formats */
+
 /* NIX mailbox error codes
  * Range 401 - 500.
  */
@@ -365,6 +413,8 @@ enum nix_af_status {
 	NIX_AF_INVAL_TXSCHQ_CFG = -412,
 	NIX_AF_SMQ_FLUSH_FAILED = -413,
 	NIX_AF_ERR_LF_RESET = -414,
+	NIX_AF_INVAL_NPA_PF_FUNC = -419,
+	NIX_AF_INVAL_SSO_PF_FUNC = -420,
 };
 
 /* For NIX LF context alloc and init */
@@ -392,6 +442,10 @@ struct nix_lf_alloc_rsp {
 	u8 lso_tsov4_idx;
 	u8 lso_tsov6_idx;
 	u8 mac_addr[ETH_ALEN];
+	u8 lf_rx_stats; /* NIX_AF_CONST1::LF_RX_STATS */
+	u8 lf_tx_stats; /* NIX_AF_CONST1::LF_TX_STATS */
+	u16 cints; /* NIX_AF_CONST2::CINTS */
+	u16 qints; /* NIX_AF_CONST2::QINTS */
 };
 
 /* NIX AQ enqueue msg */
@@ -472,6 +526,7 @@ struct nix_txschq_config {
 
 struct nix_vtag_config {
 	struct mbox_msghdr hdr;
+	/* '0' for 4 octet VTAG, '1' for 8 octet VTAG */
 	u8 vtag_size;
 	/* cfg_type is '0' for tx vlan cfg
 	 * cfg_type is '1' for rx vlan cfg
@@ -492,7 +547,7 @@ struct nix_vtag_config {
 
 	/* valid when cfg_type is '1' */
 	struct {
-		/* rx vtag type index */
+		/* rx vtag type index, valid values are in 0..7 range */
 		u8 vtag_type;
 		/* rx vtag strip */
 		u8 strip_vtag :1;
@@ -522,4 +577,159 @@ struct nix_rx_mode {
 	u16 mode;
 };
 
+struct nix_frs_cfg {
+	struct mbox_msghdr hdr;
+	u8 update_smq; /* Update SMQ's min/max lens */
+	u8 update_minlen; /* Set minlen also */
+	u8 sdp_link; /* Set SDP RX link */
+	u16 maxlen;
+	u16 minlen;
+};
+
+/* NPC mbox message structs */
+
+#define NPC_MCAM_ENTRY_INVALID 0xFFFF
+#define NPC_MCAM_INVALID_MAP 0xFFFF
+
+/* NPC mailbox error codes
+ * Range 701 - 800.
+ */
+enum npc_af_status {
+	NPC_MCAM_INVALID_REQ = -701,
+	NPC_MCAM_ALLOC_DENIED = -702,
+	NPC_MCAM_ALLOC_FAILED = -703,
+	NPC_MCAM_PERM_DENIED = -704,
+};
+
+struct npc_mcam_alloc_entry_req {
+	struct mbox_msghdr hdr;
+#define NPC_MAX_NONCONTIG_ENTRIES 256
+	u8 contig; /* Contiguous entries ? */
+#define NPC_MCAM_ANY_PRIO 0
+#define NPC_MCAM_LOWER_PRIO 1
+#define NPC_MCAM_HIGHER_PRIO 2
+	u8 priority; /* Lower or higher w.r.t ref_entry */
+	u16 ref_entry;
+	u16 count; /* Number of entries requested */
+};
+
+struct npc_mcam_alloc_entry_rsp {
+	struct mbox_msghdr hdr;
+	u16 entry; /* Entry allocated or start index if contiguous.
+		    * Invalid in case of non-contiguous.
+		    */
+	u16 count; /* Number of entries allocated */
+	u16 free_count; /* Number of entries available */
+	u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
+};
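[Reviewer note] To make the allocation semantics concrete, a minimal sketch of how a consumer might fill this request; the helper name and the mailbox send/response plumbing are illustrative only, not part of this patch:

	/* Hypothetical: ask for 4 contiguous entries placed below an
	 * existing rule, i.e. matching at lower priority than ref_entry.
	 */
	static void fill_mcam_alloc_req(struct npc_mcam_alloc_entry_req *req,
					u16 ref_entry)
	{
		req->contig = 1;			/* one contiguous block  */
		req->count = 4;				/* number of entries     */
		req->priority = NPC_MCAM_LOWER_PRIO;	/* relative to ref_entry */
		req->ref_entry = ref_entry;
	}

On success rsp->entry is the start index; for contig == 0 the indices come back in rsp->entry_list[] instead.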
+struct npc_mcam_free_entry_req {
+	struct mbox_msghdr hdr;
+	u16 entry; /* Entry index to be freed */
+	u8 all; /* If all entries allocated to this PFVF to be freed */
+};
+
+struct mcam_entry {
+#define NPC_MAX_KWS_IN_KEY 7 /* Number of keywords in max keywidth */
+	u64 kw[NPC_MAX_KWS_IN_KEY];
+	u64 kw_mask[NPC_MAX_KWS_IN_KEY];
+	u64 action;
+	u64 vtag_action;
+};
+
+struct npc_mcam_write_entry_req {
+	struct mbox_msghdr hdr;
+	struct mcam_entry entry_data;
+	u16 entry; /* MCAM entry to write this match key */
+	u16 cntr; /* Counter for this MCAM entry */
+	u8 intf; /* Rx or Tx interface */
+	u8 enable_entry;/* Enable this MCAM entry ? */
+	u8 set_cntr; /* Set counter for this entry ? */
+};
+
+/* Enable/Disable a given entry */
+struct npc_mcam_ena_dis_entry_req {
+	struct mbox_msghdr hdr;
+	u16 entry;
+};
+
+struct npc_mcam_shift_entry_req {
+	struct mbox_msghdr hdr;
+#define NPC_MCAM_MAX_SHIFTS 64
+	u16 curr_entry[NPC_MCAM_MAX_SHIFTS];
+	u16 new_entry[NPC_MCAM_MAX_SHIFTS];
+	u16 shift_count; /* Number of entries to shift */
+};
+
+struct npc_mcam_shift_entry_rsp {
+	struct mbox_msghdr hdr;
+	u16 failed_entry_idx; /* Index in 'curr_entry', not entry itself */
+};
+
+struct npc_mcam_alloc_counter_req {
+	struct mbox_msghdr hdr;
+	u8 contig; /* Contiguous counters ? */
+#define NPC_MAX_NONCONTIG_COUNTERS 64
+	u16 count; /* Number of counters requested */
+};
+
+struct npc_mcam_alloc_counter_rsp {
+	struct mbox_msghdr hdr;
+	u16 cntr; /* Counter allocated or start index if contiguous.
+		   * Invalid in case of non-contiguous.
+		   */
+	u16 count; /* Number of counters allocated */
+	u16 cntr_list[NPC_MAX_NONCONTIG_COUNTERS];
+};
+
+struct npc_mcam_oper_counter_req {
+	struct mbox_msghdr hdr;
+	u16 cntr; /* Free a counter or clear/fetch its stats */
+};
+
+struct npc_mcam_oper_counter_rsp {
+	struct mbox_msghdr hdr;
+	u64 stat; /* valid only while fetching counter's stats */
+};
+
+struct npc_mcam_unmap_counter_req {
+	struct mbox_msghdr hdr;
+	u16 cntr;
+	u16 entry; /* Entry and counter to be unmapped */
+	u8 all; /* Unmap all entries using this counter ? */
+};
+
+struct npc_mcam_alloc_and_write_entry_req {
+	struct mbox_msghdr hdr;
+	struct mcam_entry entry_data;
+	u16 ref_entry;
+	u8 priority; /* Lower or higher w.r.t ref_entry */
+	u8 intf; /* Rx or Tx interface */
+	u8 enable_entry;/* Enable this MCAM entry ? */
+	u8 alloc_cntr; /* Allocate counter and map ? */
+};
+
+struct npc_mcam_alloc_and_write_entry_rsp {
+	struct mbox_msghdr hdr;
+	u16 entry;
+	u16 cntr;
+};
+
+struct npc_get_kex_cfg_rsp {
+	struct mbox_msghdr hdr;
+	u64 rx_keyx_cfg; /* NPC_AF_INTF(0)_KEX_CFG */
+	u64 tx_keyx_cfg; /* NPC_AF_INTF(1)_KEX_CFG */
+#define NPC_MAX_INTF 2
+#define NPC_MAX_LID 8
+#define NPC_MAX_LT 16
+#define NPC_MAX_LD 2
+#define NPC_MAX_LFL 16
+	/* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */
+	u64 kex_ld_flags[NPC_MAX_LD];
+	/* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
+	u64 intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
+	/* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */
+	u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
+};
+
 #endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index f98b0113def38c52af69808edea9571919292370..a7a20afb3ba0c6077688d727a8ae7b7edf8ca168 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -259,4 +259,10 @@ struct nix_rx_action {
 #endif
 };
 
+/* NIX Receive Vtag Action Structure */
+#define VTAG0_VALID_BIT BIT_ULL(15)
+#define VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
+#define VTAG0_LID_MASK GENMASK_ULL(10, 8)
+#define VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+
 #endif /* NPC_H */
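[Reviewer note] A sketch of how these masks compose into the vtag0 slice of an MCAM entry's vtag_action word. Assumes <linux/bitfield.h>; the field values are illustrative, the actual consumers are not part of this hunk:

	#include <linux/bitfield.h>

	/* Illustrative only: build an Rx VTAG0 action word */
	static u64 vtag0_action(u8 type, u8 lid, u8 relptr)
	{
		return VTAG0_VALID_BIT |
		       FIELD_PREP(VTAG0_TYPE_MASK, type) |	/* rx vtag type index, 0..7  */
		       FIELD_PREP(VTAG0_LID_MASK, lid) |	/* layer holding the tag     */
		       FIELD_PREP(VTAG0_RELPTR_MASK, relptr);	/* byte offset within layer  */
	}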
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index dc28fa2b9481c9c364c9625e950c3ac43d4f545e..40eb3ad725f5ff3be1546a029bfbb7bb01ae3972 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -29,6 +29,16 @@ static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
 				struct rvu_block *block, int lf);
 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
 				  struct rvu_block *block, int lf);
+static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);
+
+static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+			 int type, int num,
+			 void (mbox_handler)(struct work_struct *),
+			 void (mbox_up_handler)(struct work_struct *));
+enum {
+	TYPE_AFVF,
+	TYPE_AFPF,
+};
 
 /* Supported devices */
 static const struct pci_device_id rvu_id_table[] = {
@@ -153,17 +163,17 @@ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
 	u16 match = 0;
 	int lf;
 
-	spin_lock(&rvu->rsrc_lock);
+	mutex_lock(&rvu->rsrc_lock);
 	for (lf = 0; lf < block->lf.max; lf++) {
 		if (block->fn_map[lf] == pcifunc) {
 			if (slot == match) {
-				spin_unlock(&rvu->rsrc_lock);
+				mutex_unlock(&rvu->rsrc_lock);
 				return lf;
 			}
 			match++;
 		}
 	}
-	spin_unlock(&rvu->rsrc_lock);
+	mutex_unlock(&rvu->rsrc_lock);
 	return -ENODEV;
 }
 
@@ -337,6 +347,28 @@ struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
 	return &rvu->pf[rvu_get_pf(pcifunc)];
 }
 
+static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
+{
+	int pf, vf, nvfs;
+	u64 cfg;
+
+	pf = rvu_get_pf(pcifunc);
+	if (pf >= rvu->hw->total_pfs)
+		return false;
+
+	if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+		return true;
+
+	/* Check if VF is within number of VFs attached to this PF */
+	vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+	nvfs = (cfg >> 12) & 0xFF;
+	if (vf >= nvfs)
+		return false;
+
+	return true;
+}
+
 bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
 {
 	struct rvu_block *block;
@@ -597,6 +629,8 @@ static void rvu_free_hw_resources(struct rvu *rvu)
 	dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
 			   max_msix * PCI_MSIX_ENTRY_SIZE,
 			   DMA_BIDIRECTIONAL, 0);
+
+	mutex_destroy(&rvu->rsrc_lock);
 }
 
 static int rvu_setup_hw_resources(struct rvu *rvu)
@@ -752,7 +786,7 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
 	if (!rvu->hwvf)
 		return -ENOMEM;
 
-	spin_lock_init(&rvu->rsrc_lock);
+	mutex_init(&rvu->rsrc_lock);
 
 	err = rvu_setup_msix_resources(rvu);
 	if (err)
@@ -830,7 +864,7 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
 	return 0;
 }
 
-static int rvu_mbox_handler_READY(struct rvu *rvu, struct msg_req *req,
+static int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
 				  struct ready_msg_rsp *rsp)
 {
 	return 0;
@@ -858,6 +892,22 @@ static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
 	return 0;
 }
 
+bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
+{
+	struct rvu_pfvf *pfvf;
+
+	if (!is_pf_func_valid(rvu, pcifunc))
+		return false;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+	/* Check if this PFFUNC has a LF of type blktype attached */
+	if (!rvu_get_rsrc_mapcount(pfvf, blktype))
+		return false;
+
+	return true;
+}
+
 static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
 			   int pcifunc, int slot)
 {
@@ -926,7 +976,7 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
 	struct rvu_block *block;
 	int blkid;
 
-	spin_lock(&rvu->rsrc_lock);
+	mutex_lock(&rvu->rsrc_lock);
 
 	/* Check for partial resource detach */
 	if (detach && detach->partial)
@@ -956,11 +1006,11 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
 		rvu_detach_block(rvu, pcifunc, block->type);
 	}
 
-	spin_unlock(&rvu->rsrc_lock);
+	mutex_unlock(&rvu->rsrc_lock);
 	return 0;
 }
 
-static int rvu_mbox_handler_DETACH_RESOURCES(struct rvu *rvu,
+static int rvu_mbox_handler_detach_resources(struct rvu *rvu,
 					     struct rsrc_detach *detach,
 					     struct msg_rsp *rsp)
 {
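[Reviewer note] The VF math in is_pf_func_valid() and the FLR paths is easier to follow with the pcifunc layout spelled out. A sketch, assuming the RVU_PFVF_* shift/mask definitions from rvu.h; FUNC == 0 addresses a PF itself and FUNC == n addresses its VF n-1, which is why the code subtracts one:

	/* Illustrative encoding helper, not part of this patch */
	static u16 make_pcifunc(int pf, int func)
	{
		return (pf << RVU_PFVF_PF_SHIFT) | func;
	}
	/* make_pcifunc(2, 0) -> PF2 itself */
	/* make_pcifunc(2, 1) -> PF2's VF0 */
	/* make_pcifunc(0, 3) -> AF's VF2 (PF0 domain, see is_afvf() in rvu.h) */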
@@ -1108,7 +1158,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
 	return -ENOSPC;
 }
 
-static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
+static int rvu_mbox_handler_attach_resources(struct rvu *rvu,
 					     struct rsrc_attach *attach,
 					     struct msg_rsp *rsp)
 {
@@ -1119,7 +1169,7 @@ static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
 	if (!attach->modify)
 		rvu_detach_rsrcs(rvu, NULL, pcifunc);
 
-	spin_lock(&rvu->rsrc_lock);
+	mutex_lock(&rvu->rsrc_lock);
 
 	/* Check if the request can be accommodated */
 	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
@@ -1163,7 +1213,7 @@ static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
 	}
 
 exit:
-	spin_unlock(&rvu->rsrc_lock);
+	mutex_unlock(&rvu->rsrc_lock);
 	return err;
 }
 
@@ -1231,7 +1281,7 @@ static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
 	rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
 }
 
-static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, struct msg_req *req,
+static int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
 					struct msix_offset_rsp *rsp)
 {
 	struct rvu_hwinfo *hw = rvu->hw;
@@ -1280,22 +1330,51 @@ static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, struct msg_req *req,
 	return 0;
 }
 
+static int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
+				   struct msg_rsp *rsp)
+{
+	u16 pcifunc = req->hdr.pcifunc;
+	u16 vf, numvfs;
+	u64 cfg;
+
+	vf = pcifunc & RVU_PFVF_FUNC_MASK;
+	cfg = rvu_read64(rvu, BLKADDR_RVUM,
+			 RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
+	numvfs = (cfg >> 12) & 0xFF;
+
+	if (vf && vf <= numvfs)
+		__rvu_flr_handler(rvu, pcifunc);
+	else
+		return RVU_INVALID_VF_ID;
+
+	return 0;
+}
+
-static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
+static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
 				struct mbox_msghdr *req)
 {
+	struct rvu *rvu = pci_get_drvdata(mbox->pdev);
+
 	/* Check if valid, if not reply with a invalid msg */
 	if (req->sig != OTX2_MBOX_REQ_SIG)
 		goto bad_message;
 
 	switch (req->id) {
-#define M(_name, _id, _req_type, _rsp_type)				\
+#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
 	case _id: {							\
 		struct _rsp_type *rsp;					\
 		int err;						\
 									\
 		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
-			&rvu->mbox, devid,				\
+			mbox, devid,					\
 			sizeof(struct _rsp_type));			\
+		/* some handlers should complete even if reply */	\
+		/* could not be allocated */				\
+		if (!rsp &&						\
+		    _id != MBOX_MSG_DETACH_RESOURCES &&			\
+		    _id != MBOX_MSG_NIX_TXSCH_FREE &&			\
+		    _id != MBOX_MSG_VF_FLR)				\
+			return -ENOMEM;					\
 		if (rsp) {						\
 			rsp->hdr.id = _id;				\
 			rsp->hdr.sig = OTX2_MBOX_RSP_SIG;		\
@@ -1303,9 +1382,9 @@ static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
 			rsp->hdr.rc = 0;				\
 		}							\
 									\
-		err = rvu_mbox_handler_ ## _name(rvu,			\
-						 (struct _req_type *)req, \
-						 rsp);			\
+		err = rvu_mbox_handler_ ## _fn_name(rvu,		\
+						    (struct _req_type *)req, \
+						    rsp);		\
 		if (rsp && err)						\
 			rsp->hdr.rc = err;				\
 									\
@@ -1313,29 +1392,38 @@ static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
 	}
 	MBOX_MESSAGES
 #undef M
-		break;
+
 bad_message:
 	default:
-		otx2_reply_invalid_msg(&rvu->mbox, devid, req->pcifunc,
-				       req->id);
+		otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
 		return -ENODEV;
 	}
 }
-static void rvu_mbox_handler(struct work_struct *work)
+static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
 {
-	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
 	struct rvu *rvu = mwork->rvu;
+	int offset, err, id, devid;
 	struct otx2_mbox_dev *mdev;
 	struct mbox_hdr *req_hdr;
 	struct mbox_msghdr *msg;
+	struct mbox_wq_info *mw;
 	struct otx2_mbox *mbox;
-	int offset, id, err;
-	u16 pf;
 
-	mbox = &rvu->mbox;
-	pf = mwork - rvu->mbox_wrk;
-	mdev = &mbox->dev[pf];
+	switch (type) {
+	case TYPE_AFPF:
+		mw = &rvu->afpf_wq_info;
+		break;
+	case TYPE_AFVF:
+		mw = &rvu->afvf_wq_info;
+		break;
+	default:
+		return;
+	}
+
+	devid = mwork - mw->mbox_wrk;
+	mbox = &mw->mbox;
+	mdev = &mbox->dev[devid];
 
 	/* Process received mbox messages */
 	req_hdr = mdev->mbase + mbox->rx_start;
@@ -1347,10 +1435,21 @@
 	for (id = 0; id < req_hdr->num_msgs; id++) {
 		msg = mdev->mbase + offset;
 
-		/* Set which PF sent this message based on mbox IRQ */
-		msg->pcifunc &= ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
-		msg->pcifunc |= (pf << RVU_PFVF_PF_SHIFT);
-		err = rvu_process_mbox_msg(rvu, pf, msg);
+		/* Set which PF/VF sent this message based on mbox IRQ */
+		switch (type) {
+		case TYPE_AFPF:
+			msg->pcifunc &=
+				~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
+			msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
+			break;
+		case TYPE_AFVF:
+			msg->pcifunc &=
+				~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
+			msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
+			break;
+		}
+
+		err = rvu_process_mbox_msg(mbox, devid, msg);
 		if (!err) {
 			offset = mbox->rx_start + msg->next_msgoff;
 			continue;
@@ -1358,31 +1457,57 @@
 
 		if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
 			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
-				 err, otx2_mbox_id2name(msg->id), msg->id, pf,
+				 err, otx2_mbox_id2name(msg->id),
+				 msg->id, devid,
 				 (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
 		else
 			dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
-				 err, otx2_mbox_id2name(msg->id), msg->id, pf);
+				 err, otx2_mbox_id2name(msg->id),
+				 msg->id, devid);
 	}
 
-	/* Send mbox responses to PF */
-	otx2_mbox_msg_send(mbox, pf);
+	/* Send mbox responses to VF/PF */
+	otx2_mbox_msg_send(mbox, devid);
 }
 
-static void rvu_mbox_up_handler(struct work_struct *work)
+static inline void rvu_afpf_mbox_handler(struct work_struct *work)
 {
 	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+	__rvu_mbox_handler(mwork, TYPE_AFPF);
+}
+
+static inline void rvu_afvf_mbox_handler(struct work_struct *work)
+{
+	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+	__rvu_mbox_handler(mwork, TYPE_AFVF);
+}
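[Reviewer note] Worth spelling out the indexing trick both directions rely on: the wrappers recover the device number purely from the position of the work item in the per-mailbox array, so no extra field is needed. Hypothetical helper equivalent to the 'devid = mwork - mw->mbox_wrk' step:

	static int work_to_devid(struct mbox_wq_info *mw, struct work_struct *work)
	{
		struct rvu_work *mwork = container_of(work, struct rvu_work, work);

		return mwork - mw->mbox_wrk;	/* pointer difference == PF/VF index */
	}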
+static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
+{
 	struct rvu *rvu = mwork->rvu;
 	struct otx2_mbox_dev *mdev;
 	struct mbox_hdr *rsp_hdr;
 	struct mbox_msghdr *msg;
+	struct mbox_wq_info *mw;
 	struct otx2_mbox *mbox;
-	int offset, id;
-	u16 pf;
+	int offset, id, devid;
+
+	switch (type) {
+	case TYPE_AFPF:
+		mw = &rvu->afpf_wq_info;
+		break;
+	case TYPE_AFVF:
+		mw = &rvu->afvf_wq_info;
+		break;
+	default:
+		return;
+	}
 
-	mbox = &rvu->mbox_up;
-	pf = mwork - rvu->mbox_wrk_up;
-	mdev = &mbox->dev[pf];
+	devid = mwork - mw->mbox_wrk_up;
+	mbox = &mw->mbox_up;
+	mdev = &mbox->dev[devid];
 
 	rsp_hdr = mdev->mbase + mbox->rx_start;
 	if (rsp_hdr->num_msgs == 0) {
@@ -1423,128 +1548,182 @@
 		mdev->msgs_acked++;
 	}
 
-	otx2_mbox_reset(mbox, 0);
+	otx2_mbox_reset(mbox, devid);
 }
 
-static int rvu_mbox_init(struct rvu *rvu)
+static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
 {
-	struct rvu_hwinfo *hw = rvu->hw;
-	void __iomem *hwbase = NULL;
+	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+	__rvu_mbox_up_handler(mwork, TYPE_AFPF);
+}
+
+static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
+{
+	struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+	__rvu_mbox_up_handler(mwork, TYPE_AFVF);
+}
+
+static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+			 int type, int num,
+			 void (mbox_handler)(struct work_struct *),
+			 void (mbox_up_handler)(struct work_struct *))
+{
+	void __iomem *hwbase = NULL, *reg_base;
+	int err, i, dir, dir_up;
 	struct rvu_work *mwork;
+	const char *name;
 	u64 bar4_addr;
-	int err, pf;
 
-	rvu->mbox_wq = alloc_workqueue("rvu_afpf_mailbox",
-				       WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
-				       hw->total_pfs);
-	if (!rvu->mbox_wq)
+	switch (type) {
+	case TYPE_AFPF:
+		name = "rvu_afpf_mailbox";
+		bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
+		dir = MBOX_DIR_AFPF;
+		dir_up = MBOX_DIR_AFPF_UP;
+		reg_base = rvu->afreg_base;
+		break;
+	case TYPE_AFVF:
+		name = "rvu_afvf_mailbox";
+		bar4_addr = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
+		dir = MBOX_DIR_PFVF;
+		dir_up = MBOX_DIR_PFVF_UP;
+		reg_base = rvu->pfreg_base;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	mw->mbox_wq = alloc_workqueue(name,
+				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+				      num);
+	if (!mw->mbox_wq)
 		return -ENOMEM;
 
-	rvu->mbox_wrk = devm_kcalloc(rvu->dev, hw->total_pfs,
-				     sizeof(struct rvu_work), GFP_KERNEL);
-	if (!rvu->mbox_wrk) {
+	mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
+				    sizeof(struct rvu_work), GFP_KERNEL);
+	if (!mw->mbox_wrk) {
 		err = -ENOMEM;
 		goto exit;
 	}
 
-	rvu->mbox_wrk_up = devm_kcalloc(rvu->dev, hw->total_pfs,
-					sizeof(struct rvu_work), GFP_KERNEL);
-	if (!rvu->mbox_wrk_up) {
+	mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
+				       sizeof(struct rvu_work), GFP_KERNEL);
+	if (!mw->mbox_wrk_up) {
 		err = -ENOMEM;
 		goto exit;
 	}
 
-	/* Map mbox region shared with PFs */
-	bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
 	/* Mailbox is a reserved memory (in RAM) region shared between
 	 * RVU devices, shouldn't be mapped as device memory to allow
 	 * unaligned accesses.
 	 */
-	hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * hw->total_pfs);
+	hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * num);
 	if (!hwbase) {
 		dev_err(rvu->dev, "Unable to map mailbox region\n");
 		err = -ENOMEM;
 		goto exit;
 	}
 
-	err = otx2_mbox_init(&rvu->mbox, hwbase, rvu->pdev, rvu->afreg_base,
-			     MBOX_DIR_AFPF, hw->total_pfs);
+	err = otx2_mbox_init(&mw->mbox, hwbase, rvu->pdev, reg_base, dir, num);
 	if (err)
 		goto exit;
 
-	err = otx2_mbox_init(&rvu->mbox_up, hwbase, rvu->pdev, rvu->afreg_base,
-			     MBOX_DIR_AFPF_UP, hw->total_pfs);
+	err = otx2_mbox_init(&mw->mbox_up, hwbase, rvu->pdev,
+			     reg_base, dir_up, num);
 	if (err)
 		goto exit;
 
-	for (pf = 0; pf < hw->total_pfs; pf++) {
-		mwork = &rvu->mbox_wrk[pf];
+	for (i = 0; i < num; i++) {
+		mwork = &mw->mbox_wrk[i];
 		mwork->rvu = rvu;
-		INIT_WORK(&mwork->work, rvu_mbox_handler);
-	}
+		INIT_WORK(&mwork->work, mbox_handler);
 
-	for (pf = 0; pf < hw->total_pfs; pf++) {
-		mwork = &rvu->mbox_wrk_up[pf];
+		mwork = &mw->mbox_wrk_up[i];
 		mwork->rvu = rvu;
-		INIT_WORK(&mwork->work, rvu_mbox_up_handler);
+		INIT_WORK(&mwork->work, mbox_up_handler);
 	}
 
 	return 0;
 exit:
 	if (hwbase)
 		iounmap((void __iomem *)hwbase);
-	destroy_workqueue(rvu->mbox_wq);
+	destroy_workqueue(mw->mbox_wq);
 	return err;
 }
 
-static void rvu_mbox_destroy(struct rvu *rvu)
+static void rvu_mbox_destroy(struct mbox_wq_info *mw)
 {
-	if (rvu->mbox_wq) {
-		flush_workqueue(rvu->mbox_wq);
-		destroy_workqueue(rvu->mbox_wq);
-		rvu->mbox_wq = NULL;
+	if (mw->mbox_wq) {
+		flush_workqueue(mw->mbox_wq);
+		destroy_workqueue(mw->mbox_wq);
+		mw->mbox_wq = NULL;
 	}
 
-	if (rvu->mbox.hwbase)
-		iounmap((void __iomem *)rvu->mbox.hwbase);
+	if (mw->mbox.hwbase)
+		iounmap((void __iomem *)mw->mbox.hwbase);
 
-	otx2_mbox_destroy(&rvu->mbox);
-	otx2_mbox_destroy(&rvu->mbox_up);
+	otx2_mbox_destroy(&mw->mbox);
+	otx2_mbox_destroy(&mw->mbox_up);
 }
 
-static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+static void rvu_queue_work(struct mbox_wq_info *mw, int first,
+			   int mdevs, u64 intr)
 {
-	struct rvu *rvu = (struct rvu *)rvu_irq;
 	struct otx2_mbox_dev *mdev;
 	struct otx2_mbox *mbox;
 	struct mbox_hdr *hdr;
+	int i;
+
+	for (i = first; i < mdevs; i++) {
+		/* interrupt bit 0 corresponds to device 'first' */
+		if (!(intr & BIT_ULL(i - first)))
+			continue;
+
+		mbox = &mw->mbox;
+		mdev = &mbox->dev[i];
+		hdr = mdev->mbase + mbox->rx_start;
+		if (hdr->num_msgs)
+			queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
+
+		mbox = &mw->mbox_up;
+		mdev = &mbox->dev[i];
+		hdr = mdev->mbase + mbox->rx_start;
+		if (hdr->num_msgs)
+			queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
+	}
+}
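[Reviewer note] The first/mdevs split lets one helper serve both 64-VF interrupt words: bit 0 of a given RVU_PF_VFPF_MBOX_INTX(n) register always maps to device 'first'. A worked check, based on the call sites in the handler below:

	static bool devid_signalled(int first, int devid, u64 intr)
	{
		return intr & BIT_ULL(devid - first);	/* bit 0 <=> device 'first' */
	}
	/* devid_signalled(64, 70, BIT_ULL(6)) -> true:
	 * VF70's doorbell arrives as bit 6 of VFPF_MBOX_INTX(1).
	 */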
+static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+{
+	struct rvu *rvu = (struct rvu *)rvu_irq;
+	int vfs = rvu->vfs;
 	u64 intr;
-	u8 pf;
 
 	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
 	/* Clear interrupts */
 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
 
 	/* Sync with mbox memory region */
-	smp_wmb();
+	rmb();
 
-	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
-		if (intr & (1ULL << pf)) {
-			mbox = &rvu->mbox;
-			mdev = &mbox->dev[pf];
-			hdr = mdev->mbase + mbox->rx_start;
-			if (hdr->num_msgs)
-				queue_work(rvu->mbox_wq,
-					   &rvu->mbox_wrk[pf].work);
-			mbox = &rvu->mbox_up;
-			mdev = &mbox->dev[pf];
-			hdr = mdev->mbase + mbox->rx_start;
-			if (hdr->num_msgs)
-				queue_work(rvu->mbox_wq,
-					   &rvu->mbox_wrk_up[pf].work);
-		}
+	rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
+
+	/* Handle VF interrupts */
+	if (vfs > 64) {
+		intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
+		rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
+
+		rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
+		vfs -= 64;
 	}
 
+	intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
+	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
+
+	rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
+
 	return IRQ_HANDLED;
 }
 
@@ -1561,6 +1740,216 @@ static void rvu_enable_mbox_intr(struct rvu *rvu)
 		    INTR_MASK(hw->total_pfs) & ~1ULL);
 }
 
+static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
+{
+	struct rvu_block *block;
+	int slot, lf, num_lfs;
+	int err;
+
+	block = &rvu->hw->block[blkaddr];
+	num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+					block->type);
+	if (!num_lfs)
+		return;
+
+	for (slot = 0; slot < num_lfs; slot++) {
+		lf = rvu_get_lf(rvu, block, pcifunc, slot);
+		if (lf < 0)
+			continue;
+
+		/* Cleanup LF and reset it */
+		if (block->addr == BLKADDR_NIX0)
+			rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
+		else if (block->addr == BLKADDR_NPA)
+			rvu_npa_lf_teardown(rvu, pcifunc, lf);
+
+		err = rvu_lf_reset(rvu, block, lf);
+		if (err) {
+			dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
+				block->addr, lf);
+		}
+	}
+}
+
+static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
+{
+	mutex_lock(&rvu->flr_lock);
+	/* Reset order should reflect inter-block dependencies:
+	 * 1. Reset any packet/work sources (NIX, CPT, TIM)
+	 * 2. Flush and reset SSO/SSOW
+	 * 3. Cleanup pools (NPA)
+	 */
+	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
+	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
+	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
+	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
+	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
+	rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
+	rvu_detach_rsrcs(rvu, NULL, pcifunc);
+	mutex_unlock(&rvu->flr_lock);
+}
+
+static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
+{
+	int reg = 0;
+
+	/* pcifunc = 0(PF0) | (vf + 1) */
+	__rvu_flr_handler(rvu, vf + 1);
+
+	if (vf >= 64) {
+		reg = 1;
+		vf = vf - 64;
+	}
+
+	/* Signal FLR finish and enable IRQ */
+	rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
+}
+
+static void rvu_flr_handler(struct work_struct *work)
+{
+	struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
+	struct rvu *rvu = flrwork->rvu;
+	u16 pcifunc, numvfs, vf;
+	u64 cfg;
+	int pf;
+
+	pf = flrwork - rvu->flr_wrk;
+	if (pf >= rvu->hw->total_pfs) {
+		rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
+		return;
+	}
+
+	cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+	numvfs = (cfg >> 12) & 0xFF;
+	pcifunc = pf << RVU_PFVF_PF_SHIFT;
+
+	for (vf = 0; vf < numvfs; vf++)
+		__rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
+
+	__rvu_flr_handler(rvu, pcifunc);
+
+	/* Signal FLR finish */
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));
+
+	/* Enable interrupt */
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf));
+}
+
+static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
+{
+	int dev, vf, reg = 0;
+	u64 intr;
+
+	if (start_vf >= 64)
+		reg = 1;
+
+	intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
+	if (!intr)
+		return;
+
+	for (vf = 0; vf < numvfs; vf++) {
+		if (!(intr & BIT_ULL(vf)))
+			continue;
+		dev = vf + start_vf + rvu->hw->total_pfs;
+		queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
+		/* Clear and disable the interrupt */
+		rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
+		rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
+	}
+}
+static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
+{
+	struct rvu *rvu = (struct rvu *)rvu_irq;
+	u64 intr;
+	u8 pf;
+
+	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
+	if (!intr)
+		goto afvf_flr;
+
+	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+		if (intr & (1ULL << pf)) {
+			/* PF is already dead, do only AF related operations */
+			queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
+			/* clear interrupt */
+			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
+				    BIT_ULL(pf));
+			/* Disable the interrupt */
+			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
+				    BIT_ULL(pf));
+		}
+	}
+
+afvf_flr:
+	rvu_afvf_queue_flr_work(rvu, 0, 64);
+	if (rvu->vfs > 64)
+		rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);
+
+	return IRQ_HANDLED;
+}
+
+static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
+{
+	int vf;
+
+	/* Nothing to be done here other than clearing the
+	 * TRPEND bit.
+	 */
+	for (vf = 0; vf < 64; vf++) {
+		if (intr & (1ULL << vf)) {
+			/* clear the trpend due to ME(master enable) */
+			rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
+			/* clear interrupt */
+			rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
+		}
+	}
+}
+
+/* Handles ME interrupts from VFs of AF */
+static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
+{
+	struct rvu *rvu = (struct rvu *)rvu_irq;
+	int vfset;
+	u64 intr;
+
+	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
+
+	for (vfset = 0; vfset <= 1; vfset++) {
+		intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
+		if (intr)
+			rvu_me_handle_vfset(rvu, vfset, intr);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Handles ME interrupts from PFs */
+static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
+{
+	struct rvu *rvu = (struct rvu *)rvu_irq;
+	u64 intr;
+	u8 pf;
+
+	intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
+
+	/* Nothing to be done here other than clearing the
+	 * TRPEND bit.
+	 */
+	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+		if (intr & (1ULL << pf)) {
+			/* clear the trpend due to ME(master enable) */
+			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
+				    BIT_ULL(pf));
+			/* clear interrupt */
+			rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
+				    BIT_ULL(pf));
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
 static void rvu_unregister_interrupts(struct rvu *rvu)
 {
 	int irq;
@@ -1569,6 +1958,14 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
 		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
 
+	/* Disable the PF FLR interrupt */
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
+		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+	/* Disable the PF ME interrupt */
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
+		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
 	for (irq = 0; irq < rvu->num_vec; irq++) {
 		if (rvu->irq_allocated[irq])
 			free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
@@ -1578,9 +1975,25 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
 	rvu->num_vec = 0;
 }
 
+static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
+{
+	struct rvu_pfvf *pfvf = &rvu->pf[0];
+	int offset;
+
+	pfvf = &rvu->pf[0];
+	offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
+
+	/* Make sure there are enough MSIX vectors configured so that
+	 * VF interrupts can be handled. Offset equal to zero means
+	 * that PF vectors are not configured and overlap the AF vectors.
+	 */
+	return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
+	       offset;
+}
+
 static int rvu_register_interrupts(struct rvu *rvu)
 {
-	int ret;
+	int ret, offset, pf_vec_start;
 
 	rvu->num_vec = pci_msix_vec_count(rvu->pdev);
 
@@ -1620,13 +2033,323 @@ static int rvu_register_interrupts(struct rvu *rvu)
 	/* Enable mailbox interrupts from all PFs */
 	rvu_enable_mbox_intr(rvu);
 
+	/* Register FLR interrupt handler */
+	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
+		"RVUAF FLR");
+	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
+			  rvu_flr_intr_handler, 0,
+			  &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
+			  rvu);
+	if (ret) {
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for FLR\n");
+		goto fail;
+	}
+	rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;
+
+	/* Enable FLR interrupt for all PFs */
+	rvu_write64(rvu, BLKADDR_RVUM,
+		    RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));
+
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
+		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+	/* Register ME interrupt handler */
+	sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
+		"RVUAF ME");
+	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
+			  rvu_me_pf_intr_handler, 0,
+			  &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
+			  rvu);
+	if (ret) {
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for ME\n");
+	}
+	rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
+
+	/* Enable ME interrupt for all PFs */
+	rvu_write64(rvu, BLKADDR_RVUM,
+		    RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
+
+	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
+		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+	if (!rvu_afvf_msix_vectors_num_ok(rvu))
+		return 0;
+
+	/* Get PF MSIX vectors offset. */
+	pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
+				  RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
+
+	/* Register MBOX0 interrupt. */
+	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
+	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
+	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+			  rvu_mbox_intr_handler, 0,
+			  &rvu->irq_name[offset * NAME_SIZE],
+			  rvu);
+	if (ret)
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for Mbox0\n");
+
+	rvu->irq_allocated[offset] = true;
+
+	/* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
+	 * simply increment current offset by 1.
+	 */
+	offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
+	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
+	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+			  rvu_mbox_intr_handler, 0,
+			  &rvu->irq_name[offset * NAME_SIZE],
+			  rvu);
+	if (ret)
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for Mbox1\n");
+
+	rvu->irq_allocated[offset] = true;
+
+	/* Register FLR interrupt handler for AF's VFs */
+	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
+	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
+	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+			  rvu_flr_intr_handler, 0,
+			  &rvu->irq_name[offset * NAME_SIZE], rvu);
+	if (ret) {
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
+		goto fail;
+	}
+	rvu->irq_allocated[offset] = true;
+
+	offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
+	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
+	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+			  rvu_flr_intr_handler, 0,
+			  &rvu->irq_name[offset * NAME_SIZE], rvu);
+	if (ret) {
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
+		goto fail;
+	}
+	rvu->irq_allocated[offset] = true;
+
+	/* Register ME interrupt handler for AF's VFs */
+	offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
+	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
+	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+			  rvu_me_vf_intr_handler, 0,
+			  &rvu->irq_name[offset * NAME_SIZE], rvu);
+	if (ret) {
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for RVUAFVF ME0\n");
+		goto fail;
+	}
+	rvu->irq_allocated[offset] = true;
+
+	offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
+	sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
+	ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+			  rvu_me_vf_intr_handler, 0,
+			  &rvu->irq_name[offset * NAME_SIZE], rvu);
+	if (ret) {
+		dev_err(rvu->dev,
+			"RVUAF: IRQ registration failed for RVUAFVF ME1\n");
+		goto fail;
+	}
+	rvu->irq_allocated[offset] = true;
 	return 0;
 
 fail:
-	pci_free_irq_vectors(rvu->pdev);
+	rvu_unregister_interrupts(rvu);
 	return ret;
 }
 
+static void rvu_flr_wq_destroy(struct rvu *rvu)
+{
+	if (rvu->flr_wq) {
+		flush_workqueue(rvu->flr_wq);
+		destroy_workqueue(rvu->flr_wq);
+		rvu->flr_wq = NULL;
+	}
+}
+
+static int rvu_flr_init(struct rvu *rvu)
+{
+	int dev, num_devs;
+	u64 cfg;
+	int pf;
+
+	/* Enable FLR for all PFs */
+	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+		cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+		rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
+			    cfg | BIT_ULL(22));
+	}
+
+	rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
+				      WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+				      1);
+	if (!rvu->flr_wq)
+		return -ENOMEM;
+
+	num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
+	rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
+				    sizeof(struct rvu_work), GFP_KERNEL);
+	if (!rvu->flr_wrk) {
+		destroy_workqueue(rvu->flr_wq);
+		return -ENOMEM;
+	}
+
+	for (dev = 0; dev < num_devs; dev++) {
+		rvu->flr_wrk[dev].rvu = rvu;
+		INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
+	}
+
+	mutex_init(&rvu->flr_lock);
+
+	return 0;
+}
+
+static void rvu_disable_afvf_intr(struct rvu *rvu)
+{
+	int vfs = rvu->vfs;
+
+	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
+	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
+	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
+	if (vfs <= 64)
+		return;
+
+	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
+		      INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+}
+
+static void rvu_enable_afvf_intr(struct rvu *rvu)
+{
+	int vfs = rvu->vfs;
+
+	/* Clear any pending interrupts and enable AF VF interrupts for
+	 * the first 64 VFs.
+	 */
+	/* Mbox */
+	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
+	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+	/* FLR */
+	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
+	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
+	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+	/* Same for remaining VFs, if any. */
+	if (vfs <= 64)
+		return;
+
+	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
+		      INTR_MASK(vfs - 64));
+
+	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+}
+
+#define PCI_DEVID_OCTEONTX2_LBK 0xA061
+
+static int lbk_get_num_chans(void)
+{
+	struct pci_dev *pdev;
+	void __iomem *base;
+	int ret = -EIO;
+
+	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
+			      NULL);
+	if (!pdev)
+		goto err;
+
+	base = pci_ioremap_bar(pdev, 0);
+	if (!base)
+		goto err_put;
+
+	/* Read number of available LBK channels from LBK(0)_CONST register. */
+	ret = (readq(base + 0x10) >> 32) & 0xffff;
+	iounmap(base);
+err_put:
+	pci_dev_put(pdev);
+err:
+	return ret;
+}
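[Reviewer note] A sketch of the pairing that the even-VF rule in rvu_enable_sriov() below protects: AF VFs are wired back-to-back over LBK, with VFs 2n and 2n+1 exchanging packets over consecutive loopback channels. The exact channel assignment here is illustrative (it uses NIX_CHAN_LBK_CHX() from common.h), not taken from this patch:

	/* Illustrative only: VF0 <-> chan 0, VF1 <-> chan 1, ... */
	static u16 afvf_lbk_chan(int vf)
	{
		return NIX_CHAN_LBK_CHX(0, vf);	/* 0x100 * lbk + chan */
	}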
+static int rvu_enable_sriov(struct rvu *rvu)
+{
+	struct pci_dev *pdev = rvu->pdev;
+	int err, chans, vfs;
+
+	if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
+		dev_warn(&pdev->dev,
+			 "Skipping SRIOV enablement since not enough IRQs are available\n");
+		return 0;
+	}
+
+	chans = lbk_get_num_chans();
+	if (chans < 0)
+		return chans;
+
+	vfs = pci_sriov_get_totalvfs(pdev);
+
+	/* Limit VFs in case we have more VFs than LBK channels available. */
+	if (vfs > chans)
+		vfs = chans;
+
+	/* AF's VFs work in pairs and talk over consecutive loopback channels.
+	 * Thus we want to enable maximum even number of VFs. In case
+	 * odd number of VFs are available then the last VF on the list
+	 * remains disabled.
+	 */
+	if (vfs & 0x1) {
+		dev_warn(&pdev->dev,
+			 "Number of VFs should be even. Enabling %d out of %d.\n",
+			 vfs - 1, vfs);
+		vfs--;
+	}
+
+	if (!vfs)
+		return 0;
+
+	/* Save VFs number for reference in VF interrupts handlers.
+	 * Since interrupts might start arriving during SRIOV enablement
+	 * ordinary API cannot be used to get number of enabled VFs.
+	 */
+	rvu->vfs = vfs;
+
+	err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
+			    rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
+	if (err)
+		return err;
+
+	rvu_enable_afvf_intr(rvu);
+	/* Make sure IRQs are enabled before SRIOV. */
+	mb();
+
+	err = pci_enable_sriov(pdev, vfs);
+	if (err) {
+		rvu_disable_afvf_intr(rvu);
+		rvu_mbox_destroy(&rvu->afvf_wq_info);
+		return err;
+	}
+
+	return 0;
+}
+
+static void rvu_disable_sriov(struct rvu *rvu)
+{
+	rvu_disable_afvf_intr(rvu);
+	rvu_mbox_destroy(&rvu->afvf_wq_info);
+	pci_disable_sriov(rvu->pdev);
+}
+
 static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct device *dev = &pdev->dev;
@@ -1689,7 +2412,10 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (err)
 		goto err_release_regions;
 
-	err = rvu_mbox_init(rvu);
+	/* Init mailbox btw AF and PFs */
+	err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
+			    rvu->hw->total_pfs, rvu_afpf_mbox_handler,
+			    rvu_afpf_mbox_up_handler);
 	if (err)
 		goto err_hwsetup;
 
@@ -1697,15 +2423,28 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (err)
 		goto err_mbox;
 
-	err = rvu_register_interrupts(rvu);
+	err = rvu_flr_init(rvu);
 	if (err)
 		goto err_cgx;
 
+	err = rvu_register_interrupts(rvu);
+	if (err)
+		goto err_flr;
+
+	/* Enable AF's VFs (if any) */
+	err = rvu_enable_sriov(rvu);
+	if (err)
+		goto err_irq;
+
 	return 0;
+
+err_irq:
+	rvu_unregister_interrupts(rvu);
+err_flr:
+	rvu_flr_wq_destroy(rvu);
 err_cgx:
 	rvu_cgx_wq_destroy(rvu);
 err_mbox:
-	rvu_mbox_destroy(rvu);
+	rvu_mbox_destroy(&rvu->afpf_wq_info);
 err_hwsetup:
 	rvu_reset_all_blocks(rvu);
 	rvu_free_hw_resources(rvu);
@@ -1725,8 +2464,10 @@ static void rvu_remove(struct pci_dev *pdev)
 	struct rvu *rvu = pci_get_drvdata(pdev);
 
 	rvu_unregister_interrupts(rvu);
+	rvu_flr_wq_destroy(rvu);
 	rvu_cgx_wq_destroy(rvu);
-	rvu_mbox_destroy(rvu);
+	rvu_mbox_destroy(&rvu->afpf_wq_info);
+	rvu_disable_sriov(rvu);
 
 	rvu_reset_all_blocks(rvu);
 	rvu_free_hw_resources(rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 2c0580cd28078a8741902667c86e9b80efca5a91..c8409bc5d9c38b296bd21f00b87a73bb70a53486 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -11,6 +11,7 @@
 #ifndef RVU_H
 #define RVU_H
 
+#include <linux/pci.h>
 #include "rvu_struct.h"
 #include "common.h"
 #include "mbox.h"
@@ -18,6 +19,9 @@
 /* PCI device IDs */
 #define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
 
+/* Subsystem Device ID */
+#define PCI_SUBSYS_DEVID_96XX 0xB200
+
 /* PCI BAR nos */
 #define PCI_AF_REG_BAR_NUM 0
 #define PCI_PF_REG_BAR_NUM 2
@@ -64,7 +68,7 @@ struct nix_mcast {
 	struct qmem *mcast_buf;
 	int replay_pkind;
 	int next_free_mce;
-	spinlock_t mce_lock; /* Serialize MCE updates */
+	struct mutex mce_lock; /* Serialize MCE updates */
 };
 
 struct nix_mce_list {
@@ -74,15 +78,27 @@ struct nix_mce_list {
 };
 
 struct npc_mcam {
-	spinlock_t lock; /* MCAM entries and counters update lock */
+	struct rsrc_bmap counters;
+	struct mutex lock; /* MCAM entries and counters update lock */
+	unsigned long *bmap; /* bitmap, 0 => bmap_entries */
+	unsigned long *bmap_reverse; /* Reverse bitmap, bmap_entries => 0 */
+	u16 bmap_entries; /* Number of unreserved MCAM entries */
+	u16 bmap_fcnt; /* MCAM entries free count */
+	u16 *entry2pfvf_map;
+	u16 *entry2cntr_map;
+	u16 *cntr2pfvf_map;
+	u16 *cntr_refcnt;
 	u8 keysize; /* MCAM keysize 112/224/448 bits */
 	u8 banks; /* Number of MCAM banks */
 	u8 banks_per_entry;/* Number of keywords in key */
 	u16 banksize; /* Number of MCAM entries in each bank */
 	u16 total_entries; /* Total number of MCAM entries */
-	u16 entries; /* Total minus reserved for NIX LFs */
 	u16 nixlf_offset; /* Offset of nixlf rsvd uncast entries */
 	u16 pf_offset; /* Offset of PF's rsvd bcast, promisc entries */
+	u16 lprio_count;
+	u16 lprio_start;
+	u16 hprio_count;
+	u16 hprio_end;
 };
 
 /* Structure for per RVU func info ie PF/VF */
@@ -122,12 +138,19 @@ struct rvu_pfvf {
 	u16 tx_chan_base;
 	u8 rx_chan_cnt; /* total number of RX channels */
 	u8 tx_chan_cnt; /* total number of TX channels */
+	u16 maxlen;
+	u16 minlen;
 
 	u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
 
 	/* Broadcast pkt replication info */
 	u16 bcast_mce_idx;
 	struct nix_mce_list bcast_mce_list;
+
+	/* VLAN offload */
+	struct mcam_entry entry;
+	int rxvlan_index;
+	bool rxvlan;
 };
 
 struct nix_txsch {
@@ -164,6 +187,16 @@ struct rvu_hwinfo {
 	struct npc_mcam mcam;
 };
 
+struct mbox_wq_info {
+	struct otx2_mbox mbox;
+	struct rvu_work *mbox_wrk;
+
+	struct otx2_mbox mbox_up;
+	struct rvu_work *mbox_wrk_up;
+
+	struct workqueue_struct *mbox_wq;
+};
+
 struct rvu {
 	void __iomem *afreg_base;
 	void __iomem *pfreg_base;
@@ -172,14 +205,17 @@ struct rvu {
 	struct rvu_hwinfo *hw;
 	struct rvu_pfvf *pf;
 	struct rvu_pfvf *hwvf;
-	spinlock_t rsrc_lock; /* Serialize resource alloc/free */
+	struct mutex rsrc_lock; /* Serialize resource alloc/free */
+	int vfs; /* Number of VFs attached to RVU */
 
 	/* Mbox */
-	struct otx2_mbox mbox;
-	struct rvu_work *mbox_wrk;
-	struct otx2_mbox mbox_up;
-	struct rvu_work *mbox_wrk_up;
-	struct workqueue_struct *mbox_wq;
+	struct mbox_wq_info afpf_wq_info;
+	struct mbox_wq_info afvf_wq_info;
+
+	/* PF FLR */
+	struct rvu_work *flr_wrk;
+	struct workqueue_struct *flr_wq;
+	struct mutex flr_lock; /* Serialize FLRs */
 
 	/* MSI-X */
 	u16 num_vec;
@@ -223,9 +259,22 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
 	return readq(rvu->pfreg_base + offset);
 }
 
+static inline bool is_rvu_9xxx_A0(struct rvu *rvu)
+{
+	struct pci_dev *pdev = rvu->pdev;
+
+	return (pdev->revision == 0x00) &&
+		(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
+}
+
 /* Function Prototypes
  * RVU
 */
+static inline int is_afvf(u16 pcifunc)
+{
+	return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
+}
+
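[Reviewer note] is_afvf() works because AF's own VFs are the only RVU devices whose pcifunc carries no PF bits (the PF0 domain). A couple of illustrative values; the helper below is hypothetical test code, not part of the patch:

	static void is_afvf_examples(void)
	{
		WARN_ON(!is_afvf(0x001));	/* PF0's VF0: one of AF's VFs */
		WARN_ON(is_afvf((0x001 << RVU_PFVF_PF_SHIFT) | 0x001)); /* PF1's VF0: not */
	}

is_rvu_9xxx_A0() similarly keys off PCI revision/subsystem IDs so later code can gate pass-A0 silicon quirks.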
 int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
 int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
 void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
@@ -236,6 +285,7 @@ int rvu_get_pf(u16 pcifunc);
 struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
 void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
 bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
+bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype);
 int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
 int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
 int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
@@ -270,85 +320,96 @@ int rvu_cgx_probe(struct rvu *rvu);
 void rvu_cgx_wq_destroy(struct rvu *rvu);
 void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
 int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
-int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
 				    struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
 				   struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
 			       struct cgx_stats_rsp *rsp);
-int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
+int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
 				      struct cgx_mac_addr_set_or_get *req,
 				      struct cgx_mac_addr_set_or_get *rsp);
-int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
+int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
 				      struct cgx_mac_addr_set_or_get *req,
 				      struct cgx_mac_addr_set_or_get *rsp);
-int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
 					struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
 					 struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
 					  struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
 					 struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
 				      struct cgx_link_info_msg *rsp);
-int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
 				       struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
 					struct msg_rsp *rsp);
 
 /* NPA APIs */
 int rvu_npa_init(struct rvu *rvu);
 void rvu_npa_freemem(struct rvu *rvu);
-int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
+void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf);
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
 				struct npa_aq_enq_req *req,
 				struct npa_aq_enq_rsp *rsp);
-int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
+int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
 				       struct hwctx_disable_req *req,
 				       struct msg_rsp *rsp);
-int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
+int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
 				  struct npa_lf_alloc_req *req,
 				  struct npa_lf_alloc_rsp *rsp);
-int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
 				 struct msg_rsp *rsp);
 
 /* NIX APIs */
+bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc);
 int rvu_nix_init(struct rvu *rvu);
 void rvu_nix_freemem(struct rvu *rvu);
 int rvu_get_nixlf_count(struct rvu *rvu);
-int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
+void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
+int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
 				  struct nix_lf_alloc_req *req,
 				  struct nix_lf_alloc_rsp *rsp);
-int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
 				 struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
+int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
 				struct nix_aq_enq_req *req,
 				struct nix_aq_enq_rsp *rsp);
-int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
+int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
 				       struct hwctx_disable_req *req,
 				       struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
+int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
 				     struct nix_txsch_alloc_req *req,
 				     struct nix_txsch_alloc_rsp *rsp);
-int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
+int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
nix_txsch_free_req *req, struct msg_rsp *rsp); -int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu, +int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, struct nix_txschq_config *req, struct msg_rsp *rsp); -int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp); -int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu, +int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, struct nix_vtag_config *req, struct msg_rsp *rsp); -int rvu_mbox_handler_NIX_RSS_FLOWKEY_CFG(struct rvu *rvu, +int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu, struct nix_rss_flowkey_cfg *req, struct msg_rsp *rsp); -int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu, +int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, struct nix_set_mac_addr *req, struct msg_rsp *rsp); -int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req, +int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp); +int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp); /* NPC APIs */ int rvu_npc_init(struct rvu *rvu); @@ -360,9 +421,48 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan, bool allmulti); void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf); +void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan); +int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf); +void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf); +void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, int group, int alg_idx, int mcam_index); +int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu, + struct npc_mcam_alloc_entry_req *req, + struct npc_mcam_alloc_entry_rsp *rsp); +int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu, + struct npc_mcam_free_entry_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, + struct npc_mcam_write_entry_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu, + struct npc_mcam_ena_dis_entry_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu, + struct npc_mcam_ena_dis_entry_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu, + struct npc_mcam_shift_entry_req *req, + struct npc_mcam_shift_entry_rsp *rsp); +int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu, + struct npc_mcam_alloc_counter_req *req, + struct npc_mcam_alloc_counter_rsp *rsp); +int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu, + struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp); +int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu, + struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp); +int 
rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu, + struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp); +int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu, + struct npc_mcam_oper_counter_req *req, + struct npc_mcam_oper_counter_rsp *rsp); +int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, + struct npc_mcam_alloc_and_write_entry_req *req, + struct npc_mcam_alloc_and_write_entry_rsp *rsp); +int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req, + struct npc_get_kex_cfg_rsp *rsp); #endif /* RVU_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 188185c15b4a7a26504efcf03a0de12d642a40f9..1de6eb528d08d3b3fa4daf05432b699c2b20de51 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -20,14 +20,14 @@ struct cgx_evq_entry { struct cgx_link_event link_event; }; -#define M(_name, _id, _req_type, _rsp_type) \ +#define M(_name, _id, _fn_name, _req_type, _rsp_type) \ static struct _req_type __maybe_unused \ -*otx2_mbox_alloc_msg_ ## _name(struct rvu *rvu, int devid) \ +*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \ { \ struct _req_type *req; \ \ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \ - &rvu->mbox_up, devid, sizeof(struct _req_type), \ + &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \ sizeof(struct _rsp_type)); \ if (!req) \ return NULL; \ @@ -177,12 +177,12 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu) } /* Send mbox message to PF */ - msg = otx2_mbox_alloc_msg_CGX_LINK_EVENT(rvu, pfid); + msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid); if (!msg) continue; msg->link_info = *linfo; - otx2_mbox_msg_send(&rvu->mbox_up, pfid); - err = otx2_mbox_wait_for_rsp(&rvu->mbox_up, pfid); + otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid); + err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid); if (err) dev_warn(rvu->dev, "notification to pf %d failed\n", pfid); @@ -303,21 +303,21 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) return 0; } -int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true); return 0; } -int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false); return 0; } -int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req, struct cgx_stats_rsp *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); @@ -354,7 +354,7 @@ int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req, return 0; } -int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu, +int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp) { @@ -368,7 +368,7 @@ int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu, return 0; } -int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu, +int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp) { @@ -387,7 +387,7 @@ int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu, return 0; } -int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct 
rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; @@ -407,7 +407,7 @@ int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req, return 0; } -int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; @@ -451,21 +451,21 @@ static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en) return 0; } -int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true); return 0; } -int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false); return 0; } -int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req, struct cgx_link_info_msg *rsp) { u8 cgx_id, lmac_id; @@ -500,14 +500,14 @@ static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en) lmac_id, en); } -int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true); return 0; } -int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index a5ab7eff23014cc8a8c97469006c978241fdf900..b7998f6be386a2c862a8b20f966142d767aa7af3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -55,6 +55,17 @@ struct mce { u16 pcifunc; }; +bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || blkaddr < 0) + return false; + return true; +} + int rvu_get_nixlf_count(struct rvu *rvu) { struct rvu_block *block; @@ -94,6 +105,23 @@ static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr) return NULL; } +static void nix_rx_sync(struct rvu *rvu, int blkaddr) +{ + int err; + + /*Sync all in flight RX packets to LLC/DRAM */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); + err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); + if (err) + dev_err(rvu->dev, "NIX RX software sync failed\n"); + + /* As per a HW errata in 9xxx A0 silicon, HW may clear SW_SYNC[ENA] + * bit too early. Hence wait for 50us more. 
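
For context: rvu_poll_reg() used in nix_rx_sync() below is an AF helper defined in rvu.c, not in this patch. A minimal sketch of what such a poll helper typically looks like; the timeout and sleep values here are illustrative assumptions only:

static int example_poll_reg(struct rvu *rvu, int blkaddr, u64 offset,
                            u64 mask, bool zero)
{
        unsigned long timeout = jiffies + usecs_to_jiffies(10000);
        u64 reg;

        /* Wait for 'mask' bits to clear (zero == true) or to be set */
        while (time_before(jiffies, timeout)) {
                reg = rvu_read64(rvu, blkaddr, offset);
                if (zero && !(reg & mask))
                        return 0;
                if (!zero && (reg & mask))
                        return 0;
                usleep_range(1, 5);
        }
        return -EBUSY;
}
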
+ */ + if (is_rvu_9xxx_A0(rvu)) + usleep_range(50, 60); +} + static bool is_valid_txschq(struct rvu *rvu, int blkaddr, int lvl, u16 pcifunc, u16 schq) { @@ -109,12 +137,12 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr, if (schq >= txsch->schq.max) return false; - spin_lock(&rvu->rsrc_lock); + mutex_lock(&rvu->rsrc_lock); if (txsch->pfvf_map[schq] != pcifunc) { - spin_unlock(&rvu->rsrc_lock); + mutex_unlock(&rvu->rsrc_lock); return false; } - spin_unlock(&rvu->rsrc_lock); + mutex_unlock(&rvu->rsrc_lock); return true; } @@ -122,7 +150,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); u8 cgx_id, lmac_id; - int pkind, pf; + int pkind, pf, vf; int err; pf = rvu_get_pf(pcifunc); @@ -148,6 +176,14 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) rvu_npc_set_pkind(rvu, pkind, pfvf); break; case NIX_INTF_TYPE_LBK: + vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; + pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf); + pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) : + NIX_CHAN_LBK_CHX(0, vf + 1); + pfvf->rx_chan_cnt = 1; + pfvf->tx_chan_cnt = 1; + rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, + pfvf->rx_chan_base, false); break; } @@ -168,14 +204,21 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) rvu_npc_install_bcast_match_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base); + pfvf->maxlen = NIC_HW_MIN_FRS; + pfvf->minlen = NIC_HW_MIN_FRS; return 0; } static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) { + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); int err; + pfvf->maxlen = 0; + pfvf->minlen = 0; + pfvf->rxvlan = false; + /* Remove this PF_FUNC from bcast pkt replication list */ err = nix_update_bcast_mce_list(rvu, pcifunc, false); if (err) { @@ -637,25 +680,25 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) return err; } -int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu, +int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, struct nix_aq_enq_req *req, struct nix_aq_enq_rsp *rsp) { return rvu_nix_aq_enq_inst(rvu, req, rsp); } -int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu, +int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req, struct msg_rsp *rsp) { return nix_lf_hwctx_disable(rvu, req); } -int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu, +int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, struct nix_lf_alloc_req *req, struct nix_lf_alloc_rsp *rsp) { - int nixlf, qints, hwctx_size, err, rc = 0; + int nixlf, qints, hwctx_size, intf, err, rc = 0; struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; struct rvu_block *block; @@ -676,6 +719,24 @@ int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu, if (nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID; + /* Check if requested 'NIXLF <=> NPALF' mapping is valid */ + if (req->npa_func) { + /* If default, use 'this' NIXLF's PFFUNC */ + if (req->npa_func == RVU_DEFAULT_PF_FUNC) + req->npa_func = pcifunc; + if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA)) + return NIX_AF_INVAL_NPA_PF_FUNC; + } + + /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */ + if (req->sso_func) { + /* If default, use 'this' NIXLF's PFFUNC */ + if (req->sso_func == RVU_DEFAULT_PF_FUNC) + req->sso_func = pcifunc; + if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO)) + return NIX_AF_INVAL_SSO_PF_FUNC; + } + /* If RSS is being enabled, check if requested config is valid. 
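
The LBK channel assignment in nix_interface_init() above cross-connects AF VFs in even/odd pairs. A worked illustration of the mapping it produces, derived from the 'vf & 0x1' selection in the code (vf is the zero-based index computed from pcifunc):

/* vf 0: rx chan = NIX_CHAN_LBK_CHX(0, 0), tx chan = NIX_CHAN_LBK_CHX(0, 1)
 * vf 1: rx chan = NIX_CHAN_LBK_CHX(0, 1), tx chan = NIX_CHAN_LBK_CHX(0, 0)
 * vf 2: rx chan = NIX_CHAN_LBK_CHX(0, 2), tx chan = NIX_CHAN_LBK_CHX(0, 3)
 * vf 3: rx chan = NIX_CHAN_LBK_CHX(0, 3), tx chan = NIX_CHAN_LBK_CHX(0, 2)
 *
 * i.e. whatever VF2n transmits over LBK arrives on VF2n+1's Rx channel
 * and vice versa, giving each VF pair a back-to-back virtual link.
 */
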
* RSS table size should be power of two, otherwise * RSS_GRP::OFFSET + adder might go beyond that group or @@ -780,18 +841,10 @@ int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu, /* Enable LMTST for this NIX LF */ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0)); - /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC - * If requester has sent a 'RVU_DEFAULT_PF_FUNC' use this NIX LF's - * PCIFUNC itself. - */ - if (req->npa_func == RVU_DEFAULT_PF_FUNC) - cfg = pcifunc; - else + /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */ + if (req->npa_func) cfg = req->npa_func; - - if (req->sso_func == RVU_DEFAULT_PF_FUNC) - cfg |= (u64)pcifunc << 16; - else + if (req->sso_func) cfg |= (u64)req->sso_func << 16; cfg |= (u64)req->xqe_sz << 33; @@ -800,10 +853,14 @@ int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu, /* Config Rx pkt length, csum checks and apad enable / disable */ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg); - err = nix_interface_init(rvu, pcifunc, NIX_INTF_TYPE_CGX, nixlf); + intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; + err = nix_interface_init(rvu, pcifunc, intf, nixlf); if (err) goto free_mem; + /* Disable NPC entries as NIXLF's contexts are not initialized yet */ + rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); + goto exit; free_mem: @@ -823,10 +880,18 @@ int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu, rsp->tx_chan_cnt = pfvf->tx_chan_cnt; rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4; rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6; + /* Get HW supported stat count */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); + rsp->lf_rx_stats = ((cfg >> 32) & 0xFF); + rsp->lf_tx_stats = ((cfg >> 24) & 0xFF); + /* Get count of CQ IRQs and error IRQs supported per LF */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); + rsp->qints = ((cfg >> 12) & 0xFFF); + rsp->cints = ((cfg >> 24) & 0xFFF); return rc; } -int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; @@ -918,7 +983,7 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00); } -int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu, +int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, struct nix_txsch_alloc_req *req, struct nix_txsch_alloc_rsp *rsp) { @@ -939,7 +1004,7 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu, if (!nix_hw) return -EINVAL; - spin_lock(&rvu->rsrc_lock); + mutex_lock(&rvu->rsrc_lock); for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { txsch = &nix_hw->txsch[lvl]; req_schq = req->schq_contig[lvl] + req->schq[lvl]; @@ -995,7 +1060,7 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu, err: rc = NIX_AF_ERR_TLX_ALLOC_FAIL; exit: - spin_unlock(&rvu->rsrc_lock); + mutex_unlock(&rvu->rsrc_lock); return rc; } @@ -1020,7 +1085,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) return NIX_AF_ERR_AF_LF_INVALID; /* Disable TL2/3 queue links before SMQ flush*/ - spin_lock(&rvu->rsrc_lock); + mutex_lock(&rvu->rsrc_lock); for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) { if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4) continue; @@ -1062,7 +1127,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) txsch->pfvf_map[schq] = 0; } } - spin_unlock(&rvu->rsrc_lock); + mutex_unlock(&rvu->rsrc_lock); /* Sync cached info for this LF in NDC-TX to LLC/DRAM */ rvu_write64(rvu, blkaddr, 
NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf); @@ -1073,7 +1138,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) return 0; } -int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu, +int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, struct nix_txsch_free_req *req, struct msg_rsp *rsp) { @@ -1118,7 +1183,7 @@ static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, return true; } -int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu, +int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, struct nix_txschq_config *req, struct msg_rsp *rsp) { @@ -1181,35 +1246,22 @@ int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu, static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, struct nix_vtag_config *req) { - u64 regval = 0; - -#define NIX_VTAGTYPE_MAX 0x8ull -#define NIX_VTAGSIZE_MASK 0x7ull -#define NIX_VTAGSTRIP_CAP_MASK 0x30ull + u64 regval = req->vtag_size; - if (req->rx.vtag_type >= NIX_VTAGTYPE_MAX || - req->vtag_size > VTAGSIZE_T8) + if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8) return -EINVAL; - regval = rvu_read64(rvu, blkaddr, - NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type)); - - if (req->rx.strip_vtag && req->rx.capture_vtag) - regval |= BIT_ULL(4) | BIT_ULL(5); - else if (req->rx.strip_vtag) + if (req->rx.capture_vtag) + regval |= BIT_ULL(5); + if (req->rx.strip_vtag) regval |= BIT_ULL(4); - else - regval &= ~(BIT_ULL(4) | BIT_ULL(5)); - - regval &= ~NIX_VTAGSIZE_MASK; - regval |= req->vtag_size & NIX_VTAGSIZE_MASK; rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval); return 0; } -int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu, +int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, struct nix_vtag_config *req, struct msg_rsp *rsp) { @@ -1294,7 +1346,7 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list, return 0; /* Add a new one to the list, at the tail */ - mce = kzalloc(sizeof(*mce), GFP_ATOMIC); + mce = kzalloc(sizeof(*mce), GFP_KERNEL); if (!mce) return -ENOMEM; mce->idx = idx; @@ -1317,6 +1369,10 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add) struct rvu_pfvf *pfvf; int blkaddr; + /* Broadcast pkt replication is not needed for AF's VFs, hence skip */ + if (is_afvf(pcifunc)) + return 0; + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) return 0; @@ -1340,7 +1396,7 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add) return -EINVAL; } - spin_lock(&mcast->mce_lock); + mutex_lock(&mcast->mce_lock); err = nix_update_mce_list(mce_list, pcifunc, idx, add); if (err) @@ -1370,7 +1426,7 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add) } end: - spin_unlock(&mcast->mce_lock); + mutex_unlock(&mcast->mce_lock); return err; } @@ -1455,7 +1511,7 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) BIT_ULL(63) | (mcast->replay_pkind << 24) | BIT_ULL(20) | MC_BUF_CNT); - spin_lock_init(&mcast->mce_lock); + mutex_init(&mcast->mce_lock); return nix_setup_bcast_tables(rvu, nix_hw); } @@ -1506,7 +1562,7 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) return 0; } -int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; @@ -1564,7 +1620,7 @@ static int get_flowkey_alg_idx(u32 flow_cfg) return FLOW_KEY_ALG_PORT; } -int rvu_mbox_handler_NIX_RSS_FLOWKEY_CFG(struct rvu *rvu, +int 
rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu, struct nix_rss_flowkey_cfg *req, struct msg_rsp *rsp) { @@ -1720,7 +1776,7 @@ static void nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr) } } -int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu, +int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, struct nix_set_mac_addr *req, struct msg_rsp *rsp) { @@ -1742,10 +1798,13 @@ int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu, rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base, req->mac_addr); + + rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); + return 0; } -int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req, +int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, struct msg_rsp *rsp) { bool allmulti = false, disable_promisc = false; @@ -1775,9 +1834,261 @@ int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req, else rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base, allmulti); + + rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); + return 0; } +static void nix_find_link_frs(struct rvu *rvu, + struct nix_frs_cfg *req, u16 pcifunc) +{ + int pf = rvu_get_pf(pcifunc); + struct rvu_pfvf *pfvf; + int maxlen, minlen; + int numvfs, hwvf; + int vf; + + /* Update with requester's min/max lengths */ + pfvf = rvu_get_pfvf(rvu, pcifunc); + pfvf->maxlen = req->maxlen; + if (req->update_minlen) + pfvf->minlen = req->minlen; + + maxlen = req->maxlen; + minlen = req->update_minlen ? req->minlen : 0; + + /* Get this PF's numVFs and starting hwvf */ + rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); + + /* For each VF, compare requested max/minlen */ + for (vf = 0; vf < numvfs; vf++) { + pfvf = &rvu->hwvf[hwvf + vf]; + if (pfvf->maxlen > maxlen) + maxlen = pfvf->maxlen; + if (req->update_minlen && + pfvf->minlen && pfvf->minlen < minlen) + minlen = pfvf->minlen; + } + + /* Compare requested max/minlen with PF's max/minlen */ + pfvf = &rvu->pf[pf]; + if (pfvf->maxlen > maxlen) + maxlen = pfvf->maxlen; + if (req->update_minlen && + pfvf->minlen && pfvf->minlen < minlen) + minlen = pfvf->minlen; + + /* Update the request with max/min PF's and it's VF's max/min */ + req->maxlen = maxlen; + if (req->update_minlen) + req->minlen = minlen; +} + +int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, + struct msg_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + int pf = rvu_get_pf(pcifunc); + int blkaddr, schq, link = -1; + struct nix_txsch *txsch; + u64 cfg, lmac_fifo_len; + struct nix_hw *nix_hw; + u8 cgx = 0, lmac = 0; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return -EINVAL; + + if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS) + return NIX_AF_ERR_FRS_INVALID; + + if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) + return NIX_AF_ERR_FRS_INVALID; + + /* Check if requester wants to update SMQ's */ + if (!req->update_smq) + goto rx_frscfg; + + /* Update min/maxlen in each of the SMQ attached to this PF/VF */ + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; + mutex_lock(&rvu->rsrc_lock); + for (schq = 0; schq < txsch->schq.max; schq++) { + if (txsch->pfvf_map[schq] != pcifunc) + continue; + cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); + cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8); + if (req->update_minlen) + cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F); + rvu_write64(rvu, blkaddr, 
NIX_AF_SMQX_CFG(schq), cfg); + } + mutex_unlock(&rvu->rsrc_lock); + +rx_frscfg: + /* Check if config is for SDP link */ + if (req->sdp_link) { + if (!hw->sdp_links) + return NIX_AF_ERR_RX_LINK_INVALID; + link = hw->cgx_links + hw->lbk_links; + goto linkcfg; + } + + /* Check if the request is from CGX mapped RVU PF */ + if (is_pf_cgxmapped(rvu, pf)) { + /* Get CGX and LMAC to which this PF is mapped and find link */ + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac); + link = (cgx * hw->lmac_per_cgx) + lmac; + } else if (pf == 0) { + /* For VFs of PF0 ingress is LBK port, so config LBK link */ + link = hw->cgx_links; + } + + if (link < 0) + return NIX_AF_ERR_RX_LINK_INVALID; + + nix_find_link_frs(rvu, req, pcifunc); + +linkcfg: + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); + cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); + if (req->update_minlen) + cfg = (cfg & ~0xFFFFULL) | req->minlen; + rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg); + + if (req->sdp_link || pf == 0) + return 0; + + /* Update transmit credits for CGX links */ + lmac_fifo_len = + CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); + cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link)); + cfg &= ~(0xFFFFFULL << 12); + cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12; + rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); + rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_EXPR_CREDIT(link), cfg); + + return 0; +} + +int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + struct npc_mcam_alloc_entry_req alloc_req = { }; + struct npc_mcam_alloc_entry_rsp alloc_rsp = { }; + struct npc_mcam_free_entry_req free_req = { }; + u16 pcifunc = req->hdr.pcifunc; + int blkaddr, nixlf, err; + struct rvu_pfvf *pfvf; + + /* LBK VFs do not have separate MCAM UCAST entry hence + * skip allocating rxvlan for them + */ + if (is_afvf(pcifunc)) + return 0; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + if (pfvf->rxvlan) + return 0; + + /* alloc new mcam entry */ + alloc_req.hdr.pcifunc = pcifunc; + alloc_req.count = 1; + + err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req, + &alloc_rsp); + if (err) + return err; + + /* update entry to enable rxvlan offload */ + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) { + err = NIX_AF_ERR_AF_LF_INVALID; + goto free_entry; + } + + nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0); + if (nixlf < 0) { + err = NIX_AF_ERR_AF_LF_INVALID; + goto free_entry; + } + + pfvf->rxvlan_index = alloc_rsp.entry_list[0]; + /* all it means is that rxvlan_index is valid */ + pfvf->rxvlan = true; + + err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); + if (err) + goto free_entry; + + return 0; +free_entry: + free_req.hdr.pcifunc = pcifunc; + free_req.entry = alloc_rsp.entry_list[0]; + rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp); + pfvf->rxvlan = false; + return err; +} + +static void nix_link_config(struct rvu *rvu, int blkaddr) +{ + struct rvu_hwinfo *hw = rvu->hw; + int cgx, lmac_cnt, slink, link; + u64 tx_credits; + + /* Set default min/max packet lengths allowed on NIX Rx links. + * + * With HW reset minlen value of 60byte, HW will treat ARP pkts + * as undersize and report them to SW as error pkts, hence + * setting it to 40 bytes. 
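
As a worked example of the Tx credit update above (CGX_FIFO_LEN = 65536 comes from cgx.h; the 4-LMAC split is an assumption for illustration):

/* lmac_fifo_len = 65536 / 4 = 16384 bytes per LMAC.
 * One credit represents 16 bytes, so:
 *   maxlen = 9212 (NIC_HW_MAX_FRS): (16384 - 9212) / 16 = 448 credits
 *   maxlen = 1518 (classic MTU):    (16384 - 1518) / 16 = 929 credits
 * A larger PF/VF MTU therefore leaves fewer credits, keeping FIFO
 * room for a maximum-sized frame in flight.
 */
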
+ */ + for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) { + rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), + NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); + } + + if (hw->sdp_links) { + link = hw->cgx_links + hw->lbk_links; + rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), + SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); + } + + /* Set credits for Tx links assuming max packet length allowed. + * This will be reconfigured based on MTU set for PF/VF. + */ + for (cgx = 0; cgx < hw->cgx; cgx++) { + lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); + tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16; + /* Enable credits and set credit pkt count to max allowed */ + tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); + slink = cgx * hw->lmac_per_cgx; + for (link = slink; link < (slink + lmac_cnt); link++) { + rvu_write64(rvu, blkaddr, + NIX_AF_TX_LINKX_NORM_CREDIT(link), + tx_credits); + rvu_write64(rvu, blkaddr, + NIX_AF_TX_LINKX_EXPR_CREDIT(link), + tx_credits); + } + } + + /* Set Tx credits for LBK link */ + slink = hw->cgx_links; + for (link = slink; link < (slink + hw->lbk_links); link++) { + tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */ + /* Enable credits and set credit pkt count to max allowed */ + tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); + rvu_write64(rvu, blkaddr, + NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits); + rvu_write64(rvu, blkaddr, + NIX_AF_TX_LINKX_EXPR_CREDIT(link), tx_credits); + } +} + static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr) { int idx, err; @@ -1830,10 +2141,10 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) /* Set admin queue endianness */ cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG); #ifdef __BIG_ENDIAN - cfg |= BIT_ULL(1); + cfg |= BIT_ULL(8); rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); #else - cfg &= ~BIT_ULL(1); + cfg &= ~BIT_ULL(8); rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); #endif @@ -1870,6 +2181,14 @@ int rvu_nix_init(struct rvu *rvu) return 0; block = &hw->block[blkaddr]; + /* As per a HW errata in 9xxx A0 silicon, NIX may corrupt + * internal state when conditional clocks are turned off. + * Hence enable them. 
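
The credit registers written in nix_link_config() above pack three fields. The layout below is inferred from the shifts and masks in this patch; the field names are descriptive placeholders, not HRM names:

/* NIX_AF_TX_LINKX_NORM_CREDIT / _EXPR_CREDIT, as programmed here:
 *   [31:12] byte credits, in units of 16 bytes
 *   [10:2]  packet credits (0x1FF = maximum)
 *   [1]     credit enable
 * e.g. the LBK link gets (1000 << 12) | (0x1FF << 2) | BIT_ULL(1).
 */
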
+ */ + if (is_rvu_9xxx_A0(rvu)) + rvu_write64(rvu, blkaddr, NIX_AF_CFG, + rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x5EULL); + /* Calibrate X2P bus to check if CGX/LBK links are fine */ err = nix_calibrate_x2p(rvu, blkaddr); if (err) @@ -1922,6 +2241,9 @@ int rvu_nix_init(struct rvu *rvu) (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F); nix_rx_flowkey_alg_cfg(rvu, blkaddr); + + /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */ + nix_link_config(rvu, blkaddr); } return 0; } @@ -1955,5 +2277,88 @@ void rvu_nix_freemem(struct rvu *rvu) mcast = &nix_hw->mcast; qmem_free(rvu->dev, mcast->mce_ctx); qmem_free(rvu->dev, mcast->mcast_buf); + mutex_destroy(&mcast->mce_lock); + } +} + +static int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct rvu_hwinfo *hw = rvu->hw; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); + if (*nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + return 0; +} + +int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + int nixlf, err; + + err = nix_get_nixlf(rvu, pcifunc, &nixlf); + if (err) + return err; + + rvu_npc_enable_default_entries(rvu, pcifunc, nixlf); + return 0; +} + +int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + int nixlf, err; + + err = nix_get_nixlf(rvu, pcifunc, &nixlf); + if (err) + return err; + + rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); + return 0; +} + +void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct hwctx_disable_req ctx_req; + int err; + + ctx_req.hdr.pcifunc = pcifunc; + + /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */ + nix_interface_deinit(rvu, pcifunc, nixlf); + nix_rx_sync(rvu, blkaddr); + nix_txschq_free(rvu, pcifunc); + + if (pfvf->sq_ctx) { + ctx_req.ctype = NIX_AQ_CTYPE_SQ; + err = nix_lf_hwctx_disable(rvu, &ctx_req); + if (err) + dev_err(rvu->dev, "SQ ctx disable failed\n"); + } + + if (pfvf->rq_ctx) { + ctx_req.ctype = NIX_AQ_CTYPE_RQ; + err = nix_lf_hwctx_disable(rvu, &ctx_req); + if (err) + dev_err(rvu->dev, "RQ ctx disable failed\n"); + } + + if (pfvf->cq_ctx) { + ctx_req.ctype = NIX_AQ_CTYPE_CQ; + err = nix_lf_hwctx_disable(rvu, &ctx_req); + if (err) + dev_err(rvu->dev, "CQ ctx disable failed\n"); } + + nix_ctx_free(rvu, pfvf); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c index 7531fdc54fa1b96d047bd6e270f55c143f7e2e71..c0e165dfc40396c1945b19a92ca3886b4a14e95c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c @@ -241,14 +241,14 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) return err; } -int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu, +int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu, struct npa_aq_enq_req *req, struct npa_aq_enq_rsp *rsp) { return rvu_npa_aq_enq_inst(rvu, req, rsp); } -int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu, +int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req, struct msg_rsp *rsp) { @@ -273,7 +273,7 @@ static void npa_ctx_free(struct rvu *rvu, struct 
rvu_pfvf *pfvf) pfvf->npa_qints_ctx = NULL; } -int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu, +int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu, struct npa_lf_alloc_req *req, struct npa_lf_alloc_rsp *rsp) { @@ -372,7 +372,7 @@ int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu, return rc; } -int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; @@ -470,3 +470,20 @@ void rvu_npa_freemem(struct rvu *rvu) block = &hw->block[blkaddr]; rvu_aq_free(rvu, block->aq); } + +void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct hwctx_disable_req ctx_req; + + /* Disable all pools */ + ctx_req.hdr.pcifunc = pcifunc; + ctx_req.ctype = NPA_AQ_CTYPE_POOL; + npa_lf_hwctx_disable(rvu, &ctx_req); + + /* Disable all auras */ + ctx_req.ctype = NPA_AQ_CTYPE_AURA; + npa_lf_hwctx_disable(rvu, &ctx_req); + + npa_ctx_free(rvu, pfvf); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 23ff47f7efc56563ac12a2c586693502db45f562..6ea2b0e2df426dc63f3960b798a298e8880ed27a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -8,6 +8,7 @@ * published by the Free Software Foundation. */ +#include #include #include @@ -26,13 +27,10 @@ #define NPC_PARSE_RESULT_DMAC_OFFSET 8 -struct mcam_entry { -#define NPC_MAX_KWS_IN_KEY 7 /* Number of keywords in max keywidth */ - u64 kw[NPC_MAX_KWS_IN_KEY]; - u64 kw_mask[NPC_MAX_KWS_IN_KEY]; - u64 action; - u64 vtag_action; -}; +static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, u16 pcifunc); +static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam, + u16 pcifunc); void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf) { @@ -256,6 +254,46 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false); } +static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, u16 src, u16 dest) +{ + int dbank = npc_get_bank(mcam, dest); + int sbank = npc_get_bank(mcam, src); + u64 cfg, sreg, dreg; + int bank, i; + + src &= (mcam->banksize - 1); + dest &= (mcam->banksize - 1); + + /* Copy INTF's, W0's, W1's CAM0 and CAM1 configuration */ + for (bank = 0; bank < mcam->banks_per_entry; bank++) { + sreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank + bank, 0); + dreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(dest, dbank + bank, 0); + for (i = 0; i < 6; i++) { + cfg = rvu_read64(rvu, blkaddr, sreg + (i * 8)); + rvu_write64(rvu, blkaddr, dreg + (i * 8), cfg); + } + } + + /* Copy action */ + cfg = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(src, sbank)); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(dest, dbank), cfg); + + /* Copy TAG action */ + cfg = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank)); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_TAG_ACT(dest, dbank), cfg); + + /* Enable or disable */ + cfg = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CFG(src, sbank)); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg); +} + static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, int index) { @@ -269,12 +307,17 @@ static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam 
*mcam,
 void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
				  int nixlf, u64 chan, u8 *mac_addr)
 {
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
 	struct npc_mcam *mcam = &rvu->hw->mcam;
 	struct mcam_entry entry = { {0} };
 	struct nix_rx_action action;
 	int blkaddr, index, kwi;
 	u64 mac = 0;
 
+	/* AF's VFs work in promiscuous mode */
+	if (is_afvf(pcifunc))
+		return;
+
 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
 	if (blkaddr < 0)
 		return;
@@ -308,6 +351,17 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
 	entry.action = *(u64 *)&action;
 	npc_config_mcam_entry(rvu, mcam, blkaddr, index,
 			      NIX_INTF_RX, &entry, true);
+
+	/* Add VLAN matching, set up the action and save the entry for later */
+	entry.kw[0] |= (NPC_LT_LB_STAG | NPC_LT_LB_CTAG) << 20;
+	entry.kw_mask[0] |= (NPC_LT_LB_STAG & NPC_LT_LB_CTAG) << 20;
+
+	entry.vtag_action = VTAG0_VALID_BIT |
+			    FIELD_PREP(VTAG0_TYPE_MASK, 0) |
+			    FIELD_PREP(VTAG0_LID_MASK, NPC_LID_LA) |
+			    FIELD_PREP(VTAG0_RELPTR_MASK, 12);
+
+	memcpy(&pfvf->entry, &entry, sizeof(entry));
 }
 
 void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
@@ -315,15 +369,15 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
 {
 	struct npc_mcam *mcam = &rvu->hw->mcam;
 	struct mcam_entry entry = { {0} };
-	struct nix_rx_action action;
+	struct nix_rx_action action = { };
 	int blkaddr, index, kwi;
 
-	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
-	if (blkaddr < 0)
+	/* Only PF or AF VF can add a promiscuous entry */
+	if ((pcifunc & RVU_PFVF_FUNC_MASK) && !is_afvf(pcifunc))
 		return;
 
-	/* Only PF or AF VF can add a promiscuous entry */
-	if (pcifunc & RVU_PFVF_FUNC_MASK)
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
 		return;
 
 	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
@@ -347,7 +401,8 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
 			      NIX_INTF_RX, &entry, true);
 }
 
-void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
+static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc,
+				     int nixlf, bool enable)
 {
 	struct npc_mcam *mcam = &rvu->hw->mcam;
 	int blkaddr, index;
@@ -362,7 +417,17 @@ void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
 	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
 					 nixlf, NIXLF_PROMISC_ENTRY);
-	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+}
+
+void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+	npc_enadis_promisc_entry(rvu, pcifunc, nixlf, false);
+}
+
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+	npc_enadis_promisc_entry(rvu, pcifunc, nixlf, true);
 }
 
 void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
@@ -390,9 +455,28 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
 	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
 					 nixlf, NIXLF_BCAST_ENTRY);
-	/* Check for L2B bit and LMAC channel */
-	entry.kw[0] = BIT_ULL(25) | chan;
-	entry.kw_mask[0] = BIT_ULL(25) | 0xFFFULL;
+	/* Check for L2B bit and LMAC channel.
+	 * NOTE: The MKEX default profile is a reduced version (it keeps
+	 * the common match capabilities but ignores a few bits), used
+	 * as a stop-gap approach.
+	 * We care about L2B, which per the HRM's NPC_PARSE_KEX_S is at
+	 * BIT_POS[25]; here it moves to BIT_POS[13]. ERRCODE and ERRLEV
+	 * are ignored, else we would lose capability features needed
+	 * for CoS (from an ODP PoV), e.g. VLAN, DSCP.
+	 *
+	 * Reduced layout of the MKEX default profile -
+	 * includes the following (i.e. CHAN, L2/3{B/M}, LA, LB, LC, LD):
+	 *
+	 * BIT_POS[31:28] : LD
+	 * BIT_POS[27:24] : LC
+	 * BIT_POS[23:20] : LB
+	 * BIT_POS[19:16] : LA
+	 * BIT_POS[15:12] : L3B, L3M, L2B, L2M
+	 * BIT_POS[11:00] : CHAN
+	 *
+	 */
+	entry.kw[0] = BIT_ULL(13) | chan;
+	entry.kw_mask[0] = ~entry.kw[0] & (BIT_ULL(13) | 0xFFFULL);
 
 	*(u64 *)&action = 0x00;
 #ifdef MCAST_MCE
@@ -454,51 +538,95 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
 	rvu_write64(rvu, blkaddr,
 		    NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);
+
+	rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
 }
 
-void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
+				       int nixlf, bool enable)
 {
 	struct npc_mcam *mcam = &rvu->hw->mcam;
 	struct nix_rx_action action;
-	int blkaddr, index, bank;
+	int index, bank, blkaddr;
 
 	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
 	if (blkaddr < 0)
 		return;
 
-	/* Disable ucast MCAM match entry of this PF/VF */
+	/* Ucast MCAM match entry of this PF/VF */
 	index = npc_get_nixlf_mcam_index(mcam, pcifunc,
 					 nixlf, NIXLF_UCAST_ENTRY);
-	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+	npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
 
-	/* For PF, disable promisc and bcast MCAM match entries */
-	if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
-		index = npc_get_nixlf_mcam_index(mcam, pcifunc,
-						 nixlf, NIXLF_BCAST_ENTRY);
-		/* For bcast, disable only if it's action is not
-		 * packet replication, incase if action is replication
-		 * then this PF's nixlf is removed from bcast replication
-		 * list.
-		 */
-		bank = npc_get_bank(mcam, index);
-		index &= (mcam->banksize - 1);
-		*(u64 *)&action = rvu_read64(rvu, blkaddr,
-			NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
-		if (action.op != NIX_RX_ACTIONOP_MCAST)
-			npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+	/* For PF, ena/dis promisc and bcast MCAM match entries */
+	if (pcifunc & RVU_PFVF_FUNC_MASK)
+		return;
+
+	/* For bcast, enable/disable only if its action is not
+	 * packet replication; in case the action is replication,
+	 * this PF's nixlf is removed from the bcast replication
+	 * list.
+ */ + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_BCAST_ENTRY); + bank = npc_get_bank(mcam, index); + *(u64 *)&action = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(index & (mcam->banksize - 1), bank)); + if (action.op != NIX_RX_ACTIONOP_MCAST) + npc_enable_mcam_entry(rvu, mcam, + blkaddr, index, enable); + if (enable) + rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf); + else rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf); - } + + rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); } -#define LDATA_EXTRACT_CONFIG(intf, lid, ltype, ld, cfg) \ +void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf) +{ + npc_enadis_default_entries(rvu, pcifunc, nixlf, false); +} + +void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf) +{ + npc_enadis_default_entries(rvu, pcifunc, nixlf, true); +} + +void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + mutex_lock(&mcam->lock); + + /* Disable and free all MCAM entries mapped to this 'pcifunc' */ + npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc); + + /* Free all MCAM counters mapped to this 'pcifunc' */ + npc_mcam_free_all_counters(rvu, mcam, pcifunc); + + mutex_unlock(&mcam->lock); + + rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); +} + +#define SET_KEX_LD(intf, lid, ltype, ld, cfg) \ rvu_write64(rvu, blkaddr, \ NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg) -#define LDATA_FLAGS_CONFIG(intf, ld, flags, cfg) \ +#define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \ rvu_write64(rvu, blkaddr, \ NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg) +#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \ + (((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \ + ((flags_ena) << 6) | ((key_ofs) & 0x3F)) + static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr) { struct npc_mcam *mcam = &rvu->hw->mcam; @@ -514,28 +642,66 @@ static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr) */ for (lid = 0; lid < lid_count; lid++) { for (ltype = 0; ltype < 16; ltype++) { - LDATA_EXTRACT_CONFIG(NIX_INTF_RX, lid, ltype, 0, 0ULL); - LDATA_EXTRACT_CONFIG(NIX_INTF_RX, lid, ltype, 1, 0ULL); - LDATA_EXTRACT_CONFIG(NIX_INTF_TX, lid, ltype, 0, 0ULL); - LDATA_EXTRACT_CONFIG(NIX_INTF_TX, lid, ltype, 1, 0ULL); - - LDATA_FLAGS_CONFIG(NIX_INTF_RX, 0, ltype, 0ULL); - LDATA_FLAGS_CONFIG(NIX_INTF_RX, 1, ltype, 0ULL); - LDATA_FLAGS_CONFIG(NIX_INTF_TX, 0, ltype, 0ULL); - LDATA_FLAGS_CONFIG(NIX_INTF_TX, 1, ltype, 0ULL); + SET_KEX_LD(NIX_INTF_RX, lid, ltype, 0, 0ULL); + SET_KEX_LD(NIX_INTF_RX, lid, ltype, 1, 0ULL); + SET_KEX_LD(NIX_INTF_TX, lid, ltype, 0, 0ULL); + SET_KEX_LD(NIX_INTF_TX, lid, ltype, 1, 0ULL); + + SET_KEX_LDFLAGS(NIX_INTF_RX, 0, ltype, 0ULL); + SET_KEX_LDFLAGS(NIX_INTF_RX, 1, ltype, 0ULL); + SET_KEX_LDFLAGS(NIX_INTF_TX, 0, ltype, 0ULL); + SET_KEX_LDFLAGS(NIX_INTF_TX, 1, ltype, 0ULL); } } - /* If we plan to extract Outer IPv4 tuple for TCP/UDP pkts - * then 112bit key is not sufficient - */ if (mcam->keysize != NPC_MCAM_KEY_X2) return; - /* Start placing extracted data/flags from 64bit onwards, for now */ - /* Extract DMAC from the packet */ - cfg = (0x05 << 16) | BIT_ULL(7) | NPC_PARSE_RESULT_DMAC_OFFSET; - LDATA_EXTRACT_CONFIG(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg); + /* Default MCAM KEX profile */ + /* Layer A: Ethernet: */ + + /* DMAC: 6 bytes, KW1[47:0] */ + cfg = KEX_LD_CFG(0x05, 
0x0, 0x1, 0x0, NPC_PARSE_RESULT_DMAC_OFFSET); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg); + + /* Ethertype: 2 bytes, KW0[47:32] */ + cfg = KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 1, cfg); + + /* Layer B: Single VLAN (CTAG) */ + /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */ + cfg = KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_CTAG, 0, cfg); + + /* Layer B: Stacked VLAN (STAG|QinQ) */ + /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */ + cfg = KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG, 0, cfg); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_QINQ, 0, cfg); + + /* Layer C: IPv4 */ + /* SIP+DIP: 8 bytes, KW2[63:0] */ + cfg = KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 0, cfg); + /* TOS: 1 byte, KW1[63:56] */ + cfg = KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 1, cfg); + + /* Layer D:UDP */ + /* SPORT: 2 bytes, KW3[15:0] */ + cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 0, cfg); + /* DPORT: 2 bytes, KW3[31:16] */ + cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 1, cfg); + + /* Layer D:TCP */ + /* SPORT: 2 bytes, KW3[15:0] */ + cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 0, cfg); + /* DPORT: 2 bytes, KW3[31:16] */ + cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 1, cfg); } static void npc_config_kpuaction(struct rvu *rvu, int blkaddr, @@ -690,13 +856,14 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) { int nixlf_count = rvu_get_nixlf_count(rvu); struct npc_mcam *mcam = &rvu->hw->mcam; - int rsvd; + int rsvd, err; u64 cfg; /* Get HW limits */ cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST); mcam->banks = (cfg >> 44) & 0xF; mcam->banksize = (cfg >> 28) & 0xFFFF; + mcam->counters.max = (cfg >> 48) & 0xFFFF; /* Actual number of MCAM entries vary by entry size */ cfg = (rvu_read64(rvu, blkaddr, @@ -728,20 +895,82 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) return -ENOMEM; } - mcam->entries = mcam->total_entries - rsvd; - mcam->nixlf_offset = mcam->entries; + mcam->bmap_entries = mcam->total_entries - rsvd; + mcam->nixlf_offset = mcam->bmap_entries; mcam->pf_offset = mcam->nixlf_offset + nixlf_count; - spin_lock_init(&mcam->lock); + /* Allocate bitmaps for managing MCAM entries */ + mcam->bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(mcam->bmap_entries), + sizeof(long), GFP_KERNEL); + if (!mcam->bmap) + return -ENOMEM; + + mcam->bmap_reverse = devm_kcalloc(rvu->dev, + BITS_TO_LONGS(mcam->bmap_entries), + sizeof(long), GFP_KERNEL); + if (!mcam->bmap_reverse) + return -ENOMEM; + + mcam->bmap_fcnt = mcam->bmap_entries; + + /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ + mcam->entry2pfvf_map = devm_kcalloc(rvu->dev, mcam->bmap_entries, + sizeof(u16), GFP_KERNEL); + if (!mcam->entry2pfvf_map) + return -ENOMEM; + + /* Reserve 1/8th of MCAM entries at the bottom for low priority + * allocations and another 1/8th at the top for high priority + * allocations. 
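
To make the 1/8th zone reservation concrete, a worked example with an assumed table size (not a value from this patch):

/* Assume bmap_entries = 4096 after reserved entries:
 *   lprio_count = 4096 / 8 = 512 (already a BITS_PER_LONG multiple)
 *   lprio_start = 4096 - 512 = 3584, so lprio zone = 3584..4095
 *   hprio_count = 512, hprio_end = 512, so hprio zone = 0..511
 * Entries 512..3583 form the middle zone preferred by ordinary,
 * non-priority allocations.
 */
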
+ */ + mcam->lprio_count = mcam->bmap_entries / 8; + if (mcam->lprio_count > BITS_PER_LONG) + mcam->lprio_count = round_down(mcam->lprio_count, + BITS_PER_LONG); + mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count; + mcam->hprio_count = mcam->lprio_count; + mcam->hprio_end = mcam->hprio_count; + + /* Allocate bitmap for managing MCAM counters and memory + * for saving counter to RVU PFFUNC allocation mapping. + */ + err = rvu_alloc_bitmap(&mcam->counters); + if (err) + return err; + + mcam->cntr2pfvf_map = devm_kcalloc(rvu->dev, mcam->counters.max, + sizeof(u16), GFP_KERNEL); + if (!mcam->cntr2pfvf_map) + goto free_mem; + + /* Alloc memory for MCAM entry to counter mapping and for tracking + * counter's reference count. + */ + mcam->entry2cntr_map = devm_kcalloc(rvu->dev, mcam->bmap_entries, + sizeof(u16), GFP_KERNEL); + if (!mcam->entry2cntr_map) + goto free_mem; + + mcam->cntr_refcnt = devm_kcalloc(rvu->dev, mcam->counters.max, + sizeof(u16), GFP_KERNEL); + if (!mcam->cntr_refcnt) + goto free_mem; + + mutex_init(&mcam->lock); return 0; + +free_mem: + kfree(mcam->counters.bmap); + return -ENOMEM; } int rvu_npc_init(struct rvu *rvu) { struct npc_pkind *pkind = &rvu->hw->pkind; u64 keyz = NPC_MCAM_KEY_X2; - int blkaddr, err; + int blkaddr, entry, bank, err; + u64 cfg, nibble_ena; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) { @@ -749,6 +978,14 @@ int rvu_npc_init(struct rvu *rvu) return -ENODEV; } + /* First disable all MCAM entries, to stop traffic towards NIXLFs */ + cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST); + for (bank = 0; bank < ((cfg >> 44) & 0xF); bank++) { + for (entry = 0; entry < ((cfg >> 28) & 0xFFFF); entry++) + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0); + } + /* Allocate resource bimap for pkind*/ pkind->rsrc.max = (rvu_read64(rvu, blkaddr, NPC_AF_CONST1) >> 12) & 0xFF; @@ -780,13 +1017,18 @@ int rvu_npc_init(struct rvu *rvu) BIT_ULL(6) | BIT_ULL(2)); /* Set RX and TX side MCAM search key size. - * Also enable parse key extract nibbles suchthat except - * layer E to H, rest of the key is included for MCAM search. + * LA..LD (ltype only) + Channel */ + nibble_ena = 0x49247; rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX), - ((keyz & 0x3) << 32) | ((1ULL << 20) - 1)); + ((keyz & 0x3) << 32) | nibble_ena); + /* Due to an errata (35786) in A0 pass silicon, parse nibble enable + * configuration has to be identical for both Rx and Tx interfaces. + */ + if (!is_rvu_9xxx_A0(rvu)) + nibble_ena = (1ULL << 19) - 1; rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX), - ((keyz & 0x3) << 32) | ((1ULL << 20) - 1)); + ((keyz & 0x3) << 32) | nibble_ena); err = npc_mcam_rsrcs_init(rvu, blkaddr); if (err) @@ -811,6 +1053,1019 @@ int rvu_npc_init(struct rvu *rvu) void rvu_npc_freemem(struct rvu *rvu) { struct npc_pkind *pkind = &rvu->hw->pkind; + struct npc_mcam *mcam = &rvu->hw->mcam; kfree(pkind->rsrc.bmap); + kfree(mcam->counters.bmap); + mutex_destroy(&mcam->lock); +} + +static int npc_mcam_verify_entry(struct npc_mcam *mcam, + u16 pcifunc, int entry) +{ + /* Verify if entry is valid and if it is indeed + * allocated to the requesting PFFUNC. + */ + if (entry >= mcam->bmap_entries) + return NPC_MCAM_INVALID_REQ; + + if (pcifunc != mcam->entry2pfvf_map[entry]) + return NPC_MCAM_PERM_DENIED; + + return 0; +} + +static int npc_mcam_verify_counter(struct npc_mcam *mcam, + u16 pcifunc, int cntr) +{ + /* Verify if counter is valid and if it is indeed + * allocated to the requesting PFFUNC. 
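
The nibble_ena = 0x49247 value programmed into NPC_AF_INTFX_KEX_CFG in rvu_npc_init() above is a per-nibble enable mask for the parse key. The set bits are exact; the meaning attached to each nibble below follows the NPC parse-key layout and is an annotation, not HRM text:

/* 0x49247 sets bits {0, 1, 2, 6, 9, 12, 15, 18}:
 *   nibbles 0-2 : channel (12 bits)
 *   nibble  6   : L2/L3 bcast/mcast flags (assumption)
 *   nibble  9   : LA layer type
 *   nibble 12   : LB layer type
 *   nibble 15   : LC layer type
 *   nibble 18   : LD layer type
 * which matches the "LA..LD (ltype only) + Channel" comment above.
 */
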
+ */ + if (cntr >= mcam->counters.max) + return NPC_MCAM_INVALID_REQ; + + if (pcifunc != mcam->cntr2pfvf_map[cntr]) + return NPC_MCAM_PERM_DENIED; + + return 0; +} + +static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, u16 entry, u16 cntr) +{ + u16 index = entry & (mcam->banksize - 1); + u16 bank = npc_get_bank(mcam, entry); + + /* Set mapping and increment counter's refcnt */ + mcam->entry2cntr_map[entry] = cntr; + mcam->cntr_refcnt[cntr]++; + /* Enable stats */ + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), + BIT_ULL(9) | cntr); +} + +static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu, + struct npc_mcam *mcam, + int blkaddr, u16 entry, u16 cntr) +{ + u16 index = entry & (mcam->banksize - 1); + u16 bank = npc_get_bank(mcam, entry); + + /* Remove mapping and reduce counter's refcnt */ + mcam->entry2cntr_map[entry] = NPC_MCAM_INVALID_MAP; + mcam->cntr_refcnt[cntr]--; + /* Disable stats */ + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), 0x00); +} + +/* Sets MCAM entry in bitmap as used. Update + * reverse bitmap too. Should be called with + * 'mcam->lock' held. + */ +static void npc_mcam_set_bit(struct npc_mcam *mcam, u16 index) +{ + u16 entry, rentry; + + entry = index; + rentry = mcam->bmap_entries - index - 1; + + __set_bit(entry, mcam->bmap); + __set_bit(rentry, mcam->bmap_reverse); + mcam->bmap_fcnt--; +} + +/* Sets MCAM entry in bitmap as free. Update + * reverse bitmap too. Should be called with + * 'mcam->lock' held. + */ +static void npc_mcam_clear_bit(struct npc_mcam *mcam, u16 index) +{ + u16 entry, rentry; + + entry = index; + rentry = mcam->bmap_entries - index - 1; + + __clear_bit(entry, mcam->bmap); + __clear_bit(rentry, mcam->bmap_reverse); + mcam->bmap_fcnt++; +} + +static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, u16 pcifunc) +{ + u16 index, cntr; + + /* Scan all MCAM entries and free the ones mapped to 'pcifunc' */ + for (index = 0; index < mcam->bmap_entries; index++) { + if (mcam->entry2pfvf_map[index] == pcifunc) { + mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP; + /* Free the entry in bitmap */ + npc_mcam_clear_bit(mcam, index); + /* Disable the entry */ + npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false); + + /* Update entry2counter mapping */ + cntr = mcam->entry2cntr_map[index]; + if (cntr != NPC_MCAM_INVALID_MAP) + npc_unmap_mcam_entry_and_cntr(rvu, mcam, + blkaddr, index, + cntr); + } + } +} + +static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam, + u16 pcifunc) +{ + u16 cntr; + + /* Scan all MCAM counters and free the ones mapped to 'pcifunc' */ + for (cntr = 0; cntr < mcam->counters.max; cntr++) { + if (mcam->cntr2pfvf_map[cntr] == pcifunc) { + mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP; + mcam->cntr_refcnt[cntr] = 0; + rvu_free_rsrc(&mcam->counters, cntr); + /* This API is expected to be called after freeing + * MCAM entries, which inturn will remove + * 'entry to counter' mapping. + * No need to do it again. + */ + } + } +} + +/* Find area of contiguous free entries of size 'nr'. + * If not found return max contiguous free entries available. + */ +static u16 npc_mcam_find_zero_area(unsigned long *map, u16 size, u16 start, + u16 nr, u16 *max_area) +{ + u16 max_area_start = 0; + u16 index, next, end; + + *max_area = 0; + +again: + index = find_next_zero_bit(map, size, start); + if (index >= size) + return max_area_start; + + end = ((index + nr) >= size) ? 
size : index + nr;
+	next = find_next_bit(map, end, index);
+	if (*max_area < (next - index)) {
+		*max_area = next - index;
+		max_area_start = index;
+	}
+
+	if (next < end) {
+		start = next + 1;
+		goto again;
+	}
+
+	return max_area_start;
+}
+
+/* Find the number of free MCAM entries available
+ * within a range, i.e. between 'start' and 'end'.
+ */
+static u16 npc_mcam_get_free_count(unsigned long *map, u16 start, u16 end)
+{
+	u16 index, next;
+	u16 fcnt = 0;
+
+again:
+	if (start >= end)
+		return fcnt;
+
+	index = find_next_zero_bit(map, end, start);
+	if (index >= end)
+		return fcnt;
+
+	next = find_next_bit(map, end, index);
+	if (next <= end) {
+		fcnt += next - index;
+		start = next + 1;
+		goto again;
+	}
+
+	fcnt += end - index;
+	return fcnt;
+}
+
+static void
+npc_get_mcam_search_range_priority(struct npc_mcam *mcam,
+				   struct npc_mcam_alloc_entry_req *req,
+				   u16 *start, u16 *end, bool *reverse)
+{
+	u16 fcnt;
+
+	if (req->priority == NPC_MCAM_HIGHER_PRIO)
+		goto hprio;
+
+	/* For a low priority entry allocation
+	 * - If the reference entry is not in the hprio zone then
+	 *   search range: ref_entry to end.
+	 * - If the reference entry is in the hprio zone and the
+	 *   request can be accommodated in the non-hprio zone then
+	 *   search range: 'start of middle zone' to 'end'.
+	 * - else search in reverse, so that fewer hprio
+	 *   zone entries are allocated.
+	 */
+
+	*reverse = false;
+	*start = req->ref_entry + 1;
+	*end = mcam->bmap_entries;
+
+	if (req->ref_entry >= mcam->hprio_end)
+		return;
+
+	fcnt = npc_mcam_get_free_count(mcam->bmap,
+				       mcam->hprio_end, mcam->bmap_entries);
+	if (fcnt > req->count)
+		*start = mcam->hprio_end;
+	else
+		*reverse = true;
+	return;
+
+hprio:
+	/* For a high priority entry allocation, search is always
+	 * in reverse to preserve hprio zone entries.
+	 * - If the reference entry is not in the lprio zone then
+	 *   search range: 0 to ref_entry.
+	 * - If the reference entry is in the lprio zone and the
+	 *   request can be accommodated in the middle zone then
+	 *   search range: 'hprio_end' to 'lprio_start'.
+	 */
+
+	*reverse = true;
+	*start = 0;
+	*end = req->ref_entry;
+
+	if (req->ref_entry <= mcam->lprio_start)
+		return;
+
+	fcnt = npc_mcam_get_free_count(mcam->bmap,
+				       mcam->hprio_end, mcam->lprio_start);
+	if (fcnt < req->count)
+		return;
+	*start = mcam->hprio_end;
+	*end = mcam->lprio_start;
+}
+
+static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
+				  struct npc_mcam_alloc_entry_req *req,
+				  struct npc_mcam_alloc_entry_rsp *rsp)
+{
+	u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
+	u16 fcnt, hp_fcnt, lp_fcnt;
+	u16 start, end, index;
+	int entry, next_start;
+	bool reverse = false;
+	unsigned long *bmap;
+	u16 max_contig;
+
+	mutex_lock(&mcam->lock);
+
+	/* Check if there are any free entries */
+	if (!mcam->bmap_fcnt) {
+		mutex_unlock(&mcam->lock);
+		return NPC_MCAM_ALLOC_FAILED;
+	}
+
+	/* MCAM entries are divided into high priority, middle and
+	 * low priority zones. The idea is to avoid allocating the top
+	 * and bottom most entries as much as possible, to increase the
+	 * probability of honouring priority allocation requests.
+	 *
+	 * Two bitmaps are used for MCAM entry management:
+	 * mcam->bmap for forward search, i.e. '0 to mcam->bmap_entries'.
+	 * mcam->bmap_reverse for reverse search, i.e. 'mcam->bmap_entries to 0'.
+	 *
+	 * The reverse bitmap is used to allocate entries
+	 * - when a higher priority entry is requested
+	 * - when few free entries are available.
+	 * Lower priority entries out of the available free entries are
+	 * always chosen when the 'high vs low' question arises.
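
A worked illustration of the forward/reverse mirroring described above (sizes assumed for the example): with bmap_entries = 1024, forward index 100 corresponds to reverse index 1024 - 100 - 1 = 923. This is exactly the conversion npc_mcam_set_bit()/npc_mcam_clear_bit() apply to keep both bitmaps in sync, and the one applied below ('mcam->bmap_entries - index - 1', or '- index - max_contig' for a contiguous block) to translate indices found via the reverse bitmap back into real entry numbers.
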
+	/* Get the search range for a priority allocation request */
+	if (req->priority) {
+		npc_get_mcam_search_range_priority(mcam, req,
+						   &start, &end, &reverse);
+		goto alloc;
+	}
+
+	/* Find the search range for a non-priority allocation request.
+	 *
+	 * Get the MCAM free entry count in the middle zone.
+	 */
+	lp_fcnt = npc_mcam_get_free_count(mcam->bmap,
+					  mcam->lprio_start,
+					  mcam->bmap_entries);
+	hp_fcnt = npc_mcam_get_free_count(mcam->bmap, 0, mcam->hprio_end);
+	fcnt = mcam->bmap_fcnt - lp_fcnt - hp_fcnt;
+
+	/* Check if the request can be accommodated in the middle zone */
+	if (fcnt > req->count) {
+		start = mcam->hprio_end;
+		end = mcam->lprio_start;
+	} else if ((fcnt + (hp_fcnt / 2) + (lp_fcnt / 2)) > req->count) {
+		/* Expand the search zone from half of the hprio zone to
+		 * half of the lprio zone.
+		 */
+		start = mcam->hprio_end / 2;
+		end = mcam->bmap_entries - (mcam->lprio_count / 2);
+		reverse = true;
+	} else {
+		/* Not enough free entries, search all entries in reverse,
+		 * so that low priority ones will get used up.
+		 */
+		reverse = true;
+		start = 0;
+		end = mcam->bmap_entries;
+	}
+
+alloc:
+	if (reverse) {
+		bmap = mcam->bmap_reverse;
+		start = mcam->bmap_entries - start;
+		end = mcam->bmap_entries - end;
+		index = start;
+		start = end;
+		end = index;
+	} else {
+		bmap = mcam->bmap;
+	}
+
+	if (req->contig) {
+		/* Allocate the requested number of contiguous entries; if
+		 * unsuccessful, find the max contiguous entries available.
+		 */
+		index = npc_mcam_find_zero_area(bmap, end, start,
+						req->count, &max_contig);
+		rsp->count = max_contig;
+		if (reverse)
+			rsp->entry = mcam->bmap_entries - index - max_contig;
+		else
+			rsp->entry = index;
+	} else {
+		/* Allocate the requested number of non-contiguous entries;
+		 * if unsuccessful, allocate as many as possible.
+		 */
+		rsp->count = 0;
+		next_start = start;
+		for (entry = 0; entry < req->count; entry++) {
+			index = find_next_zero_bit(bmap, end, next_start);
+			if (index >= end)
+				break;
+
+			next_start = start + (index - start) + 1;
+
+			/* Save the entry's index */
+			if (reverse)
+				index = mcam->bmap_entries - index - 1;
+			entry_list[entry] = index;
+			rsp->count++;
+		}
+	}
+
+	/* If allocating the requested number of entries was unsuccessful,
+	 * expand the search range to the full bitmap length and retry.
+	 */
+	if (!req->priority && (rsp->count < req->count) &&
+	    ((end - start) != mcam->bmap_entries)) {
+		reverse = true;
+		start = 0;
+		end = mcam->bmap_entries;
+		goto alloc;
+	}
+
+	/* For priority entry allocation requests, if allocation
+	 * failed then expand the search to the max possible range
+	 * and retry.
+	 */
+	if (req->priority && rsp->count < req->count) {
+		if (req->priority == NPC_MCAM_LOWER_PRIO &&
+		    (start != (req->ref_entry + 1))) {
+			start = req->ref_entry + 1;
+			end = mcam->bmap_entries;
+			reverse = false;
+			goto alloc;
+		} else if ((req->priority == NPC_MCAM_HIGHER_PRIO) &&
+			   ((end - start) != req->ref_entry)) {
+			start = 0;
+			end = req->ref_entry;
+			reverse = true;
+			goto alloc;
+		}
+	}
+
+	/* Copy MCAM entry indices into the mbox response entry_list.
+	 * The requester always expects indices in ascending order, so
+	 * reverse the list if the reverse bitmap was used for allocation.
+	 */
+	if (!req->contig && rsp->count) {
+		index = 0;
+		for (entry = rsp->count - 1; entry >= 0; entry--) {
+			if (reverse)
+				rsp->entry_list[index++] = entry_list[entry];
+			else
+				rsp->entry_list[entry] = entry_list[entry];
+		}
+	}
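+
+	/* E.g. a reverse-bitmap allocation collecting forward indices
+	 * {900, 890, 880} returns them to the requester as
+	 * {880, 890, 900}.
+	 */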
+
+	/* Mark the allocated entries as used and set nixlf mapping */
+	for (entry = 0; entry < rsp->count; entry++) {
+		index = req->contig ?
+			(rsp->entry + entry) : rsp->entry_list[entry];
+		npc_mcam_set_bit(mcam, index);
+		mcam->entry2pfvf_map[index] = pcifunc;
+		mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
+	}
+
+	/* Update the available free count in the mbox response */
+	rsp->free_count = mcam->bmap_fcnt;
+
+	mutex_unlock(&mcam->lock);
+	return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
+					  struct npc_mcam_alloc_entry_req *req,
+					  struct npc_mcam_alloc_entry_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 pcifunc = req->hdr.pcifunc;
+	int blkaddr;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	rsp->entry = NPC_MCAM_ENTRY_INVALID;
+	rsp->free_count = 0;
+
+	/* Check if ref_entry is within range */
+	if (req->priority && req->ref_entry >= mcam->bmap_entries)
+		return NPC_MCAM_INVALID_REQ;
+
+	/* ref_entry can't be '0' if the requested priority is high.
+	 * It can't be the last entry if the requested priority is low.
+	 */
+	if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) ||
+	    ((req->ref_entry == (mcam->bmap_entries - 1)) &&
+	     req->priority == NPC_MCAM_LOWER_PRIO))
+		return NPC_MCAM_INVALID_REQ;
+
+	/* Since the list of allocated indices needs to be sent to the
+	 * requester, the max number of non-contiguous entries per mbox
+	 * msg is limited.
+	 */
+	if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES)
+		return NPC_MCAM_INVALID_REQ;
+
+	/* An alloc request from a PFFUNC with no NIXLF attached
+	 * should be denied.
+	 */
+	if (!is_nixlf_attached(rvu, pcifunc))
+		return NPC_MCAM_ALLOC_DENIED;
+
+	return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp);
+}
+
+int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
+					 struct npc_mcam_free_entry_req *req,
+					 struct msg_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 pcifunc = req->hdr.pcifunc;
+	int blkaddr, rc = 0;
+	u16 cntr;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	/* A free request from a PFFUNC with no NIXLF attached is invalid */
+	if (!is_nixlf_attached(rvu, pcifunc))
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+
+	if (req->all)
+		goto free_all;
+
+	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+	if (rc)
+		goto exit;
+
+	mcam->entry2pfvf_map[req->entry] = 0;
+	npc_mcam_clear_bit(mcam, req->entry);
+	npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
+
+	/* Update entry2counter mapping */
+	cntr = mcam->entry2cntr_map[req->entry];
+	if (cntr != NPC_MCAM_INVALID_MAP)
+		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+					      req->entry, cntr);
+
+	goto exit;
+
+free_all:
+	/* Free up all entries allocated to the requesting PFFUNC */
+	npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
+exit:
+	mutex_unlock(&mcam->lock);
+	return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
+					  struct npc_mcam_write_entry_req *req,
+					  struct msg_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 pcifunc = req->hdr.pcifunc;
+	int blkaddr, rc;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+	if (rc)
+		goto exit;
+
+	if (req->set_cntr &&
+	    npc_mcam_verify_counter(mcam, pcifunc, req->cntr)) {
+		rc = NPC_MCAM_INVALID_REQ;
+		goto exit;
+	}
+
+	if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX) {
+		rc = NPC_MCAM_INVALID_REQ;
+		goto exit;
+	}
+
+	npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, req->intf,
+			      &req->entry_data, req->enable_entry);
+
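+	/* Mapping the counter also sets BIT_ULL(9) in this entry's
+	 * STAT_ACT register, which enables hit stats for the rule
+	 * just written.
+	 */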
+	if (req->set_cntr)
+		npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+					    req->entry, req->cntr);
+
+	rc = 0;
+exit:
+	mutex_unlock(&mcam->lock);
+	return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
+					struct npc_mcam_ena_dis_entry_req *req,
+					struct msg_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 pcifunc = req->hdr.pcifunc;
+	int blkaddr, rc;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+	mutex_unlock(&mcam->lock);
+	if (rc)
+		return rc;
+
+	npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, true);
+
+	return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu,
+					struct npc_mcam_ena_dis_entry_req *req,
+					struct msg_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 pcifunc = req->hdr.pcifunc;
+	int blkaddr, rc;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+	mutex_unlock(&mcam->lock);
+	if (rc)
+		return rc;
+
+	npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
+
+	return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
+					  struct npc_mcam_shift_entry_req *req,
+					  struct npc_mcam_shift_entry_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 pcifunc = req->hdr.pcifunc;
+	u16 old_entry, new_entry;
+	u16 index, cntr;
+	int blkaddr, rc = 0;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	if (req->shift_count > NPC_MCAM_MAX_SHIFTS)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
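+
+	/* Each rule is copied to and enabled at its new index before
+	 * the old index is disabled, so the rule stays active for the
+	 * whole move.
+	 */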
+	for (index = 0; index < req->shift_count; index++) {
+		old_entry = req->curr_entry[index];
+		new_entry = req->new_entry[index];
+
+		/* Check that both the old and new entries are valid
+		 * and belong to this PFFUNC.
+		 */
+		rc = npc_mcam_verify_entry(mcam, pcifunc, old_entry);
+		if (rc)
+			break;
+
+		rc = npc_mcam_verify_entry(mcam, pcifunc, new_entry);
+		if (rc)
+			break;
+
+		/* new_entry should not have a counter mapped */
+		if (mcam->entry2cntr_map[new_entry] != NPC_MCAM_INVALID_MAP) {
+			rc = NPC_MCAM_PERM_DENIED;
+			break;
+		}
+
+		/* Disable the new_entry */
+		npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, false);
+
+		/* Copy the rule from the old entry to the new entry */
+		npc_copy_mcam_entry(rvu, mcam, blkaddr, old_entry, new_entry);
+
+		/* Copy the counter mapping, if any */
+		cntr = mcam->entry2cntr_map[old_entry];
+		if (cntr != NPC_MCAM_INVALID_MAP) {
+			npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+						      old_entry, cntr);
+			npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+						    new_entry, cntr);
+		}
+
+		/* Enable new_entry and disable old_entry */
+		npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, true);
+		npc_enable_mcam_entry(rvu, mcam, blkaddr, old_entry, false);
+	}
+
+	/* If the shift failed, report the index at which it failed */
+	if (index != req->shift_count) {
+		rc = NPC_MCAM_PERM_DENIED;
+		rsp->failed_entry_idx = index;
+	}
+
+	mutex_unlock(&mcam->lock);
+	return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
+			struct npc_mcam_alloc_counter_req *req,
+			struct npc_mcam_alloc_counter_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 pcifunc = req->hdr.pcifunc;
+	u16 max_contig, cntr;
+	int blkaddr, index;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	/* If the request is from a PFFUNC with no NIXLF attached, ignore */
+	if (!is_nixlf_attached(rvu, pcifunc))
+		return NPC_MCAM_INVALID_REQ;
+
+	/* Since the list of allocated counter IDs needs to be sent to
+	 * the requester, the max number of non-contiguous counters per
+	 * mbox msg is limited.
+	 */
+	if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+
+	/* Check if unused counters are available or not */
+	if (!rvu_rsrc_free_count(&mcam->counters)) {
+		mutex_unlock(&mcam->lock);
+		return NPC_MCAM_ALLOC_FAILED;
+	}
+
+	rsp->count = 0;
+
+	if (req->contig) {
+		/* Allocate the requested number of contiguous counters;
+		 * if unsuccessful, find the max contiguous counters
+		 * available.
+		 */
+		index = npc_mcam_find_zero_area(mcam->counters.bmap,
+						mcam->counters.max, 0,
+						req->count, &max_contig);
+		rsp->count = max_contig;
+		rsp->cntr = index;
+		for (cntr = index; cntr < (index + max_contig); cntr++) {
+			__set_bit(cntr, mcam->counters.bmap);
+			mcam->cntr2pfvf_map[cntr] = pcifunc;
+		}
+	} else {
+		/* Allocate the requested number of non-contiguous counters;
+		 * if unsuccessful, allocate as many as possible.
+		 */
+		for (cntr = 0; cntr < req->count; cntr++) {
+			index = rvu_alloc_rsrc(&mcam->counters);
+			if (index < 0)
+				break;
+			rsp->cntr_list[cntr] = index;
+			rsp->count++;
+			mcam->cntr2pfvf_map[index] = pcifunc;
+		}
+	}
+
+	mutex_unlock(&mcam->lock);
+	return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
+		struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 index, entry = 0;
+	int blkaddr, err;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+	if (err) {
+		mutex_unlock(&mcam->lock);
+		return err;
+	}
+
+	/* Mark the counter as free/unused */
+	mcam->cntr2pfvf_map[req->cntr] = NPC_MCAM_INVALID_MAP;
+	rvu_free_rsrc(&mcam->counters, req->cntr);
+
+	/* Disable stats for all MCAM entries which are using this
+	 * counter. 'entry' is advanced past 'index' before the map
+	 * check, so a non-matching entry cannot be rescanned forever.
+	 */
+	while (entry < mcam->bmap_entries) {
+		if (!mcam->cntr_refcnt[req->cntr])
+			break;
+
+		index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
+		if (index >= mcam->bmap_entries)
+			break;
+		entry = index + 1;
+		if (mcam->entry2cntr_map[index] != req->cntr)
+			continue;
+
+		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+					      index, req->cntr);
+	}
+
+	mutex_unlock(&mcam->lock);
+	return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
+		struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 index, entry = 0;
+	int blkaddr, rc;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	rc = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+	if (rc)
+		goto exit;
+
+	/* Unmap the MCAM entry and counter */
+	if (!req->all) {
+		rc = npc_mcam_verify_entry(mcam, req->hdr.pcifunc, req->entry);
+		if (rc)
+			goto exit;
+		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+					      req->entry, req->cntr);
+		goto exit;
+	}
+
+	/* Disable stats for all MCAM entries which are using this
+	 * counter. As above, 'entry' is advanced before the map check
+	 * to guarantee forward progress.
+	 */
+	while (entry < mcam->bmap_entries) {
+		if (!mcam->cntr_refcnt[req->cntr])
+			break;
+
+		index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
+		if (index >= mcam->bmap_entries)
+			break;
+		entry = index + 1;
+		if (mcam->entry2cntr_map[index] != req->cntr)
+			continue;
+
+		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+					      index, req->cntr);
+	}
+exit:
+	mutex_unlock(&mcam->lock);
+	return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu,
+		struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	int blkaddr, err;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+	mutex_unlock(&mcam->lock);
+	if (err)
+		return err;
+
+	rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr), 0x00);
+
+	return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu,
+			struct npc_mcam_oper_counter_req *req,
+			struct npc_mcam_oper_counter_rsp *rsp)
+{
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	int blkaddr, err;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	mutex_lock(&mcam->lock);
+	err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+	mutex_unlock(&mcam->lock);
+	if (err)
+		return err;
+
+	rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr));
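+	/* The hit count is reported from the lower 48 bits of the
+	 * MATCH_STAT register; mask off the rest before replying.
+	 */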
+	rsp->stat &= BIT_ULL(48) - 1;
+
+	return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
+			struct npc_mcam_alloc_and_write_entry_req *req,
+			struct npc_mcam_alloc_and_write_entry_rsp *rsp)
+{
+	struct npc_mcam_alloc_counter_req cntr_req;
+	struct npc_mcam_alloc_counter_rsp cntr_rsp;
+	struct npc_mcam_alloc_entry_req entry_req;
+	struct npc_mcam_alloc_entry_rsp entry_rsp;
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	u16 entry = NPC_MCAM_ENTRY_INVALID;
+	u16 cntr = NPC_MCAM_ENTRY_INVALID;
+	int blkaddr, rc;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NPC_MCAM_INVALID_REQ;
+
+	if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX)
+		return NPC_MCAM_INVALID_REQ;
+
+	/* Try to allocate an MCAM entry */
+	entry_req.hdr.pcifunc = req->hdr.pcifunc;
+	entry_req.contig = true;
+	entry_req.priority = req->priority;
+	entry_req.ref_entry = req->ref_entry;
+	entry_req.count = 1;
+
+	rc = rvu_mbox_handler_npc_mcam_alloc_entry(rvu,
+			&entry_req, &entry_rsp);
+	if (rc)
+		return rc;
+
+	if (!entry_rsp.count)
+		return NPC_MCAM_ALLOC_FAILED;
+
+	entry = entry_rsp.entry;
+
+	if (!req->alloc_cntr)
+		goto write_entry;
+
+	/* Now allocate a counter */
+	cntr_req.hdr.pcifunc = req->hdr.pcifunc;
+	cntr_req.contig = true;
+	cntr_req.count = 1;
+
+	rc = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
+	if (rc) {
+		/* Free the allocated MCAM entry */
+		mutex_lock(&mcam->lock);
+		mcam->entry2pfvf_map[entry] = 0;
+		npc_mcam_clear_bit(mcam, entry);
+		mutex_unlock(&mcam->lock);
+		return rc;
+	}
+
+	cntr = cntr_rsp.cntr;
+
+write_entry:
+	mutex_lock(&mcam->lock);
+	npc_config_mcam_entry(rvu, mcam, blkaddr, entry, req->intf,
+			      &req->entry_data, req->enable_entry);
+
+	if (req->alloc_cntr)
+		npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, entry, cntr);
+	mutex_unlock(&mcam->lock);
+
+	rsp->entry = entry;
+	rsp->cntr = cntr;
+
+	return 0;
+}
+
+#define GET_KEX_CFG(intf) \
+	rvu_read64(rvu, BLKADDR_NPC, NPC_AF_INTFX_KEX_CFG(intf))
+
+#define GET_KEX_FLAGS(ld) \
+	rvu_read64(rvu, BLKADDR_NPC, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld))
+
+#define GET_KEX_LD(intf, lid, lt, ld)	\
+	rvu_read64(rvu, BLKADDR_NPC,	\
+		   NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld))
+
+#define GET_KEX_LDFLAGS(intf, ld, fl)	\
+	rvu_read64(rvu, BLKADDR_NPC,	\
+		   NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, fl))
+
+int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
+				     struct npc_get_kex_cfg_rsp *rsp)
+{
+	int lid, lt, ld, fl;
+
+	rsp->rx_keyx_cfg = GET_KEX_CFG(NIX_INTF_RX);
+	rsp->tx_keyx_cfg = GET_KEX_CFG(NIX_INTF_TX);
+	for (lid = 0; lid < NPC_MAX_LID; lid++) {
+		for (lt = 0; lt < NPC_MAX_LT; lt++) {
+			for (ld = 0; ld < NPC_MAX_LD; ld++) {
+				rsp->intf_lid_lt_ld[NIX_INTF_RX][lid][lt][ld] =
+					GET_KEX_LD(NIX_INTF_RX, lid, lt, ld);
+				rsp->intf_lid_lt_ld[NIX_INTF_TX][lid][lt][ld] =
+					GET_KEX_LD(NIX_INTF_TX, lid, lt, ld);
+			}
+		}
+	}
+	for (ld = 0; ld < NPC_MAX_LD; ld++)
+		rsp->kex_ld_flags[ld] = GET_KEX_FLAGS(ld);
+
+	for (ld = 0; ld < NPC_MAX_LD; ld++) {
+		for (fl = 0; fl < NPC_MAX_LFL; fl++) {
+			rsp->intf_ld_flags[NIX_INTF_RX][ld][fl] =
+				GET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl);
+			rsp->intf_ld_flags[NIX_INTF_TX][ld][fl] =
+				GET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl);
+		}
+	}
+	return 0;
+}
+
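+/* Sync the rxvlan MCAM entry with the PF/VF's unicast entry: the
+ * unicast entry's current action and enable state are copied into
+ * the entry at 'rxvlan_index'.
+ */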
+int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+	struct npc_mcam *mcam = &rvu->hw->mcam;
+	int blkaddr, index;
+	bool enable;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	if (!pfvf->rxvlan)
+		return 0;
+
+	index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+					 NIXLF_UCAST_ENTRY);
+	pfvf->entry.action = npc_get_mcam_action(rvu, mcam, blkaddr, index);
+	enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, index);
+	npc_config_mcam_entry(rvu, mcam, blkaddr, pfvf->rxvlan_index,
+			      NIX_INTF_RX, &pfvf->entry, enable);
+
+	return 0;
+}