Commit 0f230ca9 authored by David S. Miller

Merge branch 'mlxsw-tc-flower-offload-stats'

Jiri Pirko says:

====================
mlxsw: Add support for TC flower offload statistics

Arkadi says:
This patchset adds support for retrieving TC flower statistics for
offloaded rules, which include packet count, byte count and last-used
time stamp. Currently the statistics are gathered on a per-rule basis.

This patchset also includes a generic allocator for counters.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
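
For orientation, a condensed sketch (not part of the patch, error handling elided) of how the pieces added below cooperate for a single offloaded flower rule; it uses only helpers introduced by this series plus the existing tcf_action_stats_update() TC helper:

/* Sketch: lifecycle of the flow counter behind one offloaded rule. */
static void example_rule_counter_lifecycle(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_acl_rule_info *rulei,
					   struct tc_action *a)
{
	u64 packets, bytes;

	/* Rule creation: take a counter from the flow sub-pool and bind
	 * it to the rule with the new policing/counting flex action.
	 */
	mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index);
	mlxsw_afa_block_append_counter(rulei->act_block, rulei->counter_index);

	/* TC_CLSFLOWER_STATS: read the hardware counter and feed the
	 * values back to TC (the real code reports deltas and a last-used
	 * timestamp via mlxsw_sp_acl_rule_get_stats()).
	 */
	mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
				  &packets, &bytes);
	tcf_action_stats_update(a, bytes, packets, jiffies);

	/* Rule destruction: return the counter to the pool. */
	mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index);
}
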
......@@ -15,7 +15,8 @@ obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_switchdev.o spectrum_router.o \
spectrum_kvdl.o spectrum_acl_tcam.o \
spectrum_acl.o spectrum_flower.o
spectrum_acl.o spectrum_flower.o \
spectrum_cnt.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
mlxsw_minimal-objs := minimal.o
......@@ -760,3 +760,54 @@ int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
return err;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_fwd);
/* Policing and Counting Action
* ----------------------------
* Policing and Counting action is used for binding policer and counter
* to ACL rules.
*/
#define MLXSW_AFA_POLCNT_CODE 0x08
#define MLXSW_AFA_POLCNT_SIZE 1
enum mlxsw_afa_polcnt_counter_set_type {
/* No count */
MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_NO_COUNT = 0x00,
/* Count packets and bytes */
MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
/* Count only packets */
MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS = 0x05,
};
/* afa_polcnt_counter_set_type
* Counter set type for flow counters.
*/
MLXSW_ITEM32(afa, polcnt, counter_set_type, 0x04, 24, 8);
/* afa_polcnt_counter_index
* Counter index for flow counters.
*/
MLXSW_ITEM32(afa, polcnt, counter_index, 0x04, 0, 24);
static inline void
mlxsw_afa_polcnt_pack(char *payload,
enum mlxsw_afa_polcnt_counter_set_type set_type,
u32 counter_index)
{
mlxsw_afa_polcnt_counter_set_type_set(payload, set_type);
mlxsw_afa_polcnt_counter_index_set(payload, counter_index);
}
int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
u32 counter_index)
{
char *act = mlxsw_afa_block_append_action(block,
MLXSW_AFA_POLCNT_CODE,
MLXSW_AFA_POLCNT_SIZE);
if (!act)
return -ENOBUFS;
mlxsw_afa_polcnt_pack(act, MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_PACKETS_BYTES,
counter_index);
return 0;
}
EXPORT_SYMBOL(mlxsw_afa_block_append_counter);
......@@ -64,5 +64,7 @@ int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
u8 local_port, bool in_port);
int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block,
u16 vid, u8 pcp, u8 et);
int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block,
u32 counter_index);
#endif
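
To show where this fits among the existing flex actions: a caller appends the counter action before the terminal action, mirroring the "count action is inserted first" convention used by spectrum_flower.c further down. A minimal sketch, assuming the pre-existing mlxsw_afa_block_append_drop() helper (mlxsw_sp_acl_rulei_act_count() below is the in-tree caller):

/* Sketch: build a counted drop - counter action first, terminal drop last. */
static int example_append_counted_drop(struct mlxsw_afa_block *block,
				       u32 counter_index)
{
	int err;

	err = mlxsw_afa_block_append_counter(block, counter_index);
	if (err)
		return err;
	return mlxsw_afa_block_append_drop(block);
}
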
......@@ -5506,6 +5506,70 @@ static inline void mlxsw_reg_mpsc_pack(char *payload, u8 local_port, bool e,
mlxsw_reg_mpsc_rate_set(payload, rate);
}
/* MGPC - Monitoring General Purpose Counter Set Register
* The MGPC register retrieves and sets the General Purpose Counter Set.
*/
#define MLXSW_REG_MGPC_ID 0x9081
#define MLXSW_REG_MGPC_LEN 0x18
MLXSW_REG_DEFINE(mgpc, MLXSW_REG_MGPC_ID, MLXSW_REG_MGPC_LEN);
enum mlxsw_reg_mgpc_counter_set_type {
/* No count */
MLXSW_REG_MGPC_COUNTER_SET_TYPE_NO_COUNT = 0x00,
/* Count packets and bytes */
MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03,
/* Count only packets */
MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS = 0x05,
};
/* reg_mgpc_counter_set_type
* Counter set type.
* Access: OP
*/
MLXSW_ITEM32(reg, mgpc, counter_set_type, 0x00, 24, 8);
/* reg_mgpc_counter_index
* Counter index.
* Access: Index
*/
MLXSW_ITEM32(reg, mgpc, counter_index, 0x00, 0, 24);
enum mlxsw_reg_mgpc_opcode {
/* Nop */
MLXSW_REG_MGPC_OPCODE_NOP = 0x00,
/* Clear counters */
MLXSW_REG_MGPC_OPCODE_CLEAR = 0x08,
};
/* reg_mgpc_opcode
* Opcode.
* Access: OP
*/
MLXSW_ITEM32(reg, mgpc, opcode, 0x04, 28, 4);
/* reg_mgpc_byte_counter
* Byte counter value.
* Access: RW
*/
MLXSW_ITEM64(reg, mgpc, byte_counter, 0x08, 0, 64);
/* reg_mgpc_packet_counter
* Packet counter value.
* Access: RW
*/
MLXSW_ITEM64(reg, mgpc, packet_counter, 0x10, 0, 64);
static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index,
enum mlxsw_reg_mgpc_opcode opcode,
enum mlxsw_reg_mgpc_counter_set_type set_type)
{
MLXSW_REG_ZERO(mgpc, payload);
mlxsw_reg_mgpc_counter_index_set(payload, counter_index);
mlxsw_reg_mgpc_counter_set_type_set(payload, set_type);
mlxsw_reg_mgpc_opcode_set(payload, opcode);
}
/* SBPR - Shared Buffer Pools Register
* -----------------------------------
* The SBPR configures and retrieves the shared buffer pools and configuration.
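
A minimal sketch of driving the MGPC register defined above: pack the counter index with the NOP opcode and a packets+bytes set type, issue a query, then read the two 64-bit values (the mlxsw_sp_flow_counter_get()/_clear() helpers added to spectrum.c below follow the same pattern):

/* Sketch: read one packets+bytes flow counter without clearing it. */
static int example_mgpc_read(struct mlxsw_core *core, u32 counter_index,
			     u64 *packets, u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}
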
......@@ -5979,6 +6043,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mpar),
MLXSW_REG(mlcr),
MLXSW_REG(mpsc),
MLXSW_REG(mgpc),
MLXSW_REG(sbpr),
MLXSW_REG(sbcm),
MLXSW_REG(sbpm),
......
......@@ -43,7 +43,9 @@ enum mlxsw_res_id {
MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
MLXSW_RES_ID_MAX_TRAP_GROUPS,
MLXSW_RES_ID_COUNTER_POOL_SIZE,
MLXSW_RES_ID_MAX_SPAN,
MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
MLXSW_RES_ID_MAX_SYSTEM_PORT,
MLXSW_RES_ID_MAX_LAG,
MLXSW_RES_ID_MAX_LAG_MEMBERS,
......@@ -75,7 +77,9 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
[MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
[MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
[MLXSW_RES_ID_COUNTER_POOL_SIZE] = 0x2410,
[MLXSW_RES_ID_MAX_SPAN] = 0x2420,
[MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443,
[MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502,
[MLXSW_RES_ID_MAX_LAG] = 0x2520,
[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
......
......@@ -66,6 +66,7 @@
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
......@@ -138,6 +139,60 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
*/
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index, u64 *packets,
u64 *bytes)
{
char mgpc_pl[MLXSW_REG_MGPC_LEN];
int err;
mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
if (err)
return err;
*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
return 0;
}
static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index)
{
char mgpc_pl[MLXSW_REG_MGPC_LEN];
mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
MLXSW_REG_MGPC_COUNTER_SET_TYPE_PACKETS_BYTES);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int *p_counter_index)
{
int err;
err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
p_counter_index);
if (err)
return err;
err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
if (err)
goto err_counter_clear;
return 0;
err_counter_clear:
mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
*p_counter_index);
return err;
}
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index)
{
mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
counter_index);
}
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info)
{
......@@ -1379,6 +1434,9 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
tc->cls_flower);
return 0;
case TC_CLSFLOWER_STATS:
return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress,
tc->cls_flower);
default:
return -EOPNOTSUPP;
}
......@@ -3224,6 +3282,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_acl_init;
}
err = mlxsw_sp_counter_pool_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
goto err_counter_pool_init;
}
err = mlxsw_sp_ports_create(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
......@@ -3233,6 +3297,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
return 0;
err_ports_create:
mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
mlxsw_sp_span_fini(mlxsw_sp);
......@@ -3255,6 +3321,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_sp_ports_remove(mlxsw_sp);
mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_acl_fini(mlxsw_sp);
mlxsw_sp_span_fini(mlxsw_sp);
mlxsw_sp_router_fini(mlxsw_sp);
......
......@@ -246,6 +246,7 @@ struct mlxsw_sp_router {
};
struct mlxsw_sp_acl;
struct mlxsw_sp_counter_pool;
struct mlxsw_sp {
struct {
......@@ -281,6 +282,7 @@ struct mlxsw_sp {
DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
} kvdl;
struct mlxsw_sp_counter_pool *counter_pool;
struct {
struct mlxsw_sp_span_entry *entries;
int entries_count;
......@@ -586,6 +588,8 @@ struct mlxsw_sp_acl_rule_info {
unsigned int priority;
struct mlxsw_afk_element_values values;
struct mlxsw_afa_block *act_block;
unsigned int counter_index;
bool counter_valid;
};
enum mlxsw_sp_acl_profile {
......@@ -605,6 +609,8 @@ struct mlxsw_sp_acl_profile_ops {
void *ruleset_priv, void *rule_priv,
struct mlxsw_sp_acl_rule_info *rulei);
void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
bool *activity);
};
struct mlxsw_sp_acl_ops {
......@@ -648,6 +654,8 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
u32 action, u16 vid, u16 proto, u8 prio);
int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei);
struct mlxsw_sp_acl_rule;
......@@ -667,6 +675,9 @@ mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
unsigned long cookie);
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule);
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule *rule,
u64 *packets, u64 *bytes, u64 *last_use);
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
......@@ -677,5 +688,14 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
__be16 protocol, struct tc_cls_flower_offload *f);
void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
struct tc_cls_flower_offload *f);
int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
struct tc_cls_flower_offload *f);
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index, u64 *packets,
u64 *bytes);
int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
unsigned int *p_counter_index);
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index);
#endif
......@@ -50,10 +50,17 @@
#include "spectrum_acl_flex_keys.h"
struct mlxsw_sp_acl {
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_afk *afk;
struct mlxsw_afa *afa;
const struct mlxsw_sp_acl_ops *ops;
struct rhashtable ruleset_ht;
struct list_head rules;
struct {
struct delayed_work dw;
unsigned long interval; /* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
} rule_activity_update;
unsigned long priv[0];
/* priv has to be always the last item */
};
......@@ -80,9 +87,13 @@ struct mlxsw_sp_acl_ruleset {
struct mlxsw_sp_acl_rule {
struct rhash_head ht_node; /* Member of rule HT */
struct list_head list;
unsigned long cookie; /* HT key */
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule_info *rulei;
u64 last_used;
u64 last_packets;
u64 last_bytes;
unsigned long priv[0];
/* priv has to be always the last item */
};
......@@ -238,6 +249,27 @@ void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}
static int
mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei)
{
int err;
err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index);
if (err)
return err;
rulei->counter_valid = true;
return 0;
}
static void
mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei)
{
rulei->counter_valid = false;
mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index);
}
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
{
......@@ -364,6 +396,13 @@ int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
}
}
int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei)
{
return mlxsw_afa_block_append_counter(rulei->act_block,
rulei->counter_index);
}
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
......@@ -387,8 +426,14 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
err = PTR_ERR(rule->rulei);
goto err_rulei_create;
}
err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei);
if (err)
goto err_counter_alloc;
return rule;
err_counter_alloc:
mlxsw_sp_acl_rulei_destroy(rule->rulei);
err_rulei_create:
kfree(rule);
err_alloc:
......@@ -401,6 +446,7 @@ void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei);
mlxsw_sp_acl_rulei_destroy(rule->rulei);
kfree(rule);
mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
......@@ -422,6 +468,7 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_rhashtable_insert;
list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
return 0;
err_rhashtable_insert:
......@@ -435,6 +482,7 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
list_del(&rule->list);
rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
mlxsw_sp_acl_rule_ht_params);
ops->rule_del(mlxsw_sp, rule->priv);
......@@ -455,6 +503,90 @@ mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
return rule->rulei;
}
static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule *rule)
{
struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
bool active;
int err;
err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
if (err)
return err;
if (active)
rule->last_used = jiffies;
return 0;
}
static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
{
struct mlxsw_sp_acl_rule *rule;
int err;
/* Protect internal structures from changes */
rtnl_lock();
list_for_each_entry(rule, &acl->rules, list) {
err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
rule);
if (err)
goto err_rule_update;
}
rtnl_unlock();
return 0;
err_rule_update:
rtnl_unlock();
return err;
}
static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
{
unsigned long interval = acl->rule_activity_update.interval;
mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
msecs_to_jiffies(interval));
}
static void mlxsw_sp_acl_rul_activity_update_work(struct work_struct *work)
{
struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
rule_activity_update.dw.work);
int err;
err = mlxsw_sp_acl_rules_activity_update(acl);
if (err)
dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity");
mlxsw_sp_acl_rule_activity_work_schedule(acl);
}
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule *rule,
u64 *packets, u64 *bytes, u64 *last_use)
{
struct mlxsw_sp_acl_rule_info *rulei;
u64 current_packets;
u64 current_bytes;
int err;
rulei = mlxsw_sp_acl_rule_rulei(rule);
err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
&current_packets, &current_bytes);
if (err)
return err;
*packets = current_packets - rule->last_packets;
*bytes = current_bytes - rule->last_bytes;
*last_use = rule->last_used;
rule->last_bytes = current_bytes;
rule->last_packets = current_packets;
return 0;
}
#define MLXSW_SP_KDVL_ACT_EXT_SIZE 1
static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
......@@ -547,7 +679,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
if (!acl)
return -ENOMEM;
mlxsw_sp->acl = acl;
acl->mlxsw_sp = mlxsw_sp;
acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
ACL_FLEX_KEYS),
mlxsw_sp_afk_blocks,
......@@ -570,11 +702,18 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
if (err)
goto err_rhashtable_init;
INIT_LIST_HEAD(&acl->rules);
err = acl_ops->init(mlxsw_sp, acl->priv);
if (err)
goto err_acl_ops_init;
acl->ops = acl_ops;
/* Create the delayed work for the rule activity_update */
INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
mlxsw_sp_acl_rul_activity_update_work);
acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
return 0;
err_acl_ops_init:
......@@ -593,7 +732,9 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;
cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
acl_ops->fini(mlxsw_sp, acl->priv);
WARN_ON(!list_empty(&acl->rules));
rhashtable_destroy(&acl->ruleset_ht);
mlxsw_afa_destroy(acl->afa);
mlxsw_afk_destroy(acl->afk);
......
......@@ -561,6 +561,24 @@ mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
}
static int
mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region,
unsigned int offset,
bool *activity)
{
char ptce2_pl[MLXSW_REG_PTCE2_LEN];
int err;
mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
region->tcam_region_info, offset);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
if (err)
return err;
*activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
return 0;
}
#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
static int
......@@ -940,6 +958,19 @@ static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
}
static int
mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_entry *entry,
bool *activity)
{
struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
struct mlxsw_sp_acl_tcam_region *region = chunk->region;
return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region,
entry->parman_item.index,
activity);
}
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
MLXSW_AFK_ELEMENT_DMAC,
......@@ -1048,6 +1079,16 @@ mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
}
static int
mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
void *rule_priv, bool *activity)
{
struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
activity);
}
static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
.ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
.ruleset_add = mlxsw_sp_acl_tcam_flower_ruleset_add,
......@@ -1057,6 +1098,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
.rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
.rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
.rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
.rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
};
static const struct mlxsw_sp_acl_profile_ops *
......
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
* Copyright (c) 2017 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/bitops.h>
#include "spectrum_cnt.h"
#define MLXSW_SP_COUNTER_POOL_BANK_SIZE 4096
struct mlxsw_sp_counter_sub_pool {
unsigned int base_index;
unsigned int size;
unsigned int entry_size;
unsigned int bank_count;
};
struct mlxsw_sp_counter_pool {
unsigned int pool_size;
unsigned long *usage; /* Usage bitmap */
struct mlxsw_sp_counter_sub_pool *sub_pools;
};
static struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
[MLXSW_SP_COUNTER_SUB_POOL_FLOW] = {
.bank_count = 6,
},
};
static int mlxsw_sp_counter_pool_validate(struct mlxsw_sp *mlxsw_sp)
{
unsigned int total_bank_config = 0;
unsigned int pool_size;
int i;
pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
/* Check config is valid, no bank over subscription */
for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++)
total_bank_config += mlxsw_sp_counter_sub_pools[i].bank_count;
if (total_bank_config > pool_size / MLXSW_SP_COUNTER_POOL_BANK_SIZE + 1)
return -EINVAL;
return 0;
}
static int mlxsw_sp_counter_sub_pools_prepare(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_counter_sub_pool *sub_pool;
/* Prepare generic flow pool*/
sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_FLOW];
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_PACKETS_BYTES))
return -EIO;
sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
COUNTER_SIZE_PACKETS_BYTES);
return 0;
}
int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_counter_sub_pool *sub_pool;
struct mlxsw_sp_counter_pool *pool;
unsigned int base_index;
unsigned int map_size;
int i;
int err;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_POOL_SIZE))
return -EIO;
err = mlxsw_sp_counter_pool_validate(mlxsw_sp);
if (err)
return err;
err = mlxsw_sp_counter_sub_pools_prepare(mlxsw_sp);
if (err)
return err;
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
if (!pool)
return -ENOMEM;
pool->pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
pool->usage = kzalloc(map_size, GFP_KERNEL);
if (!pool->usage) {
err = -ENOMEM;
goto err_usage_alloc;
}
pool->sub_pools = mlxsw_sp_counter_sub_pools;
/* Allocation is based on bank count which should be
* specified for each sub pool statically.
*/
base_index = 0;
for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) {
sub_pool = &pool->sub_pools[i];
sub_pool->size = sub_pool->bank_count *
MLXSW_SP_COUNTER_POOL_BANK_SIZE;
sub_pool->base_index = base_index;
base_index += sub_pool->size;
/* The last bank can't be fully used */
if (sub_pool->base_index + sub_pool->size > pool->pool_size)
sub_pool->size = pool->pool_size - sub_pool->base_index;
}
mlxsw_sp->counter_pool = pool;
return 0;
err_usage_alloc:
kfree(pool);
return err;
}
void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
pool->pool_size);
kfree(pool->usage);
kfree(pool);
}
int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
unsigned int *p_counter_index)
{
struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
struct mlxsw_sp_counter_sub_pool *sub_pool;
unsigned int entry_index;
unsigned int stop_index;
int i;
sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
stop_index = sub_pool->base_index + sub_pool->size;
entry_index = sub_pool->base_index;
entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);
if (entry_index == stop_index)
return -ENOBUFS;
/* The sub-pools can contain non-integer number of entries
* so we must check for overflow
*/
if (entry_index + sub_pool->entry_size > stop_index)
return -ENOBUFS;
for (i = 0; i < sub_pool->entry_size; i++)
__set_bit(entry_index + i, pool->usage);
*p_counter_index = entry_index;
return 0;
}
void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
unsigned int counter_index)
{
struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
struct mlxsw_sp_counter_sub_pool *sub_pool;
int i;
if (WARN_ON(counter_index >= pool->pool_size))
return;
sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
for (i = 0; i < sub_pool->entry_size; i++)
__clear_bit(counter_index + i, pool->usage);
}
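
For scale: with 6 banks of 4096 entries the flow sub-pool spans 24576 pool entries, and each packets+bytes counter occupies COUNTER_SIZE_PACKETS_BYTES consecutive entries of the usage bitmap. A hedged sketch of a consumer of this allocator (the mlxsw_sp_flow_counter_* wrappers in spectrum.c above are the in-tree users):

/* Sketch: take one counter from the flow sub-pool and give it back. */
static int example_counter_pool_user(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int counter_index;
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     &counter_index);
	if (err)
		return err;

	/* ... counter_index may now be used in an MGPC query or bound to
	 * an ACL rule via the policing/counting action ...
	 */

	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
	return 0;
}
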
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h
* Copyright (c) 2017 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017 Arkadi Sharshevsky <arkadis@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _MLXSW_SPECTRUM_CNT_H
#define _MLXSW_SPECTRUM_CNT_H
#include "spectrum.h"
enum mlxsw_sp_counter_sub_pool_id {
MLXSW_SP_COUNTER_SUB_POOL_FLOW,
};
int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
unsigned int *p_counter_index);
void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_counter_sub_pool_id sub_pool_id,
unsigned int counter_index);
int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp);
#endif
......@@ -56,6 +56,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
if (tc_no_actions(exts))
return 0;
/* Count action is inserted first */
err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei);
if (err)
return err;
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
if (is_tcf_gact_shot(a)) {
......@@ -346,3 +351,47 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}
int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
struct tc_cls_flower_offload *f)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_acl_ruleset *ruleset;
struct mlxsw_sp_acl_rule *rule;
struct tc_action *a;
LIST_HEAD(actions);
u64 packets;
u64 lastuse;
u64 bytes;
int err;
ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
ingress,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (WARN_ON(IS_ERR(ruleset)))
return -EINVAL;
rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
if (!rule)
return -EINVAL;
err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
&lastuse);
if (err)
goto err_rule_get_stats;
preempt_disable();
tcf_exts_to_list(f->exts, &actions);
list_for_each_entry(a, &actions, list)
tcf_action_stats_update(a, bytes, packets, lastuse);
preempt_enable();
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
return 0;
err_rule_get_stats:
mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
return err;
}