spectrum_buffers.c 27.6 KB
Newer Older
1 2
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 4 5

#include <linux/kernel.h>
#include <linux/types.h>
6
#include <linux/dcbnl.h>
7
#include <linux/if_ether.h>
8
#include <linux/list.h>
9 10 11 12 13 14

#include "spectrum.h"
#include "core.h"
#include "port.h"
#include "reg.h"

15 16 17 18 19 20 21 22 23 24 25 26 27
/* Cached pool record (PR): configured threshold mode and size in cells. */
struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;
	u32 size;
};

/* Occupancy snapshot: current usage and watermark, in cells (converted
 * to bytes only when reported to devlink).
 */
struct mlxsw_cp_sb_occ {
	u32 cur;
	u32 max;
};

struct mlxsw_sp_sb_cm {
	u32 min_buff;
	u32 max_buff;
28
	u16 pool_index;
29 30 31 32 33 34 35 36 37
	struct mlxsw_cp_sb_occ occ;
};

/* Cached per-{port, pool} quota (PM) and its occupancy snapshot. */
struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_cp_sb_occ occ;
};

38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57
/* Descriptor identifying a hardware pool: direction plus pool number. */
struct mlxsw_sp_sb_pool_des {
	enum mlxsw_reg_sbxx_dir dir;
	u8 pool;
};

/* Order ingress pools before egress pools. */
/* The u16 pool_index used throughout this file is an index into this
 * table, not a raw hardware pool number.
 */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},
};

#define MLXSW_SP_SB_POOL_DESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pool_dess)

58 59
#define MLXSW_SP_SB_ING_TC_COUNT 8
#define MLXSW_SP_SB_EG_TC_COUNT 16
60 61

struct mlxsw_sp_sb_port {
62 63
	struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];
	struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];
64
	struct mlxsw_sp_sb_pm pms[MLXSW_SP_SB_POOL_DESS_LEN];
65 66 67
};

struct mlxsw_sp_sb {
68
	struct mlxsw_sp_sb_pr prs[MLXSW_SP_SB_POOL_DESS_LEN];
69 70 71 72 73 74 75 76 77 78 79 80 81 82
	struct mlxsw_sp_sb_port *ports;
	u32 cell_size;
};

/* Convert a cell count to bytes using the device's cell size. */
u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
{
	u32 cell_size = mlxsw_sp->sb->cell_size;

	return cells * cell_size;
}

/* Convert bytes to cells, rounding up to a whole cell. */
u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
{
	u32 cell_size = mlxsw_sp->sb->cell_size;

	return DIV_ROUND_UP(bytes, cell_size);
}

83
static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
84
						 u16 pool_index)
85
{
86
	return &mlxsw_sp->sb->prs[pool_index];
87 88
}

89 90 91 92 93 94 95 96
/* A CM exists only for ingress PGs 0..7 and egress TCs 0..15. */
static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
{
	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
		return pg_buff < MLXSW_SP_SB_ING_TC_COUNT;
	else
		return pg_buff < MLXSW_SP_SB_EG_TC_COUNT;
}

97 98 99 100
static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
						 u8 local_port, u8 pg_buff,
						 enum mlxsw_reg_sbxx_dir dir)
{
101 102 103 104 105 106 107
	struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];

	WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir));
	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
		return &sb_port->ing_cms[pg_buff];
	else
		return &sb_port->eg_cms[pg_buff];
108 109 110
}

static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
111
						 u8 local_port, u16 pool_index)
112
{
113
	return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
114 115
}

116
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
117 118
				enum mlxsw_reg_sbpr_mode mode, u32 size)
{
119 120
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp_sb_pool_dess[pool_index];
121
	char sbpr_pl[MLXSW_REG_SBPR_LEN];
122 123
	struct mlxsw_sp_sb_pr *pr;
	int err;
124

125
	mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode, size);
126 127 128 129
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
	if (err)
		return err;

130
	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
131 132 133
	pr->mode = mode;
	pr->size = size;
	return 0;
134 135 136
}

/* Program a per-{port, PG/TC} binding via the SBCM register; on success
 * update the cache, but only when the (pg_buff, dir) pair has a CM.
 */
static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pg_buff, u32 min_buff, u32 max_buff,
				u16 pool_index)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp_sb_pool_dess[pool_index];
	char sbcm_pl[MLXSW_REG_SBCM_LEN];
	int err;

	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir,
			    min_buff, max_buff, des->pool);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
	if (err)
		return err;
	if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) {
		struct mlxsw_sp_sb_cm *cm;

		cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff,
					des->dir);
		cm->min_buff = min_buff;
		cm->max_buff = max_buff;
		cm->pool_index = pool_index;
	}
	return 0;
}

static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
163
				u16 pool_index, u32 min_buff, u32 max_buff)
164
{
165 166
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp_sb_pool_dess[pool_index];
167
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
168 169
	struct mlxsw_sp_sb_pm *pm;
	int err;
170

171
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
172
			    min_buff, max_buff);
173 174 175 176
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
	if (err)
		return err;

177
	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
178 179 180
	pm->min_buff = min_buff;
	pm->max_buff = max_buff;
	return 0;
181 182
}

183
static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
184
				    u16 pool_index, struct list_head *bulk_list)
185
{
186 187
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp_sb_pool_dess[pool_index];
188 189
	char sbpm_pl[MLXSW_REG_SBPM_LEN];

190 191
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    true, 0, 0);
192 193 194 195 196 197 198 199 200 201 202 203 204 205
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list, NULL, 0);
}

/* Bulk-query completion callback: unpack SBPM occupancy (cur/max) into
 * the PM record whose pointer was passed via @cb_priv.
 */
static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbpm_pl, size_t sbpm_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;

	mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}

static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
206
				    u16 pool_index, struct list_head *bulk_list)
207
{
208 209
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp_sb_pool_dess[pool_index];
210 211 212
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;

213 214 215
	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    false, 0, 0);
216 217 218 219 220 221
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list,
				     mlxsw_sp_sb_pm_occ_query_cb,
				     (unsigned long) pm);
}

222
static const u16 mlxsw_sp_pbs[] = {
223 224
	[0] = 2 * ETH_FRAME_LEN,
	[9] = 2 * MLXSW_PORT_MAX_MTU,
225 226 227
};

#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
228
#define MLXSW_SP_PB_UNUSED 8
229 230 231

static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
232
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
233 234 235 236 237 238
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
			    0xffff, 0xffff / 2);
	for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
239 240
		u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp_pbs[i]);

241
		if (i == MLXSW_SP_PB_UNUSED)
242
			continue;
243
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
244
	}
245 246
	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
					 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
247
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
248 249
}

250 251 252 253 254 255 256
static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	char pptb_pl[MLXSW_REG_PPTB_LEN];
	int i;

	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
257
		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
258 259 260 261 262 263 264 265 266 267 268 269 270 271
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
			       pptb_pl);
}

/* Initialize port headroom: buffer sizes first, then the priority to
 * buffer mapping.
 */
static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err = mlxsw_sp_port_pb_init(mlxsw_sp_port);

	return err ? err : mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
}

272 273 274 275
static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);

276 277 278 279
	mlxsw_sp->sb->ports = kcalloc(max_ports,
				      sizeof(struct mlxsw_sp_sb_port),
				      GFP_KERNEL);
	if (!mlxsw_sp->sb->ports)
280 281 282 283 284 285
		return -ENOMEM;
	return 0;
}

static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
{
286
	kfree(mlxsw_sp->sb->ports);
287 288 289
}

/* Default pool sizes, in bytes (converted to cells at init time). */
#define MLXSW_SP_SB_PR_INGRESS_SIZE	12440000
#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
#define MLXSW_SP_SB_PR_EGRESS_SIZE	13232000

#define MLXSW_SP_SB_PR(_mode, _size)	\
	{				\
		.mode = _mode,		\
		.size = _size,		\
	}

299 300
static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs[] = {
	/* Ingress pools. */
301
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
302
		       MLXSW_SP_SB_PR_INGRESS_SIZE),
303 304
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
305
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
306
		       MLXSW_SP_SB_PR_INGRESS_MNG_SIZE),
307
	/* Egress pools. */
308
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_EGRESS_SIZE),
309 310
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
311
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
312 313
};

314
#define MLXSW_SP_SB_PRS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs)
315

316 317 318
static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
				const struct mlxsw_sp_sb_pr *prs,
				size_t prs_len)
319 320 321 322
{
	int i;
	int err;

323
	for (i = 0; i < prs_len; i++) {
324
		u32 size = mlxsw_sp_bytes_cells(mlxsw_sp, prs[i].size);
325

326
		err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode, size);
327 328 329 330 331 332
		if (err)
			return err;
	}
	return 0;
}

#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = _pool,			\
	}

340
static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
341
	MLXSW_SP_SB_CM(10000, 8, 0),
342 343 344 345 346 347 348
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
349
	MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
350
	MLXSW_SP_SB_CM(20000, 1, 3),
351 352
};

353 354 355
#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)

/* Default egress CMs, one per TC 0..16; min_buff in bytes. */
static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(0, 140000, 8),
	MLXSW_SP_SB_CM(0, 140000, 8),
	MLXSW_SP_SB_CM(0, 140000, 8),
	MLXSW_SP_SB_CM(0, 140000, 8),
	MLXSW_SP_SB_CM(0, 140000, 8),
	MLXSW_SP_SB_CM(0, 140000, 8),
	MLXSW_SP_SB_CM(0, 140000, 8),
	MLXSW_SP_SB_CM(0, 140000, 8),
	MLXSW_SP_SB_CM(1, 0xff, 4),
};

#define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress)

377
#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 4)
378 379

static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
380
	MLXSW_SP_CPU_PORT_SB_CM,
381 382 383 384 385
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
386
	MLXSW_SP_CPU_PORT_SB_CM,
387
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
412 413 414 415 416
};

#define MLXSW_SP_CPU_PORT_SB_MCS_LEN \
	ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)

417 418 419 420
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_cm *cms,
				  size_t cms_len)
421 422 423 424 425 426
{
	int i;
	int err;

	for (i = 0; i < cms_len; i++) {
		const struct mlxsw_sp_sb_cm *cm;
427
		u32 min_buff;
428

429 430
		if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue; /* PG number 8 does not exist, skip it */
431
		cm = &cms[i];
432 433 434
		if (WARN_ON(mlxsw_sp_sb_pool_dess[cm->pool_index].dir != dir))
			continue;

435 436 437 438
		/* All pools are initialized using dynamic thresholds,
		 * therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
439 440 441
		err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
					   min_buff, cm->max_buff,
					   cm->pool_index);
442 443 444 445 446 447 448 449
		if (err)
			return err;
	}
	return 0;
}

/* Program the default ingress and egress CM tables for a port. */
static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
				     mlxsw_sp_port->local_port,
				     MLXSW_REG_SBXX_DIR_INGRESS,
				     mlxsw_sp_sb_cms_ingress,
				     MLXSW_SP_SB_CMS_INGRESS_LEN);
	if (err)
		return err;
	return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
				      mlxsw_sp_port->local_port,
				      MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_sb_cms_egress,
				      MLXSW_SP_SB_CMS_EGRESS_LEN);
}

static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
468 469 470
	return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_cpu_port_sb_cms,
				      MLXSW_SP_CPU_PORT_SB_MCS_LEN);
471 472
}

#define MLXSW_SP_SB_PM(_min_buff, _max_buff)	\
	{					\
		.min_buff = _min_buff,		\
		.max_buff = _max_buff,		\
	}

479 480
static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = {
	/* Ingress pools. */
481 482 483 484
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
485
	/* Egress pools. */
486
	MLXSW_SP_SB_PM(0, 7),
487 488 489
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
490 491
};

492
#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms)
493

494
static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
495
{
496
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
497 498 499
	int i;
	int err;

500 501
	for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) {
		const struct mlxsw_sp_sb_pm *pm = &mlxsw_sp_sb_pms[i];
502

503 504
		err = mlxsw_sp_sb_pm_write(mlxsw_sp, mlxsw_sp_port->local_port,
					   i, pm->min_buff, pm->max_buff);
505 506 507 508 509 510 511 512 513
		if (err)
			return err;
	}
	return 0;
}

struct mlxsw_sp_sb_mm {
	u32 min_buff;
	u32 max_buff;
514
	u16 pool_index;
515 516
};

#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = _pool,			\
	}

/* Default multicast bindings; min_buff in bytes, one entry per prio. */
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
};

#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)

static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
	char sbmm_pl[MLXSW_REG_SBMM_LEN];
	int i;
	int err;

	for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
551
		const struct mlxsw_sp_sb_pool_des *des;
552
		const struct mlxsw_sp_sb_mm *mc;
553
		u32 min_buff;
554 555

		mc = &mlxsw_sp_sb_mms[i];
556
		des = &mlxsw_sp_sb_pool_dess[mc->pool_index];
557 558 559 560 561
		/* All pools are initialized using dynamic thresholds,
		 * therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
		mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
562
				    des->pool);
563 564 565 566 567 568 569
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
		if (err)
			return err;
	}
	return 0;
}

570 571 572 573 574 575 576 577 578 579 580 581 582 583
/* Split the pool descriptor table into its ingress and egress parts.
 * Relies on all ingress descriptors preceding all egress ones.
 */
static void mlxsw_sp_pool_count(u16 *p_ingress_len, u16 *p_egress_len)
{
	int i = 0;

	while (i < MLXSW_SP_SB_POOL_DESS_LEN &&
	       mlxsw_sp_sb_pool_dess[i].dir != MLXSW_REG_SBXX_DIR_EGRESS)
		i++;
	WARN(i == MLXSW_SP_SB_POOL_DESS_LEN, "No egress pools\n");

	*p_ingress_len = i;
	*p_egress_len = MLXSW_SP_SB_POOL_DESS_LEN - i;
}

584 585
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
586 587
	u16 ing_pool_count;
	u16 eg_pool_count;
588
	u64 sb_size;
589 590
	int err;

591 592 593
	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
		return -EIO;

594 595 596 597
	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
		return -EIO;
	sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE);

598 599 600 601 602
	mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
	if (!mlxsw_sp->sb)
		return -ENOMEM;
	mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);

603
	err = mlxsw_sp_sb_ports_init(mlxsw_sp);
604
	if (err)
605
		goto err_sb_ports_init;
606 607
	err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp_sb_prs,
				   MLXSW_SP_SB_PRS_LEN);
608 609
	if (err)
		goto err_sb_prs_init;
610 611
	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
	if (err)
612
		goto err_sb_cpu_port_sb_cms_init;
613
	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
614
	if (err)
615
		goto err_sb_mms_init;
616
	mlxsw_sp_pool_count(&ing_pool_count, &eg_pool_count);
617
	err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, sb_size,
618 619
				  ing_pool_count,
				  eg_pool_count,
620 621
				  MLXSW_SP_SB_ING_TC_COUNT,
				  MLXSW_SP_SB_EG_TC_COUNT);
622 623 624 625 626 627 628 629 630 631
	if (err)
		goto err_devlink_sb_register;

	return 0;

err_devlink_sb_register:
err_sb_mms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
632 633
err_sb_ports_init:
	kfree(mlxsw_sp->sb);
634
	return err;
635
}
636

637 638 639
/* Tear down in reverse order of mlxsw_sp_buffers_init(). */
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
	devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
	kfree(mlxsw_sp->sb);
}

/* Per-port shared buffer init: headroom, then CMs, then PMs. */
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);

	return err;
}
658 659 660 661 662

/* devlink op: report a pool's type, size (bytes) and threshold type
 * from the cached pool record.
 */
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info)
{
	enum mlxsw_reg_sbxx_dir dir = mlxsw_sp_sb_pool_dess[pool_index].dir;
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_pr *pr;

	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
	pool_info->pool_type = (enum devlink_sb_pool_type) dir;
	pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
	pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
	return 0;
}

/* devlink op: resize/reconfigure a pool. @size is in bytes and is
 * bounds-checked against the device's maximum buffer size.
 */
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
	enum mlxsw_reg_sbpr_mode mode;

	if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE))
		return -EINVAL;

	mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
	return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode, pool_size);
}

#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */

691 692
/* Translate a hardware max_buff to the devlink-facing threshold:
 * dynamic pools get the alpha offset removed; static pools report the
 * value converted from cells to bytes.
 */
static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				     u32 max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
		return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
	return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
}

701 702
/* Translate a devlink threshold into a hardware max_buff: for dynamic
 * pools apply the alpha offset and range-check; for static pools
 * convert bytes to cells. Returns -EINVAL when out of range.
 */
static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				    u32 threshold, u32 *p_max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
		int val;

		val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
		if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
		    val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
			return -EINVAL;
		*p_max_buff = val;
	} else {
		*p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
	}
	return 0;
}

/* devlink op: report a port-pool threshold from the cached PM. */
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool_index);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
						 pm->max_buff);
	return 0;
}

/* devlink op: set a port-pool threshold (min_buff fixed at 0). */
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u32 max_buff;
	int err;

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
				       threshold, &max_buff);
	if (err)
		return err;

	return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
				    0, max_buff);
}

/* devlink op: report the pool binding and threshold of a port TC. */
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index,
						 cm->max_buff);
	*p_pool_index = cm->pool_index;
	return 0;
}

/* devlink op: bind a port TC to a pool with a threshold. Rejects a
 * pool whose direction does not match @pool_type.
 */
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	u32 max_buff;
	int err;

	if (dir != mlxsw_sp_sb_pool_dess[pool_index].dir)
		return -EINVAL;

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
				       threshold, &max_buff);
	if (err)
		return err;

	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
				    0, max_buff, pool_index);
}
801 802

/* Maximum ports per SBSR batch: each masked-in port contributes one
 * record per ingress PG and one per egress TC.
 */
#define MASKED_COUNT_MAX \
	(MLXSW_REG_SBSR_REC_MAX_COUNT / \
	 (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))
805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826

/* Context for one SBSR batch, smuggled through the unsigned long
 * cb_priv: number of ports masked in and the first local port covered.
 */
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
	u8 masked_count;
	u8 local_port_1;
};

/* SBSR completion callback: walk the same ports the batch masked in —
 * starting at local_port_1, skipping unused ports — and unpack the
 * ingress records first, then the egress records, into the cached CMs.
 * Record order must mirror the masks set in the snapshot/clear loops.
 */
static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbsr_pl, size_t sbsr_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	u8 masked_count;
	u8 local_port;
	int rec_index = 0;
	struct mlxsw_sp_sb_cm *cm;
	int i;

	memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_INGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_EGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
}

int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	unsigned long cb_priv;
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	u8 masked_count;
	u8 local_port_1;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	local_port++;
	local_port_1 = local_port;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, false);
879
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
880
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
881
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
882
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
883
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
884 885 886 887
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
888
		for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; i++) {
889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	cb_ctx.masked_count = masked_count;
	cb_ctx.local_port_1 = local_port_1;
	memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
				    cb_priv);
	if (err)
		goto out;
907
	if (local_port < mlxsw_core_max_ports(mlxsw_core))
908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937
		goto next_batch;

out:
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}

int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	unsigned int masked_count;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	local_port++;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, true);
938
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
939
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
940
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
941
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
942
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
943 944 945 946
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
947
		for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; i++) {
948 949 950 951 952 953 954 955 956 957 958 959 960 961
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, NULL, 0);
	if (err)
		goto out;
962
	if (local_port < mlxsw_core_max_ports(mlxsw_core))
963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981
		goto next_batch;

out:
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}

/* devlink op: report cached port-pool occupancy, converted to bytes. */
int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_cur, u32 *p_max)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool_index);

	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
	return 0;
}

/* devlink op: report cached per-TC occupancy, converted to bytes. */
int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u32 *p_cur, u32 *p_max)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
	return 0;
}