// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/dcbnl.h>
#include <linux/if_ether.h>
#include <linux/list.h>

#include "spectrum.h"
#include "core.h"
#include "port.h"
#include "reg.h"

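/* Software shadow of the shared buffer configuration: mlxsw_sp_sb_pr caches
 * the per-pool threshold mode and size, mlxsw_sp_sb_cm the per-{port, PG/TC}
 * quotas and occupancy, and mlxsw_sp_sb_pm the per-{port, pool} quotas and
 * occupancy.
 */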
struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;
	u32 size;
};

struct mlxsw_cp_sb_occ {
	u32 cur;
	u32 max;
};

struct mlxsw_sp_sb_cm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;
	struct mlxsw_cp_sb_occ occ;
};

#define MLXSW_SP_SB_INFI -1U

struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_cp_sb_occ occ;
};

struct mlxsw_sp_sb_pool_des {
	enum mlxsw_reg_sbxx_dir dir;
	u8 pool;
};

/* Order ingress pools before egress pools. */
static const struct mlxsw_sp_sb_pool_des mlxsw_sp_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},
};

#define MLXSW_SP_SB_POOL_DESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pool_dess)

#define MLXSW_SP_SB_ING_TC_COUNT 8
#define MLXSW_SP_SB_EG_TC_COUNT 16

struct mlxsw_sp_sb_port {
	struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];
	struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];
	struct mlxsw_sp_sb_pm pms[MLXSW_SP_SB_POOL_DESS_LEN];
};

struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr prs[MLXSW_SP_SB_POOL_DESS_LEN];
	struct mlxsw_sp_sb_port *ports;
	u32 cell_size;
	u64 sb_size;
};

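/* The shared buffer is managed in cells whose size is queried from the
 * device (the CELL_SIZE resource). These helpers convert between bytes and
 * cells; the bytes-to-cells direction rounds up.
 */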
u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
{
	return mlxsw_sp->sb->cell_size * cells;
}

u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
{
	return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
}

static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
						 u16 pool_index)
{
	return &mlxsw_sp->sb->prs[pool_index];
}

static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
{
	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
		return pg_buff < MLXSW_SP_SB_ING_TC_COUNT;
	else
		return pg_buff < MLXSW_SP_SB_EG_TC_COUNT;
}

static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
						 u8 local_port, u8 pg_buff,
						 enum mlxsw_reg_sbxx_dir dir)
{
	struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];

	WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir));
	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
		return &sb_port->ing_cms[pg_buff];
	else
		return &sb_port->eg_cms[pg_buff];
}

static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
						 u8 local_port, u16 pool_index)
{
	return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
}

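/* Configure a pool via the SBPR register and update the cached
 * mlxsw_sp_sb_pr on success. With infi_size the device sizes the pool to the
 * whole shared buffer, so the cache records sb_size converted to cells.
 */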
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				enum mlxsw_reg_sbpr_mode mode,
				u32 size, bool infi_size)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp_sb_pool_dess[pool_index];
	char sbpr_pl[MLXSW_REG_SBPR_LEN];
	struct mlxsw_sp_sb_pr *pr;
	int err;

	mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode,
			    size, infi_size);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
	if (err)
		return err;

	if (infi_size)
		size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp->sb->sb_size);
	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
	pr->mode = mode;
	pr->size = size;
	return 0;
}

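/* Bind a {port, PG/TC} to a pool and set its quotas via the SBCM register.
 * The cache is only updated for PGs/TCs that exist in the requested
 * direction (see mlxsw_sp_sb_cm_exists()).
 */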
static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pg_buff, u32 min_buff, u32 max_buff,
				bool infi_max, u16 pool_index)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp_sb_pool_dess[pool_index];
	char sbcm_pl[MLXSW_REG_SBCM_LEN];
	struct mlxsw_sp_sb_cm *cm;
	int err;

	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir,
			    min_buff, max_buff, infi_max, des->pool);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
	if (err)
		return err;

	if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) {
		if (infi_max)
			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
							mlxsw_sp->sb->sb_size);

		cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff,
					des->dir);
		cm->min_buff = min_buff;
		cm->max_buff = max_buff;
		cm->pool_index = pool_index;
	}
	return 0;
}

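/* Set the per-{port, pool} quotas via the SBPM register and cache them. */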
static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u16 pool_index, u32 min_buff, u32 max_buff)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp_sb_pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;
	int err;

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
			    min_buff, max_buff);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
	if (err)
		return err;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	pm->min_buff = min_buff;
	pm->max_buff = max_buff;
	return 0;
}

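/* Issue an SBPM query with the clear bit set, resetting the maximum
 * occupancy watermark of the {port, pool}. The transaction is queued on
 * bulk_list and completed later by mlxsw_reg_trans_bulk_wait().
 */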
static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp_sb_pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    true, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list, NULL, 0);
}

static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbpm_pl, size_t sbpm_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;

	mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}

static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp_sb_pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    false, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list,
				     mlxsw_sp_sb_pm_occ_query_cb,
				     (unsigned long) pm);
}

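/* Byte sizes of the per-port headroom buffers programmed via PBMC. Only
 * buffers 0 and 9 are given storage; index 8 (MLXSW_SP_PB_UNUSED) is skipped
 * below and the remaining buffers default to zero.
 */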
static const u16 mlxsw_sp_pbs[] = {
	[0] = 2 * ETH_FRAME_LEN,
	[9] = 2 * MLXSW_PORT_MAX_MTU,
};

#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
#define MLXSW_SP_PB_UNUSED 8

static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
			    0xffff, 0xffff / 2);
	for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
		u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp_pbs[i]);

		if (i == MLXSW_SP_PB_UNUSED)
			continue;
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
	}
	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
					 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}

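/* Map all eight switch priorities to port buffer 0 via the PPTB register. */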
static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	char pptb_pl[MLXSW_REG_PPTB_LEN];
	int i;

	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
			       pptb_pl);
}

static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
	if (err)
		return err;
	return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
}

static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);

	mlxsw_sp->sb->ports = kcalloc(max_ports,
				      sizeof(struct mlxsw_sp_sb_port),
				      GFP_KERNEL);
	if (!mlxsw_sp->sb->ports)
		return -ENOMEM;
	return 0;
}

static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->sb->ports);
}

#define MLXSW_SP_SB_PR_INGRESS_SIZE	12440000
#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
#define MLXSW_SP_SB_PR_EGRESS_SIZE	13232000

#define MLXSW_SP_SB_PR(_mode, _size)	\
	{				\
		.mode = _mode,		\
		.size = _size,		\
	}

static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs[] = {
	/* Ingress pools. */
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP_SB_PR_INGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP_SB_PR_INGRESS_MNG_SIZE),
	/* Egress pools. */
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_EGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
};

#define MLXSW_SP_SB_PRS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs)

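/* Program pool sizes and threshold modes from a table of defaults. Pools
 * whose size is MLXSW_SP_SB_INFI are written with the infinite-size flag
 * instead of an explicit cell count.
 */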
static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
				const struct mlxsw_sp_sb_pr *prs,
				size_t prs_len)
{
	int i;
	int err;

	for (i = 0; i < prs_len; i++) {
		u32 size = prs[i].size;
		u32 size_cells;

		if (size == MLXSW_SP_SB_INFI) {
			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
						   0, true);
		} else {
			size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
						   size_cells, false);
		}
		if (err)
			return err;
	}
	return 0;
}

#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = _pool,			\
	}

static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM(10000, 8, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(20000, 1, 3),
};

#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)

static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(1500, 9, 4),
	MLXSW_SP_SB_CM(0, 13440000, 8),
	MLXSW_SP_SB_CM(0, 13440000, 8),
	MLXSW_SP_SB_CM(0, 13440000, 8),
	MLXSW_SP_SB_CM(0, 13440000, 8),
	MLXSW_SP_SB_CM(0, 13440000, 8),
	MLXSW_SP_SB_CM(0, 13440000, 8),
	MLXSW_SP_SB_CM(0, 13440000, 8),
	MLXSW_SP_SB_CM(0, 13440000, 8),
	MLXSW_SP_SB_CM(1, 0xff, 4),
};

#define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress)

#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 4)

static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
};

#define MLXSW_SP_CPU_PORT_SB_MCS_LEN \
	ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)

static bool
mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
}

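/* Apply a table of per-{PG/TC} quotas to one port in one direction. Minimum
 * quotas are given in bytes and converted to cells; maximum quotas are
 * converted only for static pools, since dynamic thresholds are encoded as
 * alpha indexes rather than sizes.
 */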
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_cm *cms,
				  size_t cms_len)
{
	int i;
	int err;

	for (i = 0; i < cms_len; i++) {
		const struct mlxsw_sp_sb_cm *cm;
		u32 min_buff;
		u32 max_buff;

		if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue; /* PG number 8 does not exist, skip it */
		cm = &cms[i];
		if (WARN_ON(mlxsw_sp_sb_pool_dess[cm->pool_index].dir != dir))
			continue;

		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
		max_buff = cm->max_buff;
		if (max_buff == MLXSW_SP_SB_INFI) {
			err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
						   min_buff, 0,
						   true, cm->pool_index);
		} else {
			if (mlxsw_sp_sb_pool_is_static(mlxsw_sp,
						       cm->pool_index))
				max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
								max_buff);
			err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
						   min_buff, max_buff,
						   false, cm->pool_index);
		}
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
				     mlxsw_sp_port->local_port,
				     MLXSW_REG_SBXX_DIR_INGRESS,
				     mlxsw_sp_sb_cms_ingress,
				     MLXSW_SP_SB_CMS_INGRESS_LEN);
	if (err)
		return err;
	return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
				      mlxsw_sp_port->local_port,
				      MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_sb_cms_egress,
				      MLXSW_SP_SB_CMS_EGRESS_LEN);
}

static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
	return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_cpu_port_sb_cms,
				      MLXSW_SP_CPU_PORT_SB_MCS_LEN);
}

#define MLXSW_SP_SB_PM(_min_buff, _max_buff)	\
	{					\
		.min_buff = _min_buff,		\
		.max_buff = _max_buff,		\
	}

static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = {
	/* Ingress pools. */
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	/* Egress pools. */
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};

#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms)

static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int i;
	int err;

	for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) {
		const struct mlxsw_sp_sb_pm *pm = &mlxsw_sp_sb_pms[i];
		u32 max_buff;
		u32 min_buff;

		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
		max_buff = pm->max_buff;
		if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
		err = mlxsw_sp_sb_pm_write(mlxsw_sp, mlxsw_sp_port->local_port,
					   i, min_buff, max_buff);
		if (err)
			return err;
	}
	return 0;
}

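/* Default shared buffer quotas for multicast packets, programmed per
 * priority via the SBMM register. All entries use pool index 4 (the first
 * egress pool) with a dynamic threshold.
 */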
struct mlxsw_sp_sb_mm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;
};

#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = _pool,			\
	}

static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
	MLXSW_SP_SB_MM(20000, 0xff, 4),
};

#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)

static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
	char sbmm_pl[MLXSW_REG_SBMM_LEN];
	int i;
	int err;

	for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
		const struct mlxsw_sp_sb_pool_des *des;
		const struct mlxsw_sp_sb_mm *mc;
		u32 min_buff;

		mc = &mlxsw_sp_sb_mms[i];
		des = &mlxsw_sp_sb_pool_dess[mc->pool_index];
		/* All pools used by sb_mm's are initialized using dynamic
		 * thresholds, therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
		mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
				    des->pool);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
		if (err)
			return err;
	}
	return 0;
}

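/* The pool descriptor array lists all ingress pools before any egress pool,
 * so the index of the first egress descriptor doubles as the ingress pool
 * count reported to devlink.
 */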
static void mlxsw_sp_pool_count(u16 *p_ingress_len, u16 *p_egress_len)
{
	int i;

	for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; ++i)
		if (mlxsw_sp_sb_pool_dess[i].dir == MLXSW_REG_SBXX_DIR_EGRESS)
			goto out;
	WARN(1, "No egress pools\n");

out:
	*p_ingress_len = i;
	*p_egress_len = MLXSW_SP_SB_POOL_DESS_LEN - i;
}

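/* Allocate the shared buffer state, program the pool, CPU port and multicast
 * defaults, and register the shared buffer with devlink.
 */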
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
	u16 ing_pool_count;
	u16 eg_pool_count;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
		return -EIO;

	mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
	if (!mlxsw_sp->sb)
		return -ENOMEM;
	mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
	mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						   MAX_BUFFER_SIZE);

	err = mlxsw_sp_sb_ports_init(mlxsw_sp);
	if (err)
		goto err_sb_ports_init;
	err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp_sb_prs,
				   MLXSW_SP_SB_PRS_LEN);
	if (err)
		goto err_sb_prs_init;
	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
	if (err)
		goto err_sb_cpu_port_sb_cms_init;
	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
	if (err)
		goto err_sb_mms_init;
	mlxsw_sp_pool_count(&ing_pool_count, &eg_pool_count);
	err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
				  mlxsw_sp->sb->sb_size,
				  ing_pool_count,
				  eg_pool_count,
				  MLXSW_SP_SB_ING_TC_COUNT,
				  MLXSW_SP_SB_EG_TC_COUNT);
	if (err)
		goto err_devlink_sb_register;

	return 0;

err_devlink_sb_register:
err_sb_mms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
err_sb_ports_init:
	kfree(mlxsw_sp->sb);
	return err;
}

void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
	devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
	kfree(mlxsw_sp->sb);
}

int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);

	return err;
}

int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info)
{
	enum mlxsw_reg_sbxx_dir dir = mlxsw_sp_sb_pool_dess[pool_index].dir;
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_pr *pr;

	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
	pool_info->pool_type = (enum devlink_sb_pool_type) dir;
	pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
	pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
	return 0;
}

int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
	enum mlxsw_reg_sbpr_mode mode;

	if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE))
		return -EINVAL;

	mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
	return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
				    pool_size, false);
}

#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */

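/* devlink expresses thresholds in bytes for static pools and as an alpha
 * index for dynamic pools, while the registers take cells and a max_buff
 * value offset by two; convert between the two representations.
 */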
static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				     u32 max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
		return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
	return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
}

static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				    u32 threshold, u32 *p_max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
		int val;

		val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
		if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
		    val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
			return -EINVAL;
		*p_max_buff = val;
	} else {
		*p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
	}
	return 0;
}

int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool_index);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
						 pm->max_buff);
	return 0;
}

int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u32 max_buff;
	int err;

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
				       threshold, &max_buff);
	if (err)
		return err;

	return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
				    0, max_buff);
}

int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index,
						 cm->max_buff);
	*p_pool_index = cm->pool_index;
	return 0;
}

int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	u32 max_buff;
	int err;

	if (dir != mlxsw_sp_sb_pool_dess[pool_index].dir)
		return -EINVAL;

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
				       threshold, &max_buff);
	if (err)
		return err;

	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
				    0, max_buff, false, pool_index);
}

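/* A single SBSR query returns at most MLXSW_REG_SBSR_REC_MAX_COUNT occupancy
 * records. Each selected port contributes one record per ingress PG and one
 * per egress TC, which bounds how many ports fit in one batch.
 */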
#define MASKED_COUNT_MAX \
	(MLXSW_REG_SBSR_REC_MAX_COUNT / \
	 (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))

struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
	u8 masked_count;
	u8 local_port_1;
};

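/* Unpack the SBSR occupancy records into the CM cache. Records arrive in the
 * order they were requested: all ingress PGs of each port in the batch,
 * followed by all egress TCs.
 */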
static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbsr_pl, size_t sbsr_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	u8 masked_count;
	u8 local_port;
	int rec_index = 0;
	struct mlxsw_sp_sb_cm *cm;
	int i;

	memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_INGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_EGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
}

int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	unsigned long cb_priv;
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	u8 masked_count;
	u8 local_port_1;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	local_port++;
	local_port_1 = local_port;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, false);
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; i++) {
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	cb_ctx.masked_count = masked_count;
	cb_ctx.local_port_1 = local_port_1;
	memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
				    cb_priv);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core))
		goto next_batch;

out:
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}

int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	unsigned int masked_count;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	local_port++;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, true);
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; i++) {
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, NULL, 0);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core))
		goto next_batch;

out:
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}

int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_cur, u32 *p_max)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool_index);

	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
	return 0;
}

int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u32 *p_cur, u32 *p_max)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
	return 0;
}