/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "pci.h"
#include "ce.h"
#include "debug.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers.  When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 */
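
/*
 * Illustrative sketch only (not part of the original file): a hypothetical
 * HIF-style caller might pair the send path with send-completion handling
 * roughly as follows, assuming "pipe" was set up via ath10k_ce_alloc_pipe()
 * and ath10k_ce_init_pipe(), and "paddr" is the DMA-mapped address of the
 * buffer behind "skb":
 *
 *	ret = ath10k_ce_send(pipe, skb, paddr, nbytes, transfer_id, 0);
 *	if (ret)
 *		(unmap and requeue the buffer)
 *
 *	(later, from the send-completion path)
 *	while (ath10k_ce_completed_send_next(pipe, (void **)&skb) == 0)
 *		(unmap and free skb)
 *
 * The per-transfer context (skb here) is opaque to this layer and is simply
 * echoed back to the caller on completion.
 */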

static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
						       u32 ce_ctrl_addr,
						       unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
						      u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
}

static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
						      u32 ce_ctrl_addr,
						      unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
}

static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
}

static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32((ar),
					   (ce_ctrl_addr) + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr &  ~CE_CTRL1_DMAX_LENGTH_MASK) |
			   CE_CTRL1_DMAX_LENGTH_SET(n));
}

static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
}

static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
}

static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
}

static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     u32 addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
						u32 ce_ctrl_addr,
						unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_HIGH_MASK) |
			   SRC_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
						  u32 ce_ctrl_addr,
						  unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_LOW_MASK) |
			   SRC_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_HIGH_MASK) |
			   DST_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_LOW_MASK) |
			   DST_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~CE_WATERMARK_MASK);
}

static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
					       u32 ce_ctrl_addr)
{
	u32 misc_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + MISC_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
			   misc_ie_addr | CE_ERROR_MASK);
}

static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
						u32 ce_ctrl_addr)
{
	u32 misc_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + MISC_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
			   misc_ie_addr & ~CE_ERROR_MASK);
}

static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int mask)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
}

/*
 * Guts of ath10k_ce_send.
 * The caller takes responsibility for any needed locking.
 */
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
			  void *per_transfer_context,
			  u32 buffer,
			  unsigned int nbytes,
			  unsigned int transfer_id,
			  unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc *desc, sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	u32 desc_flags = 0;
	int ret = 0;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn(ar, "%s: sending more than we can (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

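	/* One ring entry is always left unused so that a full ring (write
	 * index one behind the software read index) can be told apart from
	 * an empty one.
	 */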
	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   write_index);

	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;
	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	sdesc.addr   = __cpu_to_le32(buffer);
	sdesc.nbytes = __cpu_to_le16(nbytes);
	sdesc.flags  = __cpu_to_le16(desc_flags);

	*desc = sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;

	/* Update Source Ring Write Index */
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	/* WORKAROUND: for a scatter-gather transfer the hardware write index
	 * is only bumped once the final (non-gather) fragment has been
	 * queued, so the whole list is handed to the hardware in one go.
	 */
	if (!(flags & CE_SEND_FLAG_GATHER))
		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

	src_ring->write_index = write_index;
exit:
	return ret;
}

void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_ring *src_ring = pipe->src_ring;
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ar_pci->ce_lock);

	/*
	 * This function must be called only if there is an incomplete
	 * scatter-gather transfer (before index register is updated)
	 * that needs to be cleaned up.
	 */
	if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
		return;

	if (WARN_ON_ONCE(src_ring->write_index ==
			 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
		return;

	src_ring->write_index--;
	src_ring->write_index &= src_ring->nentries_mask;

	src_ring->per_transfer_context[src_ring->write_index] = NULL;
}

int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_context,
		   u32 buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
				    buffer, nbytes, transfer_id, flags);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int delta;

	spin_lock_bh(&ar_pci->ce_lock);
	delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
			      pipe->src_ring->write_index,
			      pipe->src_ring->sw_index - 1);
	spin_unlock_bh(&ar_pci->ce_lock);

	return delta;
}

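/* Number of free buffer slots in the destination ring; the caller must hold
 * ce_lock.
 */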
int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;

	lockdep_assert_held(&ar_pci->ce_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}

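/* Post a single receive buffer to the destination ring; the caller must hold
 * ce_lock.  The per-transfer context (ctx) is echoed back by
 * ath10k_ce_completed_recv_next() once data lands in the buffer.
 */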
int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ar_pci->ce_lock);

	if ((pipe->id != 5) &&
	    CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
		return -ENOSPC;

	desc->addr = __cpu_to_le32(paddr);
	desc->nbytes = 0;

	dest_ring->per_transfer_context[write_index] = ctx;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;

	return 0;
}

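/* Advance the destination ring write index by nentries in one go after a
 * batch of receive buffers has been replenished (e.g. on the CE5/HTT Rx
 * path, which reuses its buffers in place).
 */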
void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	u32 ctrl_addr = pipe->ctrl_addr;
	u32 cur_write_idx = ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);

	/* Prevent the CE ring from getting stuck when it is full: keep the
	 * write index one entry behind the read index.
	 */
	if ((cur_write_idx + nentries) == dest_ring->sw_index)
		nentries -= 1;

	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;
}

int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

/*
 * Guts of ath10k_ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp,
					 unsigned int *nbytesp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;

	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
	struct ce_desc sdesc;
	u16 nbytes;

	/* Copy in one go for performance reasons */
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		return -EIO;
	}

	desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*nbytesp = nbytes;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	/* Copy engine 5 (HTT Rx) will reuse the same transfer context.
	 * So update the transfer context for all CEs except CE5.
	 */
	if (ce_state->id != 5)
		dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}

int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  unsigned int *nbytesp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_recv_next_nolock(ce_state,
						   per_transfer_contextp,
						   nbytesp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = __le32_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		/* sanity */
		dest_ring->per_transfer_context[sw_index] = NULL;
		desc->nbytes = 0;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */

		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		if (read_index == 0xffffffff)
			return -ENODEV;

		read_index &= nentries_mask;
		src_ring->hw_index = read_index;
	}

	read_index = src_ring->hw_index;

	if (read_index == sw_index)
		return -EIO;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			src_ring->per_transfer_context[sw_index];

	/* sanity */
	src_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;

	return 0;
}

/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	src_ring = ce_state->src_ring;

	if (!src_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct ce_desc *base = src_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = __le32_to_cpu(desc->addr);
		*nbytesp = __le16_to_cpu(desc->nbytes);
		*transfer_idp = MS(__le16_to_cpu(desc->flags),
						CE_DESC_FLAGS_META_DATA);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		/* sanity */
		src_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_send_next_nolock(ce_state,
						   per_transfer_contextp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

/*
 * Guts of interrupt handler for per-engine interrupts on a particular CE.
 *
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks.
 */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	u32 ctrl_addr = ce_state->ctrl_addr;

	spin_lock_bh(&ar_pci->ce_lock);

	/* Clear the copy-complete interrupts that will be handled here. */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
					  HOST_IS_COPY_COMPLETE_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);

	if (ce_state->recv_cb)
		ce_state->recv_cb(ce_state);

	if (ce_state->send_cb)
		ce_state->send_cb(ce_state);

	spin_lock_bh(&ar_pci->ce_lock);

	/*
	 * Misc CE interrupts are not being handled, but still need
	 * to be cleared.
	 */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);
}

/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs.
 */

void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
	int ce_id;
	u32 intr_summary;

	intr_summary = CE_INTERRUPT_SUMMARY(ar);

	for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
		if (intr_summary & (1 << ce_id))
			intr_summary &= ~(1 << ce_id);
		else
			/* no intr pending on this CE */
			continue;

		ath10k_ce_per_engine_service(ar, ce_id);
	}
}

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with ce_lock held.
 */
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
{
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;

	if ((!disable_copy_compl_intr) &&
	    (ce_state->send_cb || ce_state->recv_cb))
		ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
	else
		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
}

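/* Mask the copy-complete, error and watermark interrupts on every CE. */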
int ath10k_ce_disable_interrupts(struct ath10k *ar)
{
	int ce_id;

	for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
		u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
		ath10k_ce_error_intr_disable(ar, ctrl_addr);
		ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
	}

	return 0;
}

void ath10k_ce_enable_interrupts(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id;

	/* Skip the last copy engine, CE7, which is used as the diagnostic
	 * window; it uses polling and isn't initialized for interrupts.
	 */
	for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++)
		ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]);
}

static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	nentries = roundup_pow_of_two(attr->src_nentries);

	memset(src_ring->base_addr_owner_space, 0,
	       nentries * sizeof(struct ce_desc));

	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot init ce src ring id %d entries %d base_addr %pK\n",
		   ce_id, nentries, src_ring->base_addr_owner_space);

	return 0;
}

static int ath10k_ce_init_dest_ring(struct ath10k *ar,
				    unsigned int ce_id,
				    const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	nentries = roundup_pow_of_two(attr->dest_nentries);

	memset(dest_ring->base_addr_owner_space, 0,
	       nentries * sizeof(struct ce_desc));

	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
	dest_ring->sw_index &= dest_ring->nentries_mask;
	dest_ring->write_index =
		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
	dest_ring->write_index &= dest_ring->nentries_mask;

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
					  dest_ring->base_addr_ce_space);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "boot ce dest ring id %d entries %d base_addr %pK\n",
		   ce_id, nentries, dest_ring->base_addr_owner_space);

	return 0;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
			 const struct ce_attr *attr)
{
	struct ath10k_ce_ring *src_ring;
	u32 nentries = attr->src_nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(nentries);

	src_ring = kzalloc(sizeof(*src_ring) +
			   (nentries *
			    sizeof(*src_ring->per_transfer_context)),
			   GFP_KERNEL);
	if (src_ring == NULL)
		return ERR_PTR(-ENOMEM);

	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	src_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ar->dev,
				   (nentries * sizeof(struct ce_desc) +
				    CE_DESC_RING_ALIGN),
				   &base_addr, GFP_KERNEL);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(src_ring);
		return ERR_PTR(-ENOMEM);
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space = PTR_ALIGN(
			src_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space = ALIGN(
			src_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return src_ring;
}

static struct ath10k_ce_ring *
ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
			  const struct ce_attr *attr)
{
	struct ath10k_ce_ring *dest_ring;
	u32 nentries;
	dma_addr_t base_addr;

	nentries = roundup_pow_of_two(attr->dest_nentries);

	dest_ring = kzalloc(sizeof(*dest_ring) +
			    (nentries *
			     sizeof(*dest_ring->per_transfer_context)),
			    GFP_KERNEL);
	if (dest_ring == NULL)
		return ERR_PTR(-ENOMEM);

	dest_ring->nentries = nentries;
	dest_ring->nentries_mask = nentries - 1;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	dest_ring->base_addr_owner_space_unaligned =
		dma_zalloc_coherent(ar->dev,
				    (nentries * sizeof(struct ce_desc) +
				     CE_DESC_RING_ALIGN),
				    &base_addr, GFP_KERNEL);
	if (!dest_ring->base_addr_owner_space_unaligned) {
		kfree(dest_ring);
		return ERR_PTR(-ENOMEM);
	}

	dest_ring->base_addr_ce_space_unaligned = base_addr;

	dest_ring->base_addr_owner_space = PTR_ALIGN(
			dest_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	dest_ring->base_addr_ce_space = ALIGN(
			dest_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return dest_ring;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 */
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
			const struct ce_attr *attr)
{
	int ret;

	if (attr->src_nentries) {
		ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
				   ce_id, ret);
			return ret;
		}
	}

	if (attr->dest_nentries) {
		ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
		if (ret) {
			ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
				   ce_id, ret);
			return ret;
		}
	}

	return 0;
}

static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
}

static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
}

void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
{
	ath10k_ce_deinit_src_ring(ar, ce_id);
	ath10k_ce_deinit_dest_ring(ar, ce_id);
}

int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
			 const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	int ret;

	/*
	 * Make sure there's enough CE ringbuffer entries for HTT TX to avoid
	 * additional TX locking checks.
	 *
	 * For the lack of a better place do the check here.
	 */
	BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
	BUILD_BUG_ON(2 * TARGET_10X_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
	BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
		     (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));

	ce_state->ar = ar;
	ce_state->id = ce_id;
	ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
	ce_state->attr_flags = attr->flags;
	ce_state->src_sz_max = attr->src_sz_max;

	if (attr->src_nentries)
		ce_state->send_cb = attr->send_cb;

	if (attr->dest_nentries)
		ce_state->recv_cb = attr->recv_cb;

	if (attr->src_nentries) {
		ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
		if (IS_ERR(ce_state->src_ring)) {
			ret = PTR_ERR(ce_state->src_ring);
			ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n",
				   ce_id, ret);
			ce_state->src_ring = NULL;
			return ret;
		}
	}

	if (attr->dest_nentries) {
		ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
								attr);
		if (IS_ERR(ce_state->dest_ring)) {
			ret = PTR_ERR(ce_state->dest_ring);
			ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n",
				   ce_id, ret);
			ce_state->dest_ring = NULL;
			return ret;
		}
	}

	return 0;
}

void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];

	if (ce_state->src_ring) {
		dma_free_coherent(ar->dev,
				  (ce_state->src_ring->nentries *
				   sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  ce_state->src_ring->base_addr_owner_space,
				  ce_state->src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
	}

	if (ce_state->dest_ring) {
		dma_free_coherent(ar->dev,
				  (ce_state->dest_ring->nentries *
				   sizeof(struct ce_desc) +
				   CE_DESC_RING_ALIGN),
				  ce_state->dest_ring->base_addr_owner_space,
				  ce_state->dest_ring->base_addr_ce_space);
		kfree(ce_state->dest_ring);
	}

	ce_state->src_ring = NULL;
	ce_state->dest_ring = NULL;
}

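/* Dump the base address and hardware read/write indices of every CE,
 * optionally recording them in the firmware crash data.  Called with
 * ar->data_lock held.
 */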
void ath10k_ce_dump_registers(struct ath10k *ar,
			      struct ath10k_fw_crash_data *crash_data)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_crash_data ce;
	u32 addr, id;

	lockdep_assert_held(&ar->data_lock);

	ath10k_err(ar, "Copy Engine register dump:\n");

	spin_lock_bh(&ar_pci->ce_lock);
	for (id = 0; id < CE_COUNT; id++) {
		addr = ath10k_ce_base_address(ar, id);
		ce.base_addr = cpu_to_le32(addr);

		ce.src_wr_idx =
			cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr));
		ce.src_r_idx =
			cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr));
		ce.dst_wr_idx =
			cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr));
		ce.dst_r_idx =
			cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr));

		if (crash_data)
			crash_data->ce_crash_data[id] = ce;

		ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id,
			   le32_to_cpu(ce.base_addr),
			   le32_to_cpu(ce.src_wr_idx),
			   le32_to_cpu(ce.src_r_idx),
			   le32_to_cpu(ce.dst_wr_idx),
			   le32_to_cpu(ce.dst_r_idx));
	}

	spin_unlock_bh(&ar_pci->ce_lock);
}