/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, * USA * * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include #include #include #include #include #include #include #include "iwl-drv.h" #include "iwl-trans.h" #include "iwl-csr.h" #include "iwl-prph.h" #include "iwl-agn-hw.h" #include "internal.h" /* FIXME: need to abstract out TX command (once we know what it looks like) */ #include "dvm/commands.h" #define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \ (((1<cfg->base_params->num_of_queues) - 1) &\ (~(1<<(trans_pcie)->cmd_queue))) static int iwl_trans_rx_alloc(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rx_queue *rxq = &trans_pcie->rxq; struct device *dev = trans->dev; memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); spin_lock_init(&rxq->lock); if (WARN_ON(rxq->bd || rxq->rb_stts)) return -EINVAL; /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */ rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, &rxq->bd_dma, GFP_KERNEL); if (!rxq->bd) goto err_bd; /*Allocate the driver's pointer to receive buffer status */ rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), &rxq->rb_stts_dma, GFP_KERNEL); if (!rxq->rb_stts) goto err_rb_stts; return 0; err_rb_stts: dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, rxq->bd, rxq->bd_dma); memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma)); rxq->bd = NULL; err_bd: return -ENOMEM; } static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rx_queue *rxq = &trans_pcie->rxq; int i; /* Fill the rx_used queue with _all_ of the Rx buffers */ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { /* In the reset function, these buffers may have been allocated * to an SKB, so we need to unmap and free potential storage */ if (rxq->pool[i].page != NULL) { dma_unmap_page(trans->dev, rxq->pool[i].page_dma, PAGE_SIZE << trans_pcie->rx_page_order, DMA_FROM_DEVICE); __free_pages(rxq->pool[i].page, trans_pcie->rx_page_order); rxq->pool[i].page = NULL; } list_add_tail(&rxq->pool[i].list, &rxq->rx_used); } } static void iwl_trans_rx_hw_init(struct iwl_trans *trans, struct iwl_rx_queue *rxq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 rb_size; const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */ if (trans_pcie->rx_buf_size_8k) rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; else rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; /* Stop Rx DMA */ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); /* Reset driver's Rx queue write index */ iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); /* Tell device where to find RBD circular buffer in DRAM */ iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG, (u32)(rxq->bd_dma >> 8)); /* Tell device where in DRAM to update its Rx status */ iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4); /* Enable Rx DMA * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in * the credit mechanism in 5000 HW RX FIFO * Direct rx interrupts to hosts * Rx buffer size 4 or 8k * RB timeout 0x10 * 256 RBDs */ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | rb_size| (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); /* Set interrupt coalescing timer to default (2048 usecs) */ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); } static int iwl_rx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rx_queue *rxq = &trans_pcie->rxq; int i, err; unsigned long flags; if (!rxq->bd) { err = iwl_trans_rx_alloc(trans); if (err) return err; } spin_lock_irqsave(&rxq->lock, flags); INIT_LIST_HEAD(&rxq->rx_free); INIT_LIST_HEAD(&rxq->rx_used); iwl_trans_rxq_free_rx_bufs(trans); for (i = 0; i < RX_QUEUE_SIZE; i++) rxq->queue[i] = NULL; /* Set us so that we have processed and used all buffers, but have * not restocked the Rx queue with fresh buffers */ rxq->read = rxq->write = 0; rxq->write_actual = 0; rxq->free_count = 0; spin_unlock_irqrestore(&rxq->lock, flags); iwl_rx_replenish(trans); iwl_trans_rx_hw_init(trans, rxq); spin_lock_irqsave(&trans_pcie->irq_lock, flags); rxq->need_update = 1; iwl_rx_queue_update_write_ptr(trans, rxq); spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); return 0; } static void iwl_trans_pcie_rx_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rx_queue *rxq = &trans_pcie->rxq; unsigned long flags; /*if rxq->bd is NULL, it means that nothing has been allocated, * exit now */ if (!rxq->bd) { IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); return; } spin_lock_irqsave(&rxq->lock, flags); iwl_trans_rxq_free_rx_bufs(trans); spin_unlock_irqrestore(&rxq->lock, flags); dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE, rxq->bd, rxq->bd_dma); memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma)); rxq->bd = NULL; if (rxq->rb_stts) dma_free_coherent(trans->dev, sizeof(struct iwl_rb_status), rxq->rb_stts, rxq->rb_stts_dma); else IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n"); memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma)); rxq->rb_stts = NULL; } static int iwl_trans_rx_stop(struct iwl_trans *trans) { /* stop Rx DMA */ iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG, FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); } static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr, size_t size) { if (WARN_ON(ptr->addr)) return -EINVAL; ptr->addr = dma_alloc_coherent(trans->dev, size, &ptr->dma, GFP_KERNEL); if (!ptr->addr) return -ENOMEM; ptr->size = size; return 0; } static void iwlagn_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr) { if (unlikely(!ptr->addr)) return; dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma); memset(ptr, 0, sizeof(*ptr)); } static void iwl_trans_pcie_queue_stuck_timer(unsigned long data) { struct iwl_tx_queue *txq = (void *)data; struct iwl_queue *q = &txq->q; struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); u32 scd_sram_addr = trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_LOWER_BOUND + (16 * txq->q.id); u8 buf[16]; int i; spin_lock(&txq->lock); /* check if triggered erroneously */ if (txq->q.read_ptr == txq->q.write_ptr) { spin_unlock(&txq->lock); return; } spin_unlock(&txq->lock); IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id, jiffies_to_msecs(trans_pcie->wd_timeout)); IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", txq->q.read_ptr, txq->q.write_ptr); iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); iwl_print_hex_error(trans, buf, sizeof(buf)); for (i = 0; i < FH_TCSR_CHNL_NUM; i++) IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i, iwl_read_direct32(trans, FH_TX_TRB_REG(i))); for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i)); u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); u32 tbl_dw = iwl_read_targ_mem(trans, trans_pcie->scd_base_addr + SCD_TRANS_TBL_OFFSET_QUEUE(i)); if (i & 0x1) tbl_dw = (tbl_dw & 0xFFFF0000) >> 16; else tbl_dw = tbl_dw & 0x0000FFFF; IWL_ERR(trans, "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", i, active ? "" : "in", fifo, tbl_dw, iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1), iwl_read_prph(trans, SCD_QUEUE_WRPTR(i))); } for (i = q->read_ptr; i != q->write_ptr; i = iwl_queue_inc_wrap(i, q->n_bd)) { struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)txq->entries[i].cmd->payload; IWL_ERR(trans, "scratch %d = 0x%08x\n", i, get_unaligned_le32(&tx_cmd->scratch)); } iwl_op_mode_nic_error(trans->op_mode); } static int iwl_trans_txq_alloc(struct iwl_trans *trans, struct iwl_tx_queue *txq, int slots_num, u32 txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX; int i; if (WARN_ON(txq->entries || txq->tfds)) return -EINVAL; setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer, (unsigned long)txq); txq->trans_pcie = trans_pcie; txq->q.n_window = slots_num; txq->entries = kcalloc(slots_num, sizeof(struct iwl_pcie_tx_queue_entry), GFP_KERNEL); if (!txq->entries) goto error; if (txq_id == trans_pcie->cmd_queue) for (i = 0; i < slots_num; i++) { txq->entries[i].cmd = kmalloc(sizeof(struct iwl_device_cmd), GFP_KERNEL); if (!txq->entries[i].cmd) goto error; } /* Circular buffer of transmit frame descriptors (TFDs), * shared with device */ txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL); if (!txq->tfds) { IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz); goto error; } txq->q.id = txq_id; return 0; error: if (txq->entries && txq_id == trans_pcie->cmd_queue) for (i = 0; i < slots_num; i++) kfree(txq->entries[i].cmd); kfree(txq->entries); txq->entries = NULL; return -ENOMEM; } static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq, int slots_num, u32 txq_id) { int ret; txq->need_update = 0; /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); /* Initialize queue's high/low-water marks, and head/tail indexes */ ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); if (ret) return ret; spin_lock_init(&txq->lock); /* * Tell nic where to find circular buffer of Tx Frame Descriptors for * given Tx queue, and enable the DMA channel used for that queue. * Circular buffer (TFD queue in DRAM) physical base address */ iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8); return 0; } /** * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's */ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; struct iwl_queue *q = &txq->q; enum dma_data_direction dma_dir; if (!q->n_bd) return; /* In the command queue, all the TBs are mapped as BIDI * so unmap them as such. */ if (txq_id == trans_pcie->cmd_queue) dma_dir = DMA_BIDIRECTIONAL; else dma_dir = DMA_TO_DEVICE; spin_lock_bh(&txq->lock); while (q->write_ptr != q->read_ptr) { iwl_txq_free_tfd(trans, txq, dma_dir); q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); } spin_unlock_bh(&txq->lock); } /** * iwl_tx_queue_free - Deallocate DMA queue. * @txq: Transmit queue to deallocate. * * Empty queue by removing and destroying all BD's. * Free all buffers. * 0-fill, but do not free "txq" descriptor structure. */ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; struct device *dev = trans->dev; int i; if (WARN_ON(!txq)) return; iwl_tx_queue_unmap(trans, txq_id); /* De-alloc array of command/tx buffers */ if (txq_id == trans_pcie->cmd_queue) for (i = 0; i < txq->q.n_window; i++) { kfree(txq->entries[i].cmd); kfree(txq->entries[i].copy_cmd); } /* De-alloc circular buffer of TFDs */ if (txq->q.n_bd) { dma_free_coherent(dev, sizeof(struct iwl_tfd) * txq->q.n_bd, txq->tfds, txq->q.dma_addr); memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr)); } kfree(txq->entries); txq->entries = NULL; del_timer_sync(&txq->stuck_timer); /* 0-fill queue descriptor structure */ memset(txq, 0, sizeof(*txq)); } /** * iwl_trans_tx_free - Free TXQ Context * * Destroy all TX DMA queues and structures */ static void iwl_trans_pcie_tx_free(struct iwl_trans *trans) { int txq_id; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); /* Tx queues */ if (trans_pcie->txq) { for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; txq_id++) iwl_tx_queue_free(trans, txq_id); } kfree(trans_pcie->txq); trans_pcie->txq = NULL; iwlagn_free_dma_ptr(trans, &trans_pcie->kw); iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls); } /** * iwl_trans_tx_alloc - allocate TX context * Allocate all Tx DMA structures and initialize them * * @param priv * @return error code */ static int iwl_trans_tx_alloc(struct iwl_trans *trans) { int ret; int txq_id, slots_num; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues * sizeof(struct iwlagn_scd_bc_tbl); /*It is not allowed to alloc twice, so warn when this happens. * We cannot rely on the previous allocation, so free and fail */ if (WARN_ON(trans_pcie->txq)) { ret = -EINVAL; goto error; } ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls, scd_bc_tbls_size); if (ret) { IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); goto error; } /* Alloc keep-warm buffer */ ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE); if (ret) { IWL_ERR(trans, "Keep Warm allocation failed\n"); goto error; } trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues, sizeof(struct iwl_tx_queue), GFP_KERNEL); if (!trans_pcie->txq) { IWL_ERR(trans, "Not enough memory for txq\n"); ret = ENOMEM; goto error; } /* Alloc and init all Tx queues, including the command queue (#4/#9) */ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; txq_id++) { slots_num = (txq_id == trans_pcie->cmd_queue) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id], slots_num, txq_id); if (ret) { IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); goto error; } } return 0; error: iwl_trans_pcie_tx_free(trans); return ret; } static int iwl_tx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; int txq_id, slots_num; unsigned long flags; bool alloc = false; if (!trans_pcie->txq) { ret = iwl_trans_tx_alloc(trans); if (ret) goto error; alloc = true; } spin_lock_irqsave(&trans_pcie->irq_lock, flags); /* Turn off all Tx DMA fifos */ iwl_write_prph(trans, SCD_TXFACT, 0); /* Tell NIC where to find the "keep warm" buffer */ iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, trans_pcie->kw.dma >> 4); spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); /* Alloc and init all Tx queues, including the command queue (#4/#9) */ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; txq_id++) { slots_num = (txq_id == trans_pcie->cmd_queue) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id], slots_num, txq_id); if (ret) { IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); goto error; } } return 0; error: /*Upon error, free only if we allocated something */ if (alloc) iwl_trans_pcie_tx_free(trans); return ret; } static void iwl_set_pwr_vmain(struct iwl_trans *trans) { /* * (for documentation purposes) * to set power to V_AUX, do: if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_PWR_SRC_VAUX, ~APMG_PS_CTRL_MSK_PWR_SRC); */ iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, ~APMG_PS_CTRL_MSK_PWR_SRC); } /* PCI registers */ #define PCI_CFG_RETRY_TIMEOUT 0x041 #define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01 #define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02 static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u16 pci_lnk_ctl; pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &pci_lnk_ctl); return pci_lnk_ctl; } static void iwl_apm_config(struct iwl_trans *trans) { /* * HW bug W/A for instability in PCIe bus L0S->L1 transition. * Check if BIOS (or OS) enabled L1-ASPM on this device. * If so (likely), disable L0S, so device moves directly L0->L1; * costs negligible amount of power savings. * If not (unlikely), enable L0S, so there is at least some * power savings, even without L1. */ u16 lctl = iwl_pciexp_link_ctrl(trans); if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN) { /* L1-ASPM enabled; disable(!) L0S */ iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); dev_printk(KERN_INFO, trans->dev, "L1 Enabled; Disabling L0S\n"); } else { /* L1-ASPM disabled; enable(!) L0S */ iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); dev_printk(KERN_INFO, trans->dev, "L1 Disabled; Enabling L0S\n"); } trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN); } /* * Start up NIC's basic functionality after it has been reset * (e.g. after platform boot, or shutdown via iwl_apm_stop()) * NOTE: This does not load uCode nor start the embedded processor */ static int iwl_apm_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret = 0; IWL_DEBUG_INFO(trans, "Init card's basic functions\n"); /* * Use "set_bit" below rather than "write", to preserve any hardware * bits already set by default after reset. */ /* Disable L0S exit timer (platform NMI Work/Around) */ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); /* * Disable L0s without affecting L1; * don't wait for ICH L0s (ICH bug W/A) */ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); /* Set FH wait threshold to maximum (HW error during stress W/A) */ iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); /* * Enable HAP INTA (interrupt from management bus) to * wake device's PCI Express link L1a -> L0s */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); iwl_apm_config(trans); /* Configure analog phase-lock-loop before activating to D0A */ if (trans->cfg->base_params->pll_cfg_val) iwl_set_bit(trans, CSR_ANA_PLL_CFG, trans->cfg->base_params->pll_cfg_val); /* * Set "initialization complete" bit to move adapter from * D0U* --> D0A* (powered-up active) state. */ iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); /* * Wait for clock stabilization; once stabilized, access to * device-internal resources is supported, e.g. iwl_write_prph() * and accesses to uCode SRAM. */ ret = iwl_poll_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); if (ret < 0) { IWL_DEBUG_INFO(trans, "Failed to init the card\n"); goto out; } /* * Enable DMA clock and wait for it to stabilize. * * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits * do not disable clocks. This preserves any hardware bits already * set by default in "CLK_CTRL_REG" after reset. */ iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT); udelay(20); /* Disable L1-Active */ iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, APMG_PCIDEV_STT_VAL_L1_ACT_DIS); set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status); out: return ret; } static int iwl_apm_stop_master(struct iwl_trans *trans) { int ret = 0; /* stop device's busmaster DMA activity */ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); ret = iwl_poll_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED, CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); if (ret) IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n"); IWL_DEBUG_INFO(trans, "stop master\n"); return ret; } static void iwl_apm_stop(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n"); clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status); /* Stop device's DMA activity */ iwl_apm_stop_master(trans); /* Reset the entire device */ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); udelay(10); /* * Clear "initialization complete" bit to move adapter from * D0A* (powered-up Active) --> D0U* (Uninitialized) state. */ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); } static int iwl_nic_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); unsigned long flags; /* nic_init */ spin_lock_irqsave(&trans_pcie->irq_lock, flags); iwl_apm_init(trans); /* Set interrupt coalescing calibration timer to default (512 usecs) */ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); iwl_set_pwr_vmain(trans); iwl_op_mode_nic_config(trans->op_mode); /* Allocate the RX queue, or reset if it is already allocated */ iwl_rx_init(trans); /* Allocate or reset and init all Tx and Command queues */ if (iwl_tx_init(trans)) return -ENOMEM; if (trans->cfg->base_params->shadow_reg_enable) { /* enable shadow regs in HW */ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF); IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n"); } return 0; } #define HW_READY_TIMEOUT (50) /* Note: returns poll_bit return value, which is >= 0 if success */ static int iwl_set_hw_ready(struct iwl_trans *trans) { int ret; iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); /* See if we got it */ ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, HW_READY_TIMEOUT); IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : ""); return ret; } /* Note: returns standard 0/-ERROR code */ static int iwl_prepare_card_hw(struct iwl_trans *trans) { int ret; int t = 0; IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n"); ret = iwl_set_hw_ready(trans); /* If the card is ready, exit 0 */ if (ret >= 0) return 0; /* If HW is not ready, prepare the conditions to check again */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE); do { ret = iwl_set_hw_ready(trans); if (ret >= 0) return 0; usleep_range(200, 1000); t += 200; } while (t < 150000); return ret; } /* * ucode */ static int iwl_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr, dma_addr_t phy_addr, u32 byte_cnt) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; trans_pcie->ucode_write_complete = false; iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); iwl_write_direct32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr); iwl_write_direct32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); iwl_write_direct32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), (iwl_get_dma_hi_addr(phy_addr) << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); iwl_write_direct32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX | FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); ret = wait_event_timeout(trans_pcie->ucode_write_waitq, trans_pcie->ucode_write_complete, 5 * HZ); if (!ret) { IWL_ERR(trans, "Failed to load firmware chunk!\n"); return -ETIMEDOUT; } return 0; } static int iwl_load_section(struct iwl_trans *trans, u8 section_num, const struct fw_desc *section) { u8 *v_addr; dma_addr_t p_addr; u32 offset; int ret = 0; IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n", section_num); v_addr = dma_alloc_coherent(trans->dev, PAGE_SIZE, &p_addr, GFP_KERNEL); if (!v_addr) return -ENOMEM; for (offset = 0; offset < section->len; offset += PAGE_SIZE) { u32 copy_size; copy_size = min_t(u32, PAGE_SIZE, section->len - offset); memcpy(v_addr, (u8 *)section->data + offset, copy_size); ret = iwl_load_firmware_chunk(trans, section->offset + offset, p_addr, copy_size); if (ret) { IWL_ERR(trans, "Could not load the [%d] uCode section\n", section_num); break; } } dma_free_coherent(trans->dev, PAGE_SIZE, v_addr, p_addr); return ret; } static int iwl_load_given_ucode(struct iwl_trans *trans, const struct fw_img *image) { int i, ret = 0; for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) { if (!image->sec[i].data) break; ret = iwl_load_section(trans, i, &image->sec[i]); if (ret) return ret; } /* Remove all resets to allow NIC to operate */ iwl_write32(trans, CSR_RESET, 0); return 0; } static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, const struct fw_img *fw) { int ret; bool hw_rfkill; /* This may fail if AMT took ownership of the device */ if (iwl_prepare_card_hw(trans)) { IWL_WARN(trans, "Exit HW not ready\n"); return -EIO; } iwl_enable_rfkill_int(trans); /* If platform's RF_KILL switch is NOT set to KILL */ hw_rfkill = iwl_is_rfkill_set(trans); iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); if (hw_rfkill) return -ERFKILL; iwl_write32(trans, CSR_INT, 0xFFFFFFFF); ret = iwl_nic_init(trans); if (ret) { IWL_ERR(trans, "Unable to init nic\n"); return ret; } /* make sure rfkill handshake bits are cleared */ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); /* clear (again), then enable host interrupts */ iwl_write32(trans, CSR_INT, 0xFFFFFFFF); iwl_enable_interrupts(trans); /* really make sure rfkill handshake bits are cleared */ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); /* Load the given image to the HW */ return iwl_load_given_ucode(trans, fw); } /* * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask */ static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask) { struct iwl_trans_pcie __maybe_unused *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); iwl_write_prph(trans, SCD_TXFACT, mask); } static void iwl_tx_start(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 a; int chan; u32 reg_val; /* make sure all queue are not stopped/used */ memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); trans_pcie->scd_base_addr = iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND; /* reset conext data memory */ for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND; a += 4) iwl_write_targ_mem(trans, a, 0); /* reset tx status memory */ for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND; a += 4) iwl_write_targ_mem(trans, a, 0); for (; a < trans_pcie->scd_base_addr + SCD_TRANS_TBL_OFFSET_QUEUE( trans->cfg->base_params->num_of_queues); a += 4) iwl_write_targ_mem(trans, a, 0); iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, trans_pcie->scd_bc_tbls.dma >> 10); /* The chain extension of the SCD doesn't work well. This feature is * enabled by default by the HW, so we need to disable it manually. */ iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, trans_pcie->cmd_fifo); /* Activate all Tx DMA/FIFO channels */ iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7)); /* Enable DMA channel */ for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++) iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); /* Update FH chicken bits */ reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG); iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG, reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); /* Enable L1-Active */ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, APMG_PCIDEV_STT_VAL_L1_ACT_DIS); } static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans) { iwl_reset_ict(trans); iwl_tx_start(trans); } /** * iwlagn_txq_ctx_stop - Stop all Tx DMA channels */ static int iwl_trans_tx_stop(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ch, txq_id, ret; unsigned long flags; /* Turn off all Tx DMA fifos */ spin_lock_irqsave(&trans_pcie->irq_lock, flags); iwl_trans_txq_set_sched(trans, 0); /* Stop each Tx DMA channel, and wait for it to be idle */ for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG, FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000); if (ret < 0) IWL_ERR(trans, "Failing on timeout while stopping DMA channel %d [0x%08x]\n", ch, iwl_read_direct32(trans, FH_TSSR_TX_STATUS_REG)); } spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); if (!trans_pcie->txq) { IWL_WARN(trans, "Stopping tx queues that aren't allocated...\n"); return 0; } /* Unmap DMA from host system and free skb's */ for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; txq_id++) iwl_tx_queue_unmap(trans, txq_id); return 0; } static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); unsigned long flags; /* tell the device to stop sending interrupts */ spin_lock_irqsave(&trans_pcie->irq_lock, flags); iwl_disable_interrupts(trans); spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); /* device going down, Stop using ICT table */ iwl_disable_ict(trans); /* * If a HW restart happens during firmware loading, * then the firmware loading might call this function * and later it might be called again due to the * restart. So don't process again if the device is * already dead. */ if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) { iwl_trans_tx_stop(trans); iwl_trans_rx_stop(trans); /* Power-down device's busmaster DMA clocks */ iwl_write_prph(trans, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); udelay(5); } /* Make sure (redundant) we've released our request to stay awake */ iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); /* Stop the device, and put it in low power state */ iwl_apm_stop(trans); /* Upon stop, the APM issues an interrupt if HW RF kill is set. * Clean again the interrupt here */ spin_lock_irqsave(&trans_pcie->irq_lock, flags); iwl_disable_interrupts(trans); spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); iwl_enable_rfkill_int(trans); /* wait to make sure we flush pending tasklet*/ synchronize_irq(trans_pcie->irq); tasklet_kill(&trans_pcie->irq_tasklet); cancel_work_sync(&trans_pcie->rx_replenish); /* stop and reset the on-board processor */ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); /* clear all status bits */ clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); clear_bit(STATUS_INT_ENABLED, &trans_pcie->status); clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status); clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status); } static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans) { /* let the ucode operate on its own */ iwl_write32(trans, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); iwl_disable_interrupts(trans); iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); } static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload; struct iwl_cmd_meta *out_meta; struct iwl_tx_queue *txq; struct iwl_queue *q; dma_addr_t phys_addr = 0; dma_addr_t txcmd_phys; dma_addr_t scratch_phys; u16 len, firstlen, secondlen; u8 wait_write_ptr = 0; __le16 fc = hdr->frame_control; u8 hdr_len = ieee80211_hdrlen(fc); u16 __maybe_unused wifi_seq; txq = &trans_pcie->txq[txq_id]; q = &txq->q; if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) { WARN_ON_ONCE(1); return -EINVAL; } spin_lock(&txq->lock); /* In AGG mode, the index in the ring must correspond to the WiFi * sequence number. This is a HW requirements to help the SCD to parse * the BA. * Check here that the packets are in the right place on the ring. */ #ifdef CONFIG_IWLWIFI_DEBUG wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) && ((wifi_seq & 0xff) != q->write_ptr), "Q: %d WiFi Seq %d tfdNum %d", txq_id, wifi_seq, q->write_ptr); #endif /* Set up driver data for this TFD */ txq->entries[q->write_ptr].skb = skb; txq->entries[q->write_ptr].cmd = dev_cmd; dev_cmd->hdr.cmd = REPLY_TX; dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(q->write_ptr))); /* Set up first empty entry in queue's array of Tx/cmd buffers */ out_meta = &txq->entries[q->write_ptr].meta; /* * Use the first empty entry in this queue's command buffer array * to contain the Tx command and MAC header concatenated together * (payload data will be in another buffer). * Size of this varies, due to varying MAC header length. * If end is not dword aligned, we'll have 2 extra bytes at the end * of the MAC header (device reads on dword boundaries). * We'll tell device about this padding later. */ len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + hdr_len; firstlen = (len + 3) & ~3; /* Tell NIC about any 2-byte padding after MAC header */ if (firstlen != len) tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; /* Physical address of this Tx command's header (not MAC header!), * within command buffer array. */ txcmd_phys = dma_map_single(trans->dev, &dev_cmd->hdr, firstlen, DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(trans->dev, txcmd_phys))) goto out_err; dma_unmap_addr_set(out_meta, mapping, txcmd_phys); dma_unmap_len_set(out_meta, len, firstlen); if (!ieee80211_has_morefrags(fc)) { txq->need_update = 1; } else { wait_write_ptr = 1; txq->need_update = 0; } /* Set up TFD's 2nd entry to point directly to remainder of skb, * if any (802.11 null frames have no payload). */ secondlen = skb->len - hdr_len; if (secondlen > 0) { phys_addr = dma_map_single(trans->dev, skb->data + hdr_len, secondlen, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, phys_addr))) { dma_unmap_single(trans->dev, dma_unmap_addr(out_meta, mapping), dma_unmap_len(out_meta, len), DMA_BIDIRECTIONAL); goto out_err; } } /* Attach buffers to TFD */ iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1); if (secondlen > 0) iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, secondlen, 0); scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + offsetof(struct iwl_tx_cmd, scratch); /* take back ownership of DMA buffer to enable update */ dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen, DMA_BIDIRECTIONAL); tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n", le16_to_cpu(dev_cmd->hdr.sequence)); IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); /* Set up entry for this TFD in Tx byte-count array */ iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len)); dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen, DMA_BIDIRECTIONAL); trace_iwlwifi_dev_tx(trans->dev, skb, &txq->tfds[txq->q.write_ptr], sizeof(struct iwl_tfd), &dev_cmd->hdr, firstlen, skb->data + hdr_len, secondlen); trace_iwlwifi_dev_tx_data(trans->dev, skb, skb->data + hdr_len, secondlen); /* start timer if queue currently empty */ if (txq->need_update && q->read_ptr == q->write_ptr && trans_pcie->wd_timeout) mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); /* Tell device the write index *just past* this latest filled TFD */ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); iwl_txq_update_write_ptr(trans, txq); /* * At this point the frame is "transmitted" successfully * and we will get a TX status notification eventually, * regardless of the value of ret. "ret" only indicates * whether or not we should update the write pointer. */ if (iwl_queue_space(q) < q->high_mark) { if (wait_write_ptr) { txq->need_update = 1; iwl_txq_update_write_ptr(trans, txq); } else { iwl_stop_queue(trans, txq); } } spin_unlock(&txq->lock); return 0; out_err: spin_unlock(&txq->lock); return -1; } static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int err; bool hw_rfkill; trans_pcie->inta_mask = CSR_INI_SET_MASK; if (!trans_pcie->irq_requested) { tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long)) iwl_irq_tasklet, (unsigned long)trans); iwl_alloc_isr_ict(trans); err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED, DRV_NAME, trans); if (err) { IWL_ERR(trans, "Error allocating IRQ %d\n", trans_pcie->irq); goto error; } INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish); trans_pcie->irq_requested = true; } err = iwl_prepare_card_hw(trans); if (err) { IWL_ERR(trans, "Error while preparing HW: %d\n", err); goto err_free_irq; } iwl_apm_init(trans); /* From now on, the op_mode will be kept updated about RF kill state */ iwl_enable_rfkill_int(trans); hw_rfkill = iwl_is_rfkill_set(trans); iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); return err; err_free_irq: trans_pcie->irq_requested = false; free_irq(trans_pcie->irq, trans); error: iwl_free_isr_ict(trans); tasklet_kill(&trans_pcie->irq_tasklet); return err; } static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans, bool op_mode_leaving) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill; unsigned long flags; spin_lock_irqsave(&trans_pcie->irq_lock, flags); iwl_disable_interrupts(trans); spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); iwl_apm_stop(trans); spin_lock_irqsave(&trans_pcie->irq_lock, flags); iwl_disable_interrupts(trans); spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); if (!op_mode_leaving) { /* * Even if we stop the HW, we still want the RF kill * interrupt */ iwl_enable_rfkill_int(trans); /* * Check again since the RF kill state may have changed while * all the interrupts were disabled, in this case we couldn't * receive the RF kill interrupt and update the state in the * op_mode. */ hw_rfkill = iwl_is_rfkill_set(trans); iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); } } static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct sk_buff_head *skbs) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; /* n_bd is usually 256 => n_bd - 1 = 0xff */ int tfd_num = ssn & (txq->q.n_bd - 1); int freed = 0; spin_lock(&txq->lock); if (txq->q.read_ptr != tfd_num) { IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", txq_id, txq->q.read_ptr, tfd_num, ssn); freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs); if (iwl_queue_space(&txq->q) > txq->q.low_mark) iwl_wake_queue(trans, txq); } spin_unlock(&txq->lock); } static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) { writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); } static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) { writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); } static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) { return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); } static void iwl_trans_pcie_configure(struct iwl_trans *trans, const struct iwl_trans_config *trans_cfg) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); trans_pcie->cmd_queue = trans_cfg->cmd_queue; trans_pcie->cmd_fifo = trans_cfg->cmd_fifo; if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) trans_pcie->n_no_reclaim_cmds = 0; else trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds; if (trans_pcie->n_no_reclaim_cmds) memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds, trans_pcie->n_no_reclaim_cmds * sizeof(u8)); trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k; if (trans_pcie->rx_buf_size_8k) trans_pcie->rx_page_order = get_order(8 * 1024); else trans_pcie->rx_page_order = get_order(4 * 1024); trans_pcie->wd_timeout = msecs_to_jiffies(trans_cfg->queue_watchdog_timeout); trans_pcie->command_names = trans_cfg->command_names; } void iwl_trans_pcie_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); iwl_trans_pcie_tx_free(trans); iwl_trans_pcie_rx_free(trans); if (trans_pcie->irq_requested == true) { free_irq(trans_pcie->irq, trans); iwl_free_isr_ict(trans); } pci_disable_msi(trans_pcie->pci_dev); iounmap(trans_pcie->hw_base); pci_release_regions(trans_pcie->pci_dev); pci_disable_device(trans_pcie->pci_dev); kmem_cache_destroy(trans->dev_cmd_pool); kfree(trans); } static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); if (state) set_bit(STATUS_TPOWER_PMI, &trans_pcie->status); else clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status); } #ifdef CONFIG_PM_SLEEP static int iwl_trans_pcie_suspend(struct iwl_trans *trans) { return 0; } static int iwl_trans_pcie_resume(struct iwl_trans *trans) { bool hw_rfkill; iwl_enable_rfkill_int(trans); hw_rfkill = iwl_is_rfkill_set(trans); iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); if (!hw_rfkill) iwl_enable_interrupts(trans); return 0; } #endif /* CONFIG_PM_SLEEP */ #define IWL_FLUSH_WAIT_MS 2000 static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tx_queue *txq; struct iwl_queue *q; int cnt; unsigned long now = jiffies; int ret = 0; /* waiting for all the tx frames complete might take a while */ for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { if (cnt == trans_pcie->cmd_queue) continue; txq = &trans_pcie->txq[cnt]; q = &txq->q; while (q->read_ptr != q->write_ptr && !time_after(jiffies, now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) msleep(1); if (q->read_ptr != q->write_ptr) { IWL_ERR(trans, "fail to flush all tx fifo queues\n"); ret = -ETIMEDOUT; break; } } return ret; } static const char *get_fh_string(int cmd) { #define IWL_CMD(x) case x: return #x switch (cmd) { IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG); IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG); IWL_CMD(FH_RSCSR_CHNL0_WPTR); IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG); IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG); IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG); IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV); IWL_CMD(FH_TSSR_TX_STATUS_REG); IWL_CMD(FH_TSSR_TX_ERROR_REG); default: return "UNKNOWN"; } #undef IWL_CMD } int iwl_dump_fh(struct iwl_trans *trans, char **buf) { int i; static const u32 fh_tbl[] = { FH_RSCSR_CHNL0_STTS_WPTR_REG, FH_RSCSR_CHNL0_RBDCB_BASE_REG, FH_RSCSR_CHNL0_WPTR, FH_MEM_RCSR_CHNL0_CONFIG_REG, FH_MEM_RSSR_SHARED_CTRL_REG, FH_MEM_RSSR_RX_STATUS_REG, FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV, FH_TSSR_TX_STATUS_REG, FH_TSSR_TX_ERROR_REG }; #ifdef CONFIG_IWLWIFI_DEBUGFS if (buf) { int pos = 0; size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; *buf = kmalloc(bufsz, GFP_KERNEL); if (!*buf) return -ENOMEM; pos += scnprintf(*buf + pos, bufsz - pos, "FH register values:\n"); for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) pos += scnprintf(*buf + pos, bufsz - pos, " %34s: 0X%08x\n", get_fh_string(fh_tbl[i]), iwl_read_direct32(trans, fh_tbl[i])); return pos; } #endif IWL_ERR(trans, "FH register values:\n"); for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) IWL_ERR(trans, " %34s: 0X%08x\n", get_fh_string(fh_tbl[i]), iwl_read_direct32(trans, fh_tbl[i])); return 0; } static const char *get_csr_string(int cmd) { #define IWL_CMD(x) case x: return #x switch (cmd) { IWL_CMD(CSR_HW_IF_CONFIG_REG); IWL_CMD(CSR_INT_COALESCING); IWL_CMD(CSR_INT); IWL_CMD(CSR_INT_MASK); IWL_CMD(CSR_FH_INT_STATUS); IWL_CMD(CSR_GPIO_IN); IWL_CMD(CSR_RESET); IWL_CMD(CSR_GP_CNTRL); IWL_CMD(CSR_HW_REV); IWL_CMD(CSR_EEPROM_REG); IWL_CMD(CSR_EEPROM_GP); IWL_CMD(CSR_OTP_GP_REG); IWL_CMD(CSR_GIO_REG); IWL_CMD(CSR_GP_UCODE_REG); IWL_CMD(CSR_GP_DRIVER_REG); IWL_CMD(CSR_UCODE_DRV_GP1); IWL_CMD(CSR_UCODE_DRV_GP2); IWL_CMD(CSR_LED_REG); IWL_CMD(CSR_DRAM_INT_TBL_REG); IWL_CMD(CSR_GIO_CHICKEN_BITS); IWL_CMD(CSR_ANA_PLL_CFG); IWL_CMD(CSR_HW_REV_WA_REG); IWL_CMD(CSR_DBG_HPET_MEM_REG); default: return "UNKNOWN"; } #undef IWL_CMD } void iwl_dump_csr(struct iwl_trans *trans) { int i; static const u32 csr_tbl[] = { CSR_HW_IF_CONFIG_REG, CSR_INT_COALESCING, CSR_INT, CSR_INT_MASK, CSR_FH_INT_STATUS, CSR_GPIO_IN, CSR_RESET, CSR_GP_CNTRL, CSR_HW_REV, CSR_EEPROM_REG, CSR_EEPROM_GP, CSR_OTP_GP_REG, CSR_GIO_REG, CSR_GP_UCODE_REG, CSR_GP_DRIVER_REG, CSR_UCODE_DRV_GP1, CSR_UCODE_DRV_GP2, CSR_LED_REG, CSR_DRAM_INT_TBL_REG, CSR_GIO_CHICKEN_BITS, CSR_ANA_PLL_CFG, CSR_HW_REV_WA_REG, CSR_DBG_HPET_MEM_REG }; IWL_ERR(trans, "CSR values:\n"); IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is " "CSR_INT_PERIODIC_REG)\n"); for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) { IWL_ERR(trans, " %25s: 0X%08x\n", get_csr_string(csr_tbl[i]), iwl_read32(trans, csr_tbl[i])); } } #ifdef CONFIG_IWLWIFI_DEBUGFS /* create and remove of files */ #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ if (!debugfs_create_file(#name, mode, parent, trans, \ &iwl_dbgfs_##name##_ops)) \ goto err; \ } while (0) /* file operation */ #define DEBUGFS_READ_FUNC(name) \ static ssize_t iwl_dbgfs_##name##_read(struct file *file, \ char __user *user_buf, \ size_t count, loff_t *ppos); #define DEBUGFS_WRITE_FUNC(name) \ static ssize_t iwl_dbgfs_##name##_write(struct file *file, \ const char __user *user_buf, \ size_t count, loff_t *ppos); #define DEBUGFS_READ_FILE_OPS(name) \ DEBUGFS_READ_FUNC(name); \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .read = iwl_dbgfs_##name##_read, \ .open = simple_open, \ .llseek = generic_file_llseek, \ }; #define DEBUGFS_WRITE_FILE_OPS(name) \ DEBUGFS_WRITE_FUNC(name); \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .write = iwl_dbgfs_##name##_write, \ .open = simple_open, \ .llseek = generic_file_llseek, \ }; #define DEBUGFS_READ_WRITE_FILE_OPS(name) \ DEBUGFS_READ_FUNC(name); \ DEBUGFS_WRITE_FUNC(name); \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .write = iwl_dbgfs_##name##_write, \ .read = iwl_dbgfs_##name##_read, \ .open = simple_open, \ .llseek = generic_file_llseek, \ }; static ssize_t iwl_dbgfs_tx_queue_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tx_queue *txq; struct iwl_queue *q; char *buf; int pos = 0; int cnt; int ret; size_t bufsz; bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues; if (!trans_pcie->txq) return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { txq = &trans_pcie->txq[cnt]; q = &txq->q; pos += scnprintf(buf + pos, bufsz - pos, "hwq %.2d: read=%u write=%u use=%d stop=%d\n", cnt, q->read_ptr, q->write_ptr, !!test_bit(cnt, trans_pcie->queue_used), !!test_bit(cnt, trans_pcie->queue_stopped)); } ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rx_queue *rxq = &trans_pcie->rxq; char buf[256]; int pos = 0; const size_t bufsz = sizeof(buf); pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", rxq->read); pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", rxq->write); pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n", rxq->free_count); if (rxq->rb_stts) { pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n", le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF); } else { pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: Not Allocated\n"); } return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_interrupt_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct isr_statistics *isr_stats = &trans_pcie->isr_stats; int pos = 0; char *buf; int bufsz = 24 * 64; /* 24 items * 64 char per item */ ssize_t ret; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; pos += scnprintf(buf + pos, bufsz - pos, "Interrupt Statistics Report:\n"); pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", isr_stats->hw); pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", isr_stats->sw); if (isr_stats->sw || isr_stats->hw) { pos += scnprintf(buf + pos, bufsz - pos, "\tLast Restarting Code: 0x%X\n", isr_stats->err_code); } #ifdef CONFIG_IWLWIFI_DEBUG pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", isr_stats->sch); pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", isr_stats->alive); #endif pos += scnprintf(buf + pos, bufsz - pos, "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill); pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", isr_stats->ctkill); pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", isr_stats->wakeup); pos += scnprintf(buf + pos, bufsz - pos, "Rx command responses:\t\t %u\n", isr_stats->rx); pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", isr_stats->tx); pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", isr_stats->unhandled); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); return ret; } static ssize_t iwl_dbgfs_interrupt_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct isr_statistics *isr_stats = &trans_pcie->isr_stats; char buf[8]; int buf_size; u32 reset_flag; memset(buf, 0, sizeof(buf)); buf_size = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT; if (sscanf(buf, "%x", &reset_flag) != 1) return -EFAULT; if (reset_flag == 0) memset(isr_stats, 0, sizeof(*isr_stats)); return count; } static ssize_t iwl_dbgfs_csr_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; char buf[8]; int buf_size; int csr; memset(buf, 0, sizeof(buf)); buf_size = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT; if (sscanf(buf, "%d", &csr) != 1) return -EFAULT; iwl_dump_csr(trans); return count; } static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; char *buf = NULL; int pos = 0; ssize_t ret = -EFAULT; ret = pos = iwl_dump_fh(trans, &buf); if (buf) { ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); } return ret; } static ssize_t iwl_dbgfs_fw_restart_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; if (!trans->op_mode) return -EAGAIN; local_bh_disable(); iwl_op_mode_nic_error(trans->op_mode); local_bh_enable(); return count; } DEBUGFS_READ_WRITE_FILE_OPS(interrupt); DEBUGFS_READ_FILE_OPS(fh_reg); DEBUGFS_READ_FILE_OPS(rx_queue); DEBUGFS_READ_FILE_OPS(tx_queue); DEBUGFS_WRITE_FILE_OPS(csr); DEBUGFS_WRITE_FILE_OPS(fw_restart); /* * Create the debugfs files and directories * */ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, struct dentry *dir) { DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR); DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR); DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR); DEBUGFS_ADD_FILE(csr, dir, S_IWUSR); DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR); DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR); return 0; err: IWL_ERR(trans, "failed to create the trans debugfs entry\n"); return -ENOMEM; } #else static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans, struct dentry *dir) { return 0; } #endif /*CONFIG_IWLWIFI_DEBUGFS */ static const struct iwl_trans_ops trans_ops_pcie = { .start_hw = iwl_trans_pcie_start_hw, .stop_hw = iwl_trans_pcie_stop_hw, .fw_alive = iwl_trans_pcie_fw_alive, .start_fw = iwl_trans_pcie_start_fw, .stop_device = iwl_trans_pcie_stop_device, .wowlan_suspend = iwl_trans_pcie_wowlan_suspend, .send_cmd = iwl_trans_pcie_send_cmd, .tx = iwl_trans_pcie_tx, .reclaim = iwl_trans_pcie_reclaim, .txq_disable = iwl_trans_pcie_txq_disable, .txq_enable = iwl_trans_pcie_txq_enable, .dbgfs_register = iwl_trans_pcie_dbgfs_register, .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty, #ifdef CONFIG_PM_SLEEP .suspend = iwl_trans_pcie_suspend, .resume = iwl_trans_pcie_resume, #endif .write8 = iwl_trans_pcie_write8, .write32 = iwl_trans_pcie_write32, .read32 = iwl_trans_pcie_read32, .configure = iwl_trans_pcie_configure, .set_pmi = iwl_trans_pcie_set_pmi, }; struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, const struct pci_device_id *ent, const struct iwl_cfg *cfg) { struct iwl_trans_pcie *trans_pcie; struct iwl_trans *trans; u16 pci_cmd; int err; trans = kzalloc(sizeof(struct iwl_trans) + sizeof(struct iwl_trans_pcie), GFP_KERNEL); if (WARN_ON(!trans)) return NULL; trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); trans->ops = &trans_ops_pcie; trans->cfg = cfg; trans_pcie->trans = trans; spin_lock_init(&trans_pcie->irq_lock); init_waitqueue_head(&trans_pcie->ucode_write_waitq); /* W/A - seems to solve weird behavior. We need to remove this if we * don't want to stay in L1 all the time. This wastes a lot of power */ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); if (pci_enable_device(pdev)) { err = -ENODEV; goto out_no_pci; } pci_set_master(pdev); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); if (!err) err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); if (err) { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (!err) err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); /* both attempts failed: */ if (err) { dev_printk(KERN_ERR, &pdev->dev, "No suitable DMA available.\n"); goto out_pci_disable_device; } } err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_printk(KERN_ERR, &pdev->dev, "pci_request_regions failed\n"); goto out_pci_disable_device; } trans_pcie->hw_base = pci_ioremap_bar(pdev, 0); if (!trans_pcie->hw_base) { dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed\n"); err = -ENODEV; goto out_pci_release_regions; } dev_printk(KERN_INFO, &pdev->dev, "pci_resource_len = 0x%08llx\n", (unsigned long long) pci_resource_len(pdev, 0)); dev_printk(KERN_INFO, &pdev->dev, "pci_resource_base = %p\n", trans_pcie->hw_base); dev_printk(KERN_INFO, &pdev->dev, "HW Revision ID = 0x%X\n", pdev->revision); /* We disable the RETRY_TIMEOUT register (0x41) to keep * PCI Tx retries from interfering with C3 CPU state */ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); err = pci_enable_msi(pdev); if (err) dev_printk(KERN_ERR, &pdev->dev, "pci_enable_msi failed(0X%x)\n", err); trans->dev = &pdev->dev; trans_pcie->irq = pdev->irq; trans_pcie->pci_dev = pdev; trans->hw_rev = iwl_read32(trans, CSR_HW_REV); trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); /* TODO: Move this away, not needed if not MSI */ /* enable rfkill interrupt: hw bug w/a */ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); } /* Initialize the wait queue for commands */ init_waitqueue_head(&trans->wait_command_queue); spin_lock_init(&trans->reg_lock); snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name), "iwl_cmd_pool:%s", dev_name(trans->dev)); trans->dev_cmd_headroom = 0; trans->dev_cmd_pool = kmem_cache_create(trans->dev_cmd_pool_name, sizeof(struct iwl_device_cmd) + trans->dev_cmd_headroom, sizeof(void *), SLAB_HWCACHE_ALIGN, NULL); if (!trans->dev_cmd_pool) goto out_pci_disable_msi; return trans; out_pci_disable_msi: pci_disable_msi(pdev); out_pci_release_regions: pci_release_regions(pdev); out_pci_disable_device: pci_disable_device(pdev); out_no_pci: kfree(trans); return NULL; }