Commit 419ad34e authored by Arend van Spriel, committed by Greg Kroah-Hartman

staging: brcm80211: reformat long lines in brcmsmac to 80 columns

The Linux coding style strongly suggests limiting the length of source lines
to 80 characters. This commit corrects this for the brcmsmac sources.
Reviewed-by: Henry Ptasinski <henryp@broadcom.com>
Reviewed-by: Roland Vossen <rvossen@broadcom.com>
Reviewed-by: Pieter-Paul Giesberts <pieterpg@broadcom.com>
Signed-off-by: Arend van Spriel <arend@broadcom.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Parent 7cdac4ff
......@@ -28,48 +28,68 @@
#include "aiutils.h"
/* slow_clk_ctl */
#define SCC_SS_MASK 0x00000007 /* slow clock source mask */
#define SCC_SS_LPO 0x00000000 /* source of slow clock is LPO */
#define SCC_SS_XTAL 0x00000001 /* source of slow clock is crystal */
#define SCC_SS_PCI 0x00000002 /* source of slow clock is PCI */
#define SCC_LF 0x00000200 /* LPOFreqSel, 1: 160Khz, 0: 32KHz */
#define SCC_LP 0x00000400 /* LPOPowerDown, 1: LPO is disabled,
* 0: LPO is enabled
*/
#define SCC_FS 0x00000800 /* ForceSlowClk, 1: sb/cores running on slow clock,
* 0: power logic control
*/
#define SCC_IP 0x00001000 /* IgnorePllOffReq, 1/0: power logic ignores/honors
* PLL clock disable requests from core
*/
#define SCC_XC 0x00002000 /* XtalControlEn, 1/0: power logic does/doesn't
* disable crystal when appropriate
*/
#define SCC_XP 0x00004000 /* XtalPU (RO), 1/0: crystal running/disabled */
#define SCC_CD_MASK 0xffff0000 /* ClockDivider (SlowClk = 1/(4+divisor)) */
/* slow clock source mask */
#define SCC_SS_MASK 0x00000007
/* source of slow clock is LPO */
#define SCC_SS_LPO 0x00000000
/* source of slow clock is crystal */
#define SCC_SS_XTAL 0x00000001
/* source of slow clock is PCI */
#define SCC_SS_PCI 0x00000002
/* LPOFreqSel, 1: 160Khz, 0: 32KHz */
#define SCC_LF 0x00000200
/* LPOPowerDown, 1: LPO is disabled, 0: LPO is enabled */
#define SCC_LP 0x00000400
/* ForceSlowClk, 1: sb/cores running on slow clock, 0: power logic control */
#define SCC_FS 0x00000800
/* IgnorePllOffReq, 1/0:
* power logic ignores/honors PLL clock disable requests from core
*/
#define SCC_IP 0x00001000
/* XtalControlEn, 1/0:
* power logic does/doesn't disable crystal when appropriate
*/
#define SCC_XC 0x00002000
/* XtalPU (RO), 1/0: crystal running/disabled */
#define SCC_XP 0x00004000
/* ClockDivider (SlowClk = 1/(4+divisor)) */
#define SCC_CD_MASK 0xffff0000
#define SCC_CD_SHIFT 16
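Editor's aside: the ClockDivider comment above encodes SlowClk = 1/(4 + divisor). A minimal sketch of how that field could be decoded with the masks just defined (the helper name and the source-clock parameter are illustrative, not part of the driver):
/* Illustration only: decode the ClockDivider field of slow_clk_ctl. */
static unsigned int scc_slow_clk_hz(u32 slow_clk_ctl, unsigned int src_hz)
{
	u32 divisor = (slow_clk_ctl & SCC_CD_MASK) >> SCC_CD_SHIFT;

	return src_hz / (4 + divisor);	/* SlowClk = src / (4 + divisor) */
}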
/* system_clk_ctl */
#define SYCC_IE 0x00000001 /* ILPen: Enable Idle Low Power */
#define SYCC_AE 0x00000002 /* ALPen: Enable Active Low Power */
#define SYCC_FP 0x00000004 /* ForcePLLOn */
#define SYCC_AR 0x00000008 /* Force ALP (or HT if ALPen is not set */
#define SYCC_HR 0x00000010 /* Force HT */
#define SYCC_CD_MASK 0xffff0000 /* ClkDiv (ILP = 1/(4 * (divisor + 1)) */
/* ILPen: Enable Idle Low Power */
#define SYCC_IE 0x00000001
/* ALPen: Enable Active Low Power */
#define SYCC_AE 0x00000002
/* ForcePLLOn */
#define SYCC_FP 0x00000004
/* Force ALP (or HT if ALPen is not set */
#define SYCC_AR 0x00000008
/* Force HT */
#define SYCC_HR 0x00000010
/* ClkDiv (ILP = 1/(4 * (divisor + 1)) */
#define SYCC_CD_MASK 0xffff0000
#define SYCC_CD_SHIFT 16
#define CST4329_SPROM_OTP_SEL_MASK 0x00000003
#define CST4329_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */
#define CST4329_SPROM_SEL 1 /* OTP is powered up, SPROM is present */
#define CST4329_OTP_SEL 2 /* OTP is powered up, no SPROM */
#define CST4329_OTP_PWRDN 3 /* OTP is powered down, SPROM is present */
/* OTP is powered up, use def. CIS, no SPROM */
#define CST4329_DEFCIS_SEL 0
/* OTP is powered up, SPROM is present */
#define CST4329_SPROM_SEL 1
/* OTP is powered up, no SPROM */
#define CST4329_OTP_SEL 2
/* OTP is powered down, SPROM is present */
#define CST4329_OTP_PWRDN 3
#define CST4329_SPI_SDIO_MODE_MASK 0x00000004
#define CST4329_SPI_SDIO_MODE_SHIFT 2
/* 43224 chip-specific ChipControl register bits */
#define CCTRL43224_GPIO_TOGGLE 0x8000
#define CCTRL_43224A0_12MA_LED_DRIVE 0x00F000F0 /* 12 mA drive strength */
#define CCTRL_43224B0_12MA_LED_DRIVE 0xF0 /* 12 mA drive strength for later 43224s */
/* 12 mA drive strength */
#define CCTRL_43224A0_12MA_LED_DRIVE 0x00F000F0
/* 12 mA drive strength for later 43224s */
#define CCTRL_43224B0_12MA_LED_DRIVE 0xF0
/* 43236 Chip specific ChipStatus register bits */
#define CST43236_SFLASH_MASK 0x00000040
......@@ -78,29 +98,44 @@
#define CST43236_BP_CLK 0x00000200 /* 120/96Mbps */
#define CST43236_BOOT_MASK 0x00001800
#define CST43236_BOOT_SHIFT 11
#define CST43236_BOOT_FROM_SRAM 0 /* boot from SRAM, ARM in reset */
#define CST43236_BOOT_FROM_ROM 1 /* boot from ROM */
#define CST43236_BOOT_FROM_FLASH 2 /* boot from FLASH */
#define CST43236_BOOT_FROM_SRAM 0 /* boot from SRAM, ARM in reset */
#define CST43236_BOOT_FROM_ROM 1 /* boot from ROM */
#define CST43236_BOOT_FROM_FLASH 2 /* boot from FLASH */
#define CST43236_BOOT_FROM_INVALID 3
/* 4331 chip-specific ChipControl register bits */
#define CCTRL4331_BT_COEXIST (1<<0) /* 0 disable */
#define CCTRL4331_SECI (1<<1) /* 0 SECI is disabled (JATG functional) */
#define CCTRL4331_EXT_LNA (1<<2) /* 0 disable */
#define CCTRL4331_SPROM_GPIO13_15 (1<<3) /* sprom/gpio13-15 mux */
#define CCTRL4331_EXTPA_EN (1<<4) /* 0 ext pa disable, 1 ext pa enabled */
#define CCTRL4331_GPIOCLK_ON_SPROMCS (1<<5) /* set drive out GPIO_CLK on sprom_cs pin */
#define CCTRL4331_PCIE_MDIO_ON_SPROMCS (1<<6) /* use sprom_cs pin as PCIE mdio interface */
#define CCTRL4331_EXTPA_ON_GPIO2_5 (1<<7) /* aband extpa will be at gpio2/5 and sprom_dout */
#define CCTRL4331_OVR_PIPEAUXCLKEN (1<<8) /* override core control on pipe_AuxClkEnable */
#define CCTRL4331_OVR_PIPEAUXPWRDOWN (1<<9) /* override core control on pipe_AuxPowerDown */
#define CCTRL4331_PCIE_AUXCLKEN (1<<10) /* pcie_auxclkenable */
#define CCTRL4331_PCIE_PIPE_PLLDOWN (1<<11) /* pcie_pipe_pllpowerdown */
#define CCTRL4331_BT_SHD0_ON_GPIO4 (1<<16) /* enable bt_shd0 at gpio4 */
#define CCTRL4331_BT_SHD1_ON_GPIO5 (1<<17) /* enable bt_shd1 at gpio5 */
/* 0 disable */
#define CCTRL4331_BT_COEXIST (1<<0)
/* 0 SECI is disabled (JTAG functional) */
#define CCTRL4331_SECI (1<<1)
/* 0 disable */
#define CCTRL4331_EXT_LNA (1<<2)
/* sprom/gpio13-15 mux */
#define CCTRL4331_SPROM_GPIO13_15 (1<<3)
/* 0 ext pa disable, 1 ext pa enabled */
#define CCTRL4331_EXTPA_EN (1<<4)
/* set drive out GPIO_CLK on sprom_cs pin */
#define CCTRL4331_GPIOCLK_ON_SPROMCS (1<<5)
/* use sprom_cs pin as PCIE mdio interface */
#define CCTRL4331_PCIE_MDIO_ON_SPROMCS (1<<6)
/* aband extpa will be at gpio2/5 and sprom_dout */
#define CCTRL4331_EXTPA_ON_GPIO2_5 (1<<7)
/* override core control on pipe_AuxClkEnable */
#define CCTRL4331_OVR_PIPEAUXCLKEN (1<<8)
/* override core control on pipe_AuxPowerDown */
#define CCTRL4331_OVR_PIPEAUXPWRDOWN (1<<9)
/* pcie_auxclkenable */
#define CCTRL4331_PCIE_AUXCLKEN (1<<10)
/* pcie_pipe_pllpowerdown */
#define CCTRL4331_PCIE_PIPE_PLLDOWN (1<<11)
/* enable bt_shd0 at gpio4 */
#define CCTRL4331_BT_SHD0_ON_GPIO4 (1<<16)
/* enable bt_shd1 at gpio5 */
#define CCTRL4331_BT_SHD1_ON_GPIO5 (1<<17)
/* 4331 Chip specific ChipStatus register bits */
#define CST4331_XTAL_FREQ 0x00000001 /* crystal frequency 20/40Mhz */
/* crystal frequency 20/40Mhz */
#define CST4331_XTAL_FREQ 0x00000001
#define CST4331_SPROM_PRESENT 0x00000002
#define CST4331_OTP_PRESENT 0x00000004
#define CST4331_LDO_RF 0x00000008
......@@ -110,19 +145,26 @@
#define CST4319_SPI_CPULESSUSB 0x00000001
#define CST4319_SPI_CLK_POL 0x00000002
#define CST4319_SPI_CLK_PH 0x00000008
#define CST4319_SPROM_OTP_SEL_MASK 0x000000c0 /* gpio [7:6], SDIO CIS selection */
/* gpio [7:6], SDIO CIS selection */
#define CST4319_SPROM_OTP_SEL_MASK 0x000000c0
#define CST4319_SPROM_OTP_SEL_SHIFT 6
#define CST4319_DEFCIS_SEL 0x00000000 /* use default CIS, OTP is powered up */
#define CST4319_SPROM_SEL 0x00000040 /* use SPROM, OTP is powered up */
#define CST4319_OTP_SEL 0x00000080 /* use OTP, OTP is powered up */
#define CST4319_OTP_PWRDN 0x000000c0 /* use SPROM, OTP is powered down */
#define CST4319_SDIO_USB_MODE 0x00000100 /* gpio [8], sdio/usb mode */
/* use default CIS, OTP is powered up */
#define CST4319_DEFCIS_SEL 0x00000000
/* use SPROM, OTP is powered up */
#define CST4319_SPROM_SEL 0x00000040
/* use OTP, OTP is powered up */
#define CST4319_OTP_SEL 0x00000080
/* use SPROM, OTP is powered down */
#define CST4319_OTP_PWRDN 0x000000c0
/* gpio [8], sdio/usb mode */
#define CST4319_SDIO_USB_MODE 0x00000100
#define CST4319_REMAP_SEL_MASK 0x00000600
#define CST4319_ILPDIV_EN 0x00000800
#define CST4319_XTAL_PD_POL 0x00001000
#define CST4319_LPO_SEL 0x00002000
#define CST4319_RES_INIT_MODE 0x0000c000
#define CST4319_PALDO_EXTPNP 0x00010000 /* PALDO is configured with external PNP */
/* PALDO is configured with external PNP */
#define CST4319_PALDO_EXTPNP 0x00010000
#define CST4319_CBUCK_MODE_MASK 0x00060000
#define CST4319_CBUCK_MODE_BURST 0x00020000
#define CST4319_CBUCK_MODE_LPBURST 0x00060000
......@@ -153,7 +195,8 @@
#define CST4313_SPROM_OTP_SEL_SHIFT 0
/* 4313 Chip specific ChipControl register bits */
#define CCTRL_4313_12MA_LED_DRIVE 0x00000007 /* 12 mA drive strengh for later 4313 */
/* 12 mA drive strengh for later 4313 */
#define CCTRL_4313_12MA_LED_DRIVE 0x00000007
#define BCM47162_DMP() ((sih->chip == BCM47162_CHIP_ID) && \
(sih->chiprev == 0) && \
......@@ -227,9 +270,12 @@
#define SD_SG32 0x00000008
#define SD_SZ_ALIGN 0x00000fff
#define PCI_CFG_GPIO_SCS 0x10 /* PCI config space bit 4 for 4306c0 slow clock source */
#define PCI_CFG_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal power-up */
#define PCI_CFG_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL power-down */
/* PCI config space bit 4 for 4306c0 slow clock source */
#define PCI_CFG_GPIO_SCS 0x10
/* PCI config space GPIO 14 for Xtal power-up */
#define PCI_CFG_GPIO_XTAL 0x40
/* PCI config space GPIO 15 for PLL power-down */
#define PCI_CFG_GPIO_PLL 0x80
/* power control defines */
#define PLL_DELAY 150 /* us pll on delay */
......@@ -468,7 +514,8 @@ void ai_scan(struct si_pub *sih, void *regs)
}
eromlim = eromptr + (ER_REMAPCONTROL / sizeof(u32));
SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n", regs, erombase, eromptr, eromlim));
SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, "
"eromlim = 0x%p\n", regs, erombase, eromptr, eromlim));
while (eromptr < eromlim) {
u32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
u32 mpd, asd, addrl, addrh, sizel, sizeh;
......@@ -502,7 +549,9 @@ void ai_scan(struct si_pub *sih, void *regs)
nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, " "nsw = %d, nmp = %d & nsp = %d\n", mfg, cid, crev, base, nmw, nsw, nmp, nsp));
SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr "
"0x%p, with nmw = %d, nsw = %d, nmp = %d & nsp = %d\n",
mfg, cid, crev, base, nmw, nsw, nmp, nsp));
if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
continue;
......@@ -526,7 +575,8 @@ void ai_scan(struct si_pub *sih, void *regs)
for (i = 0; i < nmp; i++) {
mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
if ((mpd & ER_TAG) != ER_MP) {
SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
SI_ERROR(("Not enough MP entries for "
"component 0x%x\n", cid));
goto error;
}
SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
......@@ -549,7 +599,8 @@ void ai_scan(struct si_pub *sih, void *regs)
br = true;
else if ((addrh != 0) || (sizeh != 0)
|| (sizel != SI_CORE_SIZE)) {
SI_ERROR(("First Slave ASD for core 0x%04x malformed " "(0x%08x)\n", cid, asd));
SI_ERROR(("First Slave ASD for core 0x%04x "
"malformed (0x%08x)\n", cid, asd));
goto error;
}
}
......@@ -704,7 +755,8 @@ u32 ai_addrspace(struct si_pub *sih, uint asidx)
else if (asidx == 1)
return sii->coresba2[cidx];
else {
SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", __func__, asidx));
SI_ERROR(("%s: Need to parse the erom again to find addr "
"space %d\n", __func__, asidx));
return 0;
}
}
......@@ -723,7 +775,8 @@ u32 ai_addrspacesize(struct si_pub *sih, uint asidx)
else if (asidx == 1)
return sii->coresba2_size[cidx];
else {
SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", __func__, asidx));
SI_ERROR(("%s: Need to parse the erom again to find addr "
"space %d\n", __func__, asidx));
return 0;
}
}
......@@ -735,7 +788,8 @@ uint ai_flag(struct si_pub *sih)
sii = SI_INFO(sih);
if (BCM47162_DMP()) {
SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __func__));
SI_ERROR(("%s: Attempting to read MIPS DMP registers "
"on 47162a0", __func__));
return sii->curidx;
}
ai = sii->curwrap;
......@@ -833,7 +887,8 @@ u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val)
sii = SI_INFO(sih);
if (BCM47162_DMP()) {
SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0", __func__));
SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) "
"on 47162a0", __func__));
return 0;
}
......
......@@ -242,16 +242,23 @@
#define SRC_PRESENT 0x00000001
/* 4330 chip-specific ChipStatus register bits */
#define CST4330_CHIPMODE_SDIOD(cs) (((cs) & 0x7) < 6) /* SDIO || gSPI */
#define CST4330_CHIPMODE_USB20D(cs) (((cs) & 0x7) >= 6) /* USB || USBDA */
#define CST4330_CHIPMODE_SDIO(cs) (((cs) & 0x4) == 0) /* SDIO */
#define CST4330_CHIPMODE_GSPI(cs) (((cs) & 0x6) == 4) /* gSPI */
#define CST4330_CHIPMODE_USB(cs) (((cs) & 0x7) == 6) /* USB packet-oriented */
#define CST4330_CHIPMODE_USBDA(cs) (((cs) & 0x7) == 7) /* USB Direct Access */
/* SDIO || gSPI */
#define CST4330_CHIPMODE_SDIOD(cs) (((cs) & 0x7) < 6)
/* USB || USBDA */
#define CST4330_CHIPMODE_USB20D(cs) (((cs) & 0x7) >= 6)
/* SDIO */
#define CST4330_CHIPMODE_SDIO(cs) (((cs) & 0x4) == 0)
/* gSPI */
#define CST4330_CHIPMODE_GSPI(cs) (((cs) & 0x6) == 4)
/* USB packet-oriented */
#define CST4330_CHIPMODE_USB(cs) (((cs) & 0x7) == 6)
/* USB Direct Access */
#define CST4330_CHIPMODE_USBDA(cs) (((cs) & 0x7) == 7)
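Editor's aside: the chip-mode macros above decode the low three bits of the 4330 ChipStatus register. A hedged sketch of how they might be used to classify the interface mode (the function is illustrative, not taken from the driver):
/* Illustration only: name the interface mode from 4330 chipstatus bits. */
static const char *cst4330_mode_name(u32 chipstatus)
{
	if (CST4330_CHIPMODE_SDIO(chipstatus))
		return "SDIO";
	if (CST4330_CHIPMODE_GSPI(chipstatus))
		return "gSPI";
	if (CST4330_CHIPMODE_USB(chipstatus))
		return "USB";
	if (CST4330_CHIPMODE_USBDA(chipstatus))
		return "USB Direct Access";
	return "unknown";
}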
#define CST4330_OTP_PRESENT 0x00000010
#define CST4330_LPO_AUTODET_EN 0x00000020
#define CST4330_ARMREMAP_0 0x00000040
#define CST4330_SPROM_PRESENT 0x00000080 /* takes priority over OTP if both set */
/* takes priority over OTP if both set */
#define CST4330_SPROM_PRESENT 0x00000080
#define CST4330_ILPDIV_EN 0x00000100
#define CST4330_LPO_SEL 0x00000200
#define CST4330_RES_INIT_MODE_SHIFT 10
......
......@@ -22,19 +22,32 @@
#include "main.h"
#include "ampdu.h"
#define AMPDU_MAX_MPDU 32 /* max number of mpdus in an ampdu */
#define AMPDU_NUM_MPDU_LEGACY 16 /* max number of mpdus in an ampdu to a legacy */
#define AMPDU_TX_BA_MAX_WSIZE 64 /* max Tx ba window size (in pdu) */
#define AMPDU_TX_BA_DEF_WSIZE 64 /* default Tx ba window size (in pdu) */
#define AMPDU_RX_BA_DEF_WSIZE 64 /* max Rx ba window size (in pdu) */
#define AMPDU_RX_BA_MAX_WSIZE 64 /* default Rx ba window size (in pdu) */
#define AMPDU_MAX_DUR 5 /* max dur of tx ampdu (in msec) */
#define AMPDU_DEF_RETRY_LIMIT 5 /* default tx retry limit */
#define AMPDU_DEF_RR_RETRY_LIMIT 2 /* default tx retry limit at reg rate */
#define AMPDU_DEF_TXPKT_WEIGHT 2 /* default weight of ampdu in txfifo */
#define AMPDU_DEF_FFPLD_RSVD 2048 /* default ffpld reserved bytes */
#define AMPDU_INI_FREE 10 /* # of inis to be freed on detach */
#define AMPDU_SCB_MAX_RELEASE 20 /* max # of mpdus released at a time */
/* max number of mpdus in an ampdu */
#define AMPDU_MAX_MPDU 32
/* max number of mpdus in an ampdu to a legacy */
#define AMPDU_NUM_MPDU_LEGACY 16
/* max Tx ba window size (in pdu) */
#define AMPDU_TX_BA_MAX_WSIZE 64
/* default Tx ba window size (in pdu) */
#define AMPDU_TX_BA_DEF_WSIZE 64
/* default Rx ba window size (in pdu) */
#define AMPDU_RX_BA_DEF_WSIZE 64
/* max Rx ba window size (in pdu) */
#define AMPDU_RX_BA_MAX_WSIZE 64
/* max dur of tx ampdu (in msec) */
#define AMPDU_MAX_DUR 5
/* default tx retry limit */
#define AMPDU_DEF_RETRY_LIMIT 5
/* default tx retry limit at reg rate */
#define AMPDU_DEF_RR_RETRY_LIMIT 2
/* default weight of ampdu in txfifo */
#define AMPDU_DEF_TXPKT_WEIGHT 2
/* default ffpld reserved bytes */
#define AMPDU_DEF_FFPLD_RSVD 2048
/* # of inis to be freed on detach */
#define AMPDU_INI_FREE 10
/* max # of mpdus released at a time */
#define AMPDU_SCB_MAX_RELEASE 20
#define NUM_FFPLD_FIFO 4 /* number of fifo concerned by pre-loading */
#define FFPLD_TX_MAX_UNFL 200 /* default value of the average number of ampdu
......@@ -59,44 +72,70 @@
* some counters might be redundant with the ones in wlc or ampdu structures.
* This allows to maintain a specific state independently of
* how often and/or when the wlc counters are updated.
*
* ampdu_pld_size: number of bytes to be pre-loaded
* mcs2ampdu_table: per-mcs max # of mpdus in an ampdu
* prev_txfunfl: num of underflows last read from the HW macstats counter
* accum_txfunfl: num of underflows since we modified pld params
* accum_txampdu: num of tx ampdu since we modified pld params
* prev_txampdu: previous reading of tx ampdu
* dmaxferrate: estimated dma avg xfer rate in kbits/sec
*/
struct brcms_fifo_info {
u16 ampdu_pld_size; /* number of bytes to be pre-loaded */
u8 mcs2ampdu_table[FFPLD_MAX_MCS + 1]; /* per-mcs max # of mpdus in an ampdu */
u16 prev_txfunfl; /* num of underflows last read from the HW macstats counter */
u32 accum_txfunfl; /* num of underflows since we modified pld params */
u32 accum_txampdu; /* num of tx ampdu since we modified pld params */
u32 prev_txampdu; /* previous reading of tx ampdu */
u32 dmaxferrate; /* estimated dma avg xfer rate in kbits/sec */
u16 ampdu_pld_size;
u8 mcs2ampdu_table[FFPLD_MAX_MCS + 1];
u16 prev_txfunfl;
u32 accum_txfunfl;
u32 accum_txampdu;
u32 prev_txampdu;
u32 dmaxferrate;
};
/* AMPDU module specific state */
/* AMPDU module specific state
*
* wlc: pointer to main wlc structure
* scb_handle: scb cubby handle to retrieve data from scb
* ini_enable: per-tid initiator enable/disable of ampdu
* ba_tx_wsize: Tx ba window size (in pdu)
* ba_rx_wsize: Rx ba window size (in pdu)
* retry_limit: mpdu transmit retry limit
* rr_retry_limit: mpdu transmit retry limit at regular rate
* retry_limit_tid: per-tid mpdu transmit retry limit
* rr_retry_limit_tid: per-tid mpdu transmit retry limit at regular rate
* mpdu_density: min mpdu spacing (0-7) ==> 2^(x-1)/8 usec
* max_pdu: max pdus allowed in ampdu
* dur: max duration of an ampdu (in msec)
* txpkt_weight: weight of ampdu in txfifo; reduces rate lag
* rx_factor: maximum rx ampdu factor (0-3) ==> 2^(13+x) bytes
* ffpld_rsvd: number of bytes to reserve for preload
* max_txlen: max size of ampdu per mcs, bw and sgi
* ini_free: array of ini's to be freed on detach
* mfbr: enable multiple fallback rate
* tx_max_funl: underflows should be kept such that
* (tx_max_funfl*underflows) < tx frames
* fifo_tb: table of fifo infos
*/
struct ampdu_info {
struct brcms_c_info *wlc; /* pointer to main wlc structure */
int scb_handle; /* scb cubby handle to retrieve data from scb */
u8 ini_enable[AMPDU_MAX_SCB_TID]; /* per-tid initiator enable/disable of ampdu */
u8 ba_tx_wsize; /* Tx ba window size (in pdu) */
u8 ba_rx_wsize; /* Rx ba window size (in pdu) */
u8 retry_limit; /* mpdu transmit retry limit */
u8 rr_retry_limit; /* mpdu transmit retry limit at regular rate */
u8 retry_limit_tid[AMPDU_MAX_SCB_TID]; /* per-tid mpdu transmit retry limit */
/* per-tid mpdu transmit retry limit at regular rate */
struct brcms_c_info *wlc;
int scb_handle;
u8 ini_enable[AMPDU_MAX_SCB_TID];
u8 ba_tx_wsize;
u8 ba_rx_wsize;
u8 retry_limit;
u8 rr_retry_limit;
u8 retry_limit_tid[AMPDU_MAX_SCB_TID];
u8 rr_retry_limit_tid[AMPDU_MAX_SCB_TID];
u8 mpdu_density; /* min mpdu spacing (0-7) ==> 2^(x-1)/8 usec */
s8 max_pdu; /* max pdus allowed in ampdu */
u8 dur; /* max duration of an ampdu (in msec) */
u8 txpkt_weight; /* weight of ampdu in txfifo; reduces rate lag */
u8 rx_factor; /* maximum rx ampdu factor (0-3) ==> 2^(13+x) bytes */
u32 ffpld_rsvd; /* number of bytes to reserve for preload */
u32 max_txlen[MCS_TABLE_SIZE][2][2]; /* max size of ampdu per mcs, bw and sgi */
void *ini_free[AMPDU_INI_FREE]; /* array of ini's to be freed on detach */
bool mfbr; /* enable multiple fallback rate */
u32 tx_max_funl; /* underflows should be kept such that
* (tx_max_funfl*underflows) < tx frames
*/
/* table of fifo infos */
u8 mpdu_density;
s8 max_pdu;
u8 dur;
u8 txpkt_weight;
u8 rx_factor;
u32 ffpld_rsvd;
u32 max_txlen[MCS_TABLE_SIZE][2][2];
void *ini_free[AMPDU_INI_FREE];
bool mfbr;
u32 tx_max_funl;
struct brcms_fifo_info fifo_tb[NUM_FFPLD_FIFO];
};
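Editor's aside: one field documented above, rx_factor, is encoded exponentially as 2^(13 + x) bytes. A small sketch of that conversion (the helper name is made up for illustration):
/* Illustration only: rx_factor 0..3 maps to 8 KB .. 64 KB. */
static u32 ampdu_rx_factor_to_bytes(u8 rx_factor)
{
	return 1u << (13 + rx_factor);
}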
/* used for flushing ampdu packets */
......@@ -163,7 +202,10 @@ struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc)
ampdu->txpkt_weight = AMPDU_DEF_TXPKT_WEIGHT;
ampdu->ffpld_rsvd = AMPDU_DEF_FFPLD_RSVD;
/* bump max ampdu rcv size to 64k for all 11n devices except 4321A0 and 4321A1 */
/*
* bump max ampdu rcv size to 64k for all 11n
* devices except 4321A0 and 4321A1
*/
if (BRCMS_ISNPHY(wlc->band) && NREV_LT(wlc->band->phyrev, 2))
ampdu->rx_factor = IEEE80211_HT_MAX_AMPDU_32K;
else
......@@ -194,7 +236,10 @@ void brcms_c_ampdu_detach(struct ampdu_info *ampdu)
if (!ampdu)
return;
/* free all ini's which were to be freed on callbacks which were never called */
/*
* free all ini's which were to be freed on
* callbacks which were never called
*/
for (i = 0; i < AMPDU_INI_FREE; i++)
kfree(ampdu->ini_free[i]);
......@@ -220,7 +265,8 @@ static void brcms_c_scb_ampdu_update_config(struct ampdu_info *ampdu,
if (ampdu->max_pdu != AUTO)
scb_ampdu->max_pdu = (u8) ampdu->max_pdu;
scb_ampdu->release = min_t(u8, scb_ampdu->max_pdu, AMPDU_SCB_MAX_RELEASE);
scb_ampdu->release = min_t(u8, scb_ampdu->max_pdu,
AMPDU_SCB_MAX_RELEASE);
if (scb_ampdu->max_rx_ampdu_bytes)
scb_ampdu->release = min_t(u8, scb_ampdu->release,
......@@ -321,8 +367,8 @@ static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
return 0;
}
max_mpdu =
min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS], AMPDU_NUM_MPDU_LEGACY);
max_mpdu = min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS],
AMPDU_NUM_MPDU_LEGACY);
/* In case max value max_pdu is already lower than
the fifo depth, there is nothing more we can do.
......@@ -344,11 +390,12 @@ static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
brcms_c_scb_ampdu_update_config_all(ampdu);
/*
compute a new dma xfer rate for max_mpdu @ max mcs.
This is the minimum dma rate that
can achieve no underflow condition for the current mpdu size.
* compute a new dma xfer rate for max_mpdu @ max mcs.
* This is the minimum dma rate that can achieve no
* underflow condition for the current mpdu size.
*
* note : we divide/multiply by 100 to avoid integer overflows
*/
/* note : we divide/multiply by 100 to avoid integer overflows */
fifo->dmaxferrate =
(((phy_rate / 100) *
(max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
......@@ -387,8 +434,8 @@ static void brcms_c_ffpld_calc_mcs2ampdu_table(struct ampdu_info *ampdu, int f)
/* recompute the dma rate */
/* note : we divide/multiply by 100 to avoid integer overflows */
max_mpdu =
min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS], AMPDU_NUM_MPDU_LEGACY);
max_mpdu = min_t(u8, fifo->mcs2ampdu_table[FFPLD_MAX_MCS],
AMPDU_NUM_MPDU_LEGACY);
phy_rate = MCS_RATE(FFPLD_MAX_MCS, true, false);
dma_rate =
(((phy_rate / 100) *
......@@ -666,9 +713,13 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
if (count == scb_ampdu->max_pdu)
break;
/* check to see if the next pkt is a candidate for aggregation */
/*
* check to see if the next pkt is
* a candidate for aggregation
*/
p = pktq_ppeek(&qi->q, prec);
tx_info = IEEE80211_SKB_CB(p); /* tx_info must be checked with current p */
/* tx_info must be checked with current p */
tx_info = IEEE80211_SKB_CB(p);
if (p) {
if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
......@@ -683,7 +734,10 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
continue;
}
/* check if there are enough descriptors available */
/*
* check if there are enough
* descriptors available
*/
if (TXAVAIL(wlc, fifo) <= (seg_cnt + 1)) {
wiphy_err(wiphy, "%s: No fifo space "
"!!\n", __func__);
......@@ -962,10 +1016,13 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
supr_status == TX_STATUS_SUPR_EXPTIME) {
retry = false;
} else if (supr_status == TX_STATUS_SUPR_EXPTIME) {
/* TX underflow : try tuning pre-loading or ampdu size */
/* TX underflow:
* try tuning pre-loading or ampdu size
*/
} else if (supr_status == TX_STATUS_SUPR_FRAG) {
/* if there were underflows, but pre-loading is not active,
notify rate adaptation.
/*
* if there were underflows, but pre-loading
* is not active, notify rate adaptation.
*/
if (brcms_c_ffpld_check_txfunfl(wlc,
prio2fifo[tid]) > 0)
......@@ -1013,7 +1070,10 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
ini->tx_in_transit--;
ini->txretry[index] = 0;
/* ampdu_ack_len: number of acked aggregated frames */
/*
* ampdu_ack_len:
* number of acked aggregated frames
*/
/* ampdu_len: number of aggregated frames */
brcms_c_ampdu_rate_status(wlc, tx_info, txs,
mcs);
......@@ -1038,11 +1098,14 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
if (retry && (txrate[0].count < (int)retry_limit)) {
ini->txretry[index]++;
ini->tx_in_transit--;
/* Use high prededence for retransmit to give some punch */
/*
* Use high prededence for retransmit to
* give some punch
*/
/* brcms_c_txq_enq(wlc, scb, p,
* BRCMS_PRIO_TO_PREC(tid)); */
brcms_c_txq_enq(wlc, scb, p,
BRCMS_PRIO_TO_HI_PREC(tid));
BRCMS_PRIO_TO_HI_PREC(tid));
} else {
/* Retry timeout */
ini->tx_in_transit--;
......@@ -1150,7 +1213,10 @@ void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu)
{
struct brcms_c_info *wlc = ampdu->wlc;
/* Extend ucode internal watchdog timer to match larger received frames */
/*
* Extend ucode internal watchdog timer to
* match larger received frames
*/
if ((ampdu->rx_factor & IEEE80211_HT_AMPDU_PARM_FACTOR) ==
IEEE80211_HT_MAX_AMPDU_64K) {
brcms_c_write_shm(wlc, M_MIMO_MAXSYM, MIMO_MAXSYM_MAX);
......
......@@ -259,7 +259,10 @@ static u8 brcms_c_antsel_id2antcfg(struct antsel_info *asi, u8 id)
return antcfg;
}
/* boardlevel antenna selection: convert ant_cfg to mimo_antsel (ucode interface) */
/*
* boardlevel antenna selection:
* convert ant_cfg to mimo_antsel (ucode interface)
*/
static u16 brcms_c_antsel_antcfg2antsel(struct antsel_info *asi, u8 ant_cfg)
{
u8 idx = BRCMS_ANTIDX_11N(BRCMS_ANTSEL_11N(ant_cfg));
......@@ -293,7 +296,10 @@ static int brcms_c_antsel_cfgupd(struct antsel_info *asi,
ant_cfg = antsel->ant_config[ANT_SELCFG_TX_DEF];
mimo_antsel = brcms_c_antsel_antcfg2antsel(asi, ant_cfg);
brcms_c_write_shm(wlc, M_MIMO_ANTSEL_TXDFLT, mimo_antsel);
/* Update driver stats for currently selected default tx/rx antenna config */
/*
* Update driver stats for currently selected
* default tx/rx antenna config
*/
asi->antcfg_cur.ant_config[ANT_SELCFG_TX_DEF] = ant_cfg;
/* 2) Update RX antconfig for all frames that are not unicast data
......@@ -302,7 +308,10 @@ static int brcms_c_antsel_cfgupd(struct antsel_info *asi,
ant_cfg = antsel->ant_config[ANT_SELCFG_RX_DEF];
mimo_antsel = brcms_c_antsel_antcfg2antsel(asi, ant_cfg);
brcms_c_write_shm(wlc, M_MIMO_ANTSEL_RXDFLT, mimo_antsel);
/* Update driver stats for currently selected default tx/rx antenna config */
/*
* Update driver stats for currently selected
* default tx/rx antenna config
*/
asi->antcfg_cur.ant_config[ANT_SELCFG_RX_DEF] = ant_cfg;
return 0;
......
......@@ -151,9 +151,9 @@ const struct brcms_chanvec chanvec_all_5G = {
/* Channels 52 - 64, 100 - 140 */
static const struct brcms_chanvec radar_set1 = {
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, /* 52 - 60 */
0x01, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x11, /* 64, 100 - 124 */
0x11, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 128 - 140 */
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, /* 52 - 60 */
0x01, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x11, /* 64, 100 - 124 */
0x11, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 128 - 140 */
0x00, 0x00, 0x00, 0x00}
};
......@@ -220,15 +220,19 @@ static const struct brcms_chanvec restricted_set_12_13_14 = {
#define LOCALE_CHAN_52_140_ALL (1<<14)
#define LOCALE_SET_5G_HIGH4 (1<<15) /* 184-216 */
#define LOCALE_CHAN_36_64 (LOCALE_SET_5G_LOW1 | LOCALE_SET_5G_LOW2 | LOCALE_SET_5G_LOW3)
#define LOCALE_CHAN_52_64 (LOCALE_SET_5G_LOW2 | LOCALE_SET_5G_LOW3)
#define LOCALE_CHAN_100_124 (LOCALE_SET_5G_MID1 | LOCALE_SET_5G_MID2)
#define LOCALE_CHAN_100_140 \
(LOCALE_SET_5G_MID1 | LOCALE_SET_5G_MID2 | LOCALE_SET_5G_MID3 | LOCALE_SET_5G_HIGH1)
#define LOCALE_CHAN_149_165 (LOCALE_SET_5G_HIGH2 | LOCALE_SET_5G_HIGH3)
#define LOCALE_CHAN_184_216 LOCALE_SET_5G_HIGH4
#define LOCALE_CHAN_36_64 (LOCALE_SET_5G_LOW1 | \
LOCALE_SET_5G_LOW2 | \
LOCALE_SET_5G_LOW3)
#define LOCALE_CHAN_52_64 (LOCALE_SET_5G_LOW2 | LOCALE_SET_5G_LOW3)
#define LOCALE_CHAN_100_124 (LOCALE_SET_5G_MID1 | LOCALE_SET_5G_MID2)
#define LOCALE_CHAN_100_140 (LOCALE_SET_5G_MID1 | LOCALE_SET_5G_MID2 | \
LOCALE_SET_5G_MID3 | LOCALE_SET_5G_HIGH1)
#define LOCALE_CHAN_149_165 (LOCALE_SET_5G_HIGH2 | LOCALE_SET_5G_HIGH3)
#define LOCALE_CHAN_184_216 LOCALE_SET_5G_HIGH4
#define LOCALE_CHAN_01_14 (LOCALE_CHAN_01_11 | LOCALE_CHAN_12_13 | LOCALE_CHAN_14)
#define LOCALE_CHAN_01_14 (LOCALE_CHAN_01_11 | \
LOCALE_CHAN_12_13 | \
LOCALE_CHAN_14)
#define LOCALE_RADAR_SET_NONE 0
#define LOCALE_RADAR_SET_1 1
......@@ -500,7 +504,8 @@ static const struct locale_mimo_info *g_mimo_5g_table[] = {
#endif
#define LC_5G(id) LOCALE_5G_IDX_ ## id
#define LOCALES(band2, band5, mimo2, mimo5) {LC_2G(band2), LC_5G(band5), LC(mimo2), LC(mimo5)}
#define LOCALES(band2, band5, mimo2, mimo5) \
{LC_2G(band2), LC_5G(band5), LC(mimo2), LC(mimo5)}
static const struct {
char abbrev[BRCM_CNTRY_BUF_SZ]; /* country abbreviation */
......@@ -644,7 +649,10 @@ struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
if (ccode)
strncpy(wlc->pub->srom_ccode, ccode, BRCM_CNTRY_BUF_SZ - 1);
/* internal country information which must match regulatory constraints in firmware */
/*
* internal country information which must match
* regulatory constraints in firmware
*/
memset(country_abbrev, 0, BRCM_CNTRY_BUF_SZ);
strncpy(country_abbrev, "X2", sizeof(country_abbrev) - 1);
country = brcms_c_country_lookup(wlc, country_abbrev);
......@@ -672,8 +680,10 @@ brcms_c_channel_locale_flags_in_band(struct brcms_cm_info *wlc_cm,
return wlc_cm->bandstate[bandunit].locale_flags;
}
/* set the driver's current country and regulatory information using a country code
* as the source. Lookup built in country information found with the country code.
/*
* set the driver's current country and regulatory information using
* a country code as the source. Lookup built in country information
* found with the country code.
*/
static int
brcms_c_set_countrycode(struct brcms_cm_info *wlc_cm, const char *ccode)
......@@ -696,7 +706,10 @@ brcms_c_set_countrycode_rev(struct brcms_cm_info *wlc_cm,
* otherwise use the ccode and regrev directly
*/
if (regrev == -1) {
/* map the country code to a built-in country code, regrev, and country_info */
/*
* map the country code to a built-in country
* code, regrev, and country_info
*/
country =
brcms_c_countrycode_map(wlc_cm, ccode, mapped_ccode,
&mapped_regrev);
......@@ -717,8 +730,10 @@ brcms_c_set_countrycode_rev(struct brcms_cm_info *wlc_cm,
return 0;
}
/* set the driver's current country and regulatory information using a country code
* as the source. Look up built in country information found with the country code.
/*
* set the driver's current country and regulatory information
* using a country code as the source. Look up built in country
* information found with the country code.
*/
static void
brcms_c_set_country_common(struct brcms_cm_info *wlc_cm,
......@@ -777,7 +792,10 @@ brcms_c_country_lookup(struct brcms_c_info *wlc, const char *ccode)
char mapped_ccode[BRCM_CNTRY_BUF_SZ];
uint mapped_regrev;
/* map the country code to a built-in country code, regrev, and country_info struct */
/*
* map the country code to a built-in country code, regrev, and
* country_info struct
*/
country = brcms_c_countrycode_map(wlc->cmi, ccode, mapped_ccode,
&mapped_regrev);
......@@ -850,7 +868,10 @@ brcms_c_country_lookup_direct(const char *ccode, uint regrev)
/* Should just return 0 for single locale driver. */
/* Keep it this way in case we add more locales. (for now anyway) */
/* all other country def arrays are for regrev == 0, so if regrev is non-zero, fail */
/*
* all other country def arrays are for regrev == 0, so if
* regrev is non-zero, fail
*/
if (regrev > 0)
return NULL;
......@@ -895,8 +916,9 @@ brcms_c_channels_init(struct brcms_cm_info *wlc_cm,
wlc_cm->bandstate[band->bandunit].radar_channels =
g_table_radar_set[li->radar_channels];
/* set the channel availability,
* masking out the channels that may not be supported on this phy
/*
* set the channel availability, masking out the channels
* that may not be supported on this phy.
*/
wlc_phy_chanspec_band_validch(band->pi, band->bandtype,
&sup_chan);
......@@ -931,9 +953,15 @@ static void brcms_c_channels_commit(struct brcms_cm_info *wlc_cm)
if (chan == MAXCHANNEL)
chan = INVCHANNEL;
/* based on the channel search above, set or clear WL_RADIO_COUNTRY_DISABLE */
/*
* based on the channel search above, set or
* clear WL_RADIO_COUNTRY_DISABLE.
*/
if (chan == INVCHANNEL) {
/* country/locale with no valid channels, set the radio disable bit */
/*
* country/locale with no valid channels, set
* the radio disable bit
*/
mboolset(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
wiphy_err(wlc->wiphy, "wl%d: %s: no valid channel for \"%s\" "
"nbands %d bandlocked %d\n", wlc->pub->unit,
......@@ -941,12 +969,16 @@ static void brcms_c_channels_commit(struct brcms_cm_info *wlc_cm)
wlc->bandlocked);
} else if (mboolisset(wlc->pub->radio_disabled,
WL_RADIO_COUNTRY_DISABLE)) {
/* country/locale with valid channel, clear the radio disable bit */
/*
* country/locale with valid channel, clear
* the radio disable bit
*/
mboolclr(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
}
/* Now that the country abbreviation is set, if the radio supports 2G, then
* set channel 14 restrictions based on the new locale.
/*
* Now that the country abbreviation is set, if the radio supports 2G,
* then set channel 14 restrictions based on the new locale.
*/
if (NBANDS(wlc) > 1 || BAND_2G(wlc->band->bandtype))
wlc_phy_chanspec_ch14_widefilter_set(wlc->band->pi,
......@@ -961,7 +993,10 @@ static void brcms_c_channels_commit(struct brcms_cm_info *wlc_cm)
}
}
/* reset the quiet channels vector to the union of the restricted and radar channel sets */
/*
* reset the quiet channels vector to the union
* of the restricted and radar channel sets
*/
static void brcms_c_quiet_channels_reset(struct brcms_cm_info *wlc_cm)
{
struct brcms_c_info *wlc = wlc_cm->wlc;
......@@ -987,15 +1022,11 @@ static bool
brcms_c_quiet_chanspec(struct brcms_cm_info *wlc_cm, u16 chspec)
{
return N_ENAB(wlc_cm->wlc->pub) && CHSPEC_IS40(chspec) ?
(isset
(wlc_cm->quiet_channels.vec,
LOWER_20_SB(CHSPEC_CHANNEL(chspec)))
|| isset(wlc_cm->quiet_channels.vec,
UPPER_20_SB(CHSPEC_CHANNEL(chspec)))) : isset(wlc_cm->
quiet_channels.
vec,
CHSPEC_CHANNEL
(chspec));
(isset(wlc_cm->quiet_channels.vec,
LOWER_20_SB(CHSPEC_CHANNEL(chspec))) ||
isset(wlc_cm->quiet_channels.vec,
UPPER_20_SB(CHSPEC_CHANNEL(chspec)))) :
isset(wlc_cm->quiet_channels.vec, CHSPEC_CHANNEL(chspec));
}
/* Is the channel valid for the current locale? (but don't consider channels not
......@@ -1112,8 +1143,9 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr);
brcms_c_channel_min_txpower_limits_with_local_constraint(wlc_cm, &txpwr,
local_constraint_qdbm);
brcms_c_channel_min_txpower_limits_with_local_constraint(
wlc_cm, &txpwr, local_constraint_qdbm
);
brcms_b_set_chanspec(wlc->hw, chanspec,
(brcms_c_quiet_chanspec(wlc_cm, chanspec) != 0),
......@@ -1317,10 +1349,12 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
txpwr->ofdm[i] = (u8) maxpwr;
for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) {
/* OFDM 40 MHz SISO has the same power as the corresponding MCS0-7 rate unless
* overriden by the locale specific code. We set this value to 0 as a
* flag (presumably 0 dBm isn't a possibility) and then copy the MCS0-7 value
* to the 40 MHz value if it wasn't explicitly set.
/*
* OFDM 40 MHz SISO has the same power as the corresponding
* MCS0-7 rate unless overriden by the locale specific code.
* We set this value to 0 as a flag (presumably 0 dBm isn't
* a possibility) and then copy the MCS0-7 value to the 40 MHz
* value if it wasn't explicitly set.
*/
txpwr->ofdm_40_siso[i] = 0;
......@@ -1354,8 +1388,9 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
/* Fill in the MCS 0-7 (SISO) rates */
for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
/* 20 MHz has the same power as the corresponding OFDM rate unless
* overriden by the locale specific code.
/*
* 20 MHz has the same power as the corresponding OFDM rate
* unless overriden by the locale specific code.
*/
txpwr->mcs_20_siso[i] = txpwr->ofdm[i];
txpwr->mcs_40_siso[i] = 0;
......@@ -1367,7 +1402,10 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
txpwr->mcs_40_cdd[i] = (u8) maxpwr40;
}
/* These locales have SISO expressed in the table and override CDD later */
/*
* These locales have SISO expressed in the
* table and override CDD later
*/
if (li_mimo == &locale_bn) {
if (li_mimo == &locale_bn) {
maxpwr20 = QDB(16);
......@@ -1408,10 +1446,10 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
}
}
/* Copy the 40 MHZ MCS 0-7 CDD value to the 40 MHZ MCS 0-7 SISO value if it wasn't
* provided explicitly.
/*
* Copy the 40 MHZ MCS 0-7 CDD value to the 40 MHZ MCS 0-7 SISO
* value if it wasn't provided explicitly.
*/
for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
if (txpwr->mcs_40_siso[i] == 0)
txpwr->mcs_40_siso[i] = txpwr->mcs_40_cdd[i];
......@@ -1427,8 +1465,9 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
}
}
/* Copy the 20 and 40 MHz MCS0-7 CDD values to the corresponding STBC values if they weren't
* provided explicitly.
/*
* Copy the 20 and 40 MHz MCS0-7 CDD values to the corresponding
* STBC values if they weren't provided explicitly.
*/
for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
if (txpwr->mcs_20_stbc[i] == 0)
......@@ -1458,8 +1497,9 @@ static bool brcms_c_japan_ccode(const char *ccode)
}
/*
* Validate the chanspec for this locale, for 40MHZ we need to also check that the sidebands
* are valid 20MZH channels in this locale and they are also a legal HT combination
* Validate the chanspec for this locale, for 40MHZ we need to also
* check that the sidebands are valid 20MZH channels in this locale
* and they are also a legal HT combination
*/
static bool
brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, u16 chspec,
......@@ -1487,8 +1527,9 @@ brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, u16 chspec,
return VALID_CHANNEL20(wlc_cm->wlc, channel);
}
#ifdef SUPPORT_40MHZ
/* We know we are now checking a 40MHZ channel, so we should only be here
* for NPHYS
/*
* We know we are now checking a 40MHZ channel, so we should
* only be here for NPHYS
*/
if (BRCMS_ISNPHY(wlc->band) || BRCMS_ISSSLPNPHY(wlc->band)) {
u8 upper_sideband = 0, idx;
......
......@@ -40,12 +40,14 @@
*/
/* macro to get 2.4 GHz channel group index for tx power */
#define CHANNEL_POWER_IDX_2G_CCK(c) (((c) < 2) ? 0 : (((c) < 11) ? 1 : 2)) /* cck index */
#define CHANNEL_POWER_IDX_2G_OFDM(c) (((c) < 2) ? 3 : (((c) < 11) ? 4 : 5)) /* ofdm index */
#define CHANNEL_POWER_IDX_2G_CCK(c) (((c) < 2) ? 0 : (((c) < 11) ? 1 : 2))
#define CHANNEL_POWER_IDX_2G_OFDM(c) (((c) < 2) ? 3 : (((c) < 11) ? 4 : 5))
/* macro to get 5 GHz channel group index for tx power */
#define CHANNEL_POWER_IDX_5G(c) \
(((c) < 52) ? 0 : (((c) < 62) ? 1 : (((c) < 100) ? 2 : (((c) < 149) ? 3 : 4))))
#define CHANNEL_POWER_IDX_5G(c) (((c) < 52) ? 0 : \
(((c) < 62) ? 1 : \
(((c) < 100) ? 2 : \
(((c) < 149) ? 3 : 4))))
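Editor's aside: for reference, a few example values the grouping macros above would produce (illustrative only; the channels chosen are arbitrary):
/* Illustration only: indices produced by the grouping macros above. */
static void channel_power_idx_examples(void)
{
	int i;

	i = CHANNEL_POWER_IDX_2G_CCK(1);	/* 0: channel 1           */
	i = CHANNEL_POWER_IDX_2G_OFDM(6);	/* 4: channels 2..10      */
	i = CHANNEL_POWER_IDX_5G(36);		/* 0: channels below 52   */
	i = CHANNEL_POWER_IDX_5G(64);		/* 2: channels 62..99     */
	i = CHANNEL_POWER_IDX_5G(157);		/* 4: channels 149 and up */
	(void)i;
}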
/* max of BAND_5G_PWR_LVLS and 6 for 2.4 GHz */
#define BRCMS_MAXPWR_TBL_SIZE 6
......@@ -67,9 +69,8 @@ struct locale_info {
u8 restricted_channels;
/* Max tx pwr in qdBm for each sub-band */
s8 maxpwr[BRCMS_MAXPWR_TBL_SIZE];
s8 pub_maxpwr[BAND_5G_PWR_LVLS]; /* Country IE advertised max tx pwr in dBm
* per sub-band
*/
/* Country IE advertised max tx pwr in dBm per sub-band */
s8 pub_maxpwr[BAND_5G_PWR_LVLS];
u8 flags;
};
......
......@@ -28,7 +28,8 @@
#include "dma.h"
/*
* Each descriptor ring must be 8kB aligned, and fit within a contiguous 8kB physical address.
* Each descriptor ring must be 8kB aligned, and fit within a
* contiguous 8kB physical address.
*/
#define D64RINGALIGN_BITS 13
#define D64MAXRINGSZ (1 << D64RINGALIGN_BITS)
......@@ -69,21 +70,32 @@
#define D64_XS1_XE_COREE 0x50000000 /* core error */
/* receive channel control */
#define D64_RC_RE 0x00000001 /* receive enable */
#define D64_RC_RO_MASK 0x000000fe /* receive frame offset */
/* receive enable */
#define D64_RC_RE 0x00000001
/* receive frame offset */
#define D64_RC_RO_MASK 0x000000fe
#define D64_RC_RO_SHIFT 1
#define D64_RC_FM 0x00000100 /* direct fifo receive (pio) mode */
#define D64_RC_SH 0x00000200 /* separate rx header descriptor enable */
#define D64_RC_OC 0x00000400 /* overflow continue */
#define D64_RC_PD 0x00000800 /* parity check disable */
#define D64_RC_AE 0x00030000 /* address extension bits */
/* direct fifo receive (pio) mode */
#define D64_RC_FM 0x00000100
/* separate rx header descriptor enable */
#define D64_RC_SH 0x00000200
/* overflow continue */
#define D64_RC_OC 0x00000400
/* parity check disable */
#define D64_RC_PD 0x00000800
/* address extension bits */
#define D64_RC_AE 0x00030000
#define D64_RC_AE_SHIFT 16
/* flags for dma controller */
#define DMA_CTRL_PEN (1 << 0) /* partity enable */
#define DMA_CTRL_ROC (1 << 1) /* rx overflow continue */
#define DMA_CTRL_RXMULTI (1 << 2) /* allow rx scatter to multiple descriptors */
#define DMA_CTRL_UNFRAMED (1 << 3) /* Unframed Rx/Tx data */
/* partity enable */
#define DMA_CTRL_PEN (1 << 0)
/* rx overflow continue */
#define DMA_CTRL_ROC (1 << 1)
/* allow rx scatter to multiple descriptors */
#define DMA_CTRL_RXMULTI (1 << 2)
/* Unframed Rx/Tx data */
#define DMA_CTRL_UNFRAMED (1 << 3)
/* receive descriptor table pointer */
#define D64_RP_LD_MASK 0x00000fff /* last valid descriptor */
......@@ -131,10 +143,13 @@
#define D64_CTRL1_SOF ((u32)1 << 31) /* start of frame */
/* descriptor control flags 2 */
#define D64_CTRL2_BC_MASK 0x00007fff /* buffer byte count. real data len must <= 16KB */
#define D64_CTRL2_AE 0x00030000 /* address extension bits */
/* buffer byte count. real data len must <= 16KB */
#define D64_CTRL2_BC_MASK 0x00007fff
/* address extension bits */
#define D64_CTRL2_AE 0x00030000
#define D64_CTRL2_AE_SHIFT 16
#define D64_CTRL2_PARITY 0x00040000 /* parity bit */
/* parity bit */
#define D64_CTRL2_PARITY 0x00040000
/* control flags in the range [27:20] are core-specific and not defined here */
#define D64_CTRL_CORE_MASK 0x0ff00000
......@@ -149,10 +164,17 @@
#define DMADDRWIDTH_63 63 /* 64-bit addressing capability */
#define DMADDRWIDTH_64 64 /* 64-bit addressing capability */
/* packet headroom necessary to accommodate the largest header in the system, (i.e TXOFF).
* By doing, we avoid the need to allocate an extra buffer for the header when bridging to WL.
* There is a compile time check in wlc.c which ensure that this value is at least as big
* as TXOFF. This value is used in dma_rxfill (dma.c).
#define DMA64_DD_PARITY(dd) \
parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
/*
* packet headroom necessary to accommodate the largest header
* in the system, (i.e TXOFF). By doing, we avoid the need to
* allocate an extra buffer for the header when bridging to WL.
* There is a compile time check in wlc.c which ensure that this
* value is at least as big as TXOFF. This value is used in
* dma_rxfill().
*/
#define BCMEXTRAHDROOM 172
......@@ -193,7 +215,10 @@
#define txd64 dregs.d64_u.txd_64
#define rxd64 dregs.d64_u.rxd_64
/* default dma message level (if input msg_level pointer is null in dma_attach()) */
/*
* default dma message level (if input msg_level
* pointer is null in dma_attach())
*/
static uint dma_msg_level;
#define MAXNAMEL 8 /* 8 char names */
......@@ -210,8 +235,8 @@ struct dma_seg {
};
struct dma_seg_map {
void *oshdmah; /* Opaque handle for OSL to store its information */
uint origsize; /* Size of the virtual packet */
void *oshdmah; /* Opaque handle for OSL to store its information */
uint origsize; /* Size of the virtual packet */
uint nsegs;
struct dma_seg segs[MAX_DMA_SEGS];
};
......@@ -221,9 +246,9 @@ struct dma_seg_map {
* Descriptors are only read by the hardware, never written back.
*/
struct dma64desc {
u32 ctrl1; /* misc control bits & bufcount */
u32 ctrl2; /* buffer count and address extension */
u32 addrlow; /* memory address of the date buffer, bits 31:0 */
u32 ctrl1; /* misc control bits & bufcount */
u32 ctrl2; /* buffer count and address extension */
u32 addrlow; /* memory address of the date buffer, bits 31:0 */
u32 addrhigh; /* memory address of the date buffer, bits 63:32 */
};
......@@ -235,8 +260,8 @@ struct dma_info {
void *pbus; /* bus handle */
bool dma64; /* this dma engine is operating in 64-bit mode */
bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
bool dma64; /* this dma engine is operating in 64-bit mode */
bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
union {
struct {
......@@ -256,7 +281,7 @@ struct dma_info {
u16 ntxd; /* # tx descriptors tunable */
u16 txin; /* index of next descriptor to reclaim */
u16 txout; /* index of next descriptor to post */
void **txp; /* pointer to parallel array of pointers to packets */
void **txp; /* pointer to parallel array of pointers to packets */
struct dma_seg_map *txp_dmah; /* DMA MAP meta-data handle */
/* Aligned physical address of descriptor ring */
unsigned long txdpa;
......@@ -265,14 +290,14 @@ struct dma_info {
u16 txdalign; /* #bytes added to alloc'd mem to align txd */
u32 txdalloc; /* #bytes allocated for the ring */
u32 xmtptrbase; /* When using unaligned descriptors, the ptr register
* is not just an index, it needs all 13 bits to be
* an offset from the addr register.
*/
* is not just an index, it needs all 13 bits to be
* an offset from the addr register.
*/
u16 nrxd; /* # rx descriptors tunable */
u16 rxin; /* index of next descriptor to reclaim */
u16 rxout; /* index of next descriptor to post */
void **rxp; /* pointer to parallel array of pointers to packets */
u16 nrxd; /* # rx descriptors tunable */
u16 rxin; /* index of next descriptor to reclaim */
u16 rxout; /* index of next descriptor to post */
void **rxp; /* pointer to parallel array of pointers to packets */
struct dma_seg_map *rxp_dmah; /* DMA MAP meta-data handle */
/* Aligned physical address of descriptor ring */
unsigned long rxdpa;
......@@ -283,25 +308,34 @@ struct dma_info {
u32 rcvptrbase; /* Base for ptr reg when using unaligned descriptors */
/* tunables */
unsigned int rxbufsize; /* rx buffer size in bytes,
* not including the extra headroom
unsigned int rxbufsize; /* rx buffer size in bytes, not including
* the extra headroom
*/
uint rxextrahdrroom; /* extra rx headroom, reverseved to assist upper stack
* e.g. some rx pkt buffers will be bridged to tx side
* without byte copying. The extra headroom needs to be
* large enough to fit txheader needs.
* Some dongle driver may not need it.
uint rxextrahdrroom; /* extra rx headroom, reverseved to assist upper
* stack, e.g. some rx pkt buffers will be
* bridged to tx side without byte copying.
* The extra headroom needs to be large enough
* to fit txheader needs. Some dongle driver may
* not need it.
*/
uint nrxpost; /* # rx buffers to keep posted */
unsigned int rxoffset; /* rxcontrol offset */
uint ddoffsetlow; /* add to get dma address of descriptor ring, low 32 bits */
uint ddoffsethigh; /* high 32 bits */
uint dataoffsetlow; /* add to get dma address of data buffer, low 32 bits */
uint dataoffsethigh; /* high 32 bits */
bool aligndesc_4k; /* descriptor base need to be aligned or not */
/* add to get dma address of descriptor ring, low 32 bits */
uint ddoffsetlow;
/* high 32 bits */
uint ddoffsethigh;
/* add to get dma address of data buffer, low 32 bits */
uint dataoffsetlow;
/* high 32 bits */
uint dataoffsethigh;
/* descriptor base need to be aligned or not */
bool aligndesc_4k;
};
/* DMA Scatter-gather list is supported. Note this is limited to TX direction only */
/*
* DMA Scatter-gather list is supported. Note this is limited to TX
* direction only
*/
#ifdef BCMDMASGLISTOSL
#define DMASGLIST_ENAB true
#else
......@@ -309,7 +343,9 @@ struct dma_info {
#endif /* BCMDMASGLISTOSL */
/* descriptor bumping macros */
#define XXD(x, n) ((x) & ((n) - 1)) /* faster than %, but n must be power of 2 */
/* faster than %, but n must be power of 2 */
#define XXD(x, n) ((x) & ((n) - 1))
#define TXD(x) XXD((x), di->ntxd)
#define RXD(x) XXD((x), di->nrxd)
#define NEXTTXD(i) TXD((i) + 1)
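Editor's aside: the XXD() mask-and-wrap trick relies on n being a power of two, in which case (x & (n - 1)) equals x % n. A standalone sketch demonstrating the equivalence (not driver code):
/* Illustration only: masking equals modulo when n is a power of 2. */
static int xxd_equivalence_demo(void)
{
	unsigned int x = 1029, n = 256;

	return (x & (n - 1)) == (x % n);	/* 1: both sides equal 5 */
}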
......@@ -465,10 +501,10 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
di->d64rxregs = (struct dma64regs *) dmaregsrx;
di->dma.di_fn = (const struct di_fcn_s *)&dma64proc;
/* Default flags (which can be changed by the driver calling dma_ctrlflags
* before enable): For backwards compatibility both Rx Overflow Continue
* and Parity are DISABLED.
* supports it.
/*
* Default flags (which can be changed by the driver calling
* dma_ctrlflags before enable): For backwards compatibility
* both Rx Overflow Continue and Parity are DISABLED.
*/
di->dma.di_fn->ctrlflags(&di->dma, DMA_CTRL_ROC | DMA_CTRL_PEN,
0);
......@@ -502,9 +538,10 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
/*
* figure out the DMA physical address offset for dd and data
* PCI/PCIE: they map silicon backplace address to zero based memory, need offset
* Other bus: use zero
* SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
* PCI/PCIE: they map silicon backplace address to zero
* based memory, need offset
* Other bus: use zero SI_BUS BIGENDIAN kludge: use sdram
* swapped region for data buffer, not descriptor
*/
di->ddoffsetlow = 0;
di->dataoffsetlow = 0;
......@@ -529,7 +566,7 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
else
di->addrext = _dma_isaddrext(di);
/* does the descriptors need to be aligned and if yes, on 4K/8K or not */
/* does the descriptor need to be aligned and if yes, on 4K/8K or not */
di->aligndesc_4k = _dma_descriptor_align(di);
if (di->aligndesc_4k) {
di->dmadesc_align = D64RINGALIGN_BITS;
......@@ -548,7 +585,8 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
size = ntxd * sizeof(void *);
di->txp = kzalloc(size, GFP_ATOMIC);
if (di->txp == NULL) {
DMA_ERROR(("%s: dma_attach: out of tx memory\n", di->name));
DMA_ERROR(("%s: dma_attach: out of tx memory\n",
di->name));
goto fail;
}
}
......@@ -558,18 +596,25 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
size = nrxd * sizeof(void *);
di->rxp = kzalloc(size, GFP_ATOMIC);
if (di->rxp == NULL) {
DMA_ERROR(("%s: dma_attach: out of rx memory\n", di->name));
DMA_ERROR(("%s: dma_attach: out of rx memory\n",
di->name));
goto fail;
}
}
/* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
/*
* allocate transmit descriptor ring, only need ntxd descriptors
* but it must be aligned
*/
if (ntxd) {
if (!_dma_alloc(di, DMA_TX))
goto fail;
}
/* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
/*
* allocate receive descriptor ring, only need nrxd descriptors
* but it must be aligned
*/
if (nrxd) {
if (!_dma_alloc(di, DMA_RX))
goto fail;
......@@ -577,16 +622,23 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
if ((di->ddoffsetlow != 0) && !di->addrext) {
if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not supported\n", di->name, (u32) PHYSADDRLO(di->txdpa)));
DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not "
"supported\n", di->name,
(u32)PHYSADDRLO(di->txdpa)));
goto fail;
}
if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not supported\n", di->name, (u32) PHYSADDRLO(di->rxdpa)));
DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not "
"supported\n", di->name,
(u32)PHYSADDRLO(di->rxdpa)));
goto fail;
}
}
DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh, di->addrext));
DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
"dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow,
di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
di->addrext));
/* allocate DMA mapping vectors */
if (DMASGLIST_ENAB) {
......@@ -624,8 +676,6 @@ static inline u32 parity32(u32 data)
return data & 1;
}
#define DMA64_DD_PARITY(dd) parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
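Editor's aside: DMA64_DD_PARITY() folds all four descriptor words into one parity bit via parity32(). A hedged sketch of a typical use, assuming the D64_CTRL2_PARITY bit defined earlier starts out clear and is set so the descriptor's overall parity comes out even (this helper is illustrative, not the driver's code path):
/* Illustration only: force even parity over a 64-bit DMA descriptor. */
static void dd_force_even_parity(struct dma64desc *dd)
{
	/* parity bit assumed clear on entry; setting it flips odd to even */
	if (DMA64_DD_PARITY(dd))
		dd->ctrl2 |= D64_CTRL2_PARITY;
}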
static inline void
dma64_dd_upd(struct dma_info *di, struct dma64desc *ddring,
unsigned long pa, uint outidx, u32 *flags, u32 bufcount)
......@@ -735,7 +785,10 @@ static bool _dma_descriptor_align(struct dma_info *di)
return true;
}
/* return true if this dma engine supports DmaExtendedAddrChanges, otherwise false */
/*
* return true if this dma engine supports DmaExtendedAddrChanges,
* otherwise false
*/
static bool _dma_isaddrext(struct dma_info *di)
{
/* DMA64 supports full 32- or 64-bit operation. AE is always valid */
......@@ -868,14 +921,15 @@ _dma_rx_param_get(struct dma_info *di, u16 *rxoffset, u16 *rxbufsize)
*rxbufsize = (u16) di->rxbufsize;
}
/* !! rx entry routine
/*
* !! rx entry routine
* returns a pointer to the next frame received, or NULL if there are no more
* if DMA_CTRL_RXMULTI is defined, DMA scattering(multiple buffers) is supported
* with pkts chain
* if DMA_CTRL_RXMULTI is defined, DMA scattering(multiple buffers) is
* supported with pkts chain
* otherwise, it's treated as giant pkt and will be tossed.
* The DMA scattering starts with normal DMA header, followed by first buffer data.
* After it reaches the max size of buffer, the data continues in next DMA descriptor
* buffer WITHOUT DMA header
* The DMA scattering starts with normal DMA header, followed by first
* buffer data. After it reaches the max size of buffer, the data continues
* in next DMA descriptor buffer WITHOUT DMA header
*/
static void *_dma_rx(struct dma_info *di)
{
......@@ -935,10 +989,11 @@ static void *_dma_rx(struct dma_info *di)
return head;
}
/* post receive buffers
* return false is refill failed completely and ring is empty
* this will stall the rx dma and user might want to call rxfill again asap
* This unlikely happens on memory-rich NIC, but often on memory-constrained dongle
/*
* post receive buffers
* return false is refill failed completely and ring is empty this will stall
* the rx dma and user might want to call rxfill again asap. This unlikely
* happens on memory-rich NIC, but often on memory-constrained dongle
*/
static bool _dma_rxfill(struct dma_info *di)
{
......@@ -970,10 +1025,10 @@ static bool _dma_rxfill(struct dma_info *di)
extra_offset = di->rxextrahdrroom;
for (i = 0; i < n; i++) {
/* the di->rxbufsize doesn't include the extra headroom, we need to add it to the
size to be allocated
/*
* the di->rxbufsize doesn't include the extra headroom,
* we need to add it to the size to be allocated
*/
p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
if (p == NULL) {
......@@ -1339,7 +1394,8 @@ static bool dma64_alloc(struct dma_info *di, uint direction)
va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
&alloced, &di->txdpaorig);
if (va == NULL) {
DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd)"
" failed\n", di->name));
return false;
}
align = (1 << align_bits);
......@@ -1354,7 +1410,8 @@ static bool dma64_alloc(struct dma_info *di, uint direction)
va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
&alloced, &di->rxdpaorig);
if (va == NULL) {
DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd)"
" failed\n", di->name));
return false;
}
align = (1 << align_bits);
......@@ -1445,9 +1502,10 @@ static bool dma64_txsuspendedidle(struct dma_info *di)
return 0;
}
/* Useful when sending unframed data. This allows us to get a progress report from the DMA.
* We return a pointer to the beginning of the DATA buffer of the current descriptor.
* If DMA is idle, we return NULL.
/*
* Useful when sending unframed data. This allows us to get a progress report
* from the DMA. We return a pointer to the beginning of the DATA buffer of the
* current descriptor. If DMA is idle, we return NULL.
*/
static void *dma64_getpos(struct dma_info *di, bool direction)
{
......@@ -1481,8 +1539,8 @@ static void *dma64_getpos(struct dma_info *di, bool direction)
* Adds a DMA ring descriptor for the data pointed to by "buf".
* This is for DMA of a buffer of data and is unlike other dma TX functions
* that take a pointer to a "packet"
* Each call to this is results in a single descriptor being added for "len" bytes of
* data starting at "buf", it doesn't handle chained buffers.
 * Each call to this results in a single descriptor being added for "len"
* bytes of data starting at "buf", it doesn't handle chained buffers.
*/
static int
dma64_txunframed(struct dma_info *di, void *buf, uint len, bool commit)
......@@ -1533,9 +1591,11 @@ dma64_txunframed(struct dma_info *di, void *buf, uint len, bool commit)
return -1;
}
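
Because dma64_txunframed() posts exactly one descriptor per call and does not follow buffer chains, a caller with more data than one descriptor can carry has to split the buffer itself. A hedged sketch of such a split loop; post_one() and MAX_DESC_LEN are stand-ins, not driver symbols:

#include <stddef.h>

#define MAX_DESC_LEN 4096       /* assumed per-descriptor limit */

/* stand-in for a single-descriptor post; returns 0 on success */
static int post_one(const void *buf, size_t len)
{
        (void)buf;
        (void)len;
        return 0;
}

/* split an unframed buffer into per-descriptor chunks */
static int post_unframed(const unsigned char *buf, size_t len)
{
        while (len > 0) {
                size_t chunk = len > MAX_DESC_LEN ? MAX_DESC_LEN : len;

                if (post_one(buf, chunk) != 0)
                        return -1;
                buf += chunk;
                len -= chunk;
        }
        return 0;
}

int main(void)
{
        unsigned char data[10000];

        return post_unframed(data, sizeof(data));
}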
/* !! tx entry routine
/*
* !! tx entry routine
* WARNING: call must check the return value for error.
* the error(toss frames) could be fatal and cause many subsequent hard to debug problems
* the error(toss frames) could be fatal and cause many subsequent hard
* to debug problems
*/
static int dma64_txfast(struct dma_info *di, struct sk_buff *p0,
bool commit)
......@@ -1597,8 +1657,8 @@ static int dma64_txfast(struct dma_info *di, struct sk_buff *p0,
/* With a DMA segment list, Descriptor table is filled
* using the segment list instead of looping over
* buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
* end of segment list is reached.
* buffers in multi-chain DMA. Therefore, EOF for SGLIST
* is when end of segment list is reached.
*/
if ((!DMASGLIST_ENAB && next == NULL) ||
(DMASGLIST_ENAB && j == nsegs))
......@@ -1746,7 +1806,8 @@ static void *dma64_getnexttxp(struct dma_info *di, enum txd_range range)
return txp;
bogus:
DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n", start, end, di->txout, forceall));
DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d "
"force %d\n", start, end, di->txout, forceall));
return NULL;
}
......@@ -1888,7 +1949,7 @@ uint dma_addrwidth(struct si_pub *sih, void *dmaregs)
(sih->buscoretype == PCIE_CORE_ID)))
return DMADDRWIDTH_64;
}
/* DMA hardware not supported by this driver*/
/* DMA hardware not supported by this driver */
return DMADDRWIDTH_64;
}
......
......@@ -39,12 +39,12 @@ struct dma32diag { /* diag access */
/* dma registers per channel(xmt or rcv) */
struct dma64regs {
u32 control; /* enable, et al */
u32 ptr; /* last descriptor posted to chip */
u32 addrlow; /* descriptor ring base address low 32-bits (8K aligned) */
u32 addrhigh; /* descriptor ring base address bits 63:32 (8K aligned) */
u32 status0; /* current descriptor, xmt state */
u32 status1; /* active descriptor, xmt error */
u32 control; /* enable, et al */
u32 ptr; /* last descriptor posted to chip */
u32 addrlow; /* desc ring base address low 32-bits (8K aligned) */
u32 addrhigh; /* desc ring base address bits 63:32 (8K aligned) */
u32 status0; /* current descriptor, xmt state */
u32 status1; /* active descriptor, xmt error */
};
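
The reworded dma64regs comments note that the descriptor ring base is programmed as two 32-bit halves and must be 8 KB aligned. A small standalone sketch of splitting a 64-bit ring address into addrlow/addrhigh and checking that alignment (the address value is an example):

#include <stdint.h>
#include <stdio.h>

#define D64_RING_ALIGN 0x2000u  /* 8K alignment from the comment above */

int main(void)
{
        uint64_t ring_pa = 0x000000012345a000ULL;       /* example address */
        uint32_t addrlow = (uint32_t)(ring_pa & 0xffffffffu);
        uint32_t addrhigh = (uint32_t)(ring_pa >> 32);

        if (ring_pa & (D64_RING_ALIGN - 1))
                printf("ring base not 8K aligned\n");
        printf("addrlow=0x%08x addrhigh=0x%08x\n",
               (unsigned)addrlow, (unsigned)addrhigh);
        return 0;
}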
/* map/unmap direction */
......@@ -172,7 +172,8 @@ extern const struct di_fcn_s dma64proc;
#define dma_rxactive(di) (dma64proc.rxactive(di))
#define dma_txrotate(di) (dma64proc.txrotate(di))
#define dma_counterreset(di) (dma64proc.counterreset(di))
#define dma_ctrlflags(di, mask, flags) (dma64proc.ctrlflags((di), (mask), (flags)))
#define dma_ctrlflags(di, mask, flags) \
(dma64proc.ctrlflags((di), (mask), (flags)))
#define dma_txpending(di) (dma64proc.txpending(di))
#define dma_txcommitted(di) (dma64proc.txcommitted(di))
......
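
dma_ctrlflags() above takes a mask and a flags argument; such pairs usually implement a read-modify-write where only the masked bits are replaced. A generic illustration of that pattern, assuming those semantics; the flag names are hypothetical and the authoritative behaviour is whatever dma64proc.ctrlflags() implements:

#include <stdint.h>
#include <stdio.h>

#define FLAG_RXMULTI    0x1u    /* hypothetical flag bits for illustration */
#define FLAG_PARITY     0x2u

/* replace only the bits selected by mask */
static uint32_t update_ctrlflags(uint32_t cur, uint32_t mask, uint32_t flags)
{
        return (cur & ~mask) | (flags & mask);
}

int main(void)
{
        uint32_t cur = FLAG_PARITY;

        /* set RXMULTI without touching other bits */
        cur = update_ctrlflags(cur, FLAG_RXMULTI, FLAG_RXMULTI);
        /* clear PARITY */
        cur = update_ctrlflags(cur, FLAG_PARITY, 0);
        printf("ctrlflags=0x%x\n", (unsigned)cur);      /* prints 0x1 */
        return 0;
}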
......@@ -606,7 +606,10 @@ brcms_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT;
/* minstrel_ht initiates addBA on our behalf by calling ieee80211_start_tx_ba_session() */
/*
* minstrel_ht initiates addBA on our behalf by calling
* ieee80211_start_tx_ba_session()
*/
return 0;
}
......@@ -644,7 +647,10 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw,
tid);
return -EINVAL;
}
/* Future improvement: Use the starting sequence number provided ... */
/*
* Future improvement:
* Use the starting sequence number provided ...
*/
*ssn = 0;
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
......@@ -1019,8 +1025,9 @@ static struct ieee80211_supported_band brcms_band_5GHz_nphy = {
.bitrates = legacy_ratetable + 4,
.n_bitrates = ARRAY_SIZE(legacy_ratetable) - 4,
.ht_cap = {
/* use IEEE80211_HT_CAP_* from include/linux/ieee80211.h */
.cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT, /* No 40 mhz yet */
.cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40 |
IEEE80211_HT_CAP_40MHZ_INTOLERANT, /* No 40 mhz yet */
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
.ampdu_density = AMPDU_DEF_MPDU_DENSITY,
......@@ -1086,7 +1093,8 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
hw->queues = N_TX_QUEUES;
hw->max_rates = 2; /* Primary rate and 1 fallback rate */
hw->channel_change_time = 7 * 1000; /* channel change time is dependent on chip and band */
/* channel change time is dependent on chip and band */
hw->channel_change_time = 7 * 1000;
hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
hw->rate_control_algorithm = "minstrel_ht";
......@@ -1361,8 +1369,9 @@ static void brcms_free(struct brcms_info *wl)
}
/*
* unregister_netdev() calls get_stats() which may read chip registers
* so we cannot unmap the chip registers until after calling unregister_netdev() .
* unregister_netdev() calls get_stats() which may read chip
* registers so we cannot unmap the chip registers until
 * after calling unregister_netdev().
*/
if (wl->regsva && wl->bcm_bustype != SDIO_BUS &&
wl->bcm_bustype != JTAG_BUS)
......@@ -1632,7 +1641,8 @@ struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
return t;
}
/* BMAC_NOTE: Add timer adds only the kernel timer since it's going to be more accurate
/*
* adds only the kernel timer since it's going to be more accurate
* as well as it's easier to make it periodic
*
* precondition: perimeter lock has been acquired
......
......@@ -23,11 +23,6 @@
/* softmac ioctl definitions */
#define BRCMS_SET_SHORTSLOT_OVERRIDE 146
/* BMAC Note: High-only driver is no longer working in softirq context as it needs to block and
* sleep so perimeter lock has to be a semaphore instead of spinlock. This requires timers to be
* submitted to workqueue instead of being on kernel timer
*/
struct brcms_timer {
struct timer_list timer;
struct brcms_info *wl;
......@@ -56,8 +51,8 @@ struct brcms_firmware {
};
struct brcms_info {
struct brcms_pub *pub; /* pointer to public wlc state */
void *wlc; /* pointer to private common os-independent data */
struct brcms_pub *pub; /* pointer to public wlc state */
void *wlc; /* pointer to private common data */
u32 magic;
int irq;
......
......@@ -24,10 +24,14 @@
#define OTPS_GUP_MASK 0x00000f00
#define OTPS_GUP_SHIFT 8
#define OTPS_GUP_HW 0x00000100 /* h/w subregion is programmed */
#define OTPS_GUP_SW 0x00000200 /* s/w subregion is programmed */
#define OTPS_GUP_CI 0x00000400 /* chipid/pkgopt subregion is programmed */
#define OTPS_GUP_FUSE 0x00000800 /* fuse subregion is programmed */
/* h/w subregion is programmed */
#define OTPS_GUP_HW 0x00000100
/* s/w subregion is programmed */
#define OTPS_GUP_SW 0x00000200
/* chipid/pkgopt subregion is programmed */
#define OTPS_GUP_CI 0x00000400
/* fuse subregion is programmed */
#define OTPS_GUP_FUSE 0x00000800
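
The OTPS_GUP_* bits report which OTP subregions have been programmed. A standalone check of a status word against those bits (the status value is invented for the example):

#include <stdint.h>
#include <stdio.h>

#define OTPS_GUP_HW     0x00000100      /* h/w subregion is programmed */
#define OTPS_GUP_SW     0x00000200      /* s/w subregion is programmed */
#define OTPS_GUP_CI     0x00000400      /* chipid/pkgopt subregion programmed */
#define OTPS_GUP_FUSE   0x00000800      /* fuse subregion is programmed */

int main(void)
{
        uint32_t otpstatus = OTPS_GUP_HW | OTPS_GUP_SW; /* example value */

        if (otpstatus & OTPS_GUP_HW)
                printf("h/w subregion programmed\n");
        if (otpstatus & OTPS_GUP_SW)
                printf("s/w subregion programmed\n");
        if (!(otpstatus & OTPS_GUP_FUSE))
                printf("fuse subregion empty\n");
        return 0;
}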
/* Fields in otpprog in rev >= 21 */
#define OTPP_COL_MASK 0x000000ff
......@@ -195,8 +199,9 @@ static u16 ipxotp_read_bit(void *oh, struct chipcregs *cc, uint off)
return (int)st;
}
/* Calculate max HW/SW region byte size by subtracting fuse region and checksum size,
* osizew is oi->wsize (OTP size - GU size) in words
/*
* Calculate max HW/SW region byte size by subtracting fuse region
* and checksum size, osizew is oi->wsize (OTP size - GU size) in words
*/
static int ipxotp_max_rgnsz(struct si_pub *sih, int osizew)
{
......@@ -222,12 +227,18 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs *cc)
uint k;
u32 otpp, st;
/* record word offset of General Use Region for various chipcommon revs */
/*
* record word offset of General Use Region
* for various chipcommon revs
*/
if (oi->sih->ccrev == 21 || oi->sih->ccrev == 24
|| oi->sih->ccrev == 27) {
oi->otpgu_base = REVA4_OTPGU_BASE;
} else if (oi->sih->ccrev == 36) {
/* OTP size greater than equal to 2KB (128 words), otpgu_base is similar to rev23 */
/*
* OTP size greater than equal to 2KB (128 words),
* otpgu_base is similar to rev23
*/
if (oi->wsize >= 128)
oi->otpgu_base = REVB8_OTPGU_BASE;
else
......@@ -262,8 +273,9 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs *cc)
}
/*
* h/w region base and fuse region limit are fixed to the top and
* the bottom of the general use region. Everything else can be flexible.
* h/w region base and fuse region limit are fixed to
* the top and the bottom of the general use region.
* Everything else can be flexible.
*/
oi->hwbase = oi->otpgu_base + OTPGU_SROM_OFF;
oi->hwlim = oi->wsize;
......
......@@ -24,7 +24,8 @@
#define OTP_SW_RGN 2
#define OTP_CI_RGN 4
#define OTP_FUSE_RGN 8
#define OTP_ALL_RGN 0xf /* From h/w region to end of OTP including checksum */
/* From h/w region to end of OTP including checksum */
#define OTP_ALL_RGN 0xf
/* OTP Size */
#define OTP_SZ_MAX (6144/8) /* maximum bytes in one CIS */
......
......@@ -15,10 +15,11 @@
*/
/*
* This is "two-way" interface, acting as the SHIM layer between WL and PHY layer.
* WL driver can optinally call this translation layer to do some preprocessing, then reach PHY.
* On the PHY->WL driver direction, all calls go through this layer since PHY doesn't have the
* access to wlc_hw pointer.
* This is "two-way" interface, acting as the SHIM layer between driver
* and PHY layer. The driver can optionally call this translation layer
* to do some preprocessing, then reach PHY. On the PHY->driver direction,
* all calls go through this layer since PHY doesn't have access to the
* driver's brcms_hardware pointer.
*/
#include <linux/slab.h>
#include <net/mac80211.h>
......
......@@ -45,17 +45,17 @@
#define FRA_ERR_20MHZ 60
#define FRA_ERR_40MHZ 120
#define ANTSEL_NA 0 /* No boardlevel selection available */
#define ANTSEL_2x4 1 /* 2x4 boardlevel selection available */
#define ANTSEL_2x3 2 /* 2x3 CB2 boardlevel selection available */
#define ANTSEL_NA 0 /* No boardlevel selection available */
#define ANTSEL_2x4 1 /* 2x4 boardlevel selection available */
#define ANTSEL_2x3 2 /* 2x3 CB2 boardlevel selection available */
/* Rx Antenna diversity control values */
#define ANT_RX_DIV_FORCE_0 0 /* Use antenna 0 */
#define ANT_RX_DIV_FORCE_1 1 /* Use antenna 1 */
#define ANT_RX_DIV_START_1 2 /* Choose starting with 1 */
#define ANT_RX_DIV_START_0 3 /* Choose starting with 0 */
#define ANT_RX_DIV_ENABLE 3 /* APHY bbConfig Enable RX Diversity */
#define ANT_RX_DIV_DEF ANT_RX_DIV_START_0 /* default antdiv setting */
#define ANT_RX_DIV_FORCE_0 0 /* Use antenna 0 */
#define ANT_RX_DIV_FORCE_1 1 /* Use antenna 1 */
#define ANT_RX_DIV_START_1 2 /* Choose starting with 1 */
#define ANT_RX_DIV_START_0 3 /* Choose starting with 0 */
#define ANT_RX_DIV_ENABLE 3 /* APHY bbConfig Enable RX Diversity */
#define ANT_RX_DIV_DEF ANT_RX_DIV_START_0 /* default antdiv setting */
#define WL_ANT_RX_MAX 2 /* max 2 receive antennas */
#define WL_ANT_HT_RX_MAX 3 /* max 3 receive antennas/cores */
......@@ -77,26 +77,40 @@
#define WL_TX_POWER_RATES 101
#define WL_TX_POWER_CCK_FIRST 0
#define WL_TX_POWER_CCK_NUM 4
#define WL_TX_POWER_OFDM_FIRST 4 /* Index for first 20MHz OFDM SISO rate */
#define WL_TX_POWER_OFDM20_CDD_FIRST 12 /* Index for first 20MHz OFDM CDD rate */
#define WL_TX_POWER_OFDM40_SISO_FIRST 52 /* Index for first 40MHz OFDM SISO rate */
#define WL_TX_POWER_OFDM40_CDD_FIRST 60 /* Index for first 40MHz OFDM CDD rate */
/* Index for first 20MHz OFDM SISO rate */
#define WL_TX_POWER_OFDM_FIRST 4
/* Index for first 20MHz OFDM CDD rate */
#define WL_TX_POWER_OFDM20_CDD_FIRST 12
/* Index for first 40MHz OFDM SISO rate */
#define WL_TX_POWER_OFDM40_SISO_FIRST 52
/* Index for first 40MHz OFDM CDD rate */
#define WL_TX_POWER_OFDM40_CDD_FIRST 60
#define WL_TX_POWER_OFDM_NUM 8
#define WL_TX_POWER_MCS20_SISO_FIRST 20 /* Index for first 20MHz MCS SISO rate */
#define WL_TX_POWER_MCS20_CDD_FIRST 28 /* Index for first 20MHz MCS CDD rate */
#define WL_TX_POWER_MCS20_STBC_FIRST 36 /* Index for first 20MHz MCS STBC rate */
#define WL_TX_POWER_MCS20_SDM_FIRST 44 /* Index for first 20MHz MCS SDM rate */
#define WL_TX_POWER_MCS40_SISO_FIRST 68 /* Index for first 40MHz MCS SISO rate */
#define WL_TX_POWER_MCS40_CDD_FIRST 76 /* Index for first 40MHz MCS CDD rate */
#define WL_TX_POWER_MCS40_STBC_FIRST 84 /* Index for first 40MHz MCS STBC rate */
#define WL_TX_POWER_MCS40_SDM_FIRST 92 /* Index for first 40MHz MCS SDM rate */
/* Index for first 20MHz MCS SISO rate */
#define WL_TX_POWER_MCS20_SISO_FIRST 20
/* Index for first 20MHz MCS CDD rate */
#define WL_TX_POWER_MCS20_CDD_FIRST 28
/* Index for first 20MHz MCS STBC rate */
#define WL_TX_POWER_MCS20_STBC_FIRST 36
/* Index for first 20MHz MCS SDM rate */
#define WL_TX_POWER_MCS20_SDM_FIRST 44
/* Index for first 40MHz MCS SISO rate */
#define WL_TX_POWER_MCS40_SISO_FIRST 68
/* Index for first 40MHz MCS CDD rate */
#define WL_TX_POWER_MCS40_CDD_FIRST 76
/* Index for first 40MHz MCS STBC rate */
#define WL_TX_POWER_MCS40_STBC_FIRST 84
/* Index for first 40MHz MCS SDM rate */
#define WL_TX_POWER_MCS40_SDM_FIRST 92
#define WL_TX_POWER_MCS_1_STREAM_NUM 8
#define WL_TX_POWER_MCS_2_STREAM_NUM 8
#define WL_TX_POWER_MCS_32 100 /* Index for 40MHz rate MCS 32 */
/* Index for 40MHz rate MCS 32 */
#define WL_TX_POWER_MCS_32 100
#define WL_TX_POWER_MCS_32_NUM 1
/* sslpnphy specifics */
#define WL_TX_POWER_MCS20_SISO_FIRST_SSN 12 /* Index for first 20MHz MCS SISO rate */
/* Index for first 20MHz MCS SISO rate */
#define WL_TX_POWER_MCS20_SISO_FIRST_SSN 12
/* struct tx_power::flags bits */
#define WL_TX_POWER_F_ENABLED 1
......
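
The *_FIRST constants above partition a single flat table of WL_TX_POWER_RATES (101) per-rate power entries, so the entry for, e.g., the fourth 20 MHz MCS CDD rate lives at WL_TX_POWER_MCS20_CDD_FIRST + 3. A small sketch of that indexing (the stored value and its units are illustrative only):

#include <stdio.h>

#define WL_TX_POWER_RATES               101
#define WL_TX_POWER_MCS20_CDD_FIRST     28

int main(void)
{
        unsigned char txpwr[WL_TX_POWER_RATES] = { 0 }; /* example contents */
        int mcs = 3;
        int idx = WL_TX_POWER_MCS20_CDD_FIRST + mcs;

        txpwr[idx] = 40;        /* illustrative power value */
        printf("20MHz MCS%d CDD entry: index %d, value %u\n",
               mcs, idx, (unsigned)txpwr[idx]);
        return 0;
}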
......@@ -70,11 +70,13 @@
#define PMURES_BIT(bit) (1 << (bit))
/* PMU corerev and chip specific PLL controls.
* PMU<rev>_PLL<num>_XX where <rev> is PMU corerev and <num> is an arbitrary number
* to differentiate different PLLs controlled by the same PMU rev.
* PMU<rev>_PLL<num>_XX where <rev> is PMU corerev and <num> is an arbitrary
* number to differentiate different PLLs controlled by the same PMU rev.
*/
/* pllcontrol registers:
* ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>,
* p1div, p2div, _bypass_sdmod
*/
/* pllcontrol registers */
/* ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>, p1div, p2div, _bypass_sdmod */
#define PMU1_PLL0_PLLCTL0 0
#define PMU1_PLL0_PLLCTL1 1
#define PMU1_PLL0_PLLCTL2 2
......
......@@ -23,13 +23,15 @@
#include "types.h"
#define AMPDU_TX_BA_MAX_WSIZE 64 /* max Tx ba window size (in pdu) */
/* structure to store per-tid state for the ampdu initiator */
struct scb_ampdu_tid_ini {
u8 tx_in_transit; /* number of pending mpdus in transit in driver */
u8 tid; /* initiator tid for easy lookup */
u8 txretry[AMPDU_TX_BA_MAX_WSIZE]; /* tx retry count; indexed by seq modulo */
struct scb *scb; /* backptr for easy lookup */
u8 ba_wsize; /* negotiated ba window size (in pdu) */
u8 tx_in_transit; /* number of pending mpdus in transit in driver */
u8 tid; /* initiator tid for easy lookup */
/* tx retry count; indexed by seq modulo */
u8 txretry[AMPDU_TX_BA_MAX_WSIZE];
struct scb *scb; /* backptr for easy lookup */
u8 ba_wsize; /* negotiated ba window size (in pdu) */
};
#define AMPDU_MAX_SCB_TID NUMPRIO
......@@ -43,9 +45,10 @@ struct scb_ampdu {
u32 max_rx_ampdu_bytes; /* max ampdu rcv length; 8k, 16k, 32k, 64k */
struct pktq txq; /* sdu transmit queue pending aggregation */
/* This could easily be a ini[] pointer and we keep this info in wl itself instead
* of having mac80211 hold it for us. Also could be made dynamic per tid instead of
* static.
/*
* This could easily be a ini[] pointer and we keep this info in wl
* itself instead of having mac80211 hold it for us. Also could be made
* dynamic per tid instead of static.
*/
/* initiator info - per tid (NUMPRIO): */
struct scb_ampdu_tid_ini ini[AMPDU_MAX_SCB_TID];
......@@ -56,18 +59,18 @@ struct scb_ampdu {
/* station control block - one per remote MAC address */
struct scb {
u32 magic;
u32 flags; /* various bit flags as defined below */
u32 flags2; /* various bit flags2 as defined below */
u8 state; /* current state bitfield of auth/assoc process */
u32 flags; /* various bit flags as defined below */
u32 flags2; /* various bit flags2 as defined below */
u8 state; /* current state bitfield of auth/assoc process */
u8 ea[ETH_ALEN]; /* station address */
void *fragbuf[NUMPRIO]; /* defragmentation buffer per prio */
uint fragresid[NUMPRIO]; /* #bytes unused in frag buffer per prio */
uint fragresid[NUMPRIO];/* #bytes unused in frag buffer per prio */
u16 seqctl[NUMPRIO]; /* seqctl of last received frame (for dups) */
u16 seqctl_nonqos; /* seqctl of last received frame (for dups) for
* non-QoS data and management
*/
u16 seqnum[NUMPRIO]; /* WME: driver maintained sw seqnum per priority */
/* seqctl of last received frame (for dups) for non-QoS data and
* management */
u16 seqctl_nonqos;
u16 seqnum[NUMPRIO];/* WME: driver maintained sw seqnum per priority */
struct scb_ampdu scb_ampdu; /* AMPDU state including per tid info */
};
......
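
The txretry[] comment says the array is indexed by the sequence number modulo the BA window size. A standalone illustration of that wrap-around indexing:

#include <stdio.h>

#define AMPDU_TX_BA_MAX_WSIZE 64        /* max tx BA window size (in pdu) */

int main(void)
{
        unsigned char txretry[AMPDU_TX_BA_MAX_WSIZE] = { 0 };
        unsigned int seq = 4095;        /* 12-bit 802.11 sequence number */
        unsigned int idx = seq % AMPDU_TX_BA_MAX_WSIZE;

        txretry[idx]++;         /* count a retry for this sequence number */
        printf("seq %u -> slot %u, retries %u\n",
               seq, idx, (unsigned)txretry[idx]);
        return 0;
}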