Commit 941742f4 authored by David S. Miller
Parents ac7ba51c 5879ae5f
...@@ -32,8 +32,8 @@ Example: ...@@ -32,8 +32,8 @@ Example:
touchscreen-fuzz-x = <4>; touchscreen-fuzz-x = <4>;
touchscreen-fuzz-y = <7>; touchscreen-fuzz-y = <7>;
touchscreen-fuzz-pressure = <2>; touchscreen-fuzz-pressure = <2>;
touchscreen-max-x = <4096>; touchscreen-size-x = <4096>;
touchscreen-max-y = <4096>; touchscreen-size-y = <4096>;
touchscreen-max-pressure = <2048>; touchscreen-max-pressure = <2048>;
ti,x-plate-ohms = <280>; ti,x-plate-ohms = <280>;
......
...@@ -15,10 +15,8 @@ Optional properties: ...@@ -15,10 +15,8 @@ Optional properties:
- phys: phandle + phy specifier pair - phys: phandle + phy specifier pair
- phy-names: must be "usb" - phy-names: must be "usb"
- dmas: Must contain a list of references to DMA specifiers. - dmas: Must contain a list of references to DMA specifiers.
- dma-names : Must contain a list of DMA names: - dma-names : named "ch%d", where %d is the channel number ranging from zero
- tx0 ... tx<n> to the number of channels (DnFIFOs) minus one.
- rx0 ... rx<n>
- This <n> means DnFIFO in USBHS module.
Example: Example:
usbhs: usb@e6590000 { usbhs: usb@e6590000 {
......
...@@ -51,9 +51,9 @@ trivial patch so apply some common sense. ...@@ -51,9 +51,9 @@ trivial patch so apply some common sense.
or does something very odd once a month document it. or does something very odd once a month document it.
PLEASE remember that submissions must be made under the terms PLEASE remember that submissions must be made under the terms
of the OSDL certificate of contribution and should include a of the Linux Foundation certificate of contribution and should
Signed-off-by: line. The current version of this "Developer's include a Signed-off-by: line. The current version of this
Certificate of Origin" (DCO) is listed in the file "Developer's Certificate of Origin" (DCO) is listed in the file
Documentation/SubmittingPatches. Documentation/SubmittingPatches.
6. Make sure you have the right to send any changes you make. If you 6. Make sure you have the right to send any changes you make. If you
...@@ -7587,6 +7587,7 @@ F: drivers/pci/host/pci-exynos.c ...@@ -7587,6 +7587,7 @@ F: drivers/pci/host/pci-exynos.c
PCI DRIVER FOR SYNOPSIS DESIGNWARE PCI DRIVER FOR SYNOPSIS DESIGNWARE
M: Jingoo Han <jingoohan1@gmail.com> M: Jingoo Han <jingoohan1@gmail.com>
M: Pratyush Anand <pratyush.anand@gmail.com>
L: linux-pci@vger.kernel.org L: linux-pci@vger.kernel.org
S: Maintained S: Maintained
F: drivers/pci/host/*designware* F: drivers/pci/host/*designware*
...@@ -7600,8 +7601,9 @@ F: Documentation/devicetree/bindings/pci/host-generic-pci.txt ...@@ -7600,8 +7601,9 @@ F: Documentation/devicetree/bindings/pci/host-generic-pci.txt
F: drivers/pci/host/pci-host-generic.c F: drivers/pci/host/pci-host-generic.c
PCIE DRIVER FOR ST SPEAR13XX PCIE DRIVER FOR ST SPEAR13XX
M: Pratyush Anand <pratyush.anand@gmail.com>
L: linux-pci@vger.kernel.org L: linux-pci@vger.kernel.org
S: Orphan S: Maintained
F: drivers/pci/host/*spear* F: drivers/pci/host/*spear*
PCMCIA SUBSYSTEM PCMCIA SUBSYSTEM
......
VERSION = 4 VERSION = 4
PATCHLEVEL = 1 PATCHLEVEL = 1
SUBLEVEL = 0 SUBLEVEL = 0
EXTRAVERSION = -rc6 EXTRAVERSION = -rc7
NAME = Hurr durr I'ma sheep NAME = Hurr durr I'ma sheep
# *DOCUMENTATION* # *DOCUMENTATION*
......
...@@ -223,6 +223,25 @@ ...@@ -223,6 +223,25 @@
/include/ "tps65217.dtsi" /include/ "tps65217.dtsi"
&tps { &tps {
/*
* Configure pmic to enter OFF-state instead of SLEEP-state ("RTC-only
* mode") at poweroff. Most BeagleBone versions do not support RTC-only
* mode and risk hardware damage if this mode is entered.
*
* For details, see linux-omap mailing list May 2015 thread
* [PATCH] ARM: dts: am335x-bone* enable pmic-shutdown-controller
* In particular, messages:
* http://www.spinics.net/lists/linux-omap/msg118585.html
* http://www.spinics.net/lists/linux-omap/msg118615.html
*
* You can override this later with
* &tps { /delete-property/ ti,pmic-shutdown-controller; }
* if you want to use RTC-only mode and made sure you are not affected
* by the hardware problems. (Tip: double-check by performing a current
* measurement after shutdown: it should be less than 1 mA.)
*/
ti,pmic-shutdown-controller;
regulators { regulators {
dcdc1_reg: regulator@0 { dcdc1_reg: regulator@0 {
regulator-name = "vdds_dpr"; regulator-name = "vdds_dpr";
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
#clock-cells = <0>; #clock-cells = <0>;
compatible = "ti,am35xx-gate-clock"; compatible = "ti,am35xx-gate-clock";
clocks = <&ipss_ick>; clocks = <&ipss_ick>;
reg = <0x059c>; reg = <0x032c>;
ti,bit-shift = <1>; ti,bit-shift = <1>;
}; };
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
#clock-cells = <0>; #clock-cells = <0>;
compatible = "ti,gate-clock"; compatible = "ti,gate-clock";
clocks = <&rmii_ck>; clocks = <&rmii_ck>;
reg = <0x059c>; reg = <0x032c>;
ti,bit-shift = <9>; ti,bit-shift = <9>;
}; };
...@@ -28,7 +28,7 @@ ...@@ -28,7 +28,7 @@
#clock-cells = <0>; #clock-cells = <0>;
compatible = "ti,am35xx-gate-clock"; compatible = "ti,am35xx-gate-clock";
clocks = <&ipss_ick>; clocks = <&ipss_ick>;
reg = <0x059c>; reg = <0x032c>;
ti,bit-shift = <2>; ti,bit-shift = <2>;
}; };
...@@ -36,7 +36,7 @@ ...@@ -36,7 +36,7 @@
#clock-cells = <0>; #clock-cells = <0>;
compatible = "ti,gate-clock"; compatible = "ti,gate-clock";
clocks = <&pclk_ck>; clocks = <&pclk_ck>;
reg = <0x059c>; reg = <0x032c>;
ti,bit-shift = <10>; ti,bit-shift = <10>;
}; };
...@@ -44,7 +44,7 @@ ...@@ -44,7 +44,7 @@
#clock-cells = <0>; #clock-cells = <0>;
compatible = "ti,am35xx-gate-clock"; compatible = "ti,am35xx-gate-clock";
clocks = <&ipss_ick>; clocks = <&ipss_ick>;
reg = <0x059c>; reg = <0x032c>;
ti,bit-shift = <0>; ti,bit-shift = <0>;
}; };
...@@ -52,7 +52,7 @@ ...@@ -52,7 +52,7 @@
#clock-cells = <0>; #clock-cells = <0>;
compatible = "ti,gate-clock"; compatible = "ti,gate-clock";
clocks = <&sys_ck>; clocks = <&sys_ck>;
reg = <0x059c>; reg = <0x032c>;
ti,bit-shift = <8>; ti,bit-shift = <8>;
}; };
...@@ -60,7 +60,7 @@ ...@@ -60,7 +60,7 @@
#clock-cells = <0>; #clock-cells = <0>;
compatible = "ti,am35xx-gate-clock"; compatible = "ti,am35xx-gate-clock";
clocks = <&sys_ck>; clocks = <&sys_ck>;
reg = <0x059c>; reg = <0x032c>;
ti,bit-shift = <3>; ti,bit-shift = <3>;
}; };
}; };
......
...@@ -95,6 +95,11 @@ ...@@ -95,6 +95,11 @@
internal-regs { internal-regs {
rtc@10300 {
/* No crystal connected to the internal RTC */
status = "disabled";
};
/* J10: VCC, NC, RX, NC, TX, GND */ /* J10: VCC, NC, RX, NC, TX, GND */
serial@12000 { serial@12000 {
status = "okay"; status = "okay";
......
...@@ -382,7 +382,7 @@ ...@@ -382,7 +382,7 @@
ti,hwmods = "usb_otg_hs"; ti,hwmods = "usb_otg_hs";
usb0: usb@47401000 { usb0: usb@47401000 {
compatible = "ti,musb-am33xx"; compatible = "ti,musb-dm816";
reg = <0x47401400 0x400 reg = <0x47401400 0x400
0x47401000 0x200>; 0x47401000 0x200>;
reg-names = "mc", "control"; reg-names = "mc", "control";
...@@ -422,7 +422,7 @@ ...@@ -422,7 +422,7 @@
}; };
usb1: usb@47401800 { usb1: usb@47401800 {
compatible = "ti,musb-am33xx"; compatible = "ti,musb-dm816";
reg = <0x47401c00 0x400 reg = <0x47401c00 0x400
0x47401800 0x200>; 0x47401800 0x200>;
reg-names = "mc", "control"; reg-names = "mc", "control";
......
...@@ -832,8 +832,8 @@ ...@@ -832,8 +832,8 @@
touchscreen-fuzz-x = <4>; touchscreen-fuzz-x = <4>;
touchscreen-fuzz-y = <7>; touchscreen-fuzz-y = <7>;
touchscreen-fuzz-pressure = <2>; touchscreen-fuzz-pressure = <2>;
touchscreen-max-x = <4096>; touchscreen-size-x = <4096>;
touchscreen-max-y = <4096>; touchscreen-size-y = <4096>;
touchscreen-max-pressure = <2048>; touchscreen-max-pressure = <2048>;
ti,x-plate-ohms = <280>; ti,x-plate-ohms = <280>;
......
...@@ -87,8 +87,8 @@ static unsigned int exynos_pmu_spare3; ...@@ -87,8 +87,8 @@ static unsigned int exynos_pmu_spare3;
static u32 exynos_irqwake_intmask = 0xffffffff; static u32 exynos_irqwake_intmask = 0xffffffff;
static const struct exynos_wkup_irq exynos3250_wkup_irq[] = { static const struct exynos_wkup_irq exynos3250_wkup_irq[] = {
{ 105, BIT(1) }, /* RTC alarm */ { 73, BIT(1) }, /* RTC alarm */
{ 106, BIT(2) }, /* RTC tick */ { 74, BIT(2) }, /* RTC tick */
{ /* sentinel */ }, { /* sentinel */ },
}; };
......
...@@ -203,23 +203,8 @@ save_context_wfi: ...@@ -203,23 +203,8 @@ save_context_wfi:
*/ */
ldr r1, kernel_flush ldr r1, kernel_flush
blx r1 blx r1
/*
* The kernel doesn't interwork: v7_flush_dcache_all in particluar will
* always return in Thumb state when CONFIG_THUMB2_KERNEL is enabled.
* This sequence switches back to ARM. Note that .align may insert a
* nop: bx pc needs to be word-aligned in order to work.
*/
THUMB( .thumb )
THUMB( .align )
THUMB( bx pc )
THUMB( nop )
.arm
b omap3_do_wfi b omap3_do_wfi
ENDPROC(omap34xx_cpu_suspend)
/*
* Local variables
*/
omap3_do_wfi_sram_addr: omap3_do_wfi_sram_addr:
.word omap3_do_wfi_sram .word omap3_do_wfi_sram
kernel_flush: kernel_flush:
...@@ -364,10 +349,7 @@ exit_nonoff_modes: ...@@ -364,10 +349,7 @@ exit_nonoff_modes:
* =================================== * ===================================
*/ */
ldmfd sp!, {r4 - r11, pc} @ restore regs and return ldmfd sp!, {r4 - r11, pc} @ restore regs and return
ENDPROC(omap3_do_wfi)
/*
* Local variables
*/
sdrc_power: sdrc_power:
.word SDRC_POWER_V .word SDRC_POWER_V
cm_idlest1_core: cm_idlest1_core:
......
...@@ -16,7 +16,8 @@ ...@@ -16,7 +16,8 @@
#include "mt8173.dtsi" #include "mt8173.dtsi"
/ { / {
model = "mediatek,mt8173-evb"; model = "MediaTek MT8173 evaluation board";
compatible = "mediatek,mt8173-evb", "mediatek,mt8173";
aliases { aliases {
serial0 = &uart0; serial0 = &uart0;
......
...@@ -127,7 +127,7 @@ int smp_num_siblings = 1; ...@@ -127,7 +127,7 @@ int smp_num_siblings = 1;
volatile int ia64_cpu_to_sapicid[NR_CPUS]; volatile int ia64_cpu_to_sapicid[NR_CPUS];
EXPORT_SYMBOL(ia64_cpu_to_sapicid); EXPORT_SYMBOL(ia64_cpu_to_sapicid);
static volatile cpumask_t cpu_callin_map; static cpumask_t cpu_callin_map;
struct smp_boot_data smp_boot_data __initdata; struct smp_boot_data smp_boot_data __initdata;
...@@ -477,6 +477,7 @@ do_boot_cpu (int sapicid, int cpu, struct task_struct *idle) ...@@ -477,6 +477,7 @@ do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
for (timeout = 0; timeout < 100000; timeout++) { for (timeout = 0; timeout < 100000; timeout++) {
if (cpumask_test_cpu(cpu, &cpu_callin_map)) if (cpumask_test_cpu(cpu, &cpu_callin_map))
break; /* It has booted */ break; /* It has booted */
barrier(); /* Make sure we re-read cpu_callin_map */
udelay(100); udelay(100);
} }
Dprintk("\n"); Dprintk("\n");
......
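The hunk above drops `volatile` from cpu_callin_map and instead adds barrier() inside the polling loop, so the compiler cannot cache the bitmap value across iterations. A minimal user-space sketch of the same pattern (barrier() spelled out as a compiler barrier; wait_for_flag() and the flag variable are illustrative names, not kernel code):

    /* Compiler barrier: forces memory operands to be re-read after this point. */
    #define barrier() __asm__ __volatile__("" ::: "memory")

    static int wait_for_flag(const int *flag, int tries)
    {
            while (tries--) {
                    if (*flag)
                            return 1;       /* the other CPU has checked in */
                    barrier();              /* re-read *flag on the next iteration */
                    /* the kernel additionally does udelay(100) here */
            }
            return 0;                       /* timed out */
    }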
...@@ -225,7 +225,7 @@ void __init plat_time_init(void) ...@@ -225,7 +225,7 @@ void __init plat_time_init(void)
ddr_clk_rate = ath79_get_sys_clk_rate("ddr"); ddr_clk_rate = ath79_get_sys_clk_rate("ddr");
ref_clk_rate = ath79_get_sys_clk_rate("ref"); ref_clk_rate = ath79_get_sys_clk_rate("ref");
pr_info("Clocks: CPU:%lu.%03luMHz, DDR:%lu.%03luMHz, AHB:%lu.%03luMHz, Ref:%lu.%03luMHz", pr_info("Clocks: CPU:%lu.%03luMHz, DDR:%lu.%03luMHz, AHB:%lu.%03luMHz, Ref:%lu.%03luMHz\n",
cpu_clk_rate / 1000000, (cpu_clk_rate / 1000) % 1000, cpu_clk_rate / 1000000, (cpu_clk_rate / 1000) % 1000,
ddr_clk_rate / 1000000, (ddr_clk_rate / 1000) % 1000, ddr_clk_rate / 1000000, (ddr_clk_rate / 1000) % 1000,
ahb_clk_rate / 1000000, (ahb_clk_rate / 1000) % 1000, ahb_clk_rate / 1000000, (ahb_clk_rate / 1000) % 1000,
......
...@@ -74,13 +74,12 @@ static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_mips *c) ...@@ -74,13 +74,12 @@ static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_mips *c)
{ {
unsigned long sr, mask, fcsr, fcsr0, fcsr1; unsigned long sr, mask, fcsr, fcsr0, fcsr1;
fcsr = c->fpu_csr31;
mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM; mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM;
sr = read_c0_status(); sr = read_c0_status();
__enable_fpu(FPU_AS_IS); __enable_fpu(FPU_AS_IS);
fcsr = read_32bit_cp1_register(CP1_STATUS);
fcsr0 = fcsr & mask; fcsr0 = fcsr & mask;
write_32bit_cp1_register(CP1_STATUS, fcsr0); write_32bit_cp1_register(CP1_STATUS, fcsr0);
fcsr0 = read_32bit_cp1_register(CP1_STATUS); fcsr0 = read_32bit_cp1_register(CP1_STATUS);
......
...@@ -109,7 +109,7 @@ void __init init_IRQ(void) ...@@ -109,7 +109,7 @@ void __init init_IRQ(void)
#endif #endif
} }
#ifdef DEBUG_STACKOVERFLOW #ifdef CONFIG_DEBUG_STACKOVERFLOW
static inline void check_stack_overflow(void) static inline void check_stack_overflow(void)
{ {
unsigned long sp; unsigned long sp;
......
...@@ -2409,7 +2409,7 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, ...@@ -2409,7 +2409,7 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
if (vcpu->mmio_needed == 2) if (vcpu->mmio_needed == 2)
*gpr = *(int16_t *) run->mmio.data; *gpr = *(int16_t *) run->mmio.data;
else else
*gpr = *(int16_t *) run->mmio.data; *gpr = *(uint16_t *)run->mmio.data;
break; break;
case 1: case 1:
......
...@@ -272,7 +272,7 @@ void loongson3_ipi_interrupt(struct pt_regs *regs) ...@@ -272,7 +272,7 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
if (action & SMP_ASK_C0COUNT) { if (action & SMP_ASK_C0COUNT) {
BUG_ON(cpu != 0); BUG_ON(cpu != 0);
c0count = read_c0_count(); c0count = read_c0_count();
for (i = 1; i < loongson_sysconf.nr_cpus; i++) for (i = 1; i < num_possible_cpus(); i++)
per_cpu(core0_c0count, i) = c0count; per_cpu(core0_c0count, i) = c0count;
} }
} }
......
...@@ -1372,7 +1372,7 @@ static int probe_scache(void) ...@@ -1372,7 +1372,7 @@ static int probe_scache(void)
scache_size = addr; scache_size = addr;
c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22); c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
c->scache.ways = 1; c->scache.ways = 1;
c->dcache.waybit = 0; /* does not matter */ c->scache.waybit = 0; /* does not matter */
return 1; return 1;
} }
......
...@@ -681,11 +681,7 @@ static unsigned int get_stack_depth(struct jit_ctx *ctx) ...@@ -681,11 +681,7 @@ static unsigned int get_stack_depth(struct jit_ctx *ctx)
sp_off += config_enabled(CONFIG_64BIT) ? sp_off += config_enabled(CONFIG_64BIT) ?
(ARGS_USED_BY_JIT + 1) * RSIZE : RSIZE; (ARGS_USED_BY_JIT + 1) * RSIZE : RSIZE;
/* return sp_off;
* Subtract the bytes for the last registers since we only care about
* the location on the stack pointer.
*/
return sp_off - RSIZE;
} }
static void build_prologue(struct jit_ctx *ctx) static void build_prologue(struct jit_ctx *ctx)
......
...@@ -41,7 +41,7 @@ static irqreturn_t ill_acc_irq_handler(int irq, void *_priv) ...@@ -41,7 +41,7 @@ static irqreturn_t ill_acc_irq_handler(int irq, void *_priv)
addr, (type >> ILL_ACC_OFF_S) & ILL_ACC_OFF_M, addr, (type >> ILL_ACC_OFF_S) & ILL_ACC_OFF_M,
type & ILL_ACC_LEN_M); type & ILL_ACC_LEN_M);
rt_memc_w32(REG_ILL_ACC_TYPE, REG_ILL_ACC_TYPE); rt_memc_w32(ILL_INT_STATUS, REG_ILL_ACC_TYPE);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
......
...@@ -48,7 +48,9 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; ...@@ -48,7 +48,9 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
* We get 160 bytes stack space from calling function, but only use * We get 160 bytes stack space from calling function, but only use
* 11 * 8 byte (old backchain + r15 - r6) for storing registers. * 11 * 8 byte (old backchain + r15 - r6) for storing registers.
*/ */
#define STK_OFF (MAX_BPF_STACK + 8 + 4 + 4 + (160 - 11 * 8)) #define STK_SPACE (MAX_BPF_STACK + 8 + 4 + 4 + 160)
#define STK_160_UNUSED (160 - 11 * 8)
#define STK_OFF (STK_SPACE - STK_160_UNUSED)
#define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */ #define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */
#define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */ #define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */
......
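Spelled out, the new macros compute the same offset as before, just split into named pieces. A standalone sketch of the arithmetic (MAX_BPF_STACK is assumed to be 512 here, its usual value in kernels of this era):

    #include <stdio.h>

    #define MAX_BPF_STACK  512                              /* assumed value */
    #define STK_SPACE      (MAX_BPF_STACK + 8 + 4 + 4 + 160)
    #define STK_160_UNUSED (160 - 11 * 8)                   /* 72 unused bytes of the 160-byte save area */
    #define STK_OFF        (STK_SPACE - STK_160_UNUSED)

    int main(void)
    {
            /* prints: STK_SPACE=688 STK_160_UNUSED=72 STK_OFF=616 */
            printf("STK_SPACE=%d STK_160_UNUSED=%d STK_OFF=%d\n",
                   STK_SPACE, STK_160_UNUSED, STK_OFF);
            return 0;
    }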
...@@ -384,13 +384,16 @@ static void bpf_jit_prologue(struct bpf_jit *jit) ...@@ -384,13 +384,16 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
} }
/* Setup stack and backchain */ /* Setup stack and backchain */
if (jit->seen & SEEN_STACK) { if (jit->seen & SEEN_STACK) {
/* lgr %bfp,%r15 (BPF frame pointer) */ if (jit->seen & SEEN_FUNC)
EMIT4(0xb9040000, BPF_REG_FP, REG_15); /* lgr %w1,%r15 (backchain) */
EMIT4(0xb9040000, REG_W1, REG_15);
/* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
/* aghi %r15,-STK_OFF */ /* aghi %r15,-STK_OFF */
EMIT4_IMM(0xa70b0000, REG_15, -STK_OFF); EMIT4_IMM(0xa70b0000, REG_15, -STK_OFF);
if (jit->seen & SEEN_FUNC) if (jit->seen & SEEN_FUNC)
/* stg %bfp,152(%r15) (backchain) */ /* stg %w1,152(%r15) (backchain) */
EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_FP, REG_0, EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
REG_15, 152); REG_15, 152);
} }
/* /*
......
...@@ -2,15 +2,14 @@ ...@@ -2,15 +2,14 @@
#define BOOT_COMPRESSED_MISC_H #define BOOT_COMPRESSED_MISC_H
/* /*
* we have to be careful, because no indirections are allowed here, and * Special hack: we have to be careful, because no indirections are allowed here,
* paravirt_ops is a kind of one. As it will only run in baremetal anyway, * and paravirt_ops is a kind of one. As it will only run in baremetal anyway,
* we just keep it from happening * we just keep it from happening. (This list needs to be extended when new
* paravirt and debugging variants are added.)
*/ */
#undef CONFIG_PARAVIRT #undef CONFIG_PARAVIRT
#undef CONFIG_PARAVIRT_SPINLOCKS
#undef CONFIG_KASAN #undef CONFIG_KASAN
#ifdef CONFIG_X86_32
#define _ASM_X86_DESC_H 1
#endif
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/screen_info.h> #include <linux/screen_info.h>
......
...@@ -107,7 +107,7 @@ static inline unsigned long regs_return_value(struct pt_regs *regs) ...@@ -107,7 +107,7 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
static inline int user_mode(struct pt_regs *regs) static inline int user_mode(struct pt_regs *regs)
{ {
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL; return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL;
#else #else
return !!(regs->cs & 3); return !!(regs->cs & 3);
#endif #endif
......
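The one-line change above makes user_mode() also report true for VM86 tasks: the X86_VM_MASK bit in EFLAGS dominates the comparison even when the CS RPL is 0. A self-contained sketch of that predicate (the constant values are the usual x86 ones and are assumptions here, not taken from the diff):

    #define SEGMENT_RPL_MASK 0x3UL          /* requester privilege level bits of %cs */
    #define USER_RPL         0x3UL          /* ring 3 */
    #define X86_VM_MASK      0x00020000UL   /* EFLAGS.VM: virtual-8086 mode */

    static int user_mode_32(unsigned long cs, unsigned long flags)
    {
            /* ring-3 code, or any code running in VM86 mode, counts as user mode */
            return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
    }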
...@@ -231,11 +231,21 @@ ...@@ -231,11 +231,21 @@
#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES* 8) #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES* 8)
#ifdef __KERNEL__ #ifdef __KERNEL__
/*
* early_idt_handler_array is an array of entry points referenced in the
* early IDT. For simplicity, it's a real array with one entry point
* every nine bytes. That leaves room for an optional 'push $0' if the
* vector has no error code (two bytes), a 'push $vector_number' (two
* bytes), and a jump to the common entry code (up to five bytes).
*/
#define EARLY_IDT_HANDLER_SIZE 9
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5]; extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
#ifdef CONFIG_TRACING #ifdef CONFIG_TRACING
# define trace_early_idt_handlers early_idt_handlers # define trace_early_idt_handler_array early_idt_handler_array
#endif #endif
/* /*
......
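Per the comment above, each early IDT stub occupies a fixed 9-byte slot, so the entry point for vector n is simply early_idt_handler_array + n * EARLY_IDT_HANDLER_SIZE. A sketch of that addressing (array bounds assume NUM_EXCEPTION_VECTORS == 32, the usual x86 value):

    #define EARLY_IDT_HANDLER_SIZE 9        /* 2 ("push $0") + 2 ("push $vector") + up to 5 ("jmp") */
    #define NUM_EXCEPTION_VECTORS  32       /* assumed value */

    extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];

    /* address installed into IDT entry 'vector' during early boot */
    static const void *early_idt_stub(unsigned int vector)
    {
            return early_idt_handler_array[vector];
    }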
...@@ -190,6 +190,7 @@ static bool check_hw_exists(void) ...@@ -190,6 +190,7 @@ static bool check_hw_exists(void)
u64 val, val_fail, val_new= ~0; u64 val, val_fail, val_new= ~0;
int i, reg, reg_fail, ret = 0; int i, reg, reg_fail, ret = 0;
int bios_fail = 0; int bios_fail = 0;
int reg_safe = -1;
/* /*
* Check to see if the BIOS enabled any of the counters, if so * Check to see if the BIOS enabled any of the counters, if so
...@@ -204,6 +205,8 @@ static bool check_hw_exists(void) ...@@ -204,6 +205,8 @@ static bool check_hw_exists(void)
bios_fail = 1; bios_fail = 1;
val_fail = val; val_fail = val;
reg_fail = reg; reg_fail = reg;
} else {
reg_safe = i;
} }
} }
...@@ -221,12 +224,23 @@ static bool check_hw_exists(void) ...@@ -221,12 +224,23 @@ static bool check_hw_exists(void)
} }
} }
/*
* If all the counters are enabled, the below test will always
* fail. The tools will also become useless in this scenario.
* Just fail and disable the hardware counters.
*/
if (reg_safe == -1) {
reg = reg_safe;
goto msr_fail;
}
/* /*
* Read the current value, change it and read it back to see if it * Read the current value, change it and read it back to see if it
* matches, this is needed to detect certain hardware emulators * matches, this is needed to detect certain hardware emulators
* (qemu/kvm) that don't trap on the MSR access and always return 0s. * (qemu/kvm) that don't trap on the MSR access and always return 0s.
*/ */
reg = x86_pmu_event_addr(0); reg = x86_pmu_event_addr(reg_safe);
if (rdmsrl_safe(reg, &val)) if (rdmsrl_safe(reg, &val))
goto msr_fail; goto msr_fail;
val ^= 0xffffUL; val ^= 0xffffUL;
...@@ -611,6 +625,7 @@ struct sched_state { ...@@ -611,6 +625,7 @@ struct sched_state {
int event; /* event index */ int event; /* event index */
int counter; /* counter index */ int counter; /* counter index */
int unassigned; /* number of events to be assigned left */ int unassigned; /* number of events to be assigned left */
int nr_gp; /* number of GP counters used */
unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
}; };
...@@ -620,27 +635,29 @@ struct sched_state { ...@@ -620,27 +635,29 @@ struct sched_state {
struct perf_sched { struct perf_sched {
int max_weight; int max_weight;
int max_events; int max_events;
struct perf_event **events; int max_gp;
struct sched_state state;
int saved_states; int saved_states;
struct event_constraint **constraints;
struct sched_state state;
struct sched_state saved[SCHED_STATES_MAX]; struct sched_state saved[SCHED_STATES_MAX];
}; };
/* /*
* Initialize interator that runs through all events and counters. * Initialize interator that runs through all events and counters.
*/ */
static void perf_sched_init(struct perf_sched *sched, struct perf_event **events, static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
int num, int wmin, int wmax) int num, int wmin, int wmax, int gpmax)
{ {
int idx; int idx;
memset(sched, 0, sizeof(*sched)); memset(sched, 0, sizeof(*sched));
sched->max_events = num; sched->max_events = num;
sched->max_weight = wmax; sched->max_weight = wmax;
sched->events = events; sched->max_gp = gpmax;
sched->constraints = constraints;
for (idx = 0; idx < num; idx++) { for (idx = 0; idx < num; idx++) {
if (events[idx]->hw.constraint->weight == wmin) if (constraints[idx]->weight == wmin)
break; break;
} }
...@@ -687,7 +704,7 @@ static bool __perf_sched_find_counter(struct perf_sched *sched) ...@@ -687,7 +704,7 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
if (sched->state.event >= sched->max_events) if (sched->state.event >= sched->max_events)
return false; return false;
c = sched->events[sched->state.event]->hw.constraint; c = sched->constraints[sched->state.event];
/* Prefer fixed purpose counters */ /* Prefer fixed purpose counters */
if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) { if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
idx = INTEL_PMC_IDX_FIXED; idx = INTEL_PMC_IDX_FIXED;
...@@ -696,11 +713,16 @@ static bool __perf_sched_find_counter(struct perf_sched *sched) ...@@ -696,11 +713,16 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
goto done; goto done;
} }
} }
/* Grab the first unused counter starting with idx */ /* Grab the first unused counter starting with idx */
idx = sched->state.counter; idx = sched->state.counter;
for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) { for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
if (!__test_and_set_bit(idx, sched->state.used)) if (!__test_and_set_bit(idx, sched->state.used)) {
if (sched->state.nr_gp++ >= sched->max_gp)
return false;
goto done; goto done;
}
} }
return false; return false;
...@@ -745,7 +767,7 @@ static bool perf_sched_next_event(struct perf_sched *sched) ...@@ -745,7 +767,7 @@ static bool perf_sched_next_event(struct perf_sched *sched)
if (sched->state.weight > sched->max_weight) if (sched->state.weight > sched->max_weight)
return false; return false;
} }
c = sched->events[sched->state.event]->hw.constraint; c = sched->constraints[sched->state.event];
} while (c->weight != sched->state.weight); } while (c->weight != sched->state.weight);
sched->state.counter = 0; /* start with first counter */ sched->state.counter = 0; /* start with first counter */
...@@ -756,12 +778,12 @@ static bool perf_sched_next_event(struct perf_sched *sched) ...@@ -756,12 +778,12 @@ static bool perf_sched_next_event(struct perf_sched *sched)
/* /*
* Assign a counter for each event. * Assign a counter for each event.
*/ */
int perf_assign_events(struct perf_event **events, int n, int perf_assign_events(struct event_constraint **constraints, int n,
int wmin, int wmax, int *assign) int wmin, int wmax, int gpmax, int *assign)
{ {
struct perf_sched sched; struct perf_sched sched;
perf_sched_init(&sched, events, n, wmin, wmax); perf_sched_init(&sched, constraints, n, wmin, wmax, gpmax);
do { do {
if (!perf_sched_find_counter(&sched)) if (!perf_sched_find_counter(&sched))
...@@ -788,9 +810,9 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) ...@@ -788,9 +810,9 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
x86_pmu.start_scheduling(cpuc); x86_pmu.start_scheduling(cpuc);
for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) { for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
hwc = &cpuc->event_list[i]->hw; cpuc->event_constraint[i] = NULL;
c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]); c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
hwc->constraint = c; cpuc->event_constraint[i] = c;
wmin = min(wmin, c->weight); wmin = min(wmin, c->weight);
wmax = max(wmax, c->weight); wmax = max(wmax, c->weight);
...@@ -801,7 +823,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) ...@@ -801,7 +823,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
*/ */
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
hwc = &cpuc->event_list[i]->hw; hwc = &cpuc->event_list[i]->hw;
c = hwc->constraint; c = cpuc->event_constraint[i];
/* never assigned */ /* never assigned */
if (hwc->idx == -1) if (hwc->idx == -1)
...@@ -821,9 +843,26 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) ...@@ -821,9 +843,26 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
} }
/* slow path */ /* slow path */
if (i != n) if (i != n) {
unsched = perf_assign_events(cpuc->event_list, n, wmin, int gpmax = x86_pmu.num_counters;
wmax, assign);
/*
* Do not allow scheduling of more than half the available
* generic counters.
*
* This helps avoid counter starvation of sibling thread by
* ensuring at most half the counters cannot be in exclusive
* mode. There is no designated counters for the limits. Any
* N/2 counters can be used. This helps with events with
* specific counter constraints.
*/
if (is_ht_workaround_enabled() && !cpuc->is_fake &&
READ_ONCE(cpuc->excl_cntrs->exclusive_present))
gpmax /= 2;
unsched = perf_assign_events(cpuc->event_constraint, n, wmin,
wmax, gpmax, assign);
}
/* /*
* In case of success (unsched = 0), mark events as committed, * In case of success (unsched = 0), mark events as committed,
...@@ -840,7 +879,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) ...@@ -840,7 +879,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
e = cpuc->event_list[i]; e = cpuc->event_list[i];
e->hw.flags |= PERF_X86_EVENT_COMMITTED; e->hw.flags |= PERF_X86_EVENT_COMMITTED;
if (x86_pmu.commit_scheduling) if (x86_pmu.commit_scheduling)
x86_pmu.commit_scheduling(cpuc, e, assign[i]); x86_pmu.commit_scheduling(cpuc, i, assign[i]);
} }
} }
...@@ -1292,8 +1331,10 @@ static void x86_pmu_del(struct perf_event *event, int flags) ...@@ -1292,8 +1331,10 @@ static void x86_pmu_del(struct perf_event *event, int flags)
x86_pmu.put_event_constraints(cpuc, event); x86_pmu.put_event_constraints(cpuc, event);
/* Delete the array entry. */ /* Delete the array entry. */
while (++i < cpuc->n_events) while (++i < cpuc->n_events) {
cpuc->event_list[i-1] = cpuc->event_list[i]; cpuc->event_list[i-1] = cpuc->event_list[i];
cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
}
--cpuc->n_events; --cpuc->n_events;
perf_event_update_userpage(event); perf_event_update_userpage(event);
......
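With the HT workaround active, the slow path above halves gpmax so that a sibling thread can never hold more than half the generic counters in exclusive mode; any N/2 of them may be used. A worked example of how that cap is chosen (function and parameter names are illustrative, not the kernel's):

    /* Illustrative: picking the generic-counter cap for the scheduling slow path. */
    static int pick_gpmax(int num_counters, int ht_workaround_enabled,
                          int is_fake, int exclusive_present)
    {
            int gpmax = num_counters;               /* e.g. 4 generic counters per core */

            if (ht_workaround_enabled && !is_fake && exclusive_present)
                    gpmax /= 2;                     /* at most N/2 counters, any N/2 of them */

            return gpmax;                           /* 4 -> 2 when the cap applies */
    }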
...@@ -74,6 +74,7 @@ struct event_constraint { ...@@ -74,6 +74,7 @@ struct event_constraint {
#define PERF_X86_EVENT_EXCL 0x0040 /* HT exclusivity on counter */ #define PERF_X86_EVENT_EXCL 0x0040 /* HT exclusivity on counter */
#define PERF_X86_EVENT_DYNAMIC 0x0080 /* dynamic alloc'd constraint */ #define PERF_X86_EVENT_DYNAMIC 0x0080 /* dynamic alloc'd constraint */
#define PERF_X86_EVENT_RDPMC_ALLOWED 0x0100 /* grant rdpmc permission */ #define PERF_X86_EVENT_RDPMC_ALLOWED 0x0100 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT 0x0200 /* accounted EXCL event */
struct amd_nb { struct amd_nb {
...@@ -134,8 +135,6 @@ enum intel_excl_state_type { ...@@ -134,8 +135,6 @@ enum intel_excl_state_type {
struct intel_excl_states { struct intel_excl_states {
enum intel_excl_state_type init_state[X86_PMC_IDX_MAX]; enum intel_excl_state_type init_state[X86_PMC_IDX_MAX];
enum intel_excl_state_type state[X86_PMC_IDX_MAX]; enum intel_excl_state_type state[X86_PMC_IDX_MAX];
int num_alloc_cntrs;/* #counters allocated */
int max_alloc_cntrs;/* max #counters allowed */
bool sched_started; /* true if scheduling has started */ bool sched_started; /* true if scheduling has started */
}; };
...@@ -144,6 +143,11 @@ struct intel_excl_cntrs { ...@@ -144,6 +143,11 @@ struct intel_excl_cntrs {
struct intel_excl_states states[2]; struct intel_excl_states states[2];
union {
u16 has_exclusive[2];
u32 exclusive_present;
};
int refcnt; /* per-core: #HT threads */ int refcnt; /* per-core: #HT threads */
unsigned core_id; /* per-core: core id */ unsigned core_id; /* per-core: core id */
}; };
...@@ -172,7 +176,11 @@ struct cpu_hw_events { ...@@ -172,7 +176,11 @@ struct cpu_hw_events {
added in the current transaction */ added in the current transaction */
int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */ int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
u64 tags[X86_PMC_IDX_MAX]; u64 tags[X86_PMC_IDX_MAX];
struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
struct event_constraint *event_constraint[X86_PMC_IDX_MAX];
int n_excl; /* the number of exclusive events */
unsigned int group_flag; unsigned int group_flag;
int is_fake; int is_fake;
...@@ -519,9 +527,7 @@ struct x86_pmu { ...@@ -519,9 +527,7 @@ struct x86_pmu {
void (*put_event_constraints)(struct cpu_hw_events *cpuc, void (*put_event_constraints)(struct cpu_hw_events *cpuc,
struct perf_event *event); struct perf_event *event);
void (*commit_scheduling)(struct cpu_hw_events *cpuc, void (*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);
struct perf_event *event,
int cntr);
void (*start_scheduling)(struct cpu_hw_events *cpuc); void (*start_scheduling)(struct cpu_hw_events *cpuc);
...@@ -717,8 +723,8 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, ...@@ -717,8 +723,8 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
void x86_pmu_enable_all(int added); void x86_pmu_enable_all(int added);
int perf_assign_events(struct perf_event **events, int n, int perf_assign_events(struct event_constraint **constraints, int n,
int wmin, int wmax, int *assign); int wmin, int wmax, int gpmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign); int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
void x86_pmu_stop(struct perf_event *event, int flags); void x86_pmu_stop(struct perf_event *event, int flags);
...@@ -929,4 +935,8 @@ static inline struct intel_shared_regs *allocate_shared_regs(int cpu) ...@@ -929,4 +935,8 @@ static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
return NULL; return NULL;
} }
static inline int is_ht_workaround_enabled(void)
{
return 0;
}
#endif /* CONFIG_CPU_SUP_INTEL */ #endif /* CONFIG_CPU_SUP_INTEL */
...@@ -1923,7 +1923,6 @@ intel_start_scheduling(struct cpu_hw_events *cpuc) ...@@ -1923,7 +1923,6 @@ intel_start_scheduling(struct cpu_hw_events *cpuc)
xl = &excl_cntrs->states[tid]; xl = &excl_cntrs->states[tid];
xl->sched_started = true; xl->sched_started = true;
xl->num_alloc_cntrs = 0;
/* /*
* lock shared state until we are done scheduling * lock shared state until we are done scheduling
* in stop_event_scheduling() * in stop_event_scheduling()
...@@ -2000,6 +1999,11 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, ...@@ -2000,6 +1999,11 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
* across HT threads * across HT threads
*/ */
is_excl = c->flags & PERF_X86_EVENT_EXCL; is_excl = c->flags & PERF_X86_EVENT_EXCL;
if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
if (!cpuc->n_excl++)
WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
}
/* /*
* xl = state of current HT * xl = state of current HT
...@@ -2008,18 +2012,6 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, ...@@ -2008,18 +2012,6 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
xl = &excl_cntrs->states[tid]; xl = &excl_cntrs->states[tid];
xlo = &excl_cntrs->states[o_tid]; xlo = &excl_cntrs->states[o_tid];
/*
* do not allow scheduling of more than max_alloc_cntrs
* which is set to half the available generic counters.
* this helps avoid counter starvation of sibling thread
* by ensuring at most half the counters cannot be in
* exclusive mode. There is not designated counters for the
* limits. Any N/2 counters can be used. This helps with
* events with specifix counter constraints
*/
if (xl->num_alloc_cntrs++ == xl->max_alloc_cntrs)
return &emptyconstraint;
cx = c; cx = c;
/* /*
...@@ -2106,7 +2098,7 @@ static struct event_constraint * ...@@ -2106,7 +2098,7 @@ static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event) struct perf_event *event)
{ {
struct event_constraint *c1 = event->hw.constraint; struct event_constraint *c1 = cpuc->event_constraint[idx];
struct event_constraint *c2; struct event_constraint *c2;
/* /*
...@@ -2150,6 +2142,11 @@ static void intel_put_excl_constraints(struct cpu_hw_events *cpuc, ...@@ -2150,6 +2142,11 @@ static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
xl = &excl_cntrs->states[tid]; xl = &excl_cntrs->states[tid];
xlo = &excl_cntrs->states[o_tid]; xlo = &excl_cntrs->states[o_tid];
if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
if (!--cpuc->n_excl)
WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
}
/* /*
* put_constraint may be called from x86_schedule_events() * put_constraint may be called from x86_schedule_events()
...@@ -2188,8 +2185,6 @@ intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc, ...@@ -2188,8 +2185,6 @@ intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
static void intel_put_event_constraints(struct cpu_hw_events *cpuc, static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
struct perf_event *event) struct perf_event *event)
{ {
struct event_constraint *c = event->hw.constraint;
intel_put_shared_regs_event_constraints(cpuc, event); intel_put_shared_regs_event_constraints(cpuc, event);
/* /*
...@@ -2197,19 +2192,14 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc, ...@@ -2197,19 +2192,14 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
* all events are subject to and must call the * all events are subject to and must call the
* put_excl_constraints() routine * put_excl_constraints() routine
*/ */
if (c && cpuc->excl_cntrs) if (cpuc->excl_cntrs)
intel_put_excl_constraints(cpuc, event); intel_put_excl_constraints(cpuc, event);
/* cleanup dynamic constraint */
if (c && (c->flags & PERF_X86_EVENT_DYNAMIC))
event->hw.constraint = NULL;
} }
static void intel_commit_scheduling(struct cpu_hw_events *cpuc, static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
struct perf_event *event, int cntr)
{ {
struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
struct event_constraint *c = event->hw.constraint; struct event_constraint *c = cpuc->event_constraint[idx];
struct intel_excl_states *xlo, *xl; struct intel_excl_states *xlo, *xl;
int tid = cpuc->excl_thread_id; int tid = cpuc->excl_thread_id;
int o_tid = 1 - tid; int o_tid = 1 - tid;
...@@ -2639,8 +2629,6 @@ static void intel_pmu_cpu_starting(int cpu) ...@@ -2639,8 +2629,6 @@ static void intel_pmu_cpu_starting(int cpu)
cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR]; cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
int h = x86_pmu.num_counters >> 1;
for_each_cpu(i, topology_thread_cpumask(cpu)) { for_each_cpu(i, topology_thread_cpumask(cpu)) {
struct intel_excl_cntrs *c; struct intel_excl_cntrs *c;
...@@ -2654,11 +2642,6 @@ static void intel_pmu_cpu_starting(int cpu) ...@@ -2654,11 +2642,6 @@ static void intel_pmu_cpu_starting(int cpu)
} }
cpuc->excl_cntrs->core_id = core_id; cpuc->excl_cntrs->core_id = core_id;
cpuc->excl_cntrs->refcnt++; cpuc->excl_cntrs->refcnt++;
/*
* set hard limit to half the number of generic counters
*/
cpuc->excl_cntrs->states[0].max_alloc_cntrs = h;
cpuc->excl_cntrs->states[1].max_alloc_cntrs = h;
} }
} }
......
...@@ -706,9 +706,9 @@ void intel_pmu_pebs_disable(struct perf_event *event) ...@@ -706,9 +706,9 @@ void intel_pmu_pebs_disable(struct perf_event *event)
cpuc->pebs_enabled &= ~(1ULL << hwc->idx); cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
if (event->hw.constraint->flags & PERF_X86_EVENT_PEBS_LDLAT) if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32)); cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
else if (event->hw.constraint->flags & PERF_X86_EVENT_PEBS_ST) else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
cpuc->pebs_enabled &= ~(1ULL << 63); cpuc->pebs_enabled &= ~(1ULL << 63);
if (cpuc->enabled) if (cpuc->enabled)
......
...@@ -151,7 +151,7 @@ static int __init pt_pmu_hw_init(void) ...@@ -151,7 +151,7 @@ static int __init pt_pmu_hw_init(void)
de_attr->attr.attr.name = pt_caps[i].name; de_attr->attr.attr.name = pt_caps[i].name;
sysfs_attr_init(&de_attrs->attr.attr); sysfs_attr_init(&de_attr->attr.attr);
de_attr->attr.attr.mode = S_IRUGO; de_attr->attr.attr.mode = S_IRUGO;
de_attr->attr.show = pt_cap_show; de_attr->attr.show = pt_cap_show;
...@@ -615,7 +615,8 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf, ...@@ -615,7 +615,8 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
struct perf_output_handle *handle) struct perf_output_handle *handle)
{ {
unsigned long idx, npages, end; unsigned long head = local64_read(&buf->head);
unsigned long idx, npages, wakeup;
if (buf->snapshot) if (buf->snapshot)
return 0; return 0;
...@@ -634,17 +635,26 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf, ...@@ -634,17 +635,26 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
buf->topa_index[buf->stop_pos]->stop = 0; buf->topa_index[buf->stop_pos]->stop = 0;
buf->topa_index[buf->intr_pos]->intr = 0; buf->topa_index[buf->intr_pos]->intr = 0;
if (pt_cap_get(PT_CAP_topa_multiple_entries)) { /* how many pages till the STOP marker */
npages = (handle->size + 1) >> PAGE_SHIFT; npages = handle->size >> PAGE_SHIFT;
end = (local64_read(&buf->head) >> PAGE_SHIFT) + npages;
/*if (end > handle->wakeup >> PAGE_SHIFT) /* if it's on a page boundary, fill up one more page */
end = handle->wakeup >> PAGE_SHIFT;*/ if (!offset_in_page(head + handle->size + 1))
idx = end & (buf->nr_pages - 1); npages++;
buf->stop_pos = idx;
idx = (local64_read(&buf->head) >> PAGE_SHIFT) + npages - 1; idx = (head >> PAGE_SHIFT) + npages;
idx &= buf->nr_pages - 1; idx &= buf->nr_pages - 1;
buf->intr_pos = idx; buf->stop_pos = idx;
}
wakeup = handle->wakeup >> PAGE_SHIFT;
/* in the worst case, wake up the consumer one page before hard stop */
idx = (head >> PAGE_SHIFT) + npages - 1;
if (idx > wakeup)
idx = wakeup;
idx &= buf->nr_pages - 1;
buf->intr_pos = idx;
buf->topa_index[buf->stop_pos]->stop = 1; buf->topa_index[buf->stop_pos]->stop = 1;
buf->topa_index[buf->intr_pos]->intr = 1; buf->topa_index[buf->intr_pos]->intr = 1;
......
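The rewritten marker logic above places the STOP marker handle->size bytes past head (rounding up one page when that lands exactly on a page boundary) and the interrupt marker at most one page before it, clamped to the wakeup point. A simplified sketch of that index math (PAGE_SHIFT assumed to be 12; names are illustrative, not the kernel code):

    #define PAGE_SHIFT 12UL                 /* assumed 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    static void place_markers(unsigned long head, unsigned long size,
                              unsigned long wakeup, unsigned long nr_pages,
                              unsigned long *stop_pos, unsigned long *intr_pos)
    {
            unsigned long npages = size >> PAGE_SHIFT;      /* pages until the STOP marker */
            unsigned long idx;

            if (!((head + size + 1) & (PAGE_SIZE - 1)))     /* lands exactly on a page boundary */
                    npages++;

            *stop_pos = ((head >> PAGE_SHIFT) + npages) & (nr_pages - 1);

            /* wake the consumer no later than one page before the hard stop */
            idx = (head >> PAGE_SHIFT) + npages - 1;
            if (idx > (wakeup >> PAGE_SHIFT))
                    idx = wakeup >> PAGE_SHIFT;
            *intr_pos = idx & (nr_pages - 1);
    }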
...@@ -365,9 +365,8 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int ...@@ -365,9 +365,8 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int
bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX); bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) { for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
hwc = &box->event_list[i]->hw;
c = uncore_get_event_constraint(box, box->event_list[i]); c = uncore_get_event_constraint(box, box->event_list[i]);
hwc->constraint = c; box->event_constraint[i] = c;
wmin = min(wmin, c->weight); wmin = min(wmin, c->weight);
wmax = max(wmax, c->weight); wmax = max(wmax, c->weight);
} }
...@@ -375,7 +374,7 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int ...@@ -375,7 +374,7 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int
/* fastpath, try to reuse previous register */ /* fastpath, try to reuse previous register */
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
hwc = &box->event_list[i]->hw; hwc = &box->event_list[i]->hw;
c = hwc->constraint; c = box->event_constraint[i];
/* never assigned */ /* never assigned */
if (hwc->idx == -1) if (hwc->idx == -1)
...@@ -395,8 +394,8 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int ...@@ -395,8 +394,8 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int
} }
/* slow path */ /* slow path */
if (i != n) if (i != n)
ret = perf_assign_events(box->event_list, n, ret = perf_assign_events(box->event_constraint, n,
wmin, wmax, assign); wmin, wmax, n, assign);
if (!assign || ret) { if (!assign || ret) {
for (i = 0; i < n; i++) for (i = 0; i < n; i++)
......
...@@ -97,6 +97,7 @@ struct intel_uncore_box { ...@@ -97,6 +97,7 @@ struct intel_uncore_box {
atomic_t refcnt; atomic_t refcnt;
struct perf_event *events[UNCORE_PMC_IDX_MAX]; struct perf_event *events[UNCORE_PMC_IDX_MAX];
struct perf_event *event_list[UNCORE_PMC_IDX_MAX]; struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
u64 tags[UNCORE_PMC_IDX_MAX]; u64 tags[UNCORE_PMC_IDX_MAX];
struct pci_dev *pci_dev; struct pci_dev *pci_dev;
......
...@@ -167,7 +167,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data) ...@@ -167,7 +167,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
clear_bss(); clear_bss();
for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
set_intr_gate(i, early_idt_handlers[i]); set_intr_gate(i, early_idt_handler_array[i]);
load_idt((const struct desc_ptr *)&idt_descr); load_idt((const struct desc_ptr *)&idt_descr);
copy_bootdata(__va(real_mode_data)); copy_bootdata(__va(real_mode_data));
......
...@@ -478,21 +478,22 @@ is486: ...@@ -478,21 +478,22 @@ is486:
__INIT __INIT
setup_once: setup_once:
/* /*
* Set up a idt with 256 entries pointing to ignore_int, * Set up a idt with 256 interrupt gates that push zero if there
* interrupt gates. It doesn't actually load idt - that needs * is no error code and then jump to early_idt_handler_common.
* to be done on each CPU. Interrupts are enabled elsewhere, * It doesn't actually load the idt - that needs to be done on
* when we can be relatively sure everything is ok. * each CPU. Interrupts are enabled elsewhere, when we can be
* relatively sure everything is ok.
*/ */
movl $idt_table,%edi movl $idt_table,%edi
movl $early_idt_handlers,%eax movl $early_idt_handler_array,%eax
movl $NUM_EXCEPTION_VECTORS,%ecx movl $NUM_EXCEPTION_VECTORS,%ecx
1: 1:
movl %eax,(%edi) movl %eax,(%edi)
movl %eax,4(%edi) movl %eax,4(%edi)
/* interrupt gate, dpl=0, present */ /* interrupt gate, dpl=0, present */
movl $(0x8E000000 + __KERNEL_CS),2(%edi) movl $(0x8E000000 + __KERNEL_CS),2(%edi)
addl $9,%eax addl $EARLY_IDT_HANDLER_SIZE,%eax
addl $8,%edi addl $8,%edi
loop 1b loop 1b
...@@ -524,26 +525,28 @@ setup_once: ...@@ -524,26 +525,28 @@ setup_once:
andl $0,setup_once_ref /* Once is enough, thanks */ andl $0,setup_once_ref /* Once is enough, thanks */
ret ret
ENTRY(early_idt_handlers) ENTRY(early_idt_handler_array)
# 36(%esp) %eflags # 36(%esp) %eflags
# 32(%esp) %cs # 32(%esp) %cs
# 28(%esp) %eip # 28(%esp) %eip
# 24(%rsp) error code # 24(%rsp) error code
i = 0 i = 0
.rept NUM_EXCEPTION_VECTORS .rept NUM_EXCEPTION_VECTORS
.if (EXCEPTION_ERRCODE_MASK >> i) & 1 .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
ASM_NOP2
.else
pushl $0 # Dummy error code, to make stack frame uniform pushl $0 # Dummy error code, to make stack frame uniform
.endif .endif
pushl $i # 20(%esp) Vector number pushl $i # 20(%esp) Vector number
jmp early_idt_handler jmp early_idt_handler_common
i = i + 1 i = i + 1
.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
.endr .endr
ENDPROC(early_idt_handlers) ENDPROC(early_idt_handler_array)
/* This is global to keep gas from relaxing the jumps */ early_idt_handler_common:
ENTRY(early_idt_handler) /*
* The stack is the hardware frame, an error code or zero, and the
* vector number.
*/
cld cld
cmpl $2,(%esp) # X86_TRAP_NMI cmpl $2,(%esp) # X86_TRAP_NMI
...@@ -603,7 +606,7 @@ ex_entry: ...@@ -603,7 +606,7 @@ ex_entry:
is_nmi: is_nmi:
addl $8,%esp /* drop vector number and error code */ addl $8,%esp /* drop vector number and error code */
iret iret
ENDPROC(early_idt_handler) ENDPROC(early_idt_handler_common)
/* This is the default interrupt "handler" :-) */ /* This is the default interrupt "handler" :-) */
ALIGN ALIGN
......
...@@ -321,26 +321,28 @@ bad_address: ...@@ -321,26 +321,28 @@ bad_address:
jmp bad_address jmp bad_address
__INIT __INIT
.globl early_idt_handlers ENTRY(early_idt_handler_array)
early_idt_handlers:
# 104(%rsp) %rflags # 104(%rsp) %rflags
# 96(%rsp) %cs # 96(%rsp) %cs
# 88(%rsp) %rip # 88(%rsp) %rip
# 80(%rsp) error code # 80(%rsp) error code
i = 0 i = 0
.rept NUM_EXCEPTION_VECTORS .rept NUM_EXCEPTION_VECTORS
.if (EXCEPTION_ERRCODE_MASK >> i) & 1 .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
ASM_NOP2
.else
pushq $0 # Dummy error code, to make stack frame uniform pushq $0 # Dummy error code, to make stack frame uniform
.endif .endif
pushq $i # 72(%rsp) Vector number pushq $i # 72(%rsp) Vector number
jmp early_idt_handler jmp early_idt_handler_common
i = i + 1 i = i + 1
.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
.endr .endr
ENDPROC(early_idt_handler_array)
/* This is global to keep gas from relaxing the jumps */ early_idt_handler_common:
ENTRY(early_idt_handler) /*
* The stack is the hardware frame, an error code or zero, and the
* vector number.
*/
cld cld
cmpl $2,(%rsp) # X86_TRAP_NMI cmpl $2,(%rsp) # X86_TRAP_NMI
...@@ -412,7 +414,7 @@ ENTRY(early_idt_handler) ...@@ -412,7 +414,7 @@ ENTRY(early_idt_handler)
is_nmi: is_nmi:
addq $16,%rsp # drop vector number and error code addq $16,%rsp # drop vector number and error code
INTERRUPT_RETURN INTERRUPT_RETURN
ENDPROC(early_idt_handler) ENDPROC(early_idt_handler_common)
__INITDATA __INITDATA
......
...@@ -653,7 +653,6 @@ void del_gendisk(struct gendisk *disk) ...@@ -653,7 +653,6 @@ void del_gendisk(struct gendisk *disk)
disk->flags &= ~GENHD_FL_UP; disk->flags &= ~GENHD_FL_UP;
sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi"); sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
bdi_unregister(&disk->queue->backing_dev_info);
blk_unregister_queue(disk); blk_unregister_queue(disk);
blk_unregister_region(disk_devt(disk), disk->minors); blk_unregister_region(disk_devt(disk), disk->minors);
......
...@@ -45,7 +45,7 @@ static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv, ...@@ -45,7 +45,7 @@ static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv,
writel((cs->mbus_attr << 8) | writel((cs->mbus_attr << 8) |
(dram->mbus_dram_target_id << 4) | 1, (dram->mbus_dram_target_id << 4) | 1,
hpriv->mmio + AHCI_WINDOW_CTRL(i)); hpriv->mmio + AHCI_WINDOW_CTRL(i));
writel(cs->base, hpriv->mmio + AHCI_WINDOW_BASE(i)); writel(cs->base >> 16, hpriv->mmio + AHCI_WINDOW_BASE(i));
writel(((cs->size - 1) & 0xffff0000), writel(((cs->size - 1) & 0xffff0000),
hpriv->mmio + AHCI_WINDOW_SIZE(i)); hpriv->mmio + AHCI_WINDOW_SIZE(i));
} }
......
...@@ -1053,7 +1053,7 @@ static struct of_device_id octeon_cf_match[] = { ...@@ -1053,7 +1053,7 @@ static struct of_device_id octeon_cf_match[] = {
}, },
{}, {},
}; };
MODULE_DEVICE_TABLE(of, octeon_i2c_match); MODULE_DEVICE_TABLE(of, octeon_cf_match);
static struct platform_driver octeon_cf_driver = { static struct platform_driver octeon_cf_driver = {
.probe = octeon_cf_probe, .probe = octeon_cf_probe,
......
...@@ -179,7 +179,7 @@ static int detect_cache_attributes(unsigned int cpu) ...@@ -179,7 +179,7 @@ static int detect_cache_attributes(unsigned int cpu)
{ {
int ret; int ret;
if (init_cache_level(cpu)) if (init_cache_level(cpu) || !cache_leaves(cpu))
return -ENOENT; return -ENOENT;
per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <linux/device.h> #include <linux/device.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/memory.h> #include <linux/memory.h>
#include <linux/of.h>
#include "base.h" #include "base.h"
...@@ -34,4 +35,5 @@ void __init driver_init(void) ...@@ -34,4 +35,5 @@ void __init driver_init(void)
cpu_dev_init(); cpu_dev_init();
memory_dev_init(); memory_dev_init();
container_dev_init(); container_dev_init();
of_core_init();
} }
...@@ -1750,6 +1750,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) ...@@ -1750,6 +1750,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
struct nvme_iod *iod; struct nvme_iod *iod;
dma_addr_t meta_dma = 0; dma_addr_t meta_dma = 0;
void *meta = NULL; void *meta = NULL;
void __user *metadata;
if (copy_from_user(&io, uio, sizeof(io))) if (copy_from_user(&io, uio, sizeof(io)))
return -EFAULT; return -EFAULT;
...@@ -1763,6 +1764,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) ...@@ -1763,6 +1764,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
meta_len = 0; meta_len = 0;
} }
metadata = (void __user *)(unsigned long)io.metadata;
write = io.opcode & 1; write = io.opcode & 1;
switch (io.opcode) { switch (io.opcode) {
...@@ -1786,13 +1789,13 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) ...@@ -1786,13 +1789,13 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
if (meta_len) { if (meta_len) {
meta = dma_alloc_coherent(&dev->pci_dev->dev, meta_len, meta = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
&meta_dma, GFP_KERNEL); &meta_dma, GFP_KERNEL);
if (!meta) { if (!meta) {
status = -ENOMEM; status = -ENOMEM;
goto unmap; goto unmap;
} }
if (write) { if (write) {
if (copy_from_user(meta, (void __user *)io.metadata, if (copy_from_user(meta, metadata, meta_len)) {
meta_len)) {
status = -EFAULT; status = -EFAULT;
goto unmap; goto unmap;
} }
...@@ -1819,8 +1822,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) ...@@ -1819,8 +1822,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
nvme_free_iod(dev, iod); nvme_free_iod(dev, iod);
if (meta) { if (meta) {
if (status == NVME_SC_SUCCESS && !write) { if (status == NVME_SC_SUCCESS && !write) {
if (copy_to_user((void __user *)io.metadata, meta, if (copy_to_user(metadata, meta, meta_len))
meta_len))
status = -EFAULT; status = -EFAULT;
} }
dma_free_coherent(&dev->pci_dev->dev, meta_len, meta, meta_dma); dma_free_coherent(&dev->pci_dev->dev, meta_len, meta, meta_dma);
......
...@@ -58,7 +58,6 @@ ...@@ -58,7 +58,6 @@
#include <linux/debugfs.h> #include <linux/debugfs.h>
#include <linux/log2.h> #include <linux/log2.h>
#include <linux/syscore_ops.h> #include <linux/syscore_ops.h>
#include <linux/memblock.h>
/* /*
* DDR target is the same on all platforms. * DDR target is the same on all platforms.
...@@ -70,6 +69,7 @@ ...@@ -70,6 +69,7 @@
*/ */
#define WIN_CTRL_OFF 0x0000 #define WIN_CTRL_OFF 0x0000
#define WIN_CTRL_ENABLE BIT(0) #define WIN_CTRL_ENABLE BIT(0)
/* Only on HW I/O coherency capable platforms */
#define WIN_CTRL_SYNCBARRIER BIT(1) #define WIN_CTRL_SYNCBARRIER BIT(1)
#define WIN_CTRL_TGT_MASK 0xf0 #define WIN_CTRL_TGT_MASK 0xf0
#define WIN_CTRL_TGT_SHIFT 4 #define WIN_CTRL_TGT_SHIFT 4
...@@ -102,9 +102,7 @@ ...@@ -102,9 +102,7 @@
/* Relative to mbusbridge_base */ /* Relative to mbusbridge_base */
#define MBUS_BRIDGE_CTRL_OFF 0x0 #define MBUS_BRIDGE_CTRL_OFF 0x0
#define MBUS_BRIDGE_SIZE_MASK 0xffff0000
#define MBUS_BRIDGE_BASE_OFF 0x4 #define MBUS_BRIDGE_BASE_OFF 0x4
#define MBUS_BRIDGE_BASE_MASK 0xffff0000
/* Maximum number of windows, for all known platforms */ /* Maximum number of windows, for all known platforms */
#define MBUS_WINS_MAX 20 #define MBUS_WINS_MAX 20
...@@ -323,8 +321,9 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus, ...@@ -323,8 +321,9 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) | ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) |
(attr << WIN_CTRL_ATTR_SHIFT) | (attr << WIN_CTRL_ATTR_SHIFT) |
(target << WIN_CTRL_TGT_SHIFT) | (target << WIN_CTRL_TGT_SHIFT) |
WIN_CTRL_SYNCBARRIER |
WIN_CTRL_ENABLE; WIN_CTRL_ENABLE;
if (mbus->hw_io_coherency)
ctrl |= WIN_CTRL_SYNCBARRIER;
writel(base & WIN_BASE_LOW, addr + WIN_BASE_OFF); writel(base & WIN_BASE_LOW, addr + WIN_BASE_OFF);
writel(ctrl, addr + WIN_CTRL_OFF); writel(ctrl, addr + WIN_CTRL_OFF);
...@@ -577,106 +576,36 @@ static unsigned int armada_xp_mbus_win_remap_offset(int win) ...@@ -577,106 +576,36 @@ static unsigned int armada_xp_mbus_win_remap_offset(int win)
return MVEBU_MBUS_NO_REMAP; return MVEBU_MBUS_NO_REMAP;
} }
/*
* Use the memblock information to find the MBus bridge hole in the
* physical address space.
*/
static void __init
mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end)
{
struct memblock_region *r;
uint64_t s = 0;
for_each_memblock(memory, r) {
/*
* This part of the memory is above 4 GB, so we don't
* care for the MBus bridge hole.
*/
if (r->base >= 0x100000000)
continue;
/*
* The MBus bridge hole is at the end of the RAM under
* the 4 GB limit.
*/
if (r->base + r->size > s)
s = r->base + r->size;
}
*start = s;
*end = 0x100000000;
}
static void __init static void __init
mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus) mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
{ {
int i; int i;
int cs; int cs;
uint64_t mbus_bridge_base, mbus_bridge_end;
mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR; mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
mvebu_mbus_find_bridge_hole(&mbus_bridge_base, &mbus_bridge_end);
for (i = 0, cs = 0; i < 4; i++) { for (i = 0, cs = 0; i < 4; i++) {
u64 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i)); u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
u64 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i)); u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
u64 end;
struct mbus_dram_window *w;
/* Ignore entries that are not enabled */
if (!(size & DDR_SIZE_ENABLED))
continue;
/*
* Ignore entries whose base address is above 2^32,
* since devices cannot DMA to such high addresses
*/
if (base & DDR_BASE_CS_HIGH_MASK)
continue;
base = base & DDR_BASE_CS_LOW_MASK;
size = (size | ~DDR_SIZE_MASK) + 1;
end = base + size;
/*
* Adjust base/size of the current CS to make sure it
* doesn't overlap with the MBus bridge hole. This is
* particularly important for devices that do DMA from
* DRAM to a SRAM mapped in a MBus window, such as the
* CESA cryptographic engine.
*/
/* /*
* The CS is fully enclosed inside the MBus bridge * We only take care of entries for which the chip
* area, so ignore it. * select is enabled, and that don't have high base
* address bits set (devices can only access the first
* 32 bits of the memory).
*/ */
if (base >= mbus_bridge_base && end <= mbus_bridge_end) if ((size & DDR_SIZE_ENABLED) &&
continue; !(base & DDR_BASE_CS_HIGH_MASK)) {
struct mbus_dram_window *w;
/* w = &mvebu_mbus_dram_info.cs[cs++];
* Beginning of CS overlaps with end of MBus, raise CS w->cs_index = i;
* base address, and shrink its size. w->mbus_attr = 0xf & ~(1 << i);
*/ if (mbus->hw_io_coherency)
if (base >= mbus_bridge_base && end > mbus_bridge_end) { w->mbus_attr |= ATTR_HW_COHERENCY;
size -= mbus_bridge_end - base; w->base = base & DDR_BASE_CS_LOW_MASK;
base = mbus_bridge_end; w->size = (size | ~DDR_SIZE_MASK) + 1;
} }
/*
* End of CS overlaps with beginning of MBus, shrink
* CS size.
*/
if (base < mbus_bridge_base && end > mbus_bridge_base)
size -= end - mbus_bridge_base;
w = &mvebu_mbus_dram_info.cs[cs++];
w->cs_index = i;
w->mbus_attr = 0xf & ~(1 << i);
if (mbus->hw_io_coherency)
w->mbus_attr |= ATTR_HW_COHERENCY;
w->base = base;
w->size = size;
} }
mvebu_mbus_dram_info.num_cs = cs; mvebu_mbus_dram_info.num_cs = cs;
} }
......
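The mvebu-mbus rework above goes back to walking the four DDR chip-select windows directly: a window is registered only when its size register has DDR_SIZE_ENABLED set and its base register has no high address bits set. The size field stores "window size - 1" in the masked bits, so OR-ing in the unmasked low bits and adding one recovers the byte size; assuming DDR_SIZE_MASK is 0xffff0000 on these SoCs, a register value of 0x3fff0000 decodes to 0x40000000 bytes (1 GiB). Condensed from the hunk:

    u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
    u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));

    if ((size & DDR_SIZE_ENABLED) && !(base & DDR_BASE_CS_HIGH_MASK)) {
            w->base = base & DDR_BASE_CS_LOW_MASK;   /* strip control bits   */
            w->size = (size | ~DDR_SIZE_MASK) + 1;   /* field stores size-1  */
    }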
...@@ -384,7 +384,10 @@ static int hsu_dma_terminate_all(struct dma_chan *chan) ...@@ -384,7 +384,10 @@ static int hsu_dma_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&hsuc->vchan.lock, flags); spin_lock_irqsave(&hsuc->vchan.lock, flags);
hsu_dma_stop_channel(hsuc); hsu_dma_stop_channel(hsuc);
hsuc->desc = NULL; if (hsuc->desc) {
hsu_dma_desc_free(&hsuc->desc->vdesc);
hsuc->desc = NULL;
}
vchan_get_all_descriptors(&hsuc->vchan, &head); vchan_get_all_descriptors(&hsuc->vchan, &head);
spin_unlock_irqrestore(&hsuc->vchan.lock, flags); spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
......
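In the HSU DMA hunk, terminate_all previously just cleared hsuc->desc; since the in-flight descriptor is no longer on any virt-dma list at that point, dropping the pointer presumably leaked it. The fix frees it explicitly before clearing the pointer:

    if (hsuc->desc) {
            hsu_dma_desc_free(&hsuc->desc->vdesc);   /* no list owns it anymore */
            hsuc->desc = NULL;
    }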
...@@ -2127,6 +2127,7 @@ static int pl330_terminate_all(struct dma_chan *chan) ...@@ -2127,6 +2127,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
struct pl330_dmac *pl330 = pch->dmac; struct pl330_dmac *pl330 = pch->dmac;
LIST_HEAD(list); LIST_HEAD(list);
pm_runtime_get_sync(pl330->ddma.dev);
spin_lock_irqsave(&pch->lock, flags); spin_lock_irqsave(&pch->lock, flags);
spin_lock(&pl330->lock); spin_lock(&pl330->lock);
_stop(pch->thread); _stop(pch->thread);
...@@ -2151,6 +2152,8 @@ static int pl330_terminate_all(struct dma_chan *chan) ...@@ -2151,6 +2152,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
list_splice_tail_init(&pch->work_list, &pl330->desc_pool); list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
spin_unlock_irqrestore(&pch->lock, flags); spin_unlock_irqrestore(&pch->lock, flags);
pm_runtime_mark_last_busy(pl330->ddma.dev);
pm_runtime_put_autosuspend(pl330->ddma.dev);
return 0; return 0;
} }
......
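The pl330 change brackets terminate_all with a runtime-PM reference so the DMAC cannot be autosuspended while _stop() and the descriptor cleanup touch its registers. Reduced to its essentials, the bracketing pattern is:

    pm_runtime_get_sync(pl330->ddma.dev);      /* resume (or keep) the DMAC */

    /* ... stop the channel thread and reclaim descriptors under the locks ... */

    pm_runtime_mark_last_busy(pl330->ddma.dev);
    pm_runtime_put_autosuspend(pl330->ddma.dev);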
...@@ -186,8 +186,20 @@ struct ibft_kobject { ...@@ -186,8 +186,20 @@ struct ibft_kobject {
static struct iscsi_boot_kset *boot_kset; static struct iscsi_boot_kset *boot_kset;
/* fully null address */
static const char nulls[16]; static const char nulls[16];
/* IPv4-mapped IPv6 ::ffff:0.0.0.0 */
static const char mapped_nulls[16] = { 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00 };
static int address_not_null(u8 *ip)
{
return (memcmp(ip, nulls, 16) && memcmp(ip, mapped_nulls, 16));
}
/* /*
* Helper functions to parse data properly. * Helper functions to parse data properly.
*/ */
...@@ -445,7 +457,7 @@ static umode_t ibft_check_nic_for(void *data, int type) ...@@ -445,7 +457,7 @@ static umode_t ibft_check_nic_for(void *data, int type)
rc = S_IRUGO; rc = S_IRUGO;
break; break;
case ISCSI_BOOT_ETH_IP_ADDR: case ISCSI_BOOT_ETH_IP_ADDR:
if (memcmp(nic->ip_addr, nulls, sizeof(nic->ip_addr))) if (address_not_null(nic->ip_addr))
rc = S_IRUGO; rc = S_IRUGO;
break; break;
case ISCSI_BOOT_ETH_SUBNET_MASK: case ISCSI_BOOT_ETH_SUBNET_MASK:
...@@ -456,21 +468,19 @@ static umode_t ibft_check_nic_for(void *data, int type) ...@@ -456,21 +468,19 @@ static umode_t ibft_check_nic_for(void *data, int type)
rc = S_IRUGO; rc = S_IRUGO;
break; break;
case ISCSI_BOOT_ETH_GATEWAY: case ISCSI_BOOT_ETH_GATEWAY:
if (memcmp(nic->gateway, nulls, sizeof(nic->gateway))) if (address_not_null(nic->gateway))
rc = S_IRUGO; rc = S_IRUGO;
break; break;
case ISCSI_BOOT_ETH_PRIMARY_DNS: case ISCSI_BOOT_ETH_PRIMARY_DNS:
if (memcmp(nic->primary_dns, nulls, sizeof(nic->primary_dns))) if (address_not_null(nic->primary_dns))
rc = S_IRUGO; rc = S_IRUGO;
break; break;
case ISCSI_BOOT_ETH_SECONDARY_DNS: case ISCSI_BOOT_ETH_SECONDARY_DNS:
if (memcmp(nic->secondary_dns, nulls, sizeof(nic->secondary_dns))) if (address_not_null(nic->secondary_dns))
rc = S_IRUGO; rc = S_IRUGO;
break; break;
case ISCSI_BOOT_ETH_DHCP: case ISCSI_BOOT_ETH_DHCP:
if (memcmp(nic->dhcp, nulls, sizeof(nic->dhcp))) if (address_not_null(nic->dhcp))
rc = S_IRUGO; rc = S_IRUGO;
break; break;
case ISCSI_BOOT_ETH_VLAN: case ISCSI_BOOT_ETH_VLAN:
...@@ -536,23 +546,19 @@ static umode_t __init ibft_check_initiator_for(void *data, int type) ...@@ -536,23 +546,19 @@ static umode_t __init ibft_check_initiator_for(void *data, int type)
rc = S_IRUGO; rc = S_IRUGO;
break; break;
case ISCSI_BOOT_INI_ISNS_SERVER: case ISCSI_BOOT_INI_ISNS_SERVER:
if (memcmp(init->isns_server, nulls, sizeof(init->isns_server))) if (address_not_null(init->isns_server))
rc = S_IRUGO; rc = S_IRUGO;
break; break;
case ISCSI_BOOT_INI_SLP_SERVER: case ISCSI_BOOT_INI_SLP_SERVER:
if (memcmp(init->slp_server, nulls, sizeof(init->slp_server))) if (address_not_null(init->slp_server))
rc = S_IRUGO; rc = S_IRUGO;
break; break;
case ISCSI_BOOT_INI_PRI_RADIUS_SERVER: case ISCSI_BOOT_INI_PRI_RADIUS_SERVER:
if (memcmp(init->pri_radius_server, nulls, sizeof(init->pri_radius_server))) if (address_not_null(init->pri_radius_server))
rc = S_IRUGO; rc = S_IRUGO;
break; break;
case ISCSI_BOOT_INI_SEC_RADIUS_SERVER: case ISCSI_BOOT_INI_SEC_RADIUS_SERVER:
if (memcmp(init->sec_radius_server, nulls, sizeof(init->sec_radius_server))) if (address_not_null(init->sec_radius_server))
rc = S_IRUGO; rc = S_IRUGO;
break; break;
case ISCSI_BOOT_INI_INITIATOR_NAME: case ISCSI_BOOT_INI_INITIATOR_NAME:
......
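The iBFT helper above treats both the all-zero buffer and the IPv4-mapped form ::ffff:0.0.0.0 as "no address configured", since firmware stores IPv4 addresses in the 16-byte field using the mapped encoding. A hedged example of a real mapped address against that check, using the documentation address 192.0.2.1:

    /* ::ffff:192.0.2.1 as laid out in the 16-byte iBFT field */
    u8 ip[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
                  0, 0, 0xff, 0xff, 192, 0, 2, 1 };
    umode_t rc = 0;

    if (address_not_null(ip))    /* true: only all-zero or ::ffff:0.0.0.0 fail */
            rc = S_IRUGO;        /* expose the sysfs attribute */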
...@@ -684,8 +684,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, ...@@ -684,8 +684,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.cpu_core_id_base); dev->node_props.cpu_core_id_base);
sysfs_show_32bit_prop(buffer, "simd_id_base", sysfs_show_32bit_prop(buffer, "simd_id_base",
dev->node_props.simd_id_base); dev->node_props.simd_id_base);
sysfs_show_32bit_prop(buffer, "capability",
dev->node_props.capability);
sysfs_show_32bit_prop(buffer, "max_waves_per_simd", sysfs_show_32bit_prop(buffer, "max_waves_per_simd",
dev->node_props.max_waves_per_simd); dev->node_props.max_waves_per_simd);
sysfs_show_32bit_prop(buffer, "lds_size_in_kb", sysfs_show_32bit_prop(buffer, "lds_size_in_kb",
...@@ -736,6 +734,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, ...@@ -736,6 +734,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->gpu->kfd2kgd->get_fw_version( dev->gpu->kfd2kgd->get_fw_version(
dev->gpu->kgd, dev->gpu->kgd,
KGD_ENGINE_MEC1)); KGD_ENGINE_MEC1));
sysfs_show_32bit_prop(buffer, "capability",
dev->node_props.capability);
} }
return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute", return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
......
...@@ -217,7 +217,7 @@ static ssize_t status_store(struct device *device, ...@@ -217,7 +217,7 @@ static ssize_t status_store(struct device *device,
mutex_unlock(&dev->mode_config.mutex); mutex_unlock(&dev->mode_config.mutex);
return ret; return ret ? ret : count;
} }
static ssize_t status_show(struct device *device, static ssize_t status_show(struct device *device,
......
...@@ -1667,12 +1667,15 @@ static int i915_sr_status(struct seq_file *m, void *unused) ...@@ -1667,12 +1667,15 @@ static int i915_sr_status(struct seq_file *m, void *unused)
if (HAS_PCH_SPLIT(dev)) if (HAS_PCH_SPLIT(dev))
sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) else if (IS_CRESTLINE(dev) || IS_G4X(dev) || IS_I945G(dev) || IS_I945GM(dev))
sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
else if (IS_I915GM(dev)) else if (IS_I915GM(dev))
sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
else if (IS_PINEVIEW(dev)) else if (IS_PINEVIEW(dev))
sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
else if (IS_VALLEYVIEW(dev))
sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
intel_runtime_pm_put(dev_priv); intel_runtime_pm_put(dev_priv);
......
...@@ -2656,9 +2656,6 @@ void i915_gem_reset(struct drm_device *dev) ...@@ -2656,9 +2656,6 @@ void i915_gem_reset(struct drm_device *dev)
void void
i915_gem_retire_requests_ring(struct intel_engine_cs *ring) i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
{ {
if (list_empty(&ring->request_list))
return;
WARN_ON(i915_verify_lists(ring->dev)); WARN_ON(i915_verify_lists(ring->dev));
/* Retire requests first as we use it above for the early return. /* Retire requests first as we use it above for the early return.
......
...@@ -880,10 +880,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, ...@@ -880,10 +880,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_RECEIVE_ERROR)) DP_AUX_CH_CTL_RECEIVE_ERROR))
continue; continue;
if (status & DP_AUX_CH_CTL_DONE) if (status & DP_AUX_CH_CTL_DONE)
break; goto done;
} }
if (status & DP_AUX_CH_CTL_DONE)
break;
} }
if ((status & DP_AUX_CH_CTL_DONE) == 0) { if ((status & DP_AUX_CH_CTL_DONE) == 0) {
...@@ -892,6 +890,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, ...@@ -892,6 +890,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
goto out; goto out;
} }
done:
/* Check for timeout or receive error. /* Check for timeout or receive error.
* Timeouts occur when the sink is not connected * Timeouts occur when the sink is not connected
*/ */
......
...@@ -1134,6 +1134,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) ...@@ -1134,6 +1134,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
if (ring->status_page.obj) {
I915_WRITE(RING_HWS_PGA(ring->mmio_base),
(u32)ring->status_page.gfx_addr);
POSTING_READ(RING_HWS_PGA(ring->mmio_base));
}
I915_WRITE(RING_MODE_GEN7(ring), I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
......
...@@ -901,13 +901,6 @@ static int chv_init_workarounds(struct intel_engine_cs *ring) ...@@ -901,13 +901,6 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4); GEN6_WIZ_HASHING_16x4);
if (INTEL_REVID(dev) == SKL_REVID_C0 ||
INTEL_REVID(dev) == SKL_REVID_D0)
/* WaBarrierPerformanceFixDisable:skl */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FENCE_DEST_SLM_DISABLE |
HDC_BARRIER_PERFORMANCE_DISABLE);
return 0; return 0;
} }
...@@ -1024,6 +1017,13 @@ static int skl_init_workarounds(struct intel_engine_cs *ring) ...@@ -1024,6 +1017,13 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
WA_SET_BIT_MASKED(HIZ_CHICKEN, WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
if (INTEL_REVID(dev) == SKL_REVID_C0 ||
INTEL_REVID(dev) == SKL_REVID_D0)
/* WaBarrierPerformanceFixDisable:skl */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FENCE_DEST_SLM_DISABLE |
HDC_BARRIER_PERFORMANCE_DISABLE);
return skl_tune_iz_hashing(ring); return skl_tune_iz_hashing(ring);
} }
......
...@@ -173,7 +173,7 @@ void dce3_2_hdmi_update_acr(struct drm_encoder *encoder, long offset, ...@@ -173,7 +173,7 @@ void dce3_2_hdmi_update_acr(struct drm_encoder *encoder, long offset,
struct drm_device *dev = encoder->dev; struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private; struct radeon_device *rdev = dev->dev_private;
WREG32(HDMI0_ACR_PACKET_CONTROL + offset, WREG32(DCE3_HDMI0_ACR_PACKET_CONTROL + offset,
HDMI0_ACR_SOURCE | /* select SW CTS value */ HDMI0_ACR_SOURCE | /* select SW CTS value */
HDMI0_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */ HDMI0_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
......
...@@ -554,4 +554,4 @@ module_platform_driver(hix5hd2_i2c_driver); ...@@ -554,4 +554,4 @@ module_platform_driver(hix5hd2_i2c_driver);
MODULE_DESCRIPTION("Hix5hd2 I2C Bus driver"); MODULE_DESCRIPTION("Hix5hd2 I2C Bus driver");
MODULE_AUTHOR("Wei Yan <sledge.yanwei@huawei.com>"); MODULE_AUTHOR("Wei Yan <sledge.yanwei@huawei.com>");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:i2c-hix5hd2"); MODULE_ALIAS("platform:hix5hd2-i2c");
...@@ -1143,6 +1143,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) ...@@ -1143,6 +1143,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
return -ENOMEM; return -ENOMEM;
i2c->quirks = s3c24xx_get_device_quirks(pdev); i2c->quirks = s3c24xx_get_device_quirks(pdev);
i2c->sysreg = ERR_PTR(-ENOENT);
if (pdata) if (pdata)
memcpy(i2c->pdata, pdata, sizeof(*pdata)); memcpy(i2c->pdata, pdata, sizeof(*pdata));
else else
......
...@@ -1001,7 +1001,7 @@ static struct platform_driver twl6030_gpadc_driver = { ...@@ -1001,7 +1001,7 @@ static struct platform_driver twl6030_gpadc_driver = {
module_platform_driver(twl6030_gpadc_driver); module_platform_driver(twl6030_gpadc_driver);
MODULE_ALIAS("platform: " DRIVER_NAME); MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Balaji T K <balajitk@ti.com>"); MODULE_AUTHOR("Balaji T K <balajitk@ti.com>");
MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>"); MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
MODULE_AUTHOR("Oleksandr Kozaruk <oleksandr.kozaruk@ti.com"); MODULE_AUTHOR("Oleksandr Kozaruk <oleksandr.kozaruk@ti.com");
......
...@@ -139,6 +139,7 @@ ...@@ -139,6 +139,7 @@
#define ADIS16400_NO_BURST BIT(1) #define ADIS16400_NO_BURST BIT(1)
#define ADIS16400_HAS_SLOW_MODE BIT(2) #define ADIS16400_HAS_SLOW_MODE BIT(2)
#define ADIS16400_HAS_SERIAL_NUMBER BIT(3) #define ADIS16400_HAS_SERIAL_NUMBER BIT(3)
#define ADIS16400_BURST_DIAG_STAT BIT(4)
struct adis16400_state; struct adis16400_state;
...@@ -165,6 +166,7 @@ struct adis16400_state { ...@@ -165,6 +166,7 @@ struct adis16400_state {
int filt_int; int filt_int;
struct adis adis; struct adis adis;
unsigned long avail_scan_mask[2];
}; };
/* At the moment triggers are only used for ring buffer /* At the moment triggers are only used for ring buffer
......
...@@ -18,7 +18,8 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev, ...@@ -18,7 +18,8 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev,
{ {
struct adis16400_state *st = iio_priv(indio_dev); struct adis16400_state *st = iio_priv(indio_dev);
struct adis *adis = &st->adis; struct adis *adis = &st->adis;
uint16_t *tx; unsigned int burst_length;
u8 *tx;
if (st->variant->flags & ADIS16400_NO_BURST) if (st->variant->flags & ADIS16400_NO_BURST)
return adis_update_scan_mode(indio_dev, scan_mask); return adis_update_scan_mode(indio_dev, scan_mask);
...@@ -26,26 +27,29 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev, ...@@ -26,26 +27,29 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev,
kfree(adis->xfer); kfree(adis->xfer);
kfree(adis->buffer); kfree(adis->buffer);
/* All but the timestamp channel */
burst_length = (indio_dev->num_channels - 1) * sizeof(u16);
if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
burst_length += sizeof(u16);
adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL); adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL);
if (!adis->xfer) if (!adis->xfer)
return -ENOMEM; return -ENOMEM;
adis->buffer = kzalloc(indio_dev->scan_bytes + sizeof(u16), GFP_KERNEL); adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL);
if (!adis->buffer) if (!adis->buffer)
return -ENOMEM; return -ENOMEM;
tx = adis->buffer + indio_dev->scan_bytes; tx = adis->buffer + burst_length;
tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD); tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD);
tx[1] = 0; tx[1] = 0;
adis->xfer[0].tx_buf = tx; adis->xfer[0].tx_buf = tx;
adis->xfer[0].bits_per_word = 8; adis->xfer[0].bits_per_word = 8;
adis->xfer[0].len = 2; adis->xfer[0].len = 2;
adis->xfer[1].tx_buf = tx; adis->xfer[1].rx_buf = adis->buffer;
adis->xfer[1].bits_per_word = 8; adis->xfer[1].bits_per_word = 8;
adis->xfer[1].len = indio_dev->scan_bytes; adis->xfer[1].len = burst_length;
spi_message_init(&adis->msg); spi_message_init(&adis->msg);
spi_message_add_tail(&adis->xfer[0], &adis->msg); spi_message_add_tail(&adis->xfer[0], &adis->msg);
...@@ -61,6 +65,7 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p) ...@@ -61,6 +65,7 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p)
struct adis16400_state *st = iio_priv(indio_dev); struct adis16400_state *st = iio_priv(indio_dev);
struct adis *adis = &st->adis; struct adis *adis = &st->adis;
u32 old_speed_hz = st->adis.spi->max_speed_hz; u32 old_speed_hz = st->adis.spi->max_speed_hz;
void *buffer;
int ret; int ret;
if (!adis->buffer) if (!adis->buffer)
...@@ -81,7 +86,12 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p) ...@@ -81,7 +86,12 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p)
spi_setup(st->adis.spi); spi_setup(st->adis.spi);
} }
iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer, if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
buffer = adis->buffer + sizeof(u16);
else
buffer = adis->buffer;
iio_push_to_buffers_with_timestamp(indio_dev, buffer,
pf->timestamp); pf->timestamp);
iio_trigger_notify_done(indio_dev->trig); iio_trigger_notify_done(indio_dev->trig);
......
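The adis16400 buffer rework sizes the burst transfer from the channel list instead of scan_bytes: every channel except the timestamp contributes one 16-bit word, plus one extra word when the part prefixes the burst with DIAG_STAT, and in that case the push into the IIO buffer starts one word in. For a hypothetical variant with 12 data channels plus a timestamp that is 24 bytes, or 26 with the status word. Condensed from the hunk:

    /* one u16 per channel, timestamp excluded */
    burst_length = (indio_dev->num_channels - 1) * sizeof(u16);
    if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
            burst_length += sizeof(u16);             /* leading DIAG_STAT word */

    /* skip the status word when handing data to the IIO core */
    buffer = adis->buffer;
    if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
            buffer += sizeof(u16);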
...@@ -405,6 +405,11 @@ static int adis16400_read_raw(struct iio_dev *indio_dev, ...@@ -405,6 +405,11 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
*val = st->variant->temp_scale_nano / 1000000; *val = st->variant->temp_scale_nano / 1000000;
*val2 = (st->variant->temp_scale_nano % 1000000); *val2 = (st->variant->temp_scale_nano % 1000000);
return IIO_VAL_INT_PLUS_MICRO; return IIO_VAL_INT_PLUS_MICRO;
case IIO_PRESSURE:
/* 20 uBar = 0.002kPascal */
*val = 0;
*val2 = 2000;
return IIO_VAL_INT_PLUS_MICRO;
default: default:
return -EINVAL; return -EINVAL;
} }
...@@ -454,10 +459,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev, ...@@ -454,10 +459,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
} }
} }
#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si) { \ #define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si, chn) { \
.type = IIO_VOLTAGE, \ .type = IIO_VOLTAGE, \
.indexed = 1, \ .indexed = 1, \
.channel = 0, \ .channel = chn, \
.extend_name = name, \ .extend_name = name, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE), \ BIT(IIO_CHAN_INFO_SCALE), \
...@@ -474,10 +479,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev, ...@@ -474,10 +479,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
} }
#define ADIS16400_SUPPLY_CHAN(addr, bits) \ #define ADIS16400_SUPPLY_CHAN(addr, bits) \
ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY) ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY, 0)
#define ADIS16400_AUX_ADC_CHAN(addr, bits) \ #define ADIS16400_AUX_ADC_CHAN(addr, bits) \
ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC) ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC, 1)
#define ADIS16400_GYRO_CHAN(mod, addr, bits) { \ #define ADIS16400_GYRO_CHAN(mod, addr, bits) { \
.type = IIO_ANGL_VEL, \ .type = IIO_ANGL_VEL, \
...@@ -773,7 +778,8 @@ static struct adis16400_chip_info adis16400_chips[] = { ...@@ -773,7 +778,8 @@ static struct adis16400_chip_info adis16400_chips[] = {
.channels = adis16448_channels, .channels = adis16448_channels,
.num_channels = ARRAY_SIZE(adis16448_channels), .num_channels = ARRAY_SIZE(adis16448_channels),
.flags = ADIS16400_HAS_PROD_ID | .flags = ADIS16400_HAS_PROD_ID |
ADIS16400_HAS_SERIAL_NUMBER, ADIS16400_HAS_SERIAL_NUMBER |
ADIS16400_BURST_DIAG_STAT,
.gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */ .gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */
.accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */ .accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */
.temp_scale_nano = 73860000, /* 0.07386 C */ .temp_scale_nano = 73860000, /* 0.07386 C */
...@@ -791,11 +797,6 @@ static const struct iio_info adis16400_info = { ...@@ -791,11 +797,6 @@ static const struct iio_info adis16400_info = {
.debugfs_reg_access = adis_debugfs_reg_access, .debugfs_reg_access = adis_debugfs_reg_access,
}; };
static const unsigned long adis16400_burst_scan_mask[] = {
~0UL,
0,
};
static const char * const adis16400_status_error_msgs[] = { static const char * const adis16400_status_error_msgs[] = {
[ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure", [ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure",
[ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure", [ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure",
...@@ -843,6 +844,20 @@ static const struct adis_data adis16400_data = { ...@@ -843,6 +844,20 @@ static const struct adis_data adis16400_data = {
BIT(ADIS16400_DIAG_STAT_POWER_LOW), BIT(ADIS16400_DIAG_STAT_POWER_LOW),
}; };
static void adis16400_setup_chan_mask(struct adis16400_state *st)
{
const struct adis16400_chip_info *chip_info = st->variant;
unsigned i;
for (i = 0; i < chip_info->num_channels; i++) {
const struct iio_chan_spec *ch = &chip_info->channels[i];
if (ch->scan_index >= 0 &&
ch->scan_index != ADIS16400_SCAN_TIMESTAMP)
st->avail_scan_mask[0] |= BIT(ch->scan_index);
}
}
static int adis16400_probe(struct spi_device *spi) static int adis16400_probe(struct spi_device *spi)
{ {
struct adis16400_state *st; struct adis16400_state *st;
...@@ -866,8 +881,10 @@ static int adis16400_probe(struct spi_device *spi) ...@@ -866,8 +881,10 @@ static int adis16400_probe(struct spi_device *spi)
indio_dev->info = &adis16400_info; indio_dev->info = &adis16400_info;
indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->modes = INDIO_DIRECT_MODE;
if (!(st->variant->flags & ADIS16400_NO_BURST)) if (!(st->variant->flags & ADIS16400_NO_BURST)) {
indio_dev->available_scan_masks = adis16400_burst_scan_mask; adis16400_setup_chan_mask(st);
indio_dev->available_scan_masks = st->avail_scan_mask;
}
ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data); ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data);
if (ret) if (ret)
......
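Instead of the old catch-all ~0UL burst scan mask, the probe path now builds one available scan mask from the chip's own channels, skipping the timestamp. Illustratively, for a variant whose data channels use scan_index 0 through 11, the loop above leaves:

    st->avail_scan_mask[0] = GENMASK(11, 0);   /* bits 0-11, i.e. 0x0fff      */
    st->avail_scan_mask[1] = 0;                /* terminator (already zeroed) */
    indio_dev->available_scan_masks = st->avail_scan_mask;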
...@@ -1063,9 +1063,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse) ...@@ -1063,9 +1063,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
right = (packet[1] & 0x02) >> 1; right = (packet[1] & 0x02) >> 1;
middle = (packet[1] & 0x04) >> 2; middle = (packet[1] & 0x04) >> 2;
/* Divide 2 since trackpoint's speed is too fast */
input_report_rel(dev2, REL_X, (char)x / 2); input_report_rel(dev2, REL_X, (char)x);
input_report_rel(dev2, REL_Y, -((char)y / 2)); input_report_rel(dev2, REL_Y, -((char)y));
input_report_key(dev2, BTN_LEFT, left); input_report_key(dev2, BTN_LEFT, left);
input_report_key(dev2, BTN_RIGHT, right); input_report_key(dev2, BTN_RIGHT, right);
......
...@@ -1376,10 +1376,11 @@ static bool elantech_is_signature_valid(const unsigned char *param) ...@@ -1376,10 +1376,11 @@ static bool elantech_is_signature_valid(const unsigned char *param)
return true; return true;
/* /*
* Some models have a revision higher then 20. Meaning param[2] may * Some hw_version >= 4 models have a revision higher then 20. Meaning
* be 10 or 20, skip the rates check for these. * that param[2] may be 10 or 20, skip the rates check for these.
*/ */
if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40) if ((param[0] & 0x0f) >= 0x06 && (param[1] & 0xaf) == 0x0f && param[2] < 40)
return true; return true;
for (i = 0; i < ARRAY_SIZE(rates); i++) for (i = 0; i < ARRAY_SIZE(rates); i++)
...@@ -1555,6 +1556,7 @@ static int elantech_set_properties(struct elantech_data *etd) ...@@ -1555,6 +1556,7 @@ static int elantech_set_properties(struct elantech_data *etd)
case 9: case 9:
case 10: case 10:
case 13: case 13:
case 14:
etd->hw_version = 4; etd->hw_version = 4;
break; break;
default: default:
......
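The relaxed Elantech signature check now keys on the low nibble of the first byte rather than the exact 0x46 value. Working one hedged example through the new predicate: a response of { 0x46, 0x0f, 0x14 } has (0x46 & 0x0f) == 0x06, (0x0f & 0xaf) == 0x0f and 20 < 40, so the rates table is skipped exactly as before, while a hypothetical { 0x47, 0x0f, 0x14 } now passes too:

    /* illustrative evaluation of the new condition */
    u8 param[3] = { 0x46, 0x0f, 0x14 };
    bool skip_rates = ((param[0] & 0x0f) >= 0x06) &&
                      ((param[1] & 0xaf) == 0x0f) &&
                      (param[2] < 40);              /* true for this sample */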
...@@ -2930,6 +2930,7 @@ static void *alloc_coherent(struct device *dev, size_t size, ...@@ -2930,6 +2930,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
size = PAGE_ALIGN(size); size = PAGE_ALIGN(size);
dma_mask = dev->coherent_dma_mask; dma_mask = dev->coherent_dma_mask;
flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
flag |= __GFP_ZERO;
page = alloc_pages(flag | __GFP_NOWARN, get_order(size)); page = alloc_pages(flag | __GFP_NOWARN, get_order(size));
if (!page) { if (!page) {
......
...@@ -696,6 +696,11 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu ...@@ -696,6 +696,11 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
return &context[devfn]; return &context[devfn];
} }
static int iommu_dummy(struct device *dev)
{
return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}
static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{ {
struct dmar_drhd_unit *drhd = NULL; struct dmar_drhd_unit *drhd = NULL;
...@@ -705,6 +710,9 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf ...@@ -705,6 +710,9 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
u16 segment = 0; u16 segment = 0;
int i; int i;
if (iommu_dummy(dev))
return NULL;
if (dev_is_pci(dev)) { if (dev_is_pci(dev)) {
pdev = to_pci_dev(dev); pdev = to_pci_dev(dev);
segment = pci_domain_nr(pdev->bus); segment = pci_domain_nr(pdev->bus);
...@@ -2969,11 +2977,6 @@ static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev) ...@@ -2969,11 +2977,6 @@ static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
return __get_valid_domain_for_dev(dev); return __get_valid_domain_for_dev(dev);
} }
static int iommu_dummy(struct device *dev)
{
return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}
/* Check if the dev needs to go through non-identity map and unmap process.*/ /* Check if the dev needs to go through non-identity map and unmap process.*/
static int iommu_no_mapping(struct device *dev) static int iommu_no_mapping(struct device *dev)
{ {
......
...@@ -424,7 +424,7 @@ static void xgbe_tx_timer(unsigned long data) ...@@ -424,7 +424,7 @@ static void xgbe_tx_timer(unsigned long data)
if (napi_schedule_prep(napi)) { if (napi_schedule_prep(napi)) {
/* Disable Tx and Rx interrupts */ /* Disable Tx and Rx interrupts */
if (pdata->per_channel_irq) if (pdata->per_channel_irq)
disable_irq(channel->dma_irq); disable_irq_nosync(channel->dma_irq);
else else
xgbe_disable_rx_tx_ints(pdata); xgbe_disable_rx_tx_ints(pdata);
......
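xgbe_tx_timer() runs in timer (softirq) context, and disable_irq() synchronizes with any running handler and may sleep, so the per-channel path now uses the non-waiting variant; the interrupt is still masked, it just is not waited for:

    if (pdata->per_channel_irq)
            disable_irq_nosync(channel->dma_irq);   /* mask only, no synchronize_irq() */
    else
            xgbe_disable_rx_tx_ints(pdata);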
...@@ -2464,6 +2464,7 @@ static int b44_init_one(struct ssb_device *sdev, ...@@ -2464,6 +2464,7 @@ static int b44_init_one(struct ssb_device *sdev,
ssb_bus_may_powerdown(sdev->bus); ssb_bus_may_powerdown(sdev->bus);
err_out_free_dev: err_out_free_dev:
netif_napi_del(&bp->napi);
free_netdev(dev); free_netdev(dev);
out: out:
...@@ -2480,6 +2481,7 @@ static void b44_remove_one(struct ssb_device *sdev) ...@@ -2480,6 +2481,7 @@ static void b44_remove_one(struct ssb_device *sdev)
b44_unregister_phy_one(bp); b44_unregister_phy_one(bp);
ssb_device_disable(sdev, 0); ssb_device_disable(sdev, 0);
ssb_bus_may_powerdown(sdev->bus); ssb_bus_may_powerdown(sdev->bus);
netif_napi_del(&bp->napi);
free_netdev(dev); free_netdev(dev);
ssb_pcihost_set_power_state(sdev, PCI_D3hot); ssb_pcihost_set_power_state(sdev, PCI_D3hot);
ssb_set_drvdata(sdev, NULL); ssb_set_drvdata(sdev, NULL);
......
...@@ -299,9 +299,6 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) ...@@ -299,9 +299,6 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
phy_name = "external RGMII (no delay)"; phy_name = "external RGMII (no delay)";
else else
phy_name = "external RGMII (TX delay)"; phy_name = "external RGMII (TX delay)";
reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
reg |= RGMII_MODE_EN | id_mode_dis;
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
bcmgenet_sys_writel(priv, bcmgenet_sys_writel(priv,
PORT_MODE_EXT_GPHY, SYS_PORT_CTRL); PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
break; break;
...@@ -310,6 +307,15 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) ...@@ -310,6 +307,15 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
return -EINVAL; return -EINVAL;
} }
/* This is an external PHY (xMII), so we need to enable the RGMII
* block for the interface to work
*/
if (priv->ext_phy) {
reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
reg |= RGMII_MODE_EN | id_mode_dis;
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
}
if (init) if (init)
dev_info(kdev, "configuring instance for %s\n", phy_name); dev_info(kdev, "configuring instance for %s\n", phy_name);
......
...@@ -1742,9 +1742,9 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) ...@@ -1742,9 +1742,9 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
total_size = buf_len; total_size = buf_len;
get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
get_fat_cmd.va = pci_alloc_consistent(adapter->pdev, get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
get_fat_cmd.size, get_fat_cmd.size,
&get_fat_cmd.dma); &get_fat_cmd.dma, GFP_ATOMIC);
if (!get_fat_cmd.va) { if (!get_fat_cmd.va) {
dev_err(&adapter->pdev->dev, dev_err(&adapter->pdev->dev,
"Memory allocation failure while reading FAT data\n"); "Memory allocation failure while reading FAT data\n");
...@@ -1789,8 +1789,8 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) ...@@ -1789,8 +1789,8 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
log_offset += buf_size; log_offset += buf_size;
} }
err: err:
pci_free_consistent(adapter->pdev, get_fat_cmd.size, dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
get_fat_cmd.va, get_fat_cmd.dma); get_fat_cmd.va, get_fat_cmd.dma);
spin_unlock_bh(&adapter->mcc_lock); spin_unlock_bh(&adapter->mcc_lock);
return status; return status;
} }
...@@ -2237,12 +2237,12 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, ...@@ -2237,12 +2237,12 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
return -EINVAL; return -EINVAL;
cmd.size = sizeof(struct be_cmd_resp_port_type); cmd.size = sizeof(struct be_cmd_resp_port_type);
cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
GFP_ATOMIC);
if (!cmd.va) { if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
return -ENOMEM; return -ENOMEM;
} }
memset(cmd.va, 0, cmd.size);
spin_lock_bh(&adapter->mcc_lock); spin_lock_bh(&adapter->mcc_lock);
...@@ -2267,7 +2267,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, ...@@ -2267,7 +2267,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
} }
err: err:
spin_unlock_bh(&adapter->mcc_lock); spin_unlock_bh(&adapter->mcc_lock);
pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
return status; return status;
} }
...@@ -2742,7 +2742,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) ...@@ -2742,7 +2742,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
goto err; goto err;
} }
cmd.size = sizeof(struct be_cmd_req_get_phy_info); cmd.size = sizeof(struct be_cmd_req_get_phy_info);
cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
GFP_ATOMIC);
if (!cmd.va) { if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
status = -ENOMEM; status = -ENOMEM;
...@@ -2776,7 +2777,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) ...@@ -2776,7 +2777,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
BE_SUPPORTED_SPEED_1GBPS; BE_SUPPORTED_SPEED_1GBPS;
} }
} }
pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
err: err:
spin_unlock_bh(&adapter->mcc_lock); spin_unlock_bh(&adapter->mcc_lock);
return status; return status;
...@@ -2827,8 +2828,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) ...@@ -2827,8 +2828,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
&attribs_cmd.dma); attribs_cmd.size,
&attribs_cmd.dma, GFP_ATOMIC);
if (!attribs_cmd.va) { if (!attribs_cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
status = -ENOMEM; status = -ENOMEM;
...@@ -2855,8 +2857,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) ...@@ -2855,8 +2857,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
err: err:
mutex_unlock(&adapter->mbox_lock); mutex_unlock(&adapter->mbox_lock);
if (attribs_cmd.va) if (attribs_cmd.va)
pci_free_consistent(adapter->pdev, attribs_cmd.size, dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
attribs_cmd.va, attribs_cmd.dma); attribs_cmd.va, attribs_cmd.dma);
return status; return status;
} }
...@@ -2994,9 +2996,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, ...@@ -2994,9 +2996,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev, get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
get_mac_list_cmd.size, get_mac_list_cmd.size,
&get_mac_list_cmd.dma); &get_mac_list_cmd.dma,
GFP_ATOMIC);
if (!get_mac_list_cmd.va) { if (!get_mac_list_cmd.va) {
dev_err(&adapter->pdev->dev, dev_err(&adapter->pdev->dev,
...@@ -3069,8 +3072,8 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, ...@@ -3069,8 +3072,8 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
out: out:
spin_unlock_bh(&adapter->mcc_lock); spin_unlock_bh(&adapter->mcc_lock);
pci_free_consistent(adapter->pdev, get_mac_list_cmd.size, dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
get_mac_list_cmd.va, get_mac_list_cmd.dma); get_mac_list_cmd.va, get_mac_list_cmd.dma);
return status; return status;
} }
...@@ -3123,8 +3126,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, ...@@ -3123,8 +3126,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
memset(&cmd, 0, sizeof(struct be_dma_mem)); memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_req_set_mac_list); cmd.size = sizeof(struct be_cmd_req_set_mac_list);
cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
&cmd.dma, GFP_KERNEL); GFP_KERNEL);
if (!cmd.va) if (!cmd.va)
return -ENOMEM; return -ENOMEM;
...@@ -3325,7 +3328,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) ...@@ -3325,7 +3328,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
memset(&cmd, 0, sizeof(struct be_dma_mem)); memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
GFP_ATOMIC);
if (!cmd.va) { if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
status = -ENOMEM; status = -ENOMEM;
...@@ -3360,7 +3364,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) ...@@ -3360,7 +3364,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
err: err:
mutex_unlock(&adapter->mbox_lock); mutex_unlock(&adapter->mbox_lock);
if (cmd.va) if (cmd.va)
pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
cmd.dma);
return status; return status;
} }
...@@ -3374,8 +3379,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) ...@@ -3374,8 +3379,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
&extfat_cmd.dma); extfat_cmd.size, &extfat_cmd.dma,
GFP_ATOMIC);
if (!extfat_cmd.va) if (!extfat_cmd.va)
return -ENOMEM; return -ENOMEM;
...@@ -3397,8 +3403,8 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) ...@@ -3397,8 +3403,8 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs); status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
err: err:
pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
extfat_cmd.dma); extfat_cmd.dma);
return status; return status;
} }
...@@ -3411,8 +3417,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter) ...@@ -3411,8 +3417,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
&extfat_cmd.dma); extfat_cmd.size, &extfat_cmd.dma,
GFP_ATOMIC);
if (!extfat_cmd.va) { if (!extfat_cmd.va) {
dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
...@@ -3430,8 +3437,8 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter) ...@@ -3430,8 +3437,8 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
level = cfgs->module[0].trace_lvl[j].dbg_lvl; level = cfgs->module[0].trace_lvl[j].dbg_lvl;
} }
} }
pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
extfat_cmd.dma); extfat_cmd.dma);
err: err:
return level; return level;
} }
...@@ -3629,7 +3636,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) ...@@ -3629,7 +3636,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
memset(&cmd, 0, sizeof(struct be_dma_mem)); memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_get_func_config); cmd.size = sizeof(struct be_cmd_resp_get_func_config);
cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
GFP_ATOMIC);
if (!cmd.va) { if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
status = -ENOMEM; status = -ENOMEM;
...@@ -3669,7 +3677,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) ...@@ -3669,7 +3677,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
err: err:
mutex_unlock(&adapter->mbox_lock); mutex_unlock(&adapter->mbox_lock);
if (cmd.va) if (cmd.va)
pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
cmd.dma);
return status; return status;
} }
...@@ -3690,7 +3699,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, ...@@ -3690,7 +3699,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
memset(&cmd, 0, sizeof(struct be_dma_mem)); memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_get_profile_config); cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
GFP_ATOMIC);
if (!cmd.va) if (!cmd.va)
return -ENOMEM; return -ENOMEM;
...@@ -3736,7 +3746,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, ...@@ -3736,7 +3746,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
res->vf_if_cap_flags = vf_res->cap_flags; res->vf_if_cap_flags = vf_res->cap_flags;
err: err:
if (cmd.va) if (cmd.va)
pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
cmd.dma);
return status; return status;
} }
...@@ -3751,7 +3762,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, ...@@ -3751,7 +3762,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
memset(&cmd, 0, sizeof(struct be_dma_mem)); memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_req_set_profile_config); cmd.size = sizeof(struct be_cmd_req_set_profile_config);
cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
GFP_ATOMIC);
if (!cmd.va) if (!cmd.va)
return -ENOMEM; return -ENOMEM;
...@@ -3767,7 +3779,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, ...@@ -3767,7 +3779,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
status = be_cmd_notify_wait(adapter, &wrb); status = be_cmd_notify_wait(adapter, &wrb);
if (cmd.va) if (cmd.va)
pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
cmd.dma);
return status; return status;
} }
......
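All of the be_cmds.c hunks follow one pattern: the legacy pci_alloc_consistent()/pci_free_consistent() pair is replaced with the generic DMA API, passing GFP_ATOMIC explicitly where the caller holds mcc_lock, and using dma_zalloc_coherent() so the separate memset() of the buffer can be dropped. Reduced to a sketch around a hypothetical cmd buffer:

    /* before: implicit GFP_ATOMIC, buffer contents not guaranteed zeroed */
    cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);

    /* after: explicit device, explicit gfp flags, zeroed buffer */
    cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
                                 GFP_ATOMIC);
    if (!cmd.va)
            return -ENOMEM;

    /* matching release, converted from pci_free_consistent() */
    dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);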
...@@ -263,8 +263,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, ...@@ -263,8 +263,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
int status = 0; int status = 0;
read_cmd.size = LANCER_READ_FILE_CHUNK; read_cmd.size = LANCER_READ_FILE_CHUNK;
read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size, read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size,
&read_cmd.dma); &read_cmd.dma, GFP_ATOMIC);
if (!read_cmd.va) { if (!read_cmd.va) {
dev_err(&adapter->pdev->dev, dev_err(&adapter->pdev->dev,
...@@ -288,8 +288,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, ...@@ -288,8 +288,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
break; break;
} }
} }
pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va, dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va,
read_cmd.dma); read_cmd.dma);
return status; return status;
} }
...@@ -825,8 +825,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter) ...@@ -825,8 +825,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter)
}; };
ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
&ddrdma_cmd.dma, GFP_KERNEL); ddrdma_cmd.size, &ddrdma_cmd.dma,
GFP_KERNEL);
if (!ddrdma_cmd.va) if (!ddrdma_cmd.va)
return -ENOMEM; return -ENOMEM;
...@@ -948,8 +949,9 @@ static int be_read_eeprom(struct net_device *netdev, ...@@ -948,8 +949,9 @@ static int be_read_eeprom(struct net_device *netdev,
memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
&eeprom_cmd.dma, GFP_KERNEL); eeprom_cmd.size, &eeprom_cmd.dma,
GFP_KERNEL);
if (!eeprom_cmd.va) if (!eeprom_cmd.va)
return -ENOMEM; return -ENOMEM;
......
...@@ -4855,8 +4855,8 @@ static int lancer_fw_download(struct be_adapter *adapter, ...@@ -4855,8 +4855,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
+ LANCER_FW_DOWNLOAD_CHUNK; + LANCER_FW_DOWNLOAD_CHUNK;
flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
&flash_cmd.dma, GFP_KERNEL); &flash_cmd.dma, GFP_KERNEL);
if (!flash_cmd.va) if (!flash_cmd.va)
return -ENOMEM; return -ENOMEM;
...@@ -4965,8 +4965,8 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw) ...@@ -4965,8 +4965,8 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
} }
flash_cmd.size = sizeof(struct be_cmd_write_flashrom); flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
GFP_KERNEL); GFP_KERNEL);
if (!flash_cmd.va) if (!flash_cmd.va)
return -ENOMEM; return -ENOMEM;
...@@ -5521,16 +5521,15 @@ static int be_drv_init(struct be_adapter *adapter) ...@@ -5521,16 +5521,15 @@ static int be_drv_init(struct be_adapter *adapter)
int status = 0; int status = 0;
mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
&mbox_mem_alloc->dma, &mbox_mem_alloc->dma,
GFP_KERNEL); GFP_KERNEL);
if (!mbox_mem_alloc->va) if (!mbox_mem_alloc->va)
return -ENOMEM; return -ENOMEM;
mbox_mem_align->size = sizeof(struct be_mcc_mailbox); mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
rx_filter->size = sizeof(struct be_cmd_req_rx_filter); rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size, rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
......
...@@ -318,6 +318,7 @@ struct i40e_pf { ...@@ -318,6 +318,7 @@ struct i40e_pf {
#endif #endif
#define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28) #define I40E_FLAG_PORT_ID_VALID (u64)(1 << 28)
#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29) #define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29)
#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40)
/* tracks features that get auto disabled by errors */ /* tracks features that get auto disabled by errors */
u64 auto_disable_flags; u64 auto_disable_flags;
......
...@@ -1021,6 +1021,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp, ...@@ -1021,6 +1021,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done; goto command_write_done;
} }
/* By default we are in VEPA mode, if this is the first VF/VMDq
* VSI to be added switch to VEB mode.
*/
if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
i40e_do_reset_safe(pf,
BIT_ULL(__I40E_PF_RESET_REQUESTED));
}
vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0); vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
if (vsi) if (vsi)
dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n", dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
......
...@@ -6107,6 +6107,10 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) ...@@ -6107,6 +6107,10 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
if (ret) if (ret)
goto end_reconstitute; goto end_reconstitute;
if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
veb->bridge_mode = BRIDGE_MODE_VEB;
else
veb->bridge_mode = BRIDGE_MODE_VEPA;
i40e_config_bridge_mode(veb); i40e_config_bridge_mode(veb);
/* create the remaining VSIs attached to this VEB */ /* create the remaining VSIs attached to this VEB */
...@@ -8038,7 +8042,12 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, ...@@ -8038,7 +8042,12 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
} else if (mode != veb->bridge_mode) { } else if (mode != veb->bridge_mode) {
/* Existing HW bridge but different mode needs reset */ /* Existing HW bridge but different mode needs reset */
veb->bridge_mode = mode; veb->bridge_mode = mode;
i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
if (mode == BRIDGE_MODE_VEB)
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
else
pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
break; break;
} }
} }
...@@ -8350,11 +8359,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ...@@ -8350,11 +8359,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.uplink_seid = vsi->uplink_seid; ctxt.uplink_seid = vsi->uplink_seid;
ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
ctxt.flags = I40E_AQ_VSI_TYPE_PF; ctxt.flags = I40E_AQ_VSI_TYPE_PF;
if (i40e_is_vsi_uplink_mode_veb(vsi)) { if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && (i40e_is_vsi_uplink_mode_veb(vsi))) {
ctxt.info.valid_sections |= ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
ctxt.info.switch_id = ctxt.info.switch_id =
cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
} }
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true); i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
break; break;
...@@ -8753,6 +8763,14 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, ...@@ -8753,6 +8763,14 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
__func__); __func__);
return NULL; return NULL;
} }
/* We come up by default in VEPA mode if SRIOV is not
* already enabled, in which case we can't force VEPA
* mode.
*/
if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
veb->bridge_mode = BRIDGE_MODE_VEPA;
pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
}
i40e_config_bridge_mode(veb); i40e_config_bridge_mode(veb);
} }
for (i = 0; i < I40E_MAX_VEB && !veb; i++) { for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
...@@ -9863,6 +9881,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -9863,6 +9881,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_switch_setup; goto err_switch_setup;
} }
#ifdef CONFIG_PCI_IOV
/* prep for VF support */
if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
!test_bit(__I40E_BAD_EEPROM, &pf->state)) {
if (pci_num_vf(pdev))
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
}
#endif
err = i40e_setup_pf_switch(pf, false); err = i40e_setup_pf_switch(pf, false);
if (err) { if (err) {
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
......
...@@ -2419,14 +2419,12 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) ...@@ -2419,14 +2419,12 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
* i40e_chk_linearize - Check if there are more than 8 fragments per packet * i40e_chk_linearize - Check if there are more than 8 fragments per packet
* @skb: send buffer * @skb: send buffer
* @tx_flags: collected send information * @tx_flags: collected send information
* @hdr_len: size of the packet header
* *
* Note: Our HW can't scatter-gather more than 8 fragments to build * Note: Our HW can't scatter-gather more than 8 fragments to build
* a packet on the wire and so we need to figure out the cases where we * a packet on the wire and so we need to figure out the cases where we
* need to linearize the skb. * need to linearize the skb.
**/ **/
static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, const u8 hdr_len) static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
{ {
struct skb_frag_struct *frag; struct skb_frag_struct *frag;
bool linearize = false; bool linearize = false;
...@@ -2438,7 +2436,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, ...@@ -2438,7 +2436,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
gso_segs = skb_shinfo(skb)->gso_segs; gso_segs = skb_shinfo(skb)->gso_segs;
if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
u16 j = 1; u16 j = 0;
if (num_frags < (I40E_MAX_BUFFER_TXD)) if (num_frags < (I40E_MAX_BUFFER_TXD))
goto linearize_chk_done; goto linearize_chk_done;
...@@ -2449,21 +2447,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, ...@@ -2449,21 +2447,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
goto linearize_chk_done; goto linearize_chk_done;
} }
frag = &skb_shinfo(skb)->frags[0]; frag = &skb_shinfo(skb)->frags[0];
size = hdr_len;
/* we might still have more fragments per segment */ /* we might still have more fragments per segment */
do { do {
size += skb_frag_size(frag); size += skb_frag_size(frag);
frag++; j++; frag++; j++;
if ((size >= skb_shinfo(skb)->gso_size) &&
(j < I40E_MAX_BUFFER_TXD)) {
size = (size % skb_shinfo(skb)->gso_size);
j = (size) ? 1 : 0;
}
if (j == I40E_MAX_BUFFER_TXD) { if (j == I40E_MAX_BUFFER_TXD) {
if (size < skb_shinfo(skb)->gso_size) { linearize = true;
linearize = true; break;
break;
}
j = 1;
size -= skb_shinfo(skb)->gso_size;
if (size)
j++;
size += hdr_len;
} }
num_frags--; num_frags--;
} while (num_frags); } while (num_frags);
...@@ -2730,7 +2725,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, ...@@ -2730,7 +2725,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (tsyn) if (tsyn)
tx_flags |= I40E_TX_FLAGS_TSYN; tx_flags |= I40E_TX_FLAGS_TSYN;
if (i40e_chk_linearize(skb, tx_flags, hdr_len)) if (i40e_chk_linearize(skb, tx_flags))
if (skb_linearize(skb)) if (skb_linearize(skb))
goto out_drop; goto out_drop;
......
...@@ -1025,11 +1025,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) ...@@ -1025,11 +1025,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{ {
struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_pf *pf = pci_get_drvdata(pdev);
if (num_vfs) if (num_vfs) {
if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
i40e_do_reset_safe(pf,
BIT_ULL(__I40E_PF_RESET_REQUESTED));
}
return i40e_pci_sriov_enable(pdev, num_vfs); return i40e_pci_sriov_enable(pdev, num_vfs);
}
if (!pci_vfs_assigned(pf->pdev)) { if (!pci_vfs_assigned(pf->pdev)) {
i40e_free_vfs(pf); i40e_free_vfs(pf);
pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
} else { } else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
return -EINVAL; return -EINVAL;
......
...@@ -1608,14 +1608,12 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, ...@@ -1608,14 +1608,12 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
* i40e_chk_linearize - Check if there are more than 8 fragments per packet * i40e_chk_linearize - Check if there are more than 8 fragments per packet
* @skb: send buffer * @skb: send buffer
* @tx_flags: collected send information * @tx_flags: collected send information
* @hdr_len: size of the packet header
* *
* Note: Our HW can't scatter-gather more than 8 fragments to build * Note: Our HW can't scatter-gather more than 8 fragments to build
* a packet on the wire and so we need to figure out the cases where we * a packet on the wire and so we need to figure out the cases where we
* need to linearize the skb. * need to linearize the skb.
**/ **/
static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
const u8 hdr_len)
{ {
struct skb_frag_struct *frag; struct skb_frag_struct *frag;
bool linearize = false; bool linearize = false;
...@@ -1627,7 +1625,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, ...@@ -1627,7 +1625,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
gso_segs = skb_shinfo(skb)->gso_segs; gso_segs = skb_shinfo(skb)->gso_segs;
if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
u16 j = 1; u16 j = 0;
if (num_frags < (I40E_MAX_BUFFER_TXD)) if (num_frags < (I40E_MAX_BUFFER_TXD))
goto linearize_chk_done; goto linearize_chk_done;
...@@ -1638,21 +1636,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, ...@@ -1638,21 +1636,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
goto linearize_chk_done; goto linearize_chk_done;
} }
frag = &skb_shinfo(skb)->frags[0]; frag = &skb_shinfo(skb)->frags[0];
size = hdr_len;
/* we might still have more fragments per segment */ /* we might still have more fragments per segment */
do { do {
size += skb_frag_size(frag); size += skb_frag_size(frag);
frag++; j++; frag++; j++;
if ((size >= skb_shinfo(skb)->gso_size) &&
(j < I40E_MAX_BUFFER_TXD)) {
size = (size % skb_shinfo(skb)->gso_size);
j = (size) ? 1 : 0;
}
if (j == I40E_MAX_BUFFER_TXD) { if (j == I40E_MAX_BUFFER_TXD) {
if (size < skb_shinfo(skb)->gso_size) { linearize = true;
linearize = true; break;
break;
}
j = 1;
size -= skb_shinfo(skb)->gso_size;
if (size)
j++;
size += hdr_len;
} }
num_frags--; num_frags--;
} while (num_frags); } while (num_frags);
...@@ -1940,7 +1935,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, ...@@ -1940,7 +1935,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (tso) else if (tso)
tx_flags |= I40E_TX_FLAGS_TSO; tx_flags |= I40E_TX_FLAGS_TSO;
if (i40e_chk_linearize(skb, tx_flags, hdr_len)) if (i40e_chk_linearize(skb, tx_flags))
if (skb_linearize(skb)) if (skb_linearize(skb))
goto out_drop; goto out_drop;
......
...@@ -189,7 +189,7 @@ int __of_attach_node_sysfs(struct device_node *np) ...@@ -189,7 +189,7 @@ int __of_attach_node_sysfs(struct device_node *np)
return 0; return 0;
} }
static int __init of_init(void) void __init of_core_init(void)
{ {
struct device_node *np; struct device_node *np;
...@@ -198,7 +198,8 @@ static int __init of_init(void) ...@@ -198,7 +198,8 @@ static int __init of_init(void)
of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj); of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
if (!of_kset) { if (!of_kset) {
mutex_unlock(&of_mutex); mutex_unlock(&of_mutex);
return -ENOMEM; pr_err("devicetree: failed to register existing nodes\n");
return;
} }
for_each_of_allnodes(np) for_each_of_allnodes(np)
__of_attach_node_sysfs(np); __of_attach_node_sysfs(np);
...@@ -207,10 +208,7 @@ static int __init of_init(void) ...@@ -207,10 +208,7 @@ static int __init of_init(void)
/* Symlink in /proc as required by userspace ABI */ /* Symlink in /proc as required by userspace ABI */
if (of_root) if (of_root)
proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base"); proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
return 0;
} }
core_initcall(of_init);
static struct property *__of_find_property(const struct device_node *np, static struct property *__of_find_property(const struct device_node *np,
const char *name, int *lenp) const char *name, int *lenp)
......
...@@ -225,7 +225,7 @@ void __of_attach_node(struct device_node *np) ...@@ -225,7 +225,7 @@ void __of_attach_node(struct device_node *np)
phandle = __of_get_property(np, "phandle", &sz); phandle = __of_get_property(np, "phandle", &sz);
if (!phandle) if (!phandle)
phandle = __of_get_property(np, "linux,phandle", &sz); phandle = __of_get_property(np, "linux,phandle", &sz);
if (IS_ENABLED(PPC_PSERIES) && !phandle) if (IS_ENABLED(CONFIG_PPC_PSERIES) && !phandle)
phandle = __of_get_property(np, "ibm,phandle", &sz); phandle = __of_get_property(np, "ibm,phandle", &sz);
np->phandle = (phandle && (sz >= 4)) ? be32_to_cpup(phandle) : 0; np->phandle = (phandle && (sz >= 4)) ? be32_to_cpup(phandle) : 0;
......
...@@ -428,16 +428,19 @@ static void __assign_resources_sorted(struct list_head *head, ...@@ -428,16 +428,19 @@ static void __assign_resources_sorted(struct list_head *head,
* consistent. * consistent.
*/ */
if (add_align > dev_res->res->start) { if (add_align > dev_res->res->start) {
resource_size_t r_size = resource_size(dev_res->res);
dev_res->res->start = add_align; dev_res->res->start = add_align;
dev_res->res->end = add_align + dev_res->res->end = add_align + r_size - 1;
resource_size(dev_res->res);
list_for_each_entry(dev_res2, head, list) { list_for_each_entry(dev_res2, head, list) {
align = pci_resource_alignment(dev_res2->dev, align = pci_resource_alignment(dev_res2->dev,
dev_res2->res); dev_res2->res);
if (add_align > align) if (add_align > align) {
list_move_tail(&dev_res->list, list_move_tail(&dev_res->list,
&dev_res2->list); &dev_res2->list);
break;
}
} }
} }
......
...@@ -38,7 +38,9 @@ config ARMADA375_USBCLUSTER_PHY ...@@ -38,7 +38,9 @@ config ARMADA375_USBCLUSTER_PHY
config PHY_DM816X_USB config PHY_DM816X_USB
tristate "TI dm816x USB PHY driver" tristate "TI dm816x USB PHY driver"
depends on ARCH_OMAP2PLUS depends on ARCH_OMAP2PLUS
depends on USB_SUPPORT
select GENERIC_PHY select GENERIC_PHY
select USB_PHY
help help
Enable this for dm816x USB to work. Enable this for dm816x USB to work.
...@@ -97,8 +99,9 @@ config OMAP_CONTROL_PHY ...@@ -97,8 +99,9 @@ config OMAP_CONTROL_PHY
config OMAP_USB2 config OMAP_USB2
tristate "OMAP USB2 PHY Driver" tristate "OMAP USB2 PHY Driver"
depends on ARCH_OMAP2PLUS depends on ARCH_OMAP2PLUS
depends on USB_PHY depends on USB_SUPPORT
select GENERIC_PHY select GENERIC_PHY
select USB_PHY
select OMAP_CONTROL_PHY select OMAP_CONTROL_PHY
depends on OMAP_OCP2SCP depends on OMAP_OCP2SCP
help help
...@@ -122,8 +125,9 @@ config TI_PIPE3 ...@@ -122,8 +125,9 @@ config TI_PIPE3
config TWL4030_USB config TWL4030_USB
tristate "TWL4030 USB Transceiver Driver" tristate "TWL4030 USB Transceiver Driver"
depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
depends on USB_PHY depends on USB_SUPPORT
select GENERIC_PHY select GENERIC_PHY
select USB_PHY
help help
Enable this to support the USB OTG transceiver on TWL4030 Enable this to support the USB OTG transceiver on TWL4030
family chips (including the TWL5030 and TPS659x0 devices). family chips (including the TWL5030 and TPS659x0 devices).
...@@ -304,7 +308,7 @@ config PHY_STIH41X_USB ...@@ -304,7 +308,7 @@ config PHY_STIH41X_USB
config PHY_QCOM_UFS config PHY_QCOM_UFS
tristate "Qualcomm UFS PHY driver" tristate "Qualcomm UFS PHY driver"
depends on OF && ARCH_MSM depends on OF && ARCH_QCOM
select GENERIC_PHY select GENERIC_PHY
help help
Support for UFS PHY on QCOM chipsets. Support for UFS PHY on QCOM chipsets.
......
...@@ -530,7 +530,7 @@ struct phy *phy_optional_get(struct device *dev, const char *string) ...@@ -530,7 +530,7 @@ struct phy *phy_optional_get(struct device *dev, const char *string)
{ {
struct phy *phy = phy_get(dev, string); struct phy *phy = phy_get(dev, string);
if (PTR_ERR(phy) == -ENODEV) if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV))
phy = NULL; phy = NULL;
return phy; return phy;
...@@ -584,7 +584,7 @@ struct phy *devm_phy_optional_get(struct device *dev, const char *string) ...@@ -584,7 +584,7 @@ struct phy *devm_phy_optional_get(struct device *dev, const char *string)
{ {
struct phy *phy = devm_phy_get(dev, string); struct phy *phy = devm_phy_get(dev, string);
if (PTR_ERR(phy) == -ENODEV) if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV))
phy = NULL; phy = NULL;
return phy; return phy;
......
...@@ -275,6 +275,7 @@ static int omap_usb2_probe(struct platform_device *pdev) ...@@ -275,6 +275,7 @@ static int omap_usb2_probe(struct platform_device *pdev)
phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k"); phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k");
if (IS_ERR(phy->wkupclk)) { if (IS_ERR(phy->wkupclk)) {
dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n"); dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n");
pm_runtime_disable(phy->dev);
return PTR_ERR(phy->wkupclk); return PTR_ERR(phy->wkupclk);
} else { } else {
dev_warn(&pdev->dev, dev_warn(&pdev->dev,
......
...@@ -23,7 +23,7 @@ ...@@ -23,7 +23,7 @@
#define USBHS_LPSTS 0x02 #define USBHS_LPSTS 0x02
#define USBHS_UGCTRL 0x80 #define USBHS_UGCTRL 0x80
#define USBHS_UGCTRL2 0x84 #define USBHS_UGCTRL2 0x84
#define USBHS_UGSTS 0x88 /* The manuals have 0x90 */ #define USBHS_UGSTS 0x88 /* From technical update */
/* Low Power Status register (LPSTS) */ /* Low Power Status register (LPSTS) */
#define USBHS_LPSTS_SUSPM 0x4000 #define USBHS_LPSTS_SUSPM 0x4000
...@@ -41,7 +41,7 @@ ...@@ -41,7 +41,7 @@
#define USBHS_UGCTRL2_USB0SEL_HS_USB 0x00000030 #define USBHS_UGCTRL2_USB0SEL_HS_USB 0x00000030
/* USB General status register (UGSTS) */ /* USB General status register (UGSTS) */
#define USBHS_UGSTS_LOCK 0x00000300 /* The manuals have 0x3 */ #define USBHS_UGSTS_LOCK 0x00000100 /* From technical update */
#define PHYS_PER_CHANNEL 2 #define PHYS_PER_CHANNEL 2
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
config MTK_PMIC_WRAP config MTK_PMIC_WRAP
tristate "MediaTek PMIC Wrapper Support" tristate "MediaTek PMIC Wrapper Support"
depends on ARCH_MEDIATEK depends on ARCH_MEDIATEK
depends on RESET_CONTROLLER
select REGMAP select REGMAP
help help
Say yes here to add support for MediaTek PMIC Wrapper found Say yes here to add support for MediaTek PMIC Wrapper found
......
...@@ -443,11 +443,6 @@ static int pwrap_wait_for_state(struct pmic_wrapper *wrp, ...@@ -443,11 +443,6 @@ static int pwrap_wait_for_state(struct pmic_wrapper *wrp,
static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata) static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
{ {
int ret; int ret;
u32 val;
val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
if (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR)
pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
if (ret) if (ret)
...@@ -462,11 +457,6 @@ static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata) ...@@ -462,11 +457,6 @@ static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
{ {
int ret; int ret;
u32 val;
val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
if (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR)
pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
if (ret) if (ret)
...@@ -480,6 +470,8 @@ static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) ...@@ -480,6 +470,8 @@ static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
*rdata = PWRAP_GET_WACS_RDATA(pwrap_readl(wrp, PWRAP_WACS2_RDATA)); *rdata = PWRAP_GET_WACS_RDATA(pwrap_readl(wrp, PWRAP_WACS2_RDATA));
pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
return 0; return 0;
} }
...@@ -563,45 +555,17 @@ static int pwrap_init_sidly(struct pmic_wrapper *wrp) ...@@ -563,45 +555,17 @@ static int pwrap_init_sidly(struct pmic_wrapper *wrp)
static int pwrap_init_reg_clock(struct pmic_wrapper *wrp) static int pwrap_init_reg_clock(struct pmic_wrapper *wrp)
{ {
unsigned long rate_spi; if (pwrap_is_mt8135(wrp)) {
int ck_mhz; pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
rate_spi = clk_get_rate(wrp->clk_spi);
if (rate_spi > 26000000)
ck_mhz = 26;
else if (rate_spi > 18000000)
ck_mhz = 18;
else
ck_mhz = 0;
switch (ck_mhz) {
case 18:
if (pwrap_is_mt8135(wrp))
pwrap_writel(wrp, 0xc, PWRAP_CSHEXT);
pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_WRITE);
pwrap_writel(wrp, 0xc, PWRAP_CSHEXT_READ);
pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START);
pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END);
break;
case 26:
if (pwrap_is_mt8135(wrp))
pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE); pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ); pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START); pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START);
pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END); pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END);
break; } else {
case 0: pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
if (pwrap_is_mt8135(wrp)) pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
pwrap_writel(wrp, 0xf, PWRAP_CSHEXT); pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START);
pwrap_writel(wrp, 0xf, PWRAP_CSHEXT_WRITE); pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END);
pwrap_writel(wrp, 0xf, PWRAP_CSHEXT_READ);
pwrap_writel(wrp, 0xf, PWRAP_CSLEXT_START);
pwrap_writel(wrp, 0xf, PWRAP_CSLEXT_END);
break;
default:
return -EINVAL;
} }
return 0; return 0;
......
...@@ -746,8 +746,8 @@ void oz_hcd_pd_reset(void *hpd, void *hport) ...@@ -746,8 +746,8 @@ void oz_hcd_pd_reset(void *hpd, void *hport)
/* /*
* Context: softirq * Context: softirq
*/ */
void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc, void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status, const u8 *desc,
int length, int offset, int total_size) u8 length, u16 offset, u16 total_size)
{ {
struct oz_port *port = hport; struct oz_port *port = hport;
struct urb *urb; struct urb *urb;
...@@ -759,8 +759,8 @@ void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc, ...@@ -759,8 +759,8 @@ void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
if (!urb) if (!urb)
return; return;
if (status == 0) { if (status == 0) {
int copy_len; unsigned int copy_len;
int required_size = urb->transfer_buffer_length; unsigned int required_size = urb->transfer_buffer_length;
if (required_size > total_size) if (required_size > total_size)
required_size = total_size; required_size = total_size;
......
...@@ -29,8 +29,8 @@ void oz_usb_request_heartbeat(void *hpd); ...@@ -29,8 +29,8 @@ void oz_usb_request_heartbeat(void *hpd);
/* Confirmation functions. /* Confirmation functions.
*/ */
void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status,
const u8 *desc, int length, int offset, int total_size); const u8 *desc, u8 length, u16 offset, u16 total_size);
void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode,
const u8 *data, int data_len); const u8 *data, int data_len);
......
...@@ -326,7 +326,11 @@ static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx, ...@@ -326,7 +326,11 @@ static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
struct oz_multiple_fixed *body = struct oz_multiple_fixed *body =
(struct oz_multiple_fixed *)data_hdr; (struct oz_multiple_fixed *)data_hdr;
u8 *data = body->data; u8 *data = body->data;
int n = (len - sizeof(struct oz_multiple_fixed)+1) unsigned int n;
if (!body->unit_size ||
len < sizeof(struct oz_multiple_fixed) - 1)
break;
n = (len - (sizeof(struct oz_multiple_fixed) - 1))
/ body->unit_size; / body->unit_size;
while (n--) { while (n--) {
oz_hcd_data_ind(usb_ctx->hport, body->endpoint, oz_hcd_data_ind(usb_ctx->hport, body->endpoint,
...@@ -390,10 +394,15 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt) ...@@ -390,10 +394,15 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
case OZ_GET_DESC_RSP: { case OZ_GET_DESC_RSP: {
struct oz_get_desc_rsp *body = struct oz_get_desc_rsp *body =
(struct oz_get_desc_rsp *)usb_hdr; (struct oz_get_desc_rsp *)usb_hdr;
int data_len = elt->length - u16 offs, total_size;
sizeof(struct oz_get_desc_rsp) + 1; u8 data_len;
u16 offs = le16_to_cpu(get_unaligned(&body->offset));
u16 total_size = if (elt->length < sizeof(struct oz_get_desc_rsp) - 1)
break;
data_len = elt->length -
(sizeof(struct oz_get_desc_rsp) - 1);
offs = le16_to_cpu(get_unaligned(&body->offset));
total_size =
le16_to_cpu(get_unaligned(&body->total_size)); le16_to_cpu(get_unaligned(&body->total_size));
oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n"); oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id, oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
......
...@@ -898,11 +898,11 @@ static void SwLedControlMode1(struct _adapter *padapter, ...@@ -898,11 +898,11 @@ static void SwLedControlMode1(struct _adapter *padapter,
IS_LED_WPS_BLINKING(pLed)) IS_LED_WPS_BLINKING(pLed))
return; return;
if (pLed->bLedLinkBlinkInProgress == true) { if (pLed->bLedLinkBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false; pLed->bLedLinkBlinkInProgress = false;
} }
if (pLed->bLedBlinkInProgress == true) { if (pLed->bLedBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
pLed->bLedNoLinkBlinkInProgress = true; pLed->bLedNoLinkBlinkInProgress = true;
...@@ -921,11 +921,11 @@ static void SwLedControlMode1(struct _adapter *padapter, ...@@ -921,11 +921,11 @@ static void SwLedControlMode1(struct _adapter *padapter,
IS_LED_WPS_BLINKING(pLed)) IS_LED_WPS_BLINKING(pLed))
return; return;
if (pLed->bLedNoLinkBlinkInProgress == true) { if (pLed->bLedNoLinkBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false; pLed->bLedNoLinkBlinkInProgress = false;
} }
if (pLed->bLedBlinkInProgress == true) { if (pLed->bLedBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
pLed->bLedLinkBlinkInProgress = true; pLed->bLedLinkBlinkInProgress = true;
...@@ -946,15 +946,15 @@ static void SwLedControlMode1(struct _adapter *padapter, ...@@ -946,15 +946,15 @@ static void SwLedControlMode1(struct _adapter *padapter,
if (IS_LED_WPS_BLINKING(pLed)) if (IS_LED_WPS_BLINKING(pLed))
return; return;
if (pLed->bLedNoLinkBlinkInProgress == true) { if (pLed->bLedNoLinkBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false; pLed->bLedNoLinkBlinkInProgress = false;
} }
if (pLed->bLedLinkBlinkInProgress == true) { if (pLed->bLedLinkBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false; pLed->bLedLinkBlinkInProgress = false;
} }
if (pLed->bLedBlinkInProgress == true) { if (pLed->bLedBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
pLed->bLedScanBlinkInProgress = true; pLed->bLedScanBlinkInProgress = true;
...@@ -975,11 +975,11 @@ static void SwLedControlMode1(struct _adapter *padapter, ...@@ -975,11 +975,11 @@ static void SwLedControlMode1(struct _adapter *padapter,
IS_LED_WPS_BLINKING(pLed)) IS_LED_WPS_BLINKING(pLed))
return; return;
if (pLed->bLedNoLinkBlinkInProgress == true) { if (pLed->bLedNoLinkBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false; pLed->bLedNoLinkBlinkInProgress = false;
} }
if (pLed->bLedLinkBlinkInProgress == true) { if (pLed->bLedLinkBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false; pLed->bLedLinkBlinkInProgress = false;
} }
pLed->bLedBlinkInProgress = true; pLed->bLedBlinkInProgress = true;
...@@ -998,19 +998,19 @@ static void SwLedControlMode1(struct _adapter *padapter, ...@@ -998,19 +998,19 @@ static void SwLedControlMode1(struct _adapter *padapter,
case LED_CTL_START_WPS_BOTTON: case LED_CTL_START_WPS_BOTTON:
if (pLed->bLedWPSBlinkInProgress == false) { if (pLed->bLedWPSBlinkInProgress == false) {
if (pLed->bLedNoLinkBlinkInProgress == true) { if (pLed->bLedNoLinkBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false; pLed->bLedNoLinkBlinkInProgress = false;
} }
if (pLed->bLedLinkBlinkInProgress == true) { if (pLed->bLedLinkBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false; pLed->bLedLinkBlinkInProgress = false;
} }
if (pLed->bLedBlinkInProgress == true) { if (pLed->bLedBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
if (pLed->bLedScanBlinkInProgress == true) { if (pLed->bLedScanBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false; pLed->bLedScanBlinkInProgress = false;
} }
pLed->bLedWPSBlinkInProgress = true; pLed->bLedWPSBlinkInProgress = true;
...@@ -1025,23 +1025,23 @@ static void SwLedControlMode1(struct _adapter *padapter, ...@@ -1025,23 +1025,23 @@ static void SwLedControlMode1(struct _adapter *padapter,
break; break;
case LED_CTL_STOP_WPS: case LED_CTL_STOP_WPS:
if (pLed->bLedNoLinkBlinkInProgress == true) { if (pLed->bLedNoLinkBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false; pLed->bLedNoLinkBlinkInProgress = false;
} }
if (pLed->bLedLinkBlinkInProgress == true) { if (pLed->bLedLinkBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false; pLed->bLedLinkBlinkInProgress = false;
} }
if (pLed->bLedBlinkInProgress == true) { if (pLed->bLedBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
if (pLed->bLedScanBlinkInProgress == true) { if (pLed->bLedScanBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false; pLed->bLedScanBlinkInProgress = false;
} }
if (pLed->bLedWPSBlinkInProgress) if (pLed->bLedWPSBlinkInProgress)
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
else else
pLed->bLedWPSBlinkInProgress = true; pLed->bLedWPSBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_WPS_STOP; pLed->CurrLedState = LED_BLINK_WPS_STOP;
...@@ -1057,7 +1057,7 @@ static void SwLedControlMode1(struct _adapter *padapter, ...@@ -1057,7 +1057,7 @@ static void SwLedControlMode1(struct _adapter *padapter,
break; break;
case LED_CTL_STOP_WPS_FAIL: case LED_CTL_STOP_WPS_FAIL:
if (pLed->bLedWPSBlinkInProgress) { if (pLed->bLedWPSBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false; pLed->bLedWPSBlinkInProgress = false;
} }
pLed->bLedNoLinkBlinkInProgress = true; pLed->bLedNoLinkBlinkInProgress = true;
...@@ -1073,23 +1073,23 @@ static void SwLedControlMode1(struct _adapter *padapter, ...@@ -1073,23 +1073,23 @@ static void SwLedControlMode1(struct _adapter *padapter,
pLed->CurrLedState = LED_OFF; pLed->CurrLedState = LED_OFF;
pLed->BlinkingLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF;
if (pLed->bLedNoLinkBlinkInProgress) { if (pLed->bLedNoLinkBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false; pLed->bLedNoLinkBlinkInProgress = false;
} }
if (pLed->bLedLinkBlinkInProgress) { if (pLed->bLedLinkBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false; pLed->bLedLinkBlinkInProgress = false;
} }
if (pLed->bLedBlinkInProgress) { if (pLed->bLedBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
if (pLed->bLedWPSBlinkInProgress) { if (pLed->bLedWPSBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false; pLed->bLedWPSBlinkInProgress = false;
} }
if (pLed->bLedScanBlinkInProgress) { if (pLed->bLedScanBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false; pLed->bLedScanBlinkInProgress = false;
} }
mod_timer(&pLed->BlinkTimer, mod_timer(&pLed->BlinkTimer,
...@@ -1116,7 +1116,7 @@ static void SwLedControlMode2(struct _adapter *padapter, ...@@ -1116,7 +1116,7 @@ static void SwLedControlMode2(struct _adapter *padapter,
return; return;
if (pLed->bLedBlinkInProgress == true) { if (pLed->bLedBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
pLed->bLedScanBlinkInProgress = true; pLed->bLedScanBlinkInProgress = true;
...@@ -1154,11 +1154,11 @@ static void SwLedControlMode2(struct _adapter *padapter, ...@@ -1154,11 +1154,11 @@ static void SwLedControlMode2(struct _adapter *padapter,
pLed->CurrLedState = LED_ON; pLed->CurrLedState = LED_ON;
pLed->BlinkingLedState = LED_ON; pLed->BlinkingLedState = LED_ON;
if (pLed->bLedBlinkInProgress) { if (pLed->bLedBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
if (pLed->bLedScanBlinkInProgress) { if (pLed->bLedScanBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false; pLed->bLedScanBlinkInProgress = false;
} }
...@@ -1170,11 +1170,11 @@ static void SwLedControlMode2(struct _adapter *padapter, ...@@ -1170,11 +1170,11 @@ static void SwLedControlMode2(struct _adapter *padapter,
case LED_CTL_START_WPS_BOTTON: case LED_CTL_START_WPS_BOTTON:
if (pLed->bLedWPSBlinkInProgress == false) { if (pLed->bLedWPSBlinkInProgress == false) {
if (pLed->bLedBlinkInProgress == true) { if (pLed->bLedBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
if (pLed->bLedScanBlinkInProgress == true) { if (pLed->bLedScanBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false; pLed->bLedScanBlinkInProgress = false;
} }
pLed->bLedWPSBlinkInProgress = true; pLed->bLedWPSBlinkInProgress = true;
...@@ -1214,15 +1214,15 @@ static void SwLedControlMode2(struct _adapter *padapter, ...@@ -1214,15 +1214,15 @@ static void SwLedControlMode2(struct _adapter *padapter,
pLed->CurrLedState = LED_OFF; pLed->CurrLedState = LED_OFF;
pLed->BlinkingLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF;
if (pLed->bLedBlinkInProgress) { if (pLed->bLedBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
if (pLed->bLedScanBlinkInProgress) { if (pLed->bLedScanBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false; pLed->bLedScanBlinkInProgress = false;
} }
if (pLed->bLedWPSBlinkInProgress) { if (pLed->bLedWPSBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false; pLed->bLedWPSBlinkInProgress = false;
} }
mod_timer(&pLed->BlinkTimer, mod_timer(&pLed->BlinkTimer,
...@@ -1248,7 +1248,7 @@ static void SwLedControlMode3(struct _adapter *padapter, ...@@ -1248,7 +1248,7 @@ static void SwLedControlMode3(struct _adapter *padapter,
if (IS_LED_WPS_BLINKING(pLed)) if (IS_LED_WPS_BLINKING(pLed))
return; return;
if (pLed->bLedBlinkInProgress == true) { if (pLed->bLedBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
pLed->bLedScanBlinkInProgress = true; pLed->bLedScanBlinkInProgress = true;
...@@ -1286,11 +1286,11 @@ static void SwLedControlMode3(struct _adapter *padapter, ...@@ -1286,11 +1286,11 @@ static void SwLedControlMode3(struct _adapter *padapter,
pLed->CurrLedState = LED_ON; pLed->CurrLedState = LED_ON;
pLed->BlinkingLedState = LED_ON; pLed->BlinkingLedState = LED_ON;
if (pLed->bLedBlinkInProgress) { if (pLed->bLedBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
if (pLed->bLedScanBlinkInProgress) { if (pLed->bLedScanBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false; pLed->bLedScanBlinkInProgress = false;
} }
mod_timer(&pLed->BlinkTimer, mod_timer(&pLed->BlinkTimer,
...@@ -1300,11 +1300,11 @@ static void SwLedControlMode3(struct _adapter *padapter, ...@@ -1300,11 +1300,11 @@ static void SwLedControlMode3(struct _adapter *padapter,
case LED_CTL_START_WPS_BOTTON: case LED_CTL_START_WPS_BOTTON:
if (pLed->bLedWPSBlinkInProgress == false) { if (pLed->bLedWPSBlinkInProgress == false) {
if (pLed->bLedBlinkInProgress == true) { if (pLed->bLedBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
if (pLed->bLedScanBlinkInProgress == true) { if (pLed->bLedScanBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false; pLed->bLedScanBlinkInProgress = false;
} }
pLed->bLedWPSBlinkInProgress = true; pLed->bLedWPSBlinkInProgress = true;
...@@ -1319,7 +1319,7 @@ static void SwLedControlMode3(struct _adapter *padapter, ...@@ -1319,7 +1319,7 @@ static void SwLedControlMode3(struct _adapter *padapter,
break; break;
case LED_CTL_STOP_WPS: case LED_CTL_STOP_WPS:
if (pLed->bLedWPSBlinkInProgress) { if (pLed->bLedWPSBlinkInProgress) {
del_timer_sync(&(pLed->BlinkTimer)); del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false; pLed->bLedWPSBlinkInProgress = false;
} else } else
pLed->bLedWPSBlinkInProgress = true; pLed->bLedWPSBlinkInProgress = true;
...@@ -1336,7 +1336,7 @@ static void SwLedControlMode3(struct _adapter *padapter, ...@@ -1336,7 +1336,7 @@ static void SwLedControlMode3(struct _adapter *padapter,
break; break;
case LED_CTL_STOP_WPS_FAIL: case LED_CTL_STOP_WPS_FAIL:
if (pLed->bLedWPSBlinkInProgress) { if (pLed->bLedWPSBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false; pLed->bLedWPSBlinkInProgress = false;
} }
pLed->CurrLedState = LED_OFF; pLed->CurrLedState = LED_OFF;
...@@ -1357,15 +1357,15 @@ static void SwLedControlMode3(struct _adapter *padapter, ...@@ -1357,15 +1357,15 @@ static void SwLedControlMode3(struct _adapter *padapter,
pLed->CurrLedState = LED_OFF; pLed->CurrLedState = LED_OFF;
pLed->BlinkingLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF;
if (pLed->bLedBlinkInProgress) { if (pLed->bLedBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
if (pLed->bLedScanBlinkInProgress) { if (pLed->bLedScanBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false; pLed->bLedScanBlinkInProgress = false;
} }
if (pLed->bLedWPSBlinkInProgress) { if (pLed->bLedWPSBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false; pLed->bLedWPSBlinkInProgress = false;
} }
mod_timer(&pLed->BlinkTimer, mod_timer(&pLed->BlinkTimer,
...@@ -1388,7 +1388,7 @@ static void SwLedControlMode4(struct _adapter *padapter, ...@@ -1388,7 +1388,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
case LED_CTL_START_TO_LINK: case LED_CTL_START_TO_LINK:
if (pLed1->bLedWPSBlinkInProgress) { if (pLed1->bLedWPSBlinkInProgress) {
pLed1->bLedWPSBlinkInProgress = false; pLed1->bLedWPSBlinkInProgress = false;
del_timer_sync(&pLed1->BlinkTimer); del_timer(&pLed1->BlinkTimer);
pLed1->BlinkingLedState = LED_OFF; pLed1->BlinkingLedState = LED_OFF;
pLed1->CurrLedState = LED_OFF; pLed1->CurrLedState = LED_OFF;
if (pLed1->bLedOn) if (pLed1->bLedOn)
...@@ -1400,11 +1400,11 @@ static void SwLedControlMode4(struct _adapter *padapter, ...@@ -1400,11 +1400,11 @@ static void SwLedControlMode4(struct _adapter *padapter,
IS_LED_WPS_BLINKING(pLed)) IS_LED_WPS_BLINKING(pLed))
return; return;
if (pLed->bLedBlinkInProgress == true) { if (pLed->bLedBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
if (pLed->bLedNoLinkBlinkInProgress == true) { if (pLed->bLedNoLinkBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false; pLed->bLedNoLinkBlinkInProgress = false;
} }
pLed->bLedStartToLinkBlinkInProgress = true; pLed->bLedStartToLinkBlinkInProgress = true;
...@@ -1426,7 +1426,7 @@ static void SwLedControlMode4(struct _adapter *padapter, ...@@ -1426,7 +1426,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
if (LedAction == LED_CTL_LINK) { if (LedAction == LED_CTL_LINK) {
if (pLed1->bLedWPSBlinkInProgress) { if (pLed1->bLedWPSBlinkInProgress) {
pLed1->bLedWPSBlinkInProgress = false; pLed1->bLedWPSBlinkInProgress = false;
del_timer_sync(&pLed1->BlinkTimer); del_timer(&pLed1->BlinkTimer);
pLed1->BlinkingLedState = LED_OFF; pLed1->BlinkingLedState = LED_OFF;
pLed1->CurrLedState = LED_OFF; pLed1->CurrLedState = LED_OFF;
if (pLed1->bLedOn) if (pLed1->bLedOn)
...@@ -1439,7 +1439,7 @@ static void SwLedControlMode4(struct _adapter *padapter, ...@@ -1439,7 +1439,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
IS_LED_WPS_BLINKING(pLed)) IS_LED_WPS_BLINKING(pLed))
return; return;
if (pLed->bLedBlinkInProgress == true) { if (pLed->bLedBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
pLed->bLedNoLinkBlinkInProgress = true; pLed->bLedNoLinkBlinkInProgress = true;
...@@ -1460,11 +1460,11 @@ static void SwLedControlMode4(struct _adapter *padapter, ...@@ -1460,11 +1460,11 @@ static void SwLedControlMode4(struct _adapter *padapter,
if (IS_LED_WPS_BLINKING(pLed)) if (IS_LED_WPS_BLINKING(pLed))
return; return;
if (pLed->bLedNoLinkBlinkInProgress == true) { if (pLed->bLedNoLinkBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false; pLed->bLedNoLinkBlinkInProgress = false;
} }
if (pLed->bLedBlinkInProgress == true) { if (pLed->bLedBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
pLed->bLedScanBlinkInProgress = true; pLed->bLedScanBlinkInProgress = true;
...@@ -1485,7 +1485,7 @@ static void SwLedControlMode4(struct _adapter *padapter, ...@@ -1485,7 +1485,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
IS_LED_WPS_BLINKING(pLed)) IS_LED_WPS_BLINKING(pLed))
return; return;
if (pLed->bLedNoLinkBlinkInProgress == true) { if (pLed->bLedNoLinkBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false; pLed->bLedNoLinkBlinkInProgress = false;
} }
pLed->bLedBlinkInProgress = true; pLed->bLedBlinkInProgress = true;
...@@ -1503,7 +1503,7 @@ static void SwLedControlMode4(struct _adapter *padapter, ...@@ -1503,7 +1503,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
case LED_CTL_START_WPS_BOTTON: case LED_CTL_START_WPS_BOTTON:
if (pLed1->bLedWPSBlinkInProgress) { if (pLed1->bLedWPSBlinkInProgress) {
pLed1->bLedWPSBlinkInProgress = false; pLed1->bLedWPSBlinkInProgress = false;
del_timer_sync(&(pLed1->BlinkTimer)); del_timer(&pLed1->BlinkTimer);
pLed1->BlinkingLedState = LED_OFF; pLed1->BlinkingLedState = LED_OFF;
pLed1->CurrLedState = LED_OFF; pLed1->CurrLedState = LED_OFF;
if (pLed1->bLedOn) if (pLed1->bLedOn)
...@@ -1512,15 +1512,15 @@ static void SwLedControlMode4(struct _adapter *padapter, ...@@ -1512,15 +1512,15 @@ static void SwLedControlMode4(struct _adapter *padapter,
} }
if (pLed->bLedWPSBlinkInProgress == false) { if (pLed->bLedWPSBlinkInProgress == false) {
if (pLed->bLedNoLinkBlinkInProgress == true) { if (pLed->bLedNoLinkBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false; pLed->bLedNoLinkBlinkInProgress = false;
} }
if (pLed->bLedBlinkInProgress == true) { if (pLed->bLedBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
if (pLed->bLedScanBlinkInProgress == true) { if (pLed->bLedScanBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false; pLed->bLedScanBlinkInProgress = false;
} }
pLed->bLedWPSBlinkInProgress = true; pLed->bLedWPSBlinkInProgress = true;
...@@ -1538,7 +1538,7 @@ static void SwLedControlMode4(struct _adapter *padapter, ...@@ -1538,7 +1538,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
break; break;
case LED_CTL_STOP_WPS: /*WPS connect success*/ case LED_CTL_STOP_WPS: /*WPS connect success*/
if (pLed->bLedWPSBlinkInProgress) { if (pLed->bLedWPSBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false; pLed->bLedWPSBlinkInProgress = false;
} }
pLed->bLedNoLinkBlinkInProgress = true; pLed->bLedNoLinkBlinkInProgress = true;
...@@ -1552,7 +1552,7 @@ static void SwLedControlMode4(struct _adapter *padapter, ...@@ -1552,7 +1552,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
break; break;
case LED_CTL_STOP_WPS_FAIL: /*WPS authentication fail*/ case LED_CTL_STOP_WPS_FAIL: /*WPS authentication fail*/
if (pLed->bLedWPSBlinkInProgress) { if (pLed->bLedWPSBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false; pLed->bLedWPSBlinkInProgress = false;
} }
pLed->bLedNoLinkBlinkInProgress = true; pLed->bLedNoLinkBlinkInProgress = true;
...@@ -1565,7 +1565,7 @@ static void SwLedControlMode4(struct _adapter *padapter, ...@@ -1565,7 +1565,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA)); msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
/*LED1 settings*/ /*LED1 settings*/
if (pLed1->bLedWPSBlinkInProgress) if (pLed1->bLedWPSBlinkInProgress)
del_timer_sync(&pLed1->BlinkTimer); del_timer(&pLed1->BlinkTimer);
else else
pLed1->bLedWPSBlinkInProgress = true; pLed1->bLedWPSBlinkInProgress = true;
pLed1->CurrLedState = LED_BLINK_WPS_STOP; pLed1->CurrLedState = LED_BLINK_WPS_STOP;
...@@ -1578,7 +1578,7 @@ static void SwLedControlMode4(struct _adapter *padapter, ...@@ -1578,7 +1578,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
break; break;
case LED_CTL_STOP_WPS_FAIL_OVERLAP: /*WPS session overlap*/ case LED_CTL_STOP_WPS_FAIL_OVERLAP: /*WPS session overlap*/
if (pLed->bLedWPSBlinkInProgress) { if (pLed->bLedWPSBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false; pLed->bLedWPSBlinkInProgress = false;
} }
pLed->bLedNoLinkBlinkInProgress = true; pLed->bLedNoLinkBlinkInProgress = true;
...@@ -1591,7 +1591,7 @@ static void SwLedControlMode4(struct _adapter *padapter, ...@@ -1591,7 +1591,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA)); msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
/*LED1 settings*/ /*LED1 settings*/
if (pLed1->bLedWPSBlinkInProgress) if (pLed1->bLedWPSBlinkInProgress)
del_timer_sync(&pLed1->BlinkTimer); del_timer(&pLed1->BlinkTimer);
else else
pLed1->bLedWPSBlinkInProgress = true; pLed1->bLedWPSBlinkInProgress = true;
pLed1->CurrLedState = LED_BLINK_WPS_STOP_OVERLAP; pLed1->CurrLedState = LED_BLINK_WPS_STOP_OVERLAP;
...@@ -1607,31 +1607,31 @@ static void SwLedControlMode4(struct _adapter *padapter, ...@@ -1607,31 +1607,31 @@ static void SwLedControlMode4(struct _adapter *padapter,
pLed->CurrLedState = LED_OFF; pLed->CurrLedState = LED_OFF;
pLed->BlinkingLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF;
if (pLed->bLedNoLinkBlinkInProgress) { if (pLed->bLedNoLinkBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedNoLinkBlinkInProgress = false; pLed->bLedNoLinkBlinkInProgress = false;
} }
if (pLed->bLedLinkBlinkInProgress) { if (pLed->bLedLinkBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedLinkBlinkInProgress = false; pLed->bLedLinkBlinkInProgress = false;
} }
if (pLed->bLedBlinkInProgress) { if (pLed->bLedBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
if (pLed->bLedWPSBlinkInProgress) { if (pLed->bLedWPSBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false; pLed->bLedWPSBlinkInProgress = false;
} }
if (pLed->bLedScanBlinkInProgress) { if (pLed->bLedScanBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedScanBlinkInProgress = false; pLed->bLedScanBlinkInProgress = false;
} }
if (pLed->bLedStartToLinkBlinkInProgress) { if (pLed->bLedStartToLinkBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedStartToLinkBlinkInProgress = false; pLed->bLedStartToLinkBlinkInProgress = false;
} }
if (pLed1->bLedWPSBlinkInProgress) { if (pLed1->bLedWPSBlinkInProgress) {
del_timer_sync(&pLed1->BlinkTimer); del_timer(&pLed1->BlinkTimer);
pLed1->bLedWPSBlinkInProgress = false; pLed1->bLedWPSBlinkInProgress = false;
} }
pLed1->BlinkingLedState = LED_UNKNOWN; pLed1->BlinkingLedState = LED_UNKNOWN;
...@@ -1671,7 +1671,7 @@ static void SwLedControlMode5(struct _adapter *padapter, ...@@ -1671,7 +1671,7 @@ static void SwLedControlMode5(struct _adapter *padapter,
; /* dummy branch */ ; /* dummy branch */
else if (pLed->bLedScanBlinkInProgress == false) { else if (pLed->bLedScanBlinkInProgress == false) {
if (pLed->bLedBlinkInProgress == true) { if (pLed->bLedBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
pLed->bLedScanBlinkInProgress = true; pLed->bLedScanBlinkInProgress = true;
...@@ -1705,7 +1705,7 @@ static void SwLedControlMode5(struct _adapter *padapter, ...@@ -1705,7 +1705,7 @@ static void SwLedControlMode5(struct _adapter *padapter,
pLed->CurrLedState = LED_OFF; pLed->CurrLedState = LED_OFF;
pLed->BlinkingLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF;
if (pLed->bLedBlinkInProgress) { if (pLed->bLedBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
SwLedOff(padapter, pLed); SwLedOff(padapter, pLed);
...@@ -1756,7 +1756,7 @@ static void SwLedControlMode6(struct _adapter *padapter, ...@@ -1756,7 +1756,7 @@ static void SwLedControlMode6(struct _adapter *padapter,
case LED_CTL_START_WPS_BOTTON: case LED_CTL_START_WPS_BOTTON:
if (pLed->bLedWPSBlinkInProgress == false) { if (pLed->bLedWPSBlinkInProgress == false) {
if (pLed->bLedBlinkInProgress == true) { if (pLed->bLedBlinkInProgress == true) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
pLed->bLedWPSBlinkInProgress = true; pLed->bLedWPSBlinkInProgress = true;
...@@ -1772,7 +1772,7 @@ static void SwLedControlMode6(struct _adapter *padapter, ...@@ -1772,7 +1772,7 @@ static void SwLedControlMode6(struct _adapter *padapter,
case LED_CTL_STOP_WPS_FAIL: case LED_CTL_STOP_WPS_FAIL:
case LED_CTL_STOP_WPS: case LED_CTL_STOP_WPS:
if (pLed->bLedWPSBlinkInProgress) { if (pLed->bLedWPSBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false; pLed->bLedWPSBlinkInProgress = false;
} }
pLed->CurrLedState = LED_ON; pLed->CurrLedState = LED_ON;
...@@ -1784,11 +1784,11 @@ static void SwLedControlMode6(struct _adapter *padapter, ...@@ -1784,11 +1784,11 @@ static void SwLedControlMode6(struct _adapter *padapter,
pLed->CurrLedState = LED_OFF; pLed->CurrLedState = LED_OFF;
pLed->BlinkingLedState = LED_OFF; pLed->BlinkingLedState = LED_OFF;
if (pLed->bLedBlinkInProgress) { if (pLed->bLedBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedBlinkInProgress = false; pLed->bLedBlinkInProgress = false;
} }
if (pLed->bLedWPSBlinkInProgress) { if (pLed->bLedWPSBlinkInProgress) {
del_timer_sync(&pLed->BlinkTimer); del_timer(&pLed->BlinkTimer);
pLed->bLedWPSBlinkInProgress = false; pLed->bLedWPSBlinkInProgress = false;
} }
SwLedOff(padapter, pLed); SwLedOff(padapter, pLed);
......
...@@ -910,7 +910,7 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter, ...@@ -910,7 +910,7 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
if (pcmd->res != H2C_SUCCESS) if (pcmd->res != H2C_SUCCESS)
mod_timer(&pmlmepriv->assoc_timer, mod_timer(&pmlmepriv->assoc_timer,
jiffies + msecs_to_jiffies(1)); jiffies + msecs_to_jiffies(1));
del_timer_sync(&pmlmepriv->assoc_timer); del_timer(&pmlmepriv->assoc_timer);
#ifdef __BIG_ENDIAN #ifdef __BIG_ENDIAN
/* endian_convert */ /* endian_convert */
pnetwork->Length = le32_to_cpu(pnetwork->Length); pnetwork->Length = le32_to_cpu(pnetwork->Length);
......
...@@ -582,7 +582,7 @@ void r8712_surveydone_event_callback(struct _adapter *adapter, u8 *pbuf) ...@@ -582,7 +582,7 @@ void r8712_surveydone_event_callback(struct _adapter *adapter, u8 *pbuf)
spin_lock_irqsave(&pmlmepriv->lock, irqL); spin_lock_irqsave(&pmlmepriv->lock, irqL);
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) { if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
del_timer_sync(&pmlmepriv->scan_to_timer); del_timer(&pmlmepriv->scan_to_timer);
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY); _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
} }
...@@ -696,7 +696,7 @@ void r8712_ind_disconnect(struct _adapter *padapter) ...@@ -696,7 +696,7 @@ void r8712_ind_disconnect(struct _adapter *padapter)
} }
if (padapter->pwrctrlpriv.pwr_mode != if (padapter->pwrctrlpriv.pwr_mode !=
padapter->registrypriv.power_mgnt) { padapter->registrypriv.power_mgnt) {
del_timer_sync(&pmlmepriv->dhcp_timer); del_timer(&pmlmepriv->dhcp_timer);
r8712_set_ps_mode(padapter, padapter->registrypriv.power_mgnt, r8712_set_ps_mode(padapter, padapter->registrypriv.power_mgnt,
padapter->registrypriv.smart_ps); padapter->registrypriv.smart_ps);
} }
...@@ -910,7 +910,7 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf) ...@@ -910,7 +910,7 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf)
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)
== true) == true)
r8712_indicate_connect(adapter); r8712_indicate_connect(adapter);
del_timer_sync(&pmlmepriv->assoc_timer); del_timer(&pmlmepriv->assoc_timer);
} else } else
goto ignore_joinbss_callback; goto ignore_joinbss_callback;
} else { } else {
......
...@@ -103,7 +103,7 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter, ...@@ -103,7 +103,7 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter,
if (pwrpriv->cpwm_tog == ((preportpwrstate->state) & 0x80)) if (pwrpriv->cpwm_tog == ((preportpwrstate->state) & 0x80))
return; return;
del_timer_sync(&padapter->pwrctrlpriv.rpwm_check_timer); del_timer(&padapter->pwrctrlpriv.rpwm_check_timer);
_enter_pwrlock(&pwrpriv->lock); _enter_pwrlock(&pwrpriv->lock);
pwrpriv->cpwm = (preportpwrstate->state) & 0xf; pwrpriv->cpwm = (preportpwrstate->state) & 0xf;
if (pwrpriv->cpwm >= PS_STATE_S2) { if (pwrpriv->cpwm >= PS_STATE_S2) {
......
...@@ -198,7 +198,7 @@ void r8712_free_stainfo(struct _adapter *padapter, struct sta_info *psta) ...@@ -198,7 +198,7 @@ void r8712_free_stainfo(struct _adapter *padapter, struct sta_info *psta)
* cancel reordering_ctrl_timer */ * cancel reordering_ctrl_timer */
for (i = 0; i < 16; i++) { for (i = 0; i < 16; i++) {
preorder_ctrl = &psta->recvreorder_ctrl[i]; preorder_ctrl = &psta->recvreorder_ctrl[i];
del_timer_sync(&preorder_ctrl->reordering_ctrl_timer); del_timer(&preorder_ctrl->reordering_ctrl_timer);
} }
spin_lock(&(pfree_sta_queue->lock)); spin_lock(&(pfree_sta_queue->lock));
/* insert into free_sta_queue; 20061114 */ /* insert into free_sta_queue; 20061114 */
......
...@@ -162,6 +162,17 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x, ...@@ -162,6 +162,17 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
return put_user(x, ptr); return put_user(x, ptr);
} }
static inline int tty_copy_to_user(struct tty_struct *tty,
void __user *to,
const void *from,
unsigned long n)
{
struct n_tty_data *ldata = tty->disc_data;
tty_audit_add_data(tty, to, n, ldata->icanon);
return copy_to_user(to, from, n);
}
/** /**
* n_tty_kick_worker - start input worker (if required) * n_tty_kick_worker - start input worker (if required)
* @tty: terminal * @tty: terminal
...@@ -2070,8 +2081,8 @@ static int canon_copy_from_read_buf(struct tty_struct *tty, ...@@ -2070,8 +2081,8 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
size = N_TTY_BUF_SIZE - tail; size = N_TTY_BUF_SIZE - tail;
n = eol - tail; n = eol - tail;
if (n > 4096) if (n > N_TTY_BUF_SIZE)
n += 4096; n += N_TTY_BUF_SIZE;
n += found; n += found;
c = n; c = n;
...@@ -2084,12 +2095,12 @@ static int canon_copy_from_read_buf(struct tty_struct *tty, ...@@ -2084,12 +2095,12 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
__func__, eol, found, n, c, size, more); __func__, eol, found, n, c, size, more);
if (n > size) { if (n > size) {
ret = copy_to_user(*b, read_buf_addr(ldata, tail), size); ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), size);
if (ret) if (ret)
return -EFAULT; return -EFAULT;
ret = copy_to_user(*b + size, ldata->read_buf, n - size); ret = tty_copy_to_user(tty, *b + size, ldata->read_buf, n - size);
} else } else
ret = copy_to_user(*b, read_buf_addr(ldata, tail), n); ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), n);
if (ret) if (ret)
return -EFAULT; return -EFAULT;
......
...@@ -562,12 +562,36 @@ static irqreturn_t omap_wake_irq(int irq, void *dev_id) ...@@ -562,12 +562,36 @@ static irqreturn_t omap_wake_irq(int irq, void *dev_id)
return IRQ_NONE; return IRQ_NONE;
} }
#ifdef CONFIG_SERIAL_8250_DMA
+static int omap_8250_dma_handle_irq(struct uart_port *port);
+#endif
+
+static irqreturn_t omap8250_irq(int irq, void *dev_id)
+{
+	struct uart_port *port = dev_id;
+	struct uart_8250_port *up = up_to_u8250p(port);
+	unsigned int iir;
+	int ret;
+
+#ifdef CONFIG_SERIAL_8250_DMA
+	if (up->dma) {
+		ret = omap_8250_dma_handle_irq(port);
+		return IRQ_RETVAL(ret);
+	}
+#endif
+
+	serial8250_rpm_get(up);
+	iir = serial_port_in(port, UART_IIR);
+	ret = serial8250_handle_irq(port, iir);
+	serial8250_rpm_put(up);
+
+	return IRQ_RETVAL(ret);
+}
+
 static int omap_8250_startup(struct uart_port *port)
 {
-	struct uart_8250_port *up =
-		container_of(port, struct uart_8250_port, port);
+	struct uart_8250_port *up = up_to_u8250p(port);
 	struct omap8250_priv *priv = port->private_data;
 	int ret;
 
 	if (priv->wakeirq) {
@@ -580,10 +604,31 @@ static int omap_8250_startup(struct uart_port *port)
 	pm_runtime_get_sync(port->dev);
 
-	ret = serial8250_do_startup(port);
-	if (ret)
+	up->mcr = 0;
+	serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+
+	serial_out(up, UART_LCR, UART_LCR_WLEN8);
+
+	up->lsr_saved_flags = 0;
+	up->msr_saved_flags = 0;
+
+	if (up->dma) {
+		ret = serial8250_request_dma(up);
+		if (ret) {
+			dev_warn_ratelimited(port->dev,
+					     "failed to request DMA\n");
+			up->dma = NULL;
+		}
+	}
+
+	ret = request_irq(port->irq, omap8250_irq, IRQF_SHARED,
+			  dev_name(port->dev), port);
+	if (ret < 0)
 		goto err;
 
+	up->ier = UART_IER_RLSI | UART_IER_RDI;
+	serial_out(up, UART_IER, up->ier);
+
 #ifdef CONFIG_PM
 	up->capabilities |= UART_CAP_RPM;
 #endif
@@ -610,8 +655,7 @@ static int omap_8250_startup(struct uart_port *port)
 static void omap_8250_shutdown(struct uart_port *port)
 {
-	struct uart_8250_port *up =
-		container_of(port, struct uart_8250_port, port);
+	struct uart_8250_port *up = up_to_u8250p(port);
 	struct omap8250_priv *priv = port->private_data;
 
 	flush_work(&priv->qos_work);
@@ -621,11 +665,24 @@ static void omap_8250_shutdown(struct uart_port *port)
 	pm_runtime_get_sync(port->dev);
 
 	serial_out(up, UART_OMAP_WER, 0);
-	serial8250_do_shutdown(port);
+
+	up->ier = 0;
+	serial_out(up, UART_IER, 0);
+
+	if (up->dma)
+		serial8250_release_dma(up);
+
+	/*
+	 * Disable break condition and FIFOs
+	 */
+	if (up->lcr & UART_LCR_SBC)
+		serial_out(up, UART_LCR, up->lcr & ~UART_LCR_SBC);
+	serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
 
 	pm_runtime_mark_last_busy(port->dev);
 	pm_runtime_put_autosuspend(port->dev);
+
+	free_irq(port->irq, port);
 	if (priv->wakeirq)
 		free_irq(priv->wakeirq, port);
 }
@@ -974,6 +1031,13 @@ static inline int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
 }
 #endif
 
+static int omap8250_no_handle_irq(struct uart_port *port)
+{
+	/* IRQ has not been requested but handling irq? */
+	WARN_ONCE(1, "Unexpected irq handling before port startup\n");
+	return 0;
+}
+
 static int omap8250_probe(struct platform_device *pdev)
 {
 	struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1075,6 +1139,7 @@ static int omap8250_probe(struct platform_device *pdev)
 	pm_runtime_get_sync(&pdev->dev);
 
 	omap_serial_fill_features_erratas(&up, priv);
+	up.port.handle_irq = omap8250_no_handle_irq;
 #ifdef CONFIG_SERIAL_8250_DMA
 	if (pdev->dev.of_node) {
 		/*
@@ -1088,7 +1153,6 @@ static int omap8250_probe(struct platform_device *pdev)
 		ret = of_property_count_strings(pdev->dev.of_node, "dma-names");
 		if (ret == 2) {
 			up.dma = &priv->omap8250_dma;
-			up.port.handle_irq = omap_8250_dma_handle_irq;
 			priv->omap8250_dma.fn = the_no_dma_filter_fn;
 			priv->omap8250_dma.tx_dma = omap_8250_tx_dma;
 			priv->omap8250_dma.rx_dma = omap_8250_rx_dma;
......
@@ -1249,20 +1249,19 @@ __acquires(&uap->port.lock)
 
 /*
  * Transmit a character
- * There must be at least one free entry in the TX FIFO to accept the char.
  *
- * Returns true if the FIFO might have space in it afterwards;
- * returns false if the FIFO definitely became full.
+ * Returns true if the character was successfully queued to the FIFO.
+ * Returns false otherwise.
  */
 static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c)
 {
+	if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
+		return false; /* unable to transmit character */
+
 	writew(c, uap->port.membase + UART01x_DR);
 	uap->port.icount.tx++;
 
-	if (likely(uap->tx_irq_seen > 1))
-		return true;
-
-	return !(readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF);
+	return true;
 }
 
 static bool pl011_tx_chars(struct uart_amba_port *uap)
@@ -1296,7 +1295,8 @@ static bool pl011_tx_chars(struct uart_amba_port *uap)
 		return false;
 
 	if (uap->port.x_char) {
-		pl011_tx_char(uap, uap->port.x_char);
+		if (!pl011_tx_char(uap, uap->port.x_char))
+			goto done;
 		uap->port.x_char = 0;
 		--count;
 	}
......
@@ -911,6 +911,14 @@ static void dma_rx_callback(void *data)
 	status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state);
 	count = RX_BUF_SIZE - state.residue;
 
+	if (readl(sport->port.membase + USR2) & USR2_IDLE) {
+		/* In condition [3] the SDMA counted up too early */
+		count--;
+
+		writel(USR2_IDLE, sport->port.membase + USR2);
+	}
+
 	dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
 
 	if (count) {
......
@@ -339,7 +339,7 @@
 #define DWC3_DGCMD_SET_ENDPOINT_NRDY	0x0c
 #define DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK	0x10
 
-#define DWC3_DGCMD_STATUS(n)		(((n) >> 15) & 1)
+#define DWC3_DGCMD_STATUS(n)		(((n) >> 12) & 0x0F)
 #define DWC3_DGCMD_CMDACT		(1 << 10)
 #define DWC3_DGCMD_CMDIOC		(1 << 8)
@@ -355,7 +355,7 @@
 #define DWC3_DEPCMD_PARAM_SHIFT		16
 #define DWC3_DEPCMD_PARAM(x)		((x) << DWC3_DEPCMD_PARAM_SHIFT)
 #define DWC3_DEPCMD_GET_RSC_IDX(x)	(((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
-#define DWC3_DEPCMD_STATUS(x)		(((x) >> 15) & 1)
+#define DWC3_DEPCMD_STATUS(x)		(((x) >> 12) & 0x0F)
 #define DWC3_DEPCMD_HIPRI_FORCERM	(1 << 11)
 #define DWC3_DEPCMD_CMDACT		(1 << 10)
 #define DWC3_DEPCMD_CMDIOC		(1 << 8)
......
@@ -315,7 +315,6 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
 			return ret;
 		}
 
-		set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
 		return len;
 	}
 	break;
@@ -847,7 +846,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
 		ret = ep->status;
 		if (io_data->read && ret > 0) {
 			ret = copy_to_iter(data, ret, &io_data->data);
-			if (unlikely(iov_iter_count(&io_data->data)))
+			if (!ret)
 				ret = -EFAULT;
 		}
 	}
@@ -1463,8 +1462,7 @@ static void ffs_data_clear(struct ffs_data *ffs)
 {
 	ENTER();
 
-	if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags))
-		ffs_closed(ffs);
+	ffs_closed(ffs);
 
 	BUG_ON(ffs->gadget);
@@ -3422,9 +3420,13 @@ static int ffs_ready(struct ffs_data *ffs)
 	ffs_obj->desc_ready = true;
 	ffs_obj->ffs_data = ffs;
 
-	if (ffs_obj->ffs_ready_callback)
+	if (ffs_obj->ffs_ready_callback) {
 		ret = ffs_obj->ffs_ready_callback(ffs);
+		if (ret)
+			goto done;
+	}
 
+	set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
 done:
 	ffs_dev_unlock();
 	return ret;
@@ -3443,7 +3445,8 @@ static void ffs_closed(struct ffs_data *ffs)
 	ffs_obj->desc_ready = false;
 
-	if (ffs_obj->ffs_closed_callback)
+	if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
+	    ffs_obj->ffs_closed_callback)
 		ffs_obj->ffs_closed_callback(ffs);
 
 	if (!ffs_obj->opts || ffs_obj->opts->no_configfs
......
@@ -973,7 +973,13 @@ static ssize_t f_midi_opts_id_show(struct f_midi_opts *opts, char *page)
 	int result;
 
 	mutex_lock(&opts->lock);
-	result = strlcpy(page, opts->id, PAGE_SIZE);
+	if (opts->id) {
+		result = strlcpy(page, opts->id, PAGE_SIZE);
+	} else {
+		page[0] = 0;
+		result = 0;
+	}
 	mutex_unlock(&opts->lock);
 
 	return result;
......