Commit de8fd087 authored by Dave Kleikamp

Merge with /home/shaggy/git/linus-clean/

Signed-off-by: Dave Kleikamp <shaggy@austin.ibm.com>
...@@ -63,8 +63,8 @@ __beginning: mov r4, r0 @ save the entry to the firmware ...@@ -63,8 +63,8 @@ __beginning: mov r4, r0 @ save the entry to the firmware
mov pc, r2 mov pc, r2
__copy_target: .long 0x08508000 __copy_target: .long 0x08507FFC
__copy_end: .long 0x08608000 __copy_end: .long 0x08607FFC
.word _start .word _start
.word __bss_start .word __bss_start
...@@ -73,9 +73,10 @@ __copy_end: .long 0x08608000 ...@@ -73,9 +73,10 @@ __copy_end: .long 0x08608000
__temp_stack: .space 128 __temp_stack: .space 128
__mmu_off: __mmu_off:
adr r0, __ofw_data adr r0, __ofw_data @ read the 1. entry of the memory map
ldr r0, [r0, #4] ldr r0, [r0, #4]
orr r0, r0, #0x00600000 orr r0, r0, #0x00600000
sub r0, r0, #4
ldr r1, __copy_end ldr r1, __copy_end
ldr r3, __copy_target ldr r3, __copy_target
...@@ -89,20 +90,43 @@ __mmu_off: ...@@ -89,20 +90,43 @@ __mmu_off:
* from 0x08500000 to 0x08508000 if we have only 8MB * from 0x08500000 to 0x08508000 if we have only 8MB
*/ */
/* As we get more 2.6-kernels it gets more and more
* uncomfortable to be bound to kernel images of 1MB only.
* So we add a loop here, to be able to copy some more.
* Alexander Schulz 2005-07-17
*/
mov r4, #3 @ How many megabytes to copy
__MoveCode: sub r4, r4, #1
__Copy: ldr r2, [r0], #-4 __Copy: ldr r2, [r0], #-4
str r2, [r1], #-4 str r2, [r1], #-4
teq r1, r3 teq r1, r3
bne __Copy bne __Copy
/* The firmware maps us in blocks of 1 MB, the next block is
_below_ the last one. So our decrementing source pointer
ist right here, but the destination pointer must be increased
by 2 MB */
add r1, r1, #0x00200000
add r3, r3, #0x00100000
teq r4, #0
bne __MoveCode
/* and jump to it */ /* and jump to it */
adr r2, __go_on adr r2, __go_on @ where we want to jump
adr r0, __ofw_data adr r0, __ofw_data @ read the 1. entry of the memory map
ldr r0, [r0, #4] ldr r0, [r0, #4]
sub r2, r2, r0 sub r2, r2, r0 @ we are mapped add 0e50 now, sub that (-0e00)
sub r2, r2, #0x00500000 sub r2, r2, #0x00500000 @ -0050
ldr r0, __copy_target ldr r0, __copy_target @ and add 0850 8000 instead
add r0, r0, #4
add r2, r2, r0 add r2, r2, r0
mov pc, r2 mov pc, r2 @ and jump there
__go_on: __go_on:
adr sp, __temp_stack adr sp, __temp_stack
......
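A rough C analogue of the copy loop added above may help: the firmware maps the source image in 1 MB blocks at descending addresses, so the source pointer simply keeps decrementing, while the destination pointer, which also walks downwards within each megabyte, has to be bumped up by 2 MB once a megabyte is full. The function, names and word-sized pointer type below are mine for illustration, not part of the kernel source.

#include <stdint.h>

#define MB_WORDS (0x00100000 / 4)

/* src: last word of the highest source megabyte (decrements throughout)
 * dst: last word of the first destination megabyte                      */
static void copy_megabytes(uint32_t *src, uint32_t *dst, int megabytes)
{
    uint32_t *dst_stop = dst - MB_WORDS;    /* plays the role of __copy_target */

    while (megabytes-- > 0) {
        while (dst != dst_stop)
            *dst-- = *src--;                /* ldr/str with post-decrement */
        dst += 2 * MB_WORDS;                /* add r1, r1, #0x00200000 */
        dst_stop += MB_WORDS;               /* add r3, r3, #0x00100000 */
    }
}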
...@@ -561,7 +561,6 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y ...@@ -561,7 +561,6 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
# #
CONFIG_SERIAL_S3C2410=y CONFIG_SERIAL_S3C2410=y
CONFIG_SERIAL_S3C2410_CONSOLE=y CONFIG_SERIAL_S3C2410_CONSOLE=y
CONFIG_SERIAL_BAST_SIO=y
CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y CONFIG_UNIX98_PTYS=y
......
...@@ -570,7 +570,6 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y ...@@ -570,7 +570,6 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
# #
CONFIG_SERIAL_S3C2410=y CONFIG_SERIAL_S3C2410=y
CONFIG_SERIAL_S3C2410_CONSOLE=y CONFIG_SERIAL_S3C2410_CONSOLE=y
CONFIG_SERIAL_BAST_SIO=y
CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y CONFIG_UNIX98_PTYS=y
......
...@@ -78,7 +78,7 @@ struct smp_call_struct { ...@@ -78,7 +78,7 @@ struct smp_call_struct {
static struct smp_call_struct * volatile smp_call_function_data; static struct smp_call_struct * volatile smp_call_function_data;
static DEFINE_SPINLOCK(smp_call_function_lock); static DEFINE_SPINLOCK(smp_call_function_lock);
int __init __cpu_up(unsigned int cpu) int __cpuinit __cpu_up(unsigned int cpu)
{ {
struct task_struct *idle; struct task_struct *idle;
pgd_t *pgd; pgd_t *pgd;
...@@ -159,7 +159,7 @@ int __init __cpu_up(unsigned int cpu) ...@@ -159,7 +159,7 @@ int __init __cpu_up(unsigned int cpu)
* This is the secondary CPU boot entry. We're using this CPUs * This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables. * idle thread stack, but a set of temporary page tables.
*/ */
asmlinkage void __init secondary_start_kernel(void) asmlinkage void __cpuinit secondary_start_kernel(void)
{ {
struct mm_struct *mm = &init_mm; struct mm_struct *mm = &init_mm;
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
...@@ -209,7 +209,7 @@ asmlinkage void __init secondary_start_kernel(void) ...@@ -209,7 +209,7 @@ asmlinkage void __init secondary_start_kernel(void)
* Called by both boot and secondaries to move global data into * Called by both boot and secondaries to move global data into
* per-processor storage. * per-processor storage.
*/ */
void __init smp_store_cpu_info(unsigned int cpuid) void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{ {
struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
......
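For context on the annotation change above: functions marked __init are discarded once booting finishes, but the secondary-CPU bring-up paths are also needed later if a CPU is hotplugged back in, so they move to __cpuinit, which only falls back to __init when CPU hotplug is configured out. A minimal sketch of the pattern, with a placeholder function of my own rather than the real smp.c code:

#include <linux/init.h>

/* Placeholder: stands in for __cpu_up()/secondary_start_kernel() above. */
int __cpuinit demo_bring_up_cpu(unsigned int cpu)
{
        /* Runs at boot *and* on every later hotplug, so it must not live
         * in the .init.text section that the kernel frees after boot.
         * With CONFIG_HOTPLUG_CPU=n, __cpuinit degrades to __init and
         * the text is discarded as before. */
        return 0;
}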
...@@ -27,12 +27,12 @@ extern void integrator_secondary_startup(void); ...@@ -27,12 +27,12 @@ extern void integrator_secondary_startup(void);
* control for which core is the next to come out of the secondary * control for which core is the next to come out of the secondary
* boot "holding pen" * boot "holding pen"
*/ */
volatile int __initdata pen_release = -1; volatile int __cpuinitdata pen_release = -1;
unsigned long __initdata phys_pen_release = 0; unsigned long __cpuinitdata phys_pen_release = 0;
static DEFINE_SPINLOCK(boot_lock); static DEFINE_SPINLOCK(boot_lock);
void __init platform_secondary_init(unsigned int cpu) void __cpuinit platform_secondary_init(unsigned int cpu)
{ {
/* /*
* the primary core may have used a "cross call" soft interrupt * the primary core may have used a "cross call" soft interrupt
...@@ -61,7 +61,7 @@ void __init platform_secondary_init(unsigned int cpu) ...@@ -61,7 +61,7 @@ void __init platform_secondary_init(unsigned int cpu)
spin_unlock(&boot_lock); spin_unlock(&boot_lock);
} }
int __init boot_secondary(unsigned int cpu, struct task_struct *idle) int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{ {
unsigned long timeout; unsigned long timeout;
......
...@@ -436,7 +436,7 @@ int s3c2410_dma_enqueue(unsigned int channel, void *id, ...@@ -436,7 +436,7 @@ int s3c2410_dma_enqueue(unsigned int channel, void *id,
buf = kmem_cache_alloc(dma_kmem, GFP_ATOMIC); buf = kmem_cache_alloc(dma_kmem, GFP_ATOMIC);
if (buf == NULL) { if (buf == NULL) {
pr_debug("%s: out of memory (%d alloc)\n", pr_debug("%s: out of memory (%ld alloc)\n",
__FUNCTION__, sizeof(*buf)); __FUNCTION__, sizeof(*buf));
return -ENOMEM; return -ENOMEM;
} }
......
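The format-string fix above exists because sizeof(*buf) has type size_t, which is unsigned long on this configuration, so %d was mismatched. A stand-alone userspace illustration, not kernel code; %zu is the fully portable C99 spelling, while the patch uses %ld to match the architecture:

#include <stdio.h>

struct s3c2410_dma_buf_demo { void *data; int size; };   /* stand-in struct */

int main(void)
{
        /* %zu matches size_t exactly; %ld only works where size_t == unsigned long */
        printf("out of memory (%zu alloc)\n", sizeof(struct s3c2410_dma_buf_demo));
        return 0;
}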
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
* 14-Mar-2006 BJD Updated for __iomem changes * 14-Mar-2006 BJD Updated for __iomem changes
* 22-Jun-2006 BJD Added DM9000 platform information * 22-Jun-2006 BJD Added DM9000 platform information
* 28-Jun-2006 BJD Moved pm functionality out to common code * 28-Jun-2006 BJD Moved pm functionality out to common code
* 17-Jul-2006 BJD Changed to platform device for SuperIO 16550s
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
...@@ -64,6 +65,8 @@ ...@@ -64,6 +65,8 @@
#include <linux/mtd/nand_ecc.h> #include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h> #include <linux/mtd/partitions.h>
#include <linux/serial_8250.h>
#include "clock.h" #include "clock.h"
#include "devs.h" #include "devs.h"
#include "cpu.h" #include "cpu.h"
...@@ -351,6 +354,39 @@ static struct platform_device bast_device_dm9k = { ...@@ -351,6 +354,39 @@ static struct platform_device bast_device_dm9k = {
} }
}; };
/* serial devices */
#define SERIAL_BASE (S3C2410_CS2 + BAST_PA_SUPERIO)
#define SERIAL_FLAGS (UPF_BOOT_AUTOCONF | UPF_IOREMAP | UPF_SHARE_IRQ)
#define SERIAL_CLK (1843200)
static struct plat_serial8250_port bast_sio_data[] = {
[0] = {
.mapbase = SERIAL_BASE + 0x2f8,
.irq = IRQ_PCSERIAL1,
.flags = SERIAL_FLAGS,
.iotype = UPIO_MEM,
.regshift = 0,
.uartclk = SERIAL_CLK,
},
[1] = {
.mapbase = SERIAL_BASE + 0x3f8,
.irq = IRQ_PCSERIAL2,
.flags = SERIAL_FLAGS,
.iotype = UPIO_MEM,
.regshift = 0,
.uartclk = SERIAL_CLK,
},
{ }
};
static struct platform_device bast_sio = {
.name = "serial8250",
.id = 0,
.dev = {
.platform_data = &bast_sio_data,
},
};
/* Standard BAST devices */ /* Standard BAST devices */
...@@ -364,6 +400,7 @@ static struct platform_device *bast_devices[] __initdata = { ...@@ -364,6 +400,7 @@ static struct platform_device *bast_devices[] __initdata = {
&s3c_device_nand, &s3c_device_nand,
&bast_device_nor, &bast_device_nor,
&bast_device_dm9k, &bast_device_dm9k,
&bast_sio,
}; };
static struct clk *bast_clocks[] = { static struct clk *bast_clocks[] = {
......
...@@ -29,9 +29,7 @@ ...@@ -29,9 +29,7 @@
* stack+task struct. Use the same method as 'current' uses to * stack+task struct. Use the same method as 'current' uses to
* reach them. * reach them.
*/ */
register unsigned long *user_registers asm("sl"); #define GET_USERREG() ((struct pt_regs *)(THREAD_START_SP + (unsigned long)current_thread_info()) - 1)
#define GET_USERREG() (user_registers)
#include <linux/config.h> #include <linux/config.h>
#include <linux/thread_info.h> #include <linux/thread_info.h>
......
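The macro above recomputes the saved user registers from the kernel stack layout instead of caching a pointer in a global register variable: the thread_info sits at the bottom of the per-thread stack, the pt_regs frame sits at the very top, THREAD_START_SP bytes above the thread_info base, so subtracting one struct pt_regs lands on it. A userspace illustration of the arithmetic with stand-in constants (the real values come from ARM's asm/thread_info.h):

#include <stdio.h>
#include <stdint.h>

struct pt_regs_demo { unsigned long uregs[18]; };   /* stand-in for pt_regs */
#define THREAD_START_SP_DEMO 0x1ff8UL               /* stand-in: 8 KB stack - 8 */

static struct pt_regs_demo *get_userreg(void *thread_info_base)
{
        return (struct pt_regs_demo *)
                (THREAD_START_SP_DEMO + (uintptr_t)thread_info_base) - 1;
}

int main(void)
{
        char stack[0x2000];                         /* pretend kernel stack */
        struct pt_regs_demo *regs = get_userreg(stack);

        printf("pt_regs ends %td bytes below the top of the stack\n",
               (stack + sizeof(stack)) - (char *)(regs + 1));
        return 0;
}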
...@@ -132,7 +132,7 @@ void float_raise(signed char flags) ...@@ -132,7 +132,7 @@ void float_raise(signed char flags)
printk(KERN_DEBUG printk(KERN_DEBUG
"NWFPE: %s[%d] takes exception %08x at %p from %08lx\n", "NWFPE: %s[%d] takes exception %08x at %p from %08lx\n",
current->comm, current->pid, flags, current->comm, current->pid, flags,
__builtin_return_address(0), GET_USERREG()[15]); __builtin_return_address(0), GET_USERREG()->ARM_pc);
#endif #endif
/* Keep SoftFloat exception flags up to date. */ /* Keep SoftFloat exception flags up to date. */
......
...@@ -28,8 +28,8 @@ static inline unsigned long readRegister(const unsigned int nReg) ...@@ -28,8 +28,8 @@ static inline unsigned long readRegister(const unsigned int nReg)
for this in this routine. LDF/STF instructions with Rn = PC for this in this routine. LDF/STF instructions with Rn = PC
depend on the PC being correct, as they use PC+8 in their depend on the PC being correct, as they use PC+8 in their
address calculations. */ address calculations. */
unsigned long *userRegisters = GET_USERREG(); struct pt_regs *regs = GET_USERREG();
unsigned int val = userRegisters[nReg]; unsigned int val = regs->uregs[nReg];
if (REG_PC == nReg) if (REG_PC == nReg)
val -= 4; val -= 4;
return val; return val;
...@@ -38,8 +38,8 @@ static inline unsigned long readRegister(const unsigned int nReg) ...@@ -38,8 +38,8 @@ static inline unsigned long readRegister(const unsigned int nReg)
static inline void static inline void
writeRegister(const unsigned int nReg, const unsigned long val) writeRegister(const unsigned int nReg, const unsigned long val)
{ {
unsigned long *userRegisters = GET_USERREG(); struct pt_regs *regs = GET_USERREG();
userRegisters[nReg] = val; regs->uregs[nReg] = val;
} }
static inline unsigned long readCPSR(void) static inline unsigned long readCPSR(void)
...@@ -63,12 +63,12 @@ static inline unsigned long readConditionCodes(void) ...@@ -63,12 +63,12 @@ static inline unsigned long readConditionCodes(void)
static inline void writeConditionCodes(const unsigned long val) static inline void writeConditionCodes(const unsigned long val)
{ {
unsigned long *userRegisters = GET_USERREG(); struct pt_regs *regs = GET_USERREG();
unsigned long rval; unsigned long rval;
/* /*
* Operate directly on userRegisters since * Operate directly on userRegisters since
* the CPSR may be the PC register itself. * the CPSR may be the PC register itself.
*/ */
rval = userRegisters[REG_CPSR] & ~CC_MASK; rval = regs->ARM_cpsr & ~CC_MASK;
userRegisters[REG_CPSR] = rval | (val & CC_MASK); regs->ARM_cpsr = rval | (val & CC_MASK);
} }
...@@ -82,17 +82,6 @@ void kernel_fpu_begin(void) ...@@ -82,17 +82,6 @@ void kernel_fpu_begin(void)
} }
EXPORT_SYMBOL_GPL(kernel_fpu_begin); EXPORT_SYMBOL_GPL(kernel_fpu_begin);
void restore_fpu( struct task_struct *tsk )
{
if ( cpu_has_fxsr ) {
asm volatile( "fxrstor %0"
: : "m" (tsk->thread.i387.fxsave) );
} else {
asm volatile( "frstor %0"
: : "m" (tsk->thread.i387.fsave) );
}
}
/* /*
* FPU tag word conversions. * FPU tag word conversions.
*/ */
......
...@@ -700,23 +700,27 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas ...@@ -700,23 +700,27 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
/* /*
* Restore %fs and %gs if needed. * Restore %fs and %gs if needed.
*
* Glibc normally makes %fs be zero, and %gs is one of
* the TLS segments.
*/ */
if (unlikely(prev->fs | prev->gs | next->fs | next->gs)) { if (unlikely(prev->fs | next->fs))
loadsegment(fs, next->fs); loadsegment(fs, next->fs);
if (prev->gs | next->gs)
loadsegment(gs, next->gs); loadsegment(gs, next->gs);
}
/* /*
* Now maybe reload the debug registers * Now maybe reload the debug registers
*/ */
if (unlikely(next->debugreg[7])) { if (unlikely(next->debugreg[7])) {
set_debugreg(current->thread.debugreg[0], 0); set_debugreg(next->debugreg[0], 0);
set_debugreg(current->thread.debugreg[1], 1); set_debugreg(next->debugreg[1], 1);
set_debugreg(current->thread.debugreg[2], 2); set_debugreg(next->debugreg[2], 2);
set_debugreg(current->thread.debugreg[3], 3); set_debugreg(next->debugreg[3], 3);
/* no 4 and 5 */ /* no 4 and 5 */
set_debugreg(current->thread.debugreg[6], 6); set_debugreg(next->debugreg[6], 6);
set_debugreg(current->thread.debugreg[7], 7); set_debugreg(next->debugreg[7], 7);
} }
if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
......
...@@ -1414,7 +1414,7 @@ static struct nop { ...@@ -1414,7 +1414,7 @@ static struct nop {
This runs before SMP is initialized to avoid SMP problems with This runs before SMP is initialized to avoid SMP problems with
self modifying code. This implies that assymetric systems where self modifying code. This implies that assymetric systems where
APs have less capabilities than the boot processor are not handled. APs have less capabilities than the boot processor are not handled.
In this case boot with "noreplacement". */ Tough. Make sure you disable such features by hand. */
void apply_alternatives(void *start, void *end) void apply_alternatives(void *start, void *end)
{ {
struct alt_instr *a; struct alt_instr *a;
...@@ -1442,24 +1442,12 @@ void apply_alternatives(void *start, void *end) ...@@ -1442,24 +1442,12 @@ void apply_alternatives(void *start, void *end)
} }
} }
static int no_replacement __initdata = 0;
void __init alternative_instructions(void) void __init alternative_instructions(void)
{ {
extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
if (no_replacement)
return;
apply_alternatives(__alt_instructions, __alt_instructions_end); apply_alternatives(__alt_instructions, __alt_instructions_end);
} }
static int __init noreplacement_setup(char *s)
{
no_replacement = 1;
return 0;
}
__setup("noreplacement", noreplacement_setup);
static char * __init machine_specific_memory_setup(void); static char * __init machine_specific_memory_setup(void);
#ifdef CONFIG_MCA #ifdef CONFIG_MCA
......
...@@ -1600,11 +1600,11 @@ sys_clone: flushw ...@@ -1600,11 +1600,11 @@ sys_clone: flushw
ba,pt %xcc, sparc_do_fork ba,pt %xcc, sparc_do_fork
add %sp, PTREGS_OFF, %o2 add %sp, PTREGS_OFF, %o2
ret_from_syscall: ret_from_syscall:
/* Clear SPARC_FLAG_NEWCHILD, switch_to leaves thread.flags in /* Clear current_thread_info()->new_child, and
* %o7 for us. Check performance counter stuff too. * check performance counter stuff too.
*/ */
andn %o7, _TIF_NEWCHILD, %l0 stb %g0, [%g6 + TI_NEW_CHILD]
stx %l0, [%g6 + TI_FLAGS] ldx [%g6 + TI_FLAGS], %l0
call schedule_tail call schedule_tail
mov %g7, %o0 mov %g7, %o0
andcc %l0, _TIF_PERFCTR, %g0 andcc %l0, _TIF_PERFCTR, %g0
...@@ -1720,12 +1720,11 @@ ret_sys_call: ...@@ -1720,12 +1720,11 @@ ret_sys_call:
/* Check if force_successful_syscall_return() /* Check if force_successful_syscall_return()
* was invoked. * was invoked.
*/ */
ldx [%curptr + TI_FLAGS], %l0 ldub [%curptr + TI_SYS_NOERROR], %l0
andcc %l0, _TIF_SYSCALL_SUCCESS, %g0 brz,pt %l0, 1f
be,pt %icc, 1f nop
andn %l0, _TIF_SYSCALL_SUCCESS, %l0
ba,pt %xcc, 80f ba,pt %xcc, 80f
stx %l0, [%curptr + TI_FLAGS] stb %g0, [%curptr + TI_SYS_NOERROR]
1: 1:
cmp %o0, -ERESTART_RESTARTBLOCK cmp %o0, -ERESTART_RESTARTBLOCK
......
...@@ -782,8 +782,14 @@ static void distribute_irqs(void) ...@@ -782,8 +782,14 @@ static void distribute_irqs(void)
} }
#endif #endif
struct sun5_timer {
u64 count0;
u64 limit0;
u64 count1;
u64 limit1;
};
struct sun5_timer *prom_timers; static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1; static u64 prom_limit0, prom_limit1;
static void map_prom_timers(void) static void map_prom_timers(void)
...@@ -839,18 +845,6 @@ static void kill_prom_timer(void) ...@@ -839,18 +845,6 @@ static void kill_prom_timer(void)
: "g1", "g2"); : "g1", "g2");
} }
void enable_prom_timer(void)
{
if (!prom_timers)
return;
/* Set it to whatever was there before. */
prom_timers->limit1 = prom_limit1;
prom_timers->count1 = 0;
prom_timers->limit0 = prom_limit0;
prom_timers->count0 = 0;
}
void init_irqwork_curcpu(void) void init_irqwork_curcpu(void)
{ {
register struct irq_work_struct *workp asm("o2"); register struct irq_work_struct *workp asm("o2");
......
...@@ -621,8 +621,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, ...@@ -621,8 +621,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ)); memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ));
t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) | t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) |
_TIF_NEWCHILD |
(((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT); (((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT);
t->new_child = 1;
t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS; t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
t->kregs = (struct pt_regs *)(child_trap_frame+sizeof(struct sparc_stackf)); t->kregs = (struct pt_regs *)(child_trap_frame+sizeof(struct sparc_stackf));
t->fpsaved[0] = 0; t->fpsaved[0] = 0;
......
...@@ -137,7 +137,7 @@ void __init smp_callin(void) ...@@ -137,7 +137,7 @@ void __init smp_callin(void)
/* Clear this or we will die instantly when we /* Clear this or we will die instantly when we
* schedule back to this idler... * schedule back to this idler...
*/ */
clear_thread_flag(TIF_NEWCHILD); current_thread_info()->new_child = 0;
/* Attach to the address space of init_task. */ /* Attach to the address space of init_task. */
atomic_inc(&init_mm.mm_count); atomic_inc(&init_mm.mm_count);
......
...@@ -2125,6 +2125,8 @@ void __init trap_init(void) ...@@ -2125,6 +2125,8 @@ void __init trap_init(void)
TI_PCR != offsetof(struct thread_info, pcr_reg) || TI_PCR != offsetof(struct thread_info, pcr_reg) ||
TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) || TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) ||
TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) || TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
TI_FPREGS != offsetof(struct thread_info, fpregs) || TI_FPREGS != offsetof(struct thread_info, fpregs) ||
(TI_FPREGS & (64 - 1))) (TI_FPREGS & (64 - 1)))
thread_info_offsets_are_bolixed_dave(); thread_info_offsets_are_bolixed_dave();
......
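The two lines added above extend a consistency check: sparc64 assembly reaches into struct thread_info through hand-maintained byte offsets (TI_NEW_CHILD, TI_SYS_NOERROR, ...), so trap_init() verifies every such constant against offsetof() and bails out loudly if the structure layout has drifted. A self-contained userspace sketch of the same idea, with a stand-in structure and constants:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct thread_info_demo {
        uint64_t      flags;
        unsigned char new_child;
        unsigned char syscall_noerror;
};

/* Offsets the "assembly side" was built against. */
#define TI_NEW_CHILD_DEMO   8
#define TI_SYS_NOERROR_DEMO 9

int main(void)
{
        if (TI_NEW_CHILD_DEMO   != offsetof(struct thread_info_demo, new_child) ||
            TI_SYS_NOERROR_DEMO != offsetof(struct thread_info_demo, syscall_noerror)) {
                fprintf(stderr, "thread_info offsets are bolixed\n");
                return 1;
        }
        puts("offsets match");
        return 0;
}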
...@@ -41,7 +41,7 @@ static unsigned int crypt_slow(const struct cipher_desc *desc, ...@@ -41,7 +41,7 @@ static unsigned int crypt_slow(const struct cipher_desc *desc,
struct scatter_walk *in, struct scatter_walk *in,
struct scatter_walk *out, unsigned int bsize) struct scatter_walk *out, unsigned int bsize)
{ {
unsigned int alignmask = crypto_tfm_alg_alignmask(desc->tfm); unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);
u8 buffer[bsize * 2 + alignmask]; u8 buffer[bsize * 2 + alignmask];
u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
u8 *dst = src + bsize; u8 *dst = src + bsize;
...@@ -160,7 +160,7 @@ static int crypt_iv_unaligned(struct cipher_desc *desc, ...@@ -160,7 +160,7 @@ static int crypt_iv_unaligned(struct cipher_desc *desc,
unsigned int nbytes) unsigned int nbytes)
{ {
struct crypto_tfm *tfm = desc->tfm; struct crypto_tfm *tfm = desc->tfm;
unsigned int alignmask = crypto_tfm_alg_alignmask(tfm); unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
u8 *iv = desc->info; u8 *iv = desc->info;
if (unlikely(((unsigned long)iv & alignmask))) { if (unlikely(((unsigned long)iv & alignmask))) {
...@@ -424,7 +424,7 @@ int crypto_init_cipher_ops(struct crypto_tfm *tfm) ...@@ -424,7 +424,7 @@ int crypto_init_cipher_ops(struct crypto_tfm *tfm)
} }
if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) { if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {
unsigned int align; unsigned long align;
unsigned long addr; unsigned long addr;
switch (crypto_tfm_alg_blocksize(tfm)) { switch (crypto_tfm_alg_blocksize(tfm)) {
......
...@@ -75,7 +75,7 @@ static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg, ...@@ -75,7 +75,7 @@ static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg,
switch (flags & CRYPTO_TFM_MODE_MASK) { switch (flags & CRYPTO_TFM_MODE_MASK) {
case CRYPTO_TFM_MODE_CBC: case CRYPTO_TFM_MODE_CBC:
len = ALIGN(len, alg->cra_alignmask + 1); len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
len += alg->cra_blocksize; len += alg->cra_blocksize;
break; break;
} }
......
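The type changes above matter on 64-bit builds: the cipher code aligns unsigned long addresses with expressions like ALIGN(addr, alignmask + 1), and if the mask is only an unsigned int, the ~mask inside ALIGN() zero-extends and wipes the upper 32 bits of the address. A stand-alone illustration of that truncation on an LP64 machine; the ALIGN definition mirrors the kernel's, the values are made up:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned long addr   = 0x100000010UL;   /* needs more than 32 bits */
        unsigned int  mask32 = 15;              /* alignmask as unsigned int */
        unsigned long mask64 = 15;              /* alignmask widened first   */

        printf("truncated: %#lx\n", ALIGN(addr, mask32 + 1)); /* high bits lost */
        printf("correct:   %#lx\n", ALIGN(addr, mask64 + 1));
        return 0;
}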
...@@ -170,22 +170,19 @@ acpi_ec_enter_burst_mode ( ...@@ -170,22 +170,19 @@ acpi_ec_enter_burst_mode (
status = acpi_ec_read_status(ec); status = acpi_ec_read_status(ec);
if (status != -EINVAL && if (status != -EINVAL &&
!(status & ACPI_EC_FLAG_BURST)){ !(status & ACPI_EC_FLAG_BURST)){
ACPI_DEBUG_PRINT((ACPI_DB_INFO,"entering burst mode \n"));
acpi_hw_low_level_write(8, ACPI_EC_BURST_ENABLE, &ec->command_addr); acpi_hw_low_level_write(8, ACPI_EC_BURST_ENABLE, &ec->command_addr);
status = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); status = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF);
if (status){ if (status){
acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR);
ACPI_DEBUG_PRINT((ACPI_DB_ERROR," status = %d\n", status));
return_VALUE(-EINVAL); return_VALUE(-EINVAL);
} }
acpi_hw_low_level_read(8, &tmp, &ec->data_addr); acpi_hw_low_level_read(8, &tmp, &ec->data_addr);
acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR);
if(tmp != 0x90 ) {/* Burst ACK byte*/ if(tmp != 0x90 ) {/* Burst ACK byte*/
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,"Ack failed \n"));
return_VALUE(-EINVAL); return_VALUE(-EINVAL);
} }
} else }
ACPI_DEBUG_PRINT((ACPI_DB_INFO,"already be in burst mode \n"));
atomic_set(&ec->leaving_burst , 0); atomic_set(&ec->leaving_burst , 0);
return_VALUE(0); return_VALUE(0);
} }
...@@ -202,7 +199,6 @@ acpi_ec_leave_burst_mode ( ...@@ -202,7 +199,6 @@ acpi_ec_leave_burst_mode (
status = acpi_ec_read_status(ec); status = acpi_ec_read_status(ec);
if (status != -EINVAL && if (status != -EINVAL &&
(status & ACPI_EC_FLAG_BURST)){ (status & ACPI_EC_FLAG_BURST)){
ACPI_DEBUG_PRINT((ACPI_DB_INFO,"leaving burst mode\n"));
acpi_hw_low_level_write(8, ACPI_EC_BURST_DISABLE, &ec->command_addr); acpi_hw_low_level_write(8, ACPI_EC_BURST_DISABLE, &ec->command_addr);
status = acpi_ec_wait(ec, ACPI_EC_FLAG_IBF); status = acpi_ec_wait(ec, ACPI_EC_FLAG_IBF);
if (status){ if (status){
...@@ -212,14 +208,7 @@ acpi_ec_leave_burst_mode ( ...@@ -212,14 +208,7 @@ acpi_ec_leave_burst_mode (
} }
acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR);
status = acpi_ec_read_status(ec); status = acpi_ec_read_status(ec);
if (status != -EINVAL && }
(status & ACPI_EC_FLAG_BURST)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,"------->status fail\n"));
return_VALUE(-EINVAL);
}
}else
ACPI_DEBUG_PRINT((ACPI_DB_INFO,"already be in Non-burst mode \n"));
ACPI_DEBUG_PRINT((ACPI_DB_INFO,"leaving burst mode\n"));
return_VALUE(0); return_VALUE(0);
} }
......
...@@ -794,7 +794,9 @@ static void drain_rx_pools (amb_dev * dev) { ...@@ -794,7 +794,9 @@ static void drain_rx_pools (amb_dev * dev) {
drain_rx_pool (dev, pool); drain_rx_pool (dev, pool);
} }
static inline void fill_rx_pool (amb_dev * dev, unsigned char pool, int priority) { static inline void fill_rx_pool (amb_dev * dev, unsigned char pool,
unsigned int __nocast priority)
{
rx_in rx; rx_in rx;
amb_rxq * rxq; amb_rxq * rxq;
......
...@@ -1374,7 +1374,8 @@ static void reset_chip (struct fs_dev *dev) ...@@ -1374,7 +1374,8 @@ static void reset_chip (struct fs_dev *dev)
} }
} }
static void __devinit *aligned_kmalloc (int size, int flags, int alignment) static void __devinit *aligned_kmalloc (int size, unsigned int __nocast flags,
int alignment)
{ {
void *t; void *t;
...@@ -1464,7 +1465,8 @@ static inline int nr_buffers_in_freepool (struct fs_dev *dev, struct freepool *f ...@@ -1464,7 +1465,8 @@ static inline int nr_buffers_in_freepool (struct fs_dev *dev, struct freepool *f
does. I've seen "receive abort: no buffers" and things started does. I've seen "receive abort: no buffers" and things started
working again after that... -- REW */ working again after that... -- REW */
static void top_off_fp (struct fs_dev *dev, struct freepool *fp, int gfp_flags) static void top_off_fp (struct fs_dev *dev, struct freepool *fp,
unsigned int __nocast gfp_flags)
{ {
struct FS_BPENTRY *qe, *ne; struct FS_BPENTRY *qe, *ne;
struct sk_buff *skb; struct sk_buff *skb;
......
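The __nocast annotations added above are aimed at sparse rather than gcc: they make the static checker complain when a bare integer is silently converted into the allocation-flags parameter, which is how gfp mix-ups were caught before a dedicated gfp_t type existed. A minimal sketch of the pattern, assuming kernel context; the wrapper function is a placeholder:

#include <linux/compiler.h>     /* __nocast */
#include <linux/slab.h>

/* Placeholder wrapper: the parameter annotation, not the body, is the point. */
static void *demo_alloc(size_t size, unsigned int __nocast gfp_flags)
{
        return kmalloc(size, gfp_flags);
}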
...@@ -57,7 +57,6 @@ ...@@ -57,7 +57,6 @@
#include <linux/config.h> #include <linux/config.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <linux/pci.h> #include <linux/pci.h>
......
...@@ -46,6 +46,7 @@ static char const rcsid[] = ...@@ -46,6 +46,7 @@ static char const rcsid[] =
#include <linux/init.h> #include <linux/init.h>
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/wait.h> #include <linux/wait.h>
#include <linux/jiffies.h>
#include <asm/semaphore.h> #include <asm/semaphore.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
...@@ -780,7 +781,7 @@ push_on_scq(struct idt77252_dev *card, struct vc_map *vc, struct sk_buff *skb) ...@@ -780,7 +781,7 @@ push_on_scq(struct idt77252_dev *card, struct vc_map *vc, struct sk_buff *skb)
return 0; return 0;
out: out:
if (jiffies - scq->trans_start > HZ) { if (time_after(jiffies, scq->trans_start + HZ)) {
printk("%s: Error pushing TBD for %d.%d\n", printk("%s: Error pushing TBD for %d.%d\n",
card->name, vc->tx_vcc->vpi, vc->tx_vcc->vci); card->name, vc->tx_vcc->vpi, vc->tx_vcc->vci);
#ifdef CONFIG_ATM_IDT77252_DEBUG #ifdef CONFIG_ATM_IDT77252_DEBUG
......
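The time_after() conversion above (and the identical ones in the WAN and PCMCIA drivers further down) is about jiffies wraparound: comparing raw counter values with '>' misfires near the wrap point, whereas comparing the signed difference keeps working. A stand-alone illustration; the macro body mirrors <linux/jiffies.h>, minus its type checking:

#include <stdio.h>

#define time_after(a, b) ((long)(b) - (long)(a) < 0)

int main(void)
{
        unsigned long deadline = (unsigned long)-5;  /* just before the counter wraps */
        unsigned long now      = 10;                 /* just after it wrapped         */

        printf("naive now > deadline : %d\n", now > deadline);            /* 0: misses the expiry */
        printf("time_after(now, ...) : %d\n", time_after(now, deadline)); /* 1: correct           */
        return 0;
}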
...@@ -16,9 +16,9 @@ ...@@ -16,9 +16,9 @@
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/ioport.h> /* for request_region */
#include <linux/uio.h> #include <linux/uio.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/atm_zatm.h> #include <linux/atm_zatm.h>
#include <linux/capability.h> #include <linux/capability.h>
#include <linux/bitops.h> #include <linux/bitops.h>
...@@ -1257,22 +1257,22 @@ static int __init zatm_init(struct atm_dev *dev) ...@@ -1257,22 +1257,22 @@ static int __init zatm_init(struct atm_dev *dev)
static int __init zatm_start(struct atm_dev *dev) static int __init zatm_start(struct atm_dev *dev)
{ {
struct zatm_dev *zatm_dev; struct zatm_dev *zatm_dev = ZATM_DEV(dev);
struct pci_dev *pdev = zatm_dev->pci_dev;
unsigned long curr; unsigned long curr;
int pools,vccs,rx; int pools,vccs,rx;
int error,i,ld; int error, i, ld;
DPRINTK("zatm_start\n"); DPRINTK("zatm_start\n");
zatm_dev = ZATM_DEV(dev);
zatm_dev->rx_map = zatm_dev->tx_map = NULL; zatm_dev->rx_map = zatm_dev->tx_map = NULL;
for (i = 0; i < NR_MBX; i++) for (i = 0; i < NR_MBX; i++)
zatm_dev->mbx_start[i] = 0; zatm_dev->mbx_start[i] = 0;
if (request_irq(zatm_dev->irq,&zatm_int,SA_SHIRQ,DEV_LABEL,dev)) { error = request_irq(zatm_dev->irq, zatm_int, SA_SHIRQ, DEV_LABEL, dev);
printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n", if (error < 0) {
dev->number,zatm_dev->irq); printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
return -EAGAIN; dev->number,zatm_dev->irq);
goto done;
} }
request_region(zatm_dev->base,uPD98401_PORTS,DEV_LABEL);
/* define memory regions */ /* define memory regions */
pools = NR_POOLS; pools = NR_POOLS;
if (NR_SHAPERS*SHAPER_SIZE > pools*POOL_SIZE) if (NR_SHAPERS*SHAPER_SIZE > pools*POOL_SIZE)
...@@ -1299,51 +1299,66 @@ static int __init zatm_start(struct atm_dev *dev) ...@@ -1299,51 +1299,66 @@ static int __init zatm_start(struct atm_dev *dev)
"%ld VCs\n",dev->number,NR_SHAPERS,pools,rx, "%ld VCs\n",dev->number,NR_SHAPERS,pools,rx,
(zatm_dev->mem-curr*4)/VC_SIZE); (zatm_dev->mem-curr*4)/VC_SIZE);
/* create mailboxes */ /* create mailboxes */
for (i = 0; i < NR_MBX; i++) for (i = 0; i < NR_MBX; i++) {
if (mbx_entries[i]) { void *mbx;
unsigned long here; dma_addr_t mbx_dma;
here = (unsigned long) kmalloc(2*MBX_SIZE(i), if (!mbx_entries[i])
GFP_KERNEL); continue;
if (!here) { mbx = pci_alloc_consistent(pdev, 2*MBX_SIZE(i), &mbx_dma);
error = -ENOMEM; if (!mbx) {
goto out; error = -ENOMEM;
} goto out;
if ((here^(here+MBX_SIZE(i))) & ~0xffffUL)/* paranoia */
here = (here & ~0xffffUL)+0x10000;
zatm_dev->mbx_start[i] = here;
if ((here^virt_to_bus((void *) here)) & 0xffff) {
printk(KERN_ERR DEV_LABEL "(itf %d): system "
"bus incompatible with driver\n",
dev->number);
error = -ENODEV;
goto out;
}
DPRINTK("mbx@0x%08lx-0x%08lx\n",here,here+MBX_SIZE(i));
zatm_dev->mbx_end[i] = (here+MBX_SIZE(i)) & 0xffff;
zout(virt_to_bus((void *) here) >> 16,MSH(i));
zout(virt_to_bus((void *) here),MSL(i));
zout((here+MBX_SIZE(i)) & 0xffff,MBA(i));
zout(here & 0xffff,MTA(i));
zout(here & 0xffff,MWA(i));
} }
/*
* Alignment provided by pci_alloc_consistent() isn't enough
* for this device.
*/
if (((unsigned long)mbx ^ mbx_dma) & 0xffff) {
printk(KERN_ERR DEV_LABEL "(itf %d): system "
"bus incompatible with driver\n", dev->number);
pci_free_consistent(pdev, 2*MBX_SIZE(i), mbx, mbx_dma);
error = -ENODEV;
goto out;
}
DPRINTK("mbx@0x%08lx-0x%08lx\n", mbx, mbx + MBX_SIZE(i));
zatm_dev->mbx_start[i] = (unsigned long)mbx;
zatm_dev->mbx_dma[i] = mbx_dma;
zatm_dev->mbx_end[i] = (zatm_dev->mbx_start[i] + MBX_SIZE(i)) &
0xffff;
zout(mbx_dma >> 16, MSH(i));
zout(mbx_dma, MSL(i));
zout(zatm_dev->mbx_end[i], MBA(i));
zout((unsigned long)mbx & 0xffff, MTA(i));
zout((unsigned long)mbx & 0xffff, MWA(i));
}
error = start_tx(dev); error = start_tx(dev);
if (error) goto out; if (error)
goto out;
error = start_rx(dev); error = start_rx(dev);
if (error) goto out; if (error)
goto out_tx;
error = dev->phy->start(dev); error = dev->phy->start(dev);
if (error) goto out; if (error)
goto out_rx;
zout(0xffffffff,IMR); /* enable interrupts */ zout(0xffffffff,IMR); /* enable interrupts */
/* enable TX & RX */ /* enable TX & RX */
zout(zin(GMR) | uPD98401_GMR_SE | uPD98401_GMR_RE,GMR); zout(zin(GMR) | uPD98401_GMR_SE | uPD98401_GMR_RE,GMR);
return 0; done:
out: return error;
for (i = 0; i < NR_MBX; i++)
kfree(zatm_dev->mbx_start[i]); out_rx:
kfree(zatm_dev->rx_map); kfree(zatm_dev->rx_map);
out_tx:
kfree(zatm_dev->tx_map); kfree(zatm_dev->tx_map);
out:
while (i-- > 0) {
pci_free_consistent(pdev, 2*MBX_SIZE(i),
(void *)zatm_dev->mbx_start[i],
zatm_dev->mbx_dma[i]);
}
free_irq(zatm_dev->irq, dev); free_irq(zatm_dev->irq, dev);
return error; goto done;
} }
......
...@@ -73,6 +73,7 @@ struct zatm_dev { ...@@ -73,6 +73,7 @@ struct zatm_dev {
int chans; /* map size, must be 2^n */ int chans; /* map size, must be 2^n */
/*-------------------------------- mailboxes */ /*-------------------------------- mailboxes */
unsigned long mbx_start[NR_MBX];/* start addresses */ unsigned long mbx_start[NR_MBX];/* start addresses */
dma_addr_t mbx_dma[NR_MBX];
u16 mbx_end[NR_MBX]; /* end offset (in bytes) */ u16 mbx_end[NR_MBX]; /* end offset (in bytes) */
/*-------------------------------- other pointers */ /*-------------------------------- other pointers */
u32 pool_base; /* Free buffer pool dsc (word addr) */ u32 pool_base; /* Free buffer pool dsc (word addr) */
......
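The zatm rewrite above drops the kmalloc() plus virt_to_bus() combination in favour of the DMA API: pci_alloc_consistent() returns both a CPU pointer and a bus address, the dma_addr_t is kept for teardown, and failures unwind through pci_free_consistent(). A kernel-context sketch of that pattern with placeholder names, not the driver code itself:

#include <linux/pci.h>
#include <linux/errno.h>

struct demo_card {
        void       *mbx_cpu;    /* CPU-side pointer into the mailbox    */
        dma_addr_t  mbx_dma;    /* bus address to program into the chip */
};

static int demo_alloc_mailbox(struct pci_dev *pdev, struct demo_card *card,
                              size_t size)
{
        card->mbx_cpu = pci_alloc_consistent(pdev, size, &card->mbx_dma);
        if (!card->mbx_cpu)
                return -ENOMEM;
        /* hardware registers get card->mbx_dma, never virt_to_bus() */
        return 0;
}

static void demo_free_mailbox(struct pci_dev *pdev, struct demo_card *card,
                              size_t size)
{
        pci_free_consistent(pdev, size, card->mbx_cpu, card->mbx_dma);
}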
...@@ -2544,9 +2544,25 @@ config SHAPER ...@@ -2544,9 +2544,25 @@ config SHAPER
config NETCONSOLE config NETCONSOLE
tristate "Network console logging support (EXPERIMENTAL)" tristate "Network console logging support (EXPERIMENTAL)"
depends on NETDEVICES && EXPERIMENTAL depends on NETDEVICES && INET && EXPERIMENTAL
---help--- ---help---
If you want to log kernel messages over the network, enable this. If you want to log kernel messages over the network, enable this.
See <file:Documentation/networking/netconsole.txt> for details. See <file:Documentation/networking/netconsole.txt> for details.
config NETPOLL
def_bool NETCONSOLE
config NETPOLL_RX
bool "Netpoll support for trapping incoming packets"
default n
depends on NETPOLL
config NETPOLL_TRAP
bool "Netpoll traffic trapping"
default n
depends on NETPOLL
config NET_POLL_CONTROLLER
def_bool NETPOLL
endmenu endmenu
...@@ -132,7 +132,7 @@ static struct net_device_stats *eql_get_stats(struct net_device *dev); ...@@ -132,7 +132,7 @@ static struct net_device_stats *eql_get_stats(struct net_device *dev);
#define eql_is_slave(dev) ((dev->flags & IFF_SLAVE) == IFF_SLAVE) #define eql_is_slave(dev) ((dev->flags & IFF_SLAVE) == IFF_SLAVE)
#define eql_is_master(dev) ((dev->flags & IFF_MASTER) == IFF_MASTER) #define eql_is_master(dev) ((dev->flags & IFF_MASTER) == IFF_MASTER)
static void eql_kill_one_slave(slave_t *slave); static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
static void eql_timer(unsigned long param) static void eql_timer(unsigned long param)
{ {
...@@ -149,7 +149,7 @@ static void eql_timer(unsigned long param) ...@@ -149,7 +149,7 @@ static void eql_timer(unsigned long param)
if (slave->bytes_queued < 0) if (slave->bytes_queued < 0)
slave->bytes_queued = 0; slave->bytes_queued = 0;
} else { } else {
eql_kill_one_slave(slave); eql_kill_one_slave(&eql->queue, slave);
} }
} }
...@@ -214,9 +214,10 @@ static int eql_open(struct net_device *dev) ...@@ -214,9 +214,10 @@ static int eql_open(struct net_device *dev)
return 0; return 0;
} }
static void eql_kill_one_slave(slave_t *slave) static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)
{ {
list_del(&slave->list); list_del(&slave->list);
queue->num_slaves--;
slave->dev->flags &= ~IFF_SLAVE; slave->dev->flags &= ~IFF_SLAVE;
dev_put(slave->dev); dev_put(slave->dev);
kfree(slave); kfree(slave);
...@@ -232,8 +233,7 @@ static void eql_kill_slave_queue(slave_queue_t *queue) ...@@ -232,8 +233,7 @@ static void eql_kill_slave_queue(slave_queue_t *queue)
list_for_each_safe(this, tmp, head) { list_for_each_safe(this, tmp, head) {
slave_t *s = list_entry(this, slave_t, list); slave_t *s = list_entry(this, slave_t, list);
eql_kill_one_slave(s); eql_kill_one_slave(queue, s);
queue->num_slaves--;
} }
spin_unlock_bh(&queue->lock); spin_unlock_bh(&queue->lock);
...@@ -318,7 +318,7 @@ static slave_t *__eql_schedule_slaves(slave_queue_t *queue) ...@@ -318,7 +318,7 @@ static slave_t *__eql_schedule_slaves(slave_queue_t *queue)
} }
} else { } else {
/* We found a dead slave, kill it. */ /* We found a dead slave, kill it. */
eql_kill_one_slave(slave); eql_kill_one_slave(queue, slave);
} }
} }
return best_slave; return best_slave;
...@@ -393,7 +393,7 @@ static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave) ...@@ -393,7 +393,7 @@ static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
duplicate_slave = __eql_find_slave_dev(queue, slave->dev); duplicate_slave = __eql_find_slave_dev(queue, slave->dev);
if (duplicate_slave != 0) if (duplicate_slave != 0)
eql_kill_one_slave(duplicate_slave); eql_kill_one_slave(queue, duplicate_slave);
list_add(&slave->list, &queue->all_slaves); list_add(&slave->list, &queue->all_slaves);
queue->num_slaves++; queue->num_slaves++;
...@@ -471,7 +471,7 @@ static int eql_emancipate(struct net_device *master_dev, slaving_request_t __use ...@@ -471,7 +471,7 @@ static int eql_emancipate(struct net_device *master_dev, slaving_request_t __use
slave_dev); slave_dev);
if (slave) { if (slave) {
eql_kill_one_slave(slave); eql_kill_one_slave(&eql->queue, slave);
ret = 0; ret = 0;
} }
} }
......
...@@ -41,6 +41,7 @@ ...@@ -41,6 +41,7 @@
#include <linux/ioport.h> #include <linux/ioport.h>
#include <linux/ethtool.h> #include <linux/ethtool.h>
#include <linux/mii.h> #include <linux/mii.h>
#include <linux/jiffies.h>
#include <pcmcia/cs_types.h> #include <pcmcia/cs_types.h>
#include <pcmcia/cs.h> #include <pcmcia/cs.h>
...@@ -2092,7 +2093,7 @@ static void media_check(u_long arg) ...@@ -2092,7 +2093,7 @@ static void media_check(u_long arg)
} }
/* Ignore collisions unless we've had no rx's recently */ /* Ignore collisions unless we've had no rx's recently */
if (jiffies - dev->last_rx > HZ) { if (time_after(jiffies, dev->last_rx + HZ)) {
if (smc->tx_err || (smc->media_status & EPH_16COL)) if (smc->tx_err || (smc->media_status & EPH_16COL))
media |= EPH_16COL; media |= EPH_16COL;
} }
......
...@@ -152,6 +152,7 @@ ...@@ -152,6 +152,7 @@
#include <asm/io.h> /* for inb(), outb(), etc. */ #include <asm/io.h> /* for inb(), outb(), etc. */
#include <linux/time.h> /* for do_gettimeofday */ #include <linux/time.h> /* for do_gettimeofday */
#include <linux/in.h> /* sockaddr_in */ #include <linux/in.h> /* sockaddr_in */
#include <linux/jiffies.h> /* time_after() macro */
#include <asm/errno.h> #include <asm/errno.h>
#include <linux/ip.h> #include <linux/ip.h>
...@@ -773,7 +774,7 @@ static int update(struct wan_device* wandev) ...@@ -773,7 +774,7 @@ static int update(struct wan_device* wandev)
for(;;) { for(;;) {
if(card->u.f.update_comms_stats == 0) if(card->u.f.update_comms_stats == 0)
break; break;
if ((jiffies - timeout) > (1 * HZ)){ if (time_after(jiffies, timeout + 1 * HZ)){
card->u.f.update_comms_stats = 0; card->u.f.update_comms_stats = 0;
return -EAGAIN; return -EAGAIN;
} }
...@@ -4799,7 +4800,7 @@ static void trigger_unconfig_fr(struct net_device *dev) ...@@ -4799,7 +4800,7 @@ static void trigger_unconfig_fr(struct net_device *dev)
{ {
fr_channel_t *chan = dev->priv; fr_channel_t *chan = dev->priv;
volatile sdla_t *card = chan->card; volatile sdla_t *card = chan->card;
u32 timeout; unsigned long timeout;
fr508_flags_t* flags = card->flags; fr508_flags_t* flags = card->flags;
int reset_critical=0; int reset_critical=0;
...@@ -4821,7 +4822,7 @@ static void trigger_unconfig_fr(struct net_device *dev) ...@@ -4821,7 +4822,7 @@ static void trigger_unconfig_fr(struct net_device *dev)
if(!(card->u.f.timer_int_enabled & TMR_INT_ENABLED_UNCONFIG)) if(!(card->u.f.timer_int_enabled & TMR_INT_ENABLED_UNCONFIG))
break; break;
if ((jiffies - timeout) > (1 * HZ)){ if (time_after(jiffies, timeout + 1 * HZ)){
card->u.f.timer_int_enabled &= ~TMR_INT_ENABLED_UNCONFIG; card->u.f.timer_int_enabled &= ~TMR_INT_ENABLED_UNCONFIG;
printk(KERN_INFO "%s: Failed to delete DLCI %i\n", printk(KERN_INFO "%s: Failed to delete DLCI %i\n",
card->devname,chan->dlci); card->devname,chan->dlci);
......
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
#include <linux/wanrouter.h> /* WAN router definitions */ #include <linux/wanrouter.h> /* WAN router definitions */
#include <linux/wanpipe.h> /* WANPIPE common user API definitions */ #include <linux/wanpipe.h> /* WANPIPE common user API definitions */
#include <linux/if_arp.h> /* ARPHRD_* defines */ #include <linux/if_arp.h> /* ARPHRD_* defines */
#include <linux/jiffies.h> /* time_after() macro */
#include <linux/inetdevice.h> #include <linux/inetdevice.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
...@@ -164,7 +165,7 @@ int wpft1_init (sdla_t* card, wandev_conf_t* conf) ...@@ -164,7 +165,7 @@ int wpft1_init (sdla_t* card, wandev_conf_t* conf)
timeout = jiffies; timeout = jiffies;
while (mb->return_code != 'I') /* Wait 1s for board to initialize */ while (mb->return_code != 'I') /* Wait 1s for board to initialize */
if ((jiffies - timeout) > 1*HZ) break; if (time_after(jiffies, timeout + 1*HZ)) break;
if (mb->return_code != 'I') { if (mb->return_code != 'I') {
printk(KERN_INFO printk(KERN_INFO
......
...@@ -101,6 +101,7 @@ ...@@ -101,6 +101,7 @@
#include <linux/if_arp.h> /* ARPHRD_* defines */ #include <linux/if_arp.h> /* ARPHRD_* defines */
#include <asm/byteorder.h> /* htons(), etc. */ #include <asm/byteorder.h> /* htons(), etc. */
#include <linux/in.h> /* sockaddr_in */ #include <linux/in.h> /* sockaddr_in */
#include <linux/jiffies.h> /* time_after() macro */
#include <asm/uaccess.h> #include <asm/uaccess.h>
...@@ -482,7 +483,7 @@ static int update(struct wan_device *wandev) ...@@ -482,7 +483,7 @@ static int update(struct wan_device *wandev)
if(ppp_priv_area->update_comms_stats == 0){ if(ppp_priv_area->update_comms_stats == 0){
break; break;
} }
if ((jiffies - timeout) > (1 * HZ)){ if (time_after(jiffies, timeout + 1 * HZ)){
ppp_priv_area->update_comms_stats = 0; ppp_priv_area->update_comms_stats = 0;
ppp_priv_area->timer_int_enabled &= ppp_priv_area->timer_int_enabled &=
~TMR_INT_ENABLED_UPDATE; ~TMR_INT_ENABLED_UPDATE;
......
...@@ -91,6 +91,7 @@ ...@@ -91,6 +91,7 @@
#include <linux/wanrouter.h> /* WAN router definitions */ #include <linux/wanrouter.h> /* WAN router definitions */
#include <linux/wanpipe.h> /* WANPIPE common user API definitions */ #include <linux/wanpipe.h> /* WANPIPE common user API definitions */
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/jiffies.h> /* time_after() macro */
#include <asm/byteorder.h> /* htons(), etc. */ #include <asm/byteorder.h> /* htons(), etc. */
#include <asm/atomic.h> #include <asm/atomic.h>
#include <linux/delay.h> /* Experimental delay */ #include <linux/delay.h> /* Experimental delay */
...@@ -867,7 +868,7 @@ static int update(struct wan_device* wandev) ...@@ -867,7 +868,7 @@ static int update(struct wan_device* wandev)
if (!(card->u.x.timer_int_enabled & TMR_INT_ENABLED_UPDATE)){ if (!(card->u.x.timer_int_enabled & TMR_INT_ENABLED_UPDATE)){
break; break;
} }
if ((jiffies-timeout) > 1*HZ){ if (time_after(jiffies, timeout + 1*HZ)){
card->u.x.timer_int_enabled &= ~TMR_INT_ENABLED_UPDATE; card->u.x.timer_int_enabled &= ~TMR_INT_ENABLED_UPDATE;
return -EAGAIN; return -EAGAIN;
} }
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include <linux/wanrouter.h> /* WAN router definitions */ #include <linux/wanrouter.h> /* WAN router definitions */
#include <linux/wanpipe.h> /* WANPIPE common user API definitions */ #include <linux/wanpipe.h> /* WANPIPE common user API definitions */
#include <linux/if_arp.h> /* ARPHRD_* defines */ #include <linux/if_arp.h> /* ARPHRD_* defines */
#include <linux/jiffies.h> /* time_after() macro */
#include <linux/in.h> /* sockaddr_in */ #include <linux/in.h> /* sockaddr_in */
#include <linux/inet.h> #include <linux/inet.h>
...@@ -270,9 +271,9 @@ int wsppp_init (sdla_t* card, wandev_conf_t* conf) ...@@ -270,9 +271,9 @@ int wsppp_init (sdla_t* card, wandev_conf_t* conf)
ready to accept commands. We expect this to be completed in less ready to accept commands. We expect this to be completed in less
than 1 second. */ than 1 second. */
timeout = jiffies; timeout = jiffies + 1 * HZ;
while (mb->return_code != 'I') /* Wait 1s for board to initialize */ while (mb->return_code != 'I') /* Wait 1s for board to initialize */
if ((jiffies - timeout) > 1*HZ) break; if (time_after(jiffies, timeout)) break;
if (mb->return_code != 'I') { if (mb->return_code != 'I') {
printk(KERN_INFO printk(KERN_INFO
...@@ -493,11 +494,11 @@ static int update(struct wan_device* wandev) ...@@ -493,11 +494,11 @@ static int update(struct wan_device* wandev)
chdlc_priv_area->timer_int_enabled = TMR_INT_ENABLED_UPDATE; chdlc_priv_area->timer_int_enabled = TMR_INT_ENABLED_UPDATE;
/* wait a maximum of 1 second for the statistics to be updated */ /* wait a maximum of 1 second for the statistics to be updated */
timeout = jiffies; timeout = jiffies + 1 * HZ;
for(;;) { for(;;) {
if(chdlc_priv_area->update_comms_stats == 0) if(chdlc_priv_area->update_comms_stats == 0)
break; break;
if ((jiffies - timeout) > (1 * HZ)){ if (time_after(jiffies, timeout)){
chdlc_priv_area->update_comms_stats = 0; chdlc_priv_area->update_comms_stats = 0;
chdlc_priv_area->timer_int_enabled &= chdlc_priv_area->timer_int_enabled &=
~TMR_INT_ENABLED_UPDATE; ~TMR_INT_ENABLED_UPDATE;
......
...@@ -71,20 +71,6 @@ config SUN_JSFLASH ...@@ -71,20 +71,6 @@ config SUN_JSFLASH
# XXX Why don't we do "source drivers/char/Config.in" somewhere? # XXX Why don't we do "source drivers/char/Config.in" somewhere?
# no shit # no shit
config APM_RTC_IS_GMT
bool
depends on EXPERIMENTAL && SPARC32 && PCI
default y
help
Say Y here if your RTC (Real Time Clock a.k.a. hardware clock)
stores the time in GMT (Greenwich Mean Time). Say N if your RTC
stores localtime.
It is in fact recommended to store GMT in your RTC, because then you
don't have to worry about daylight savings time changes. The only
reason not to use GMT in your RTC is if you also run a broken OS
that doesn't understand GMT.
config RTC config RTC
tristate "PC-style Real Time Clock Support" tristate "PC-style Real Time Clock Support"
depends on PCI && EXPERIMENTAL && SPARC32 depends on PCI && EXPERIMENTAL && SPARC32
......
...@@ -1515,8 +1515,7 @@ static void aurora_close(struct tty_struct * tty, struct file * filp) ...@@ -1515,8 +1515,7 @@ static void aurora_close(struct tty_struct * tty, struct file * filp)
*/ */
timeout = jiffies+HZ; timeout = jiffies+HZ;
while(port->SRER & SRER_TXEMPTY) { while(port->SRER & SRER_TXEMPTY) {
current->state = TASK_INTERRUPTIBLE; msleep_interruptible(jiffies_to_msecs(port->timeout));
schedule_timeout(port->timeout);
if (time_after(jiffies, timeout)) if (time_after(jiffies, timeout))
break; break;
} }
...@@ -1533,8 +1532,7 @@ static void aurora_close(struct tty_struct * tty, struct file * filp) ...@@ -1533,8 +1532,7 @@ static void aurora_close(struct tty_struct * tty, struct file * filp)
port->tty = 0; port->tty = 0;
if (port->blocked_open) { if (port->blocked_open) {
if (port->close_delay) { if (port->close_delay) {
current->state = TASK_INTERRUPTIBLE; msleep_interruptible(jiffies_to_msecs(port->close_delay));
schedule_timeout(port->close_delay);
} }
wake_up_interruptible(&port->open_wait); wake_up_interruptible(&port->open_wait);
} }
......
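The aurora changes above replace the open-coded "set TASK_INTERRUPTIBLE, then schedule_timeout()" pair with msleep_interruptible(), which handles the task state itself but takes milliseconds, hence the jiffies_to_msecs() conversion of the jiffies-based port timeouts. A kernel-context sketch with a placeholder function name:

#include <linux/delay.h>
#include <linux/jiffies.h>

static void demo_close_delay(unsigned long delay_jiffies)
{
        /* replaces: current->state = TASK_INTERRUPTIBLE;
         *           schedule_timeout(delay_jiffies);       */
        if (msleep_interruptible(jiffies_to_msecs(delay_jiffies)))
                return;         /* cut short by a signal */
}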
...@@ -4,13 +4,14 @@ ...@@ -4,13 +4,14 @@
* Copyright (C) 2001 David S. Miller (davem@redhat.com) * Copyright (C) 2001 David S. Miller (davem@redhat.com)
*/ */
#define __KERNEL_SYSCALLS__
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <asm/oplib.h> #include <asm/oplib.h>
#include <asm/ebus.h> #include <asm/ebus.h>
#define __KERNEL_SYSCALLS__
static int errno; static int errno;
#include <asm/unistd.h> #include <asm/unistd.h>
......
...@@ -19,6 +19,8 @@ ...@@ -19,6 +19,8 @@
* Daniele Bellucci <bellucda@tiscali.it> * Daniele Bellucci <bellucda@tiscali.it>
*/ */
#define __KERNEL_SYSCALLS__
#include <linux/config.h> #include <linux/config.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/sched.h> #include <linux/sched.h>
...@@ -35,7 +37,6 @@ ...@@ -35,7 +37,6 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/envctrl.h> #include <asm/envctrl.h>
#define __KERNEL_SYSCALLS__
static int errno; static int errno;
#include <asm/unistd.h> #include <asm/unistd.h>
...@@ -1007,7 +1008,7 @@ static int kenvctrld(void *__unused) ...@@ -1007,7 +1008,7 @@ static int kenvctrld(void *__unused)
return -ENODEV; return -ENODEV;
} }
poll_interval = 5 * HZ; /* TODO env_mon_interval */ poll_interval = 5000; /* TODO env_mon_interval */
daemonize("kenvctrld"); daemonize("kenvctrld");
allow_signal(SIGKILL); allow_signal(SIGKILL);
...@@ -1016,10 +1017,7 @@ static int kenvctrld(void *__unused) ...@@ -1016,10 +1017,7 @@ static int kenvctrld(void *__unused)
printk(KERN_INFO "envctrl: %s starting...\n", current->comm); printk(KERN_INFO "envctrl: %s starting...\n", current->comm);
for (;;) { for (;;) {
current->state = TASK_INTERRUPTIBLE; if(msleep_interruptible(poll_interval))
schedule_timeout(poll_interval);
if(signal_pending(current))
break; break;
for (whichcpu = 0; whichcpu < ENVCTRL_MAX_CPU; ++whichcpu) { for (whichcpu = 0; whichcpu < ENVCTRL_MAX_CPU; ++whichcpu) {
......
...@@ -88,14 +88,16 @@ void vfc_i2c_delay_wakeup(struct vfc_dev *dev) ...@@ -88,14 +88,16 @@ void vfc_i2c_delay_wakeup(struct vfc_dev *dev)
void vfc_i2c_delay_no_busy(struct vfc_dev *dev, unsigned long usecs) void vfc_i2c_delay_no_busy(struct vfc_dev *dev, unsigned long usecs)
{ {
DEFINE_WAIT(wait);
init_timer(&dev->poll_timer); init_timer(&dev->poll_timer);
dev->poll_timer.expires = jiffies + dev->poll_timer.expires = jiffies + usecs_to_jiffies(usecs);
((unsigned long)usecs*(HZ))/1000000;
dev->poll_timer.data=(unsigned long)dev; dev->poll_timer.data=(unsigned long)dev;
dev->poll_timer.function=(void *)(unsigned long)vfc_i2c_delay_wakeup; dev->poll_timer.function=(void *)(unsigned long)vfc_i2c_delay_wakeup;
add_timer(&dev->poll_timer); add_timer(&dev->poll_timer);
sleep_on(&dev->poll_wait); prepare_to_wait(&dev->poll_wait, &wait, TASK_UNINTERRUPTIBLE);
schedule();
del_timer(&dev->poll_timer); del_timer(&dev->poll_timer);
finish_wait(&dev->poll_wait, &wait);
} }
void inline vfc_i2c_delay(struct vfc_dev *dev) void inline vfc_i2c_delay(struct vfc_dev *dev)
......
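The vfc change above retires sleep_on(), which can lose a wakeup that fires between arming the timer and going to sleep. prepare_to_wait() queues the task and sets its state before the caller commits to schedule(), so a wake_up() in that window is not lost. A kernel-context sketch of the pattern with a placeholder name:

#include <linux/wait.h>
#include <linux/sched.h>

static void demo_wait_for_wakeup(wait_queue_head_t *wq)
{
        DEFINE_WAIT(wait);

        prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
        schedule();             /* woken by wake_up(wq), e.g. from a timer */
        finish_wait(wq, &wait);
}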
...@@ -389,6 +389,9 @@ static void __devexit sbs_exit(struct pci_dev *dev) ...@@ -389,6 +389,9 @@ static void __devexit sbs_exit(struct pci_dev *dev)
* - 10x cards have control registers in IO and/or memory space; * - 10x cards have control registers in IO and/or memory space;
* - 20x cards have control registers in standard PCI configuration space. * - 20x cards have control registers in standard PCI configuration space.
* *
* There are also Quartet Serial cards which use Oxford Semiconductor
* 16954 quad UART PCI chip clocked by 18.432 MHz quartz.
*
* Note: some SIIG cards are probed by the parport_serial object. * Note: some SIIG cards are probed by the parport_serial object.
*/ */
...@@ -1026,6 +1029,8 @@ enum pci_board_num_t { ...@@ -1026,6 +1029,8 @@ enum pci_board_num_t {
pbn_b0_2_921600, pbn_b0_2_921600,
pbn_b0_4_921600, pbn_b0_4_921600,
pbn_b0_4_1152000,
pbn_b0_bt_1_115200, pbn_b0_bt_1_115200,
pbn_b0_bt_2_115200, pbn_b0_bt_2_115200,
pbn_b0_bt_8_115200, pbn_b0_bt_8_115200,
...@@ -1158,6 +1163,12 @@ static struct pci_board pci_boards[] __devinitdata = { ...@@ -1158,6 +1163,12 @@ static struct pci_board pci_boards[] __devinitdata = {
.base_baud = 921600, .base_baud = 921600,
.uart_offset = 8, .uart_offset = 8,
}, },
[pbn_b0_4_1152000] = {
.flags = FL_BASE0,
.num_ports = 4,
.base_baud = 1152000,
.uart_offset = 8,
},
[pbn_b0_bt_1_115200] = { [pbn_b0_bt_1_115200] = {
.flags = FL_BASE0|FL_BASE_BARS, .flags = FL_BASE0|FL_BASE_BARS,
...@@ -1755,33 +1766,30 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent) ...@@ -1755,33 +1766,30 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
static void __devexit pciserial_remove_one(struct pci_dev *dev) static void __devexit pciserial_remove_one(struct pci_dev *dev)
{ {
struct serial_private *priv = pci_get_drvdata(dev); struct serial_private *priv = pci_get_drvdata(dev);
struct pci_serial_quirk *quirk;
int i;
pci_set_drvdata(dev, NULL); pci_set_drvdata(dev, NULL);
if (priv) { for (i = 0; i < priv->nr; i++)
struct pci_serial_quirk *quirk; serial8250_unregister_port(priv->line[i]);
int i;
for (i = 0; i < priv->nr; i++)
serial8250_unregister_port(priv->line[i]);
for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) { for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) {
if (priv->remapped_bar[i]) if (priv->remapped_bar[i])
iounmap(priv->remapped_bar[i]); iounmap(priv->remapped_bar[i]);
priv->remapped_bar[i] = NULL; priv->remapped_bar[i] = NULL;
} }
/* /*
* Find the exit quirks. * Find the exit quirks.
*/ */
quirk = find_quirk(dev); quirk = find_quirk(dev);
if (quirk->exit) if (quirk->exit)
quirk->exit(dev); quirk->exit(dev);
pci_disable_device(dev); pci_disable_device(dev);
kfree(priv); kfree(priv);
}
} }
static int pciserial_suspend_one(struct pci_dev *dev, pm_message_t state) static int pciserial_suspend_one(struct pci_dev *dev, pm_message_t state)
...@@ -1977,6 +1985,9 @@ static struct pci_device_id serial_pci_tbl[] = { ...@@ -1977,6 +1985,9 @@ static struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_OXSEMI_16PCI954, { PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_OXSEMI_16PCI954,
PCI_VENDOR_ID_SPECIALIX, PCI_SUBDEVICE_ID_SPECIALIX_SPEED4, 0, 0, PCI_VENDOR_ID_SPECIALIX, PCI_SUBDEVICE_ID_SPECIALIX_SPEED4, 0, 0,
pbn_b0_4_921600 }, pbn_b0_4_921600 },
{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954,
PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL, 0, 0,
pbn_b0_4_1152000 },
{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954, { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b0_4_115200 }, pbn_b0_4_115200 },
......
...@@ -306,13 +306,6 @@ config SERIAL_S3C2410_CONSOLE ...@@ -306,13 +306,6 @@ config SERIAL_S3C2410_CONSOLE
your boot loader about how to pass options to the kernel at your boot loader about how to pass options to the kernel at
boot time.) boot time.)
config SERIAL_BAST_SIO
bool "Support for BAST SuperIO serial ports"
depends on ARCH_BAST && SERIAL_8250=y
help
Support for registerin the SuperIO chip on BAST board with
the 8250/16550 uart code.
config SERIAL_DZ config SERIAL_DZ
bool "DECstation DZ serial driver" bool "DECstation DZ serial driver"
depends on MACH_DECSTATION && MIPS32 depends on MACH_DECSTATION && MIPS32
......
...@@ -44,7 +44,6 @@ obj-$(CONFIG_SERIAL_LH7A40X) += serial_lh7a40x.o ...@@ -44,7 +44,6 @@ obj-$(CONFIG_SERIAL_LH7A40X) += serial_lh7a40x.o
obj-$(CONFIG_SERIAL_AU1X00) += au1x00_uart.o obj-$(CONFIG_SERIAL_AU1X00) += au1x00_uart.o
obj-$(CONFIG_SERIAL_DZ) += dz.o obj-$(CONFIG_SERIAL_DZ) += dz.o
obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o
obj-$(CONFIG_SERIAL_BAST_SIO) += bast_sio.o
obj-$(CONFIG_SERIAL_SGI_L1_CONSOLE) += sn_console.o obj-$(CONFIG_SERIAL_SGI_L1_CONSOLE) += sn_console.o
obj-$(CONFIG_SERIAL_CPM) += cpm_uart/ obj-$(CONFIG_SERIAL_CPM) += cpm_uart/
obj-$(CONFIG_SERIAL_IMX) += imx.o obj-$(CONFIG_SERIAL_IMX) += imx.o
......
/* linux/drivers/serial/bast_sio.c
*
* Copyright (c) 2004 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* http://www.simtec.co.uk/products/EB2410ITX/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Modifications:
* 23-Sep-2004 BJD Added copyright header
* 23-Sep-2004 BJD Added serial port remove code
*/
#include <linux/module.h>
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/types.h>
#include <asm/io.h>
#include <asm/serial.h>
#include <asm/mach-types.h>
#include <asm/arch/map.h>
#include <asm/arch/irqs.h>
#include <asm/arch/bast-map.h>
#include <asm/arch/bast-irq.h>
static int __init serial_bast_register(unsigned long port, unsigned int irq)
{
struct serial_struct serial_req;
serial_req.flags = UPF_AUTOPROBE | UPF_SHARE_IRQ;
serial_req.baud_base = BASE_BAUD;
serial_req.irq = irq;
serial_req.io_type = UPIO_MEM;
serial_req.iomap_base = port;
serial_req.iomem_base = ioremap(port, 0x10);
serial_req.iomem_reg_shift = 0;
return register_serial(&serial_req);
}
#define SERIAL_BASE (S3C2410_CS2 + BAST_PA_SUPERIO)
static int port[2] = { -1, -1 };
static int __init serial_bast_init(void)
{
if (machine_is_bast()) {
port[0] = serial_bast_register(SERIAL_BASE + 0x2f8, IRQ_PCSERIAL1);
port[1] = serial_bast_register(SERIAL_BASE + 0x3f8, IRQ_PCSERIAL2);
}
return 0;
}
static void __exit serial_bast_exit(void)
{
if (port[0] != -1)
unregister_serial(port[0]);
if (port[1] != -1)
unregister_serial(port[1]);
}
module_init(serial_bast_init);
module_exit(serial_bast_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks, ben@simtec.co.uk");
MODULE_DESCRIPTION("BAST Onboard Serial setup");
...@@ -448,19 +448,19 @@ static void speedtch_check_status(struct speedtch_instance_data *instance) ...@@ -448,19 +448,19 @@ static void speedtch_check_status(struct speedtch_instance_data *instance)
case 0: case 0:
atm_dev->signal = ATM_PHY_SIG_LOST; atm_dev->signal = ATM_PHY_SIG_LOST;
if (instance->last_status) if (instance->last_status)
atm_info(usbatm, "ADSL line is down\n"); atm_info(usbatm, "%s\n", "ADSL line is down");
/* It may never resync again unless we ask it to... */ /* It may never resync again unless we ask it to... */
ret = speedtch_start_synchro(instance); ret = speedtch_start_synchro(instance);
break; break;
case 0x08: case 0x08:
atm_dev->signal = ATM_PHY_SIG_UNKNOWN; atm_dev->signal = ATM_PHY_SIG_UNKNOWN;
atm_info(usbatm, "ADSL line is blocked?\n"); atm_info(usbatm, "%s\n", "ADSL line is blocked?");
break; break;
case 0x10: case 0x10:
atm_dev->signal = ATM_PHY_SIG_LOST; atm_dev->signal = ATM_PHY_SIG_LOST;
atm_info(usbatm, "ADSL line is synchronising\n"); atm_info(usbatm, "%s\n", "ADSL line is synchronising");
break; break;
case 0x20: case 0x20:
...@@ -502,7 +502,7 @@ static void speedtch_status_poll(unsigned long data) ...@@ -502,7 +502,7 @@ static void speedtch_status_poll(unsigned long data)
if (instance->poll_delay < MAX_POLL_DELAY) if (instance->poll_delay < MAX_POLL_DELAY)
mod_timer(&instance->status_checker.timer, jiffies + msecs_to_jiffies(instance->poll_delay)); mod_timer(&instance->status_checker.timer, jiffies + msecs_to_jiffies(instance->poll_delay));
else else
atm_warn(instance->usbatm, "Too many failures - disabling line status polling\n"); atm_warn(instance->usbatm, "%s\n", "Too many failures - disabling line status polling");
} }
static void speedtch_resubmit_int(unsigned long data) static void speedtch_resubmit_int(unsigned long data)
...@@ -545,9 +545,9 @@ static void speedtch_handle_int(struct urb *int_urb, struct pt_regs *regs) ...@@ -545,9 +545,9 @@ static void speedtch_handle_int(struct urb *int_urb, struct pt_regs *regs)
if ((count == 6) && !memcmp(up_int, instance->int_data, 6)) { if ((count == 6) && !memcmp(up_int, instance->int_data, 6)) {
del_timer(&instance->status_checker.timer); del_timer(&instance->status_checker.timer);
atm_info(usbatm, "DSL line goes up\n"); atm_info(usbatm, "%s\n", "DSL line goes up");
} else if ((count == 6) && !memcmp(down_int, instance->int_data, 6)) { } else if ((count == 6) && !memcmp(down_int, instance->int_data, 6)) {
atm_info(usbatm, "DSL line goes down\n"); atm_info(usbatm, "%s\n", "DSL line goes down");
} else { } else {
int i; int i;
......
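The speedtch logging changes above take the fixed message text out of the format-string position and pass it through a "%s" conversion instead. The commit itself does not say why; a plausible reading (an assumption, not taken from the source) is that the usbatm logging macros expect at least one argument after the format, and that keeping even literal text out of the format slot is a standard printf-hardening habit. A trivial user-space illustration of the difference:

#include <stdio.h>

/* Illustration only (user-space stand-in for atm_info). If the message
 * ever contained a '%', the first call would parse it as a conversion
 * specification; the second always prints it verbatim. */
int main(void)
{
	const char *msg = "ADSL line is down";	/* text from the diff */

	printf(msg);			/* message used as the format string */
	printf("\n%s\n", msg);		/* message passed as plain data      */
	return 0;
}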
...@@ -249,9 +249,6 @@ static void imxfb_enable_controller(struct imxfb_info *fbi) ...@@ -249,9 +249,6 @@ static void imxfb_enable_controller(struct imxfb_info *fbi)
/* disable hardware cursor */ /* disable hardware cursor */
LCDC_CPOS &= ~(CPOS_CC0 | CPOS_CC1); LCDC_CPOS &= ~(CPOS_CC0 | CPOS_CC1);
/* fixed burst length (see erratum 11) */
LCDC_DMACR = DMACR_BURST | DMACR_HM(8) | DMACR_TM(2);
LCDC_RMCR = RMCR_LCDC_EN; LCDC_RMCR = RMCR_LCDC_EN;
if(fbi->backlight_power) if(fbi->backlight_power)
...@@ -359,6 +356,7 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf ...@@ -359,6 +356,7 @@ static int imxfb_activate_var(struct fb_var_screeninfo *var, struct fb_info *inf
LCDC_PCR = fbi->pcr; LCDC_PCR = fbi->pcr;
LCDC_PWMR = fbi->pwmr; LCDC_PWMR = fbi->pwmr;
LCDC_LSCR1 = fbi->lscr1; LCDC_LSCR1 = fbi->lscr1;
LCDC_DMACR = fbi->dmacr;
return 0; return 0;
} }
...@@ -509,6 +507,7 @@ static int __init imxfb_init_fbinfo(struct device *dev) ...@@ -509,6 +507,7 @@ static int __init imxfb_init_fbinfo(struct device *dev)
fbi->cmap_inverse = inf->cmap_inverse; fbi->cmap_inverse = inf->cmap_inverse;
fbi->pcr = inf->pcr; fbi->pcr = inf->pcr;
fbi->lscr1 = inf->lscr1; fbi->lscr1 = inf->lscr1;
fbi->dmacr = inf->dmacr;
fbi->pwmr = inf->pwmr; fbi->pwmr = inf->pwmr;
fbi->lcd_power = inf->lcd_power; fbi->lcd_power = inf->lcd_power;
fbi->backlight_power = inf->backlight_power; fbi->backlight_power = inf->backlight_power;
...@@ -642,12 +641,12 @@ static int imxfb_remove(struct device *dev) ...@@ -642,12 +641,12 @@ static int imxfb_remove(struct device *dev)
{ {
struct platform_device *pdev = to_platform_device(dev); struct platform_device *pdev = to_platform_device(dev);
struct fb_info *info = dev_get_drvdata(dev); struct fb_info *info = dev_get_drvdata(dev);
struct imxfb_info *fbi = info->par;
struct resource *res; struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
/* disable LCD controller */ imxfb_disable_controller(fbi);
LCDC_RMCR &= ~RMCR_LCDC_EN;
unregister_framebuffer(info); unregister_framebuffer(info);
...@@ -663,8 +662,9 @@ static int imxfb_remove(struct device *dev) ...@@ -663,8 +662,9 @@ static int imxfb_remove(struct device *dev)
void imxfb_shutdown(struct device * dev) void imxfb_shutdown(struct device * dev)
{ {
/* disable LCD Controller */ struct fb_info *info = dev_get_drvdata(dev);
LCDC_RMCR &= ~RMCR_LCDC_EN; struct imxfb_info *fbi = info->par;
imxfb_disable_controller(fbi);
} }
static struct device_driver imxfb_driver = { static struct device_driver imxfb_driver = {
......
...@@ -54,6 +54,7 @@ struct imxfb_info { ...@@ -54,6 +54,7 @@ struct imxfb_info {
u_int pcr; u_int pcr;
u_int pwmr; u_int pwmr;
u_int lscr1; u_int lscr1;
u_int dmacr;
u_int cmap_inverse:1, u_int cmap_inverse:1,
cmap_static:1, cmap_static:1,
unused:30; unused:30;
......
...@@ -88,7 +88,7 @@ static struct w1_master * w1_alloc_dev(u32 id, int slave_count, int slave_ttl, ...@@ -88,7 +88,7 @@ static struct w1_master * w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
dev->groups = 23; dev->groups = 23;
dev->seq = 1; dev->seq = 1;
dev->nls = netlink_kernel_create(NETLINK_NFLOG, NULL); dev->nls = netlink_kernel_create(NETLINK_W1, NULL);
if (!dev->nls) { if (!dev->nls) {
printk(KERN_ERR "Failed to create new netlink socket(%u) for w1 master %s.\n", printk(KERN_ERR "Failed to create new netlink socket(%u) for w1 master %s.\n",
NETLINK_NFLOG, dev->dev.bus_id); NETLINK_NFLOG, dev->dev.bus_id);
......
...@@ -25,6 +25,7 @@ struct imxfb_mach_info { ...@@ -25,6 +25,7 @@ struct imxfb_mach_info {
u_int pcr; u_int pcr;
u_int pwmr; u_int pwmr;
u_int lscr1; u_int lscr1;
u_int dmacr;
u_char * fixed_screen_cpu; u_char * fixed_screen_cpu;
dma_addr_t fixed_screen_dma; dma_addr_t fixed_screen_dma;
......
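This dmacr field mirrors the driver-side change earlier in the commit: the fixed-burst workaround removed from imxfb_enable_controller() ("erratum 11") is now supplied per board via imxfb_mach_info, copied into imxfb_info in imxfb_init_fbinfo() and written in imxfb_activate_var(). A hedged sketch of a board description carrying the old value; the variable name is illustrative and the remaining fields are elided:

/* Sketch only: board data supplying the formerly hard-coded DMACR value.
 * DMACR_BURST/DMACR_HM/DMACR_TM are the macros from the line removed in
 * imxfb_enable_controller(); the panel timing fields are omitted. */
static struct imxfb_mach_info example_fb_info __initdata = {
	/* ... pcr, pwmr, lscr1 and the panel geometry as before ... */
	.dmacr	= DMACR_BURST | DMACR_HM(8) | DMACR_TM(2),
};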
...@@ -61,7 +61,7 @@ ...@@ -61,7 +61,7 @@
" strex ip, lr, [%0]\n" \ " strex ip, lr, [%0]\n" \
" teq ip, #0\n" \ " teq ip, #0\n" \
" bne 1b\n" \ " bne 1b\n" \
" teq lr, #0\n" \ " cmp lr, #0\n" \
" movle ip, %0\n" \ " movle ip, %0\n" \
" blle " #wake \ " blle " #wake \
: \ : \
...@@ -100,7 +100,7 @@ ...@@ -100,7 +100,7 @@
__asm__ __volatile__( \ __asm__ __volatile__( \
"@ up_op_read\n" \ "@ up_op_read\n" \
"1: ldrex lr, [%0]\n" \ "1: ldrex lr, [%0]\n" \
" add lr, lr, %1\n" \ " adds lr, lr, %1\n" \
" strex ip, lr, [%0]\n" \ " strex ip, lr, [%0]\n" \
" teq ip, #0\n" \ " teq ip, #0\n" \
" bne 1b\n" \ " bne 1b\n" \
......
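Both one-instruction swaps above are about condition flags rather than arithmetic: teq updates only N and Z, so the signed "le" condition used by the movle/blle that follow can read a stale overflow flag, and a plain add sets no flags at all. cmp and adds perform the full flag-setting operation, so N, Z, C and V all describe the fresh result. A small illustrative helper, not taken from the patch, showing the cmp-then-le pattern in isolation:

/* Illustrative only: "le" evaluates Z || (N != V), so it needs a fresh
 * overflow flag, which cmp provides and teq does not. */
static inline int is_le_zero(int val)
{
	int res;

	__asm__ __volatile__(
	"	cmp	%1, #0\n"		/* sets N, Z, C, V from val - 0 */
	"	movle	%0, #1\n"		/* signed val <= 0              */
	"	movgt	%0, #0\n"
	: "=&r" (res)
	: "r" (val)
	: "cc");
	return res;
}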
...@@ -79,7 +79,8 @@ typedef struct { ...@@ -79,7 +79,8 @@ typedef struct {
} rwlock_t; } rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { 0 } #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#define rwlock_init(x) do { *(x) + RW_LOCK_UNLOCKED; } while (0) #define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while (0)
#define rwlock_is_locked(x) (*((volatile unsigned int *)(x)) != 0)
/* /*
* Write locks are easy - we just set bit 31. When unlocking, we can * Write locks are easy - we just set bit 31. When unlocking, we can
...@@ -100,6 +101,21 @@ static inline void _raw_write_lock(rwlock_t *rw) ...@@ -100,6 +101,21 @@ static inline void _raw_write_lock(rwlock_t *rw)
: "cc", "memory"); : "cc", "memory");
} }
static inline int _raw_write_trylock(rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
" strexeq %0, %2, [%1]"
: "=&r" (tmp)
: "r" (&rw->lock), "r" (0x80000000)
: "cc", "memory");
return tmp == 0;
}
static inline void _raw_write_unlock(rwlock_t *rw) static inline void _raw_write_unlock(rwlock_t *rw)
{ {
__asm__ __volatile__( __asm__ __volatile__(
...@@ -138,6 +154,8 @@ static inline void _raw_read_lock(rwlock_t *rw) ...@@ -138,6 +154,8 @@ static inline void _raw_read_lock(rwlock_t *rw)
static inline void _raw_read_unlock(rwlock_t *rw) static inline void _raw_read_unlock(rwlock_t *rw)
{ {
unsigned long tmp, tmp2;
__asm__ __volatile__( __asm__ __volatile__(
"1: ldrex %0, [%2]\n" "1: ldrex %0, [%2]\n"
" sub %0, %0, #1\n" " sub %0, %0, #1\n"
...@@ -151,19 +169,4 @@ static inline void _raw_read_unlock(rwlock_t *rw) ...@@ -151,19 +169,4 @@ static inline void _raw_read_unlock(rwlock_t *rw)
#define _raw_read_trylock(lock) generic_raw_read_trylock(lock) #define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
static inline int _raw_write_trylock(rwlock_t *rw)
{
unsigned long tmp;
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
" strexeq %0, %2, [%1]"
: "=&r" (tmp)
: "r" (&rw->lock), "r" (0x80000000)
: "cc", "memory");
return tmp == 0;
}
#endif /* __ASM_SPINLOCK_H */ #endif /* __ASM_SPINLOCK_H */
...@@ -19,10 +19,21 @@ ...@@ -19,10 +19,21 @@
extern void mxcsr_feature_mask_init(void); extern void mxcsr_feature_mask_init(void);
extern void init_fpu(struct task_struct *); extern void init_fpu(struct task_struct *);
/* /*
* FPU lazy state save handling... * FPU lazy state save handling...
*/ */
extern void restore_fpu( struct task_struct *tsk );
/*
* The "nop" is needed to make the instructions the same
* length.
*/
#define restore_fpu(tsk) \
alternative_input( \
"nop ; frstor %1", \
"fxrstor %1", \
X86_FEATURE_FXSR, \
"m" ((tsk)->thread.i387.fxsave))
extern void kernel_fpu_begin(void); extern void kernel_fpu_begin(void);
#define kernel_fpu_end() do { stts(); preempt_enable(); } while(0) #define kernel_fpu_end() do { stts(); preempt_enable(); } while(0)
...@@ -32,13 +43,12 @@ extern void kernel_fpu_begin(void); ...@@ -32,13 +43,12 @@ extern void kernel_fpu_begin(void);
*/ */
static inline void __save_init_fpu( struct task_struct *tsk ) static inline void __save_init_fpu( struct task_struct *tsk )
{ {
if ( cpu_has_fxsr ) { alternative_input(
asm volatile( "fxsave %0 ; fnclex" "fnsave %1 ; fwait ;" GENERIC_NOP2,
: "=m" (tsk->thread.i387.fxsave) ); "fxsave %1 ; fnclex",
} else { X86_FEATURE_FXSR,
asm volatile( "fnsave %0 ; fwait" "m" (tsk->thread.i387.fxsave)
: "=m" (tsk->thread.i387.fsave) ); :"memory");
}
tsk->thread_info->status &= ~TS_USEDFPU; tsk->thread_info->status &= ~TS_USEDFPU;
} }
......
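The alternative_input() forms above replace a per-call cpu_has_fxsr branch with boot-time instruction patching: both encodings are emitted (the nop pads the legacy form to the same length) and the kernel rewrites the site once the FXSR feature bit is known, so the hot path carries no test at all. Functionally the restore side is equivalent to the hedged sketch below, shown for comparison only; note that the macro passes the same i387 save-area member for both the frstor and fxrstor variants.

/* For comparison only: the runtime-branch form that the patched-at-boot
 * restore_fpu() above is equivalent to. Not part of the patch. */
static inline void restore_fpu_branching(struct task_struct *tsk)
{
	if (cpu_has_fxsr)
		__asm__ __volatile__("fxrstor %0"
				     : : "m" (tsk->thread.i387.fxsave));
	else
		__asm__ __volatile__("frstor %0"
				     : : "m" (tsk->thread.i387.fxsave));
}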
...@@ -20,52 +20,52 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr); ...@@ -20,52 +20,52 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
/* "non-atomic" versions... */ /* "non-atomic" versions... */
static __inline__ void __set_bit(int nr, volatile unsigned long *addr) static inline void __set_bit(int nr, volatile unsigned long *addr)
{ {
volatile unsigned long *m = addr + (nr >> 6); unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
*m |= (1UL << (nr & 63)); *m |= (1UL << (nr & 63));
} }
static __inline__ void __clear_bit(int nr, volatile unsigned long *addr) static inline void __clear_bit(int nr, volatile unsigned long *addr)
{ {
volatile unsigned long *m = addr + (nr >> 6); unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
*m &= ~(1UL << (nr & 63)); *m &= ~(1UL << (nr & 63));
} }
static __inline__ void __change_bit(int nr, volatile unsigned long *addr) static inline void __change_bit(int nr, volatile unsigned long *addr)
{ {
volatile unsigned long *m = addr + (nr >> 6); unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
*m ^= (1UL << (nr & 63)); *m ^= (1UL << (nr & 63));
} }
static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr) static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{ {
volatile unsigned long *m = addr + (nr >> 6); unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
long old = *m; unsigned long old = *m;
long mask = (1UL << (nr & 63)); unsigned long mask = (1UL << (nr & 63));
*m = (old | mask); *m = (old | mask);
return ((old & mask) != 0); return ((old & mask) != 0);
} }
static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr) static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{ {
volatile unsigned long *m = addr + (nr >> 6); unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
long old = *m; unsigned long old = *m;
long mask = (1UL << (nr & 63)); unsigned long mask = (1UL << (nr & 63));
*m = (old & ~mask); *m = (old & ~mask);
return ((old & mask) != 0); return ((old & mask) != 0);
} }
static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr) static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{ {
volatile unsigned long *m = addr + (nr >> 6); unsigned long *m = ((unsigned long *)addr) + (nr >> 6);
long old = *m; unsigned long old = *m;
long mask = (1UL << (nr & 63)); unsigned long mask = (1UL << (nr & 63));
*m = (old ^ mask); *m = (old ^ mask);
return ((old & mask) != 0); return ((old & mask) != 0);
...@@ -79,13 +79,13 @@ static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr ...@@ -79,13 +79,13 @@ static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr
#define smp_mb__after_clear_bit() barrier() #define smp_mb__after_clear_bit() barrier()
#endif #endif
static __inline__ int test_bit(int nr, __const__ volatile unsigned long *addr) static inline int test_bit(int nr, __const__ volatile unsigned long *addr)
{ {
return (1UL & ((addr)[nr >> 6] >> (nr & 63))) != 0UL; return (1UL & (addr[nr >> 6] >> (nr & 63))) != 0UL;
} }
/* The easy/cheese version for now. */ /* The easy/cheese version for now. */
static __inline__ unsigned long ffz(unsigned long word) static inline unsigned long ffz(unsigned long word)
{ {
unsigned long result; unsigned long result;
...@@ -103,7 +103,7 @@ static __inline__ unsigned long ffz(unsigned long word) ...@@ -103,7 +103,7 @@ static __inline__ unsigned long ffz(unsigned long word)
* *
* Undefined if no bit exists, so code should check against 0 first. * Undefined if no bit exists, so code should check against 0 first.
*/ */
static __inline__ unsigned long __ffs(unsigned long word) static inline unsigned long __ffs(unsigned long word)
{ {
unsigned long result = 0; unsigned long result = 0;
...@@ -144,7 +144,7 @@ static inline int sched_find_first_bit(unsigned long *b) ...@@ -144,7 +144,7 @@ static inline int sched_find_first_bit(unsigned long *b)
* the libc and compiler builtin ffs routines, therefore * the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs). * differs in spirit from the above ffz (man ffs).
*/ */
static __inline__ int ffs(int x) static inline int ffs(int x)
{ {
if (!x) if (!x)
return 0; return 0;
...@@ -158,7 +158,7 @@ static __inline__ int ffs(int x) ...@@ -158,7 +158,7 @@ static __inline__ int ffs(int x)
#ifdef ULTRA_HAS_POPULATION_COUNT #ifdef ULTRA_HAS_POPULATION_COUNT
static __inline__ unsigned int hweight64(unsigned long w) static inline unsigned int hweight64(unsigned long w)
{ {
unsigned int res; unsigned int res;
...@@ -166,7 +166,7 @@ static __inline__ unsigned int hweight64(unsigned long w) ...@@ -166,7 +166,7 @@ static __inline__ unsigned int hweight64(unsigned long w)
return res; return res;
} }
static __inline__ unsigned int hweight32(unsigned int w) static inline unsigned int hweight32(unsigned int w)
{ {
unsigned int res; unsigned int res;
...@@ -174,7 +174,7 @@ static __inline__ unsigned int hweight32(unsigned int w) ...@@ -174,7 +174,7 @@ static __inline__ unsigned int hweight32(unsigned int w)
return res; return res;
} }
static __inline__ unsigned int hweight16(unsigned int w) static inline unsigned int hweight16(unsigned int w)
{ {
unsigned int res; unsigned int res;
...@@ -182,7 +182,7 @@ static __inline__ unsigned int hweight16(unsigned int w) ...@@ -182,7 +182,7 @@ static __inline__ unsigned int hweight16(unsigned int w)
return res; return res;
} }
static __inline__ unsigned int hweight8(unsigned int w) static inline unsigned int hweight8(unsigned int w)
{ {
unsigned int res; unsigned int res;
...@@ -236,7 +236,7 @@ extern unsigned long find_next_zero_bit(const unsigned long *, ...@@ -236,7 +236,7 @@ extern unsigned long find_next_zero_bit(const unsigned long *,
#define test_and_clear_le_bit(nr,addr) \ #define test_and_clear_le_bit(nr,addr) \
test_and_clear_bit((nr) ^ 0x38, (addr)) test_and_clear_bit((nr) ^ 0x38, (addr))
static __inline__ int test_le_bit(int nr, __const__ unsigned long * addr) static inline int test_le_bit(int nr, __const__ unsigned long * addr)
{ {
int mask; int mask;
__const__ unsigned char *ADDR = (__const__ unsigned char *) addr; __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
......
...@@ -94,8 +94,9 @@ struct sparc_trapf { ...@@ -94,8 +94,9 @@ struct sparc_trapf {
#define STACKFRAME32_SZ sizeof(struct sparc_stackf32) #define STACKFRAME32_SZ sizeof(struct sparc_stackf32)
#ifdef __KERNEL__ #ifdef __KERNEL__
#define force_successful_syscall_return() \ #define force_successful_syscall_return() \
set_thread_flag(TIF_SYSCALL_SUCCESS) do { current_thread_info()->syscall_noerror = 1; \
} while (0)
#define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV)) #define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV))
#define instruction_pointer(regs) ((regs)->tpc) #define instruction_pointer(regs) ((regs)->tpc)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
......
...@@ -46,54 +46,14 @@ extern void __up_read(struct rw_semaphore *sem); ...@@ -46,54 +46,14 @@ extern void __up_read(struct rw_semaphore *sem);
extern void __up_write(struct rw_semaphore *sem); extern void __up_write(struct rw_semaphore *sem);
extern void __downgrade_write(struct rw_semaphore *sem); extern void __downgrade_write(struct rw_semaphore *sem);
static __inline__ int rwsem_atomic_update(int delta, struct rw_semaphore *sem) static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{ {
int tmp = delta; return atomic_add_return(delta, (atomic_t *)(&sem->count));
__asm__ __volatile__(
"1:\tlduw [%2], %%g1\n\t"
"add %%g1, %1, %%g7\n\t"
"cas [%2], %%g1, %%g7\n\t"
"cmp %%g1, %%g7\n\t"
"membar #StoreLoad | #StoreStore\n\t"
"bne,pn %%icc, 1b\n\t"
" nop\n\t"
"mov %%g7, %0\n\t"
: "=&r" (tmp)
: "0" (tmp), "r" (sem)
: "g1", "g7", "memory", "cc");
return tmp + delta;
}
#define rwsem_atomic_add rwsem_atomic_update
static __inline__ __u16 rwsem_cmpxchgw(struct rw_semaphore *sem, __u16 __old, __u16 __new)
{
u32 old = (sem->count & 0xffff0000) | (u32) __old;
u32 new = (old & 0xffff0000) | (u32) __new;
u32 prev;
again:
__asm__ __volatile__("cas [%2], %3, %0\n\t"
"membar #StoreLoad | #StoreStore"
: "=&r" (prev)
: "0" (new), "r" (sem), "r" (old)
: "memory");
/* To give the same semantics as x86 cmpxchgw, keep trying
* if only the upper 16-bits changed.
*/
if (prev != old &&
((prev & 0xffff) == (old & 0xffff)))
goto again;
return prev & 0xffff;
} }
static __inline__ signed long rwsem_cmpxchg(struct rw_semaphore *sem, signed long old, signed long new) static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{ {
return cmpxchg(&sem->count,old,new); atomic_add(delta, (atomic_t *)(&sem->count));
} }
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -56,52 +56,6 @@ extern void cheetah_enable_pcache(void); ...@@ -56,52 +56,6 @@ extern void cheetah_enable_pcache(void);
SPITFIRE_HIGHEST_LOCKED_TLBENT : \ SPITFIRE_HIGHEST_LOCKED_TLBENT : \
CHEETAH_HIGHEST_LOCKED_TLBENT) CHEETAH_HIGHEST_LOCKED_TLBENT)
static __inline__ unsigned long spitfire_get_isfsr(void)
{
unsigned long ret;
__asm__ __volatile__("ldxa [%1] %2, %0"
: "=r" (ret)
: "r" (TLB_SFSR), "i" (ASI_IMMU));
return ret;
}
static __inline__ unsigned long spitfire_get_dsfsr(void)
{
unsigned long ret;
__asm__ __volatile__("ldxa [%1] %2, %0"
: "=r" (ret)
: "r" (TLB_SFSR), "i" (ASI_DMMU));
return ret;
}
static __inline__ unsigned long spitfire_get_sfar(void)
{
unsigned long ret;
__asm__ __volatile__("ldxa [%1] %2, %0"
: "=r" (ret)
: "r" (DMMU_SFAR), "i" (ASI_DMMU));
return ret;
}
static __inline__ void spitfire_put_isfsr(unsigned long sfsr)
{
__asm__ __volatile__("stxa %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
: "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_IMMU));
}
static __inline__ void spitfire_put_dsfsr(unsigned long sfsr)
{
__asm__ __volatile__("stxa %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
: "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_DMMU));
}
/* The data cache is write through, so this just invalidates the /* The data cache is write through, so this just invalidates the
* specified line. * specified line.
*/ */
...@@ -193,90 +147,6 @@ static __inline__ void spitfire_put_itlb_data(int entry, unsigned long data) ...@@ -193,90 +147,6 @@ static __inline__ void spitfire_put_itlb_data(int entry, unsigned long data)
"i" (ASI_ITLB_DATA_ACCESS)); "i" (ASI_ITLB_DATA_ACCESS));
} }
/* Spitfire hardware assisted TLB flushes. */
/* Context level flushes. */
static __inline__ void spitfire_flush_dtlb_primary_context(void)
{
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* No outputs */
: "r" (0x40), "i" (ASI_DMMU_DEMAP));
}
static __inline__ void spitfire_flush_itlb_primary_context(void)
{
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* No outputs */
: "r" (0x40), "i" (ASI_IMMU_DEMAP));
}
static __inline__ void spitfire_flush_dtlb_secondary_context(void)
{
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* No outputs */
: "r" (0x50), "i" (ASI_DMMU_DEMAP));
}
static __inline__ void spitfire_flush_itlb_secondary_context(void)
{
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* No outputs */
: "r" (0x50), "i" (ASI_IMMU_DEMAP));
}
static __inline__ void spitfire_flush_dtlb_nucleus_context(void)
{
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* No outputs */
: "r" (0x60), "i" (ASI_DMMU_DEMAP));
}
static __inline__ void spitfire_flush_itlb_nucleus_context(void)
{
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* No outputs */
: "r" (0x60), "i" (ASI_IMMU_DEMAP));
}
/* Page level flushes. */
static __inline__ void spitfire_flush_dtlb_primary_page(unsigned long page)
{
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* No outputs */
: "r" (page), "i" (ASI_DMMU_DEMAP));
}
static __inline__ void spitfire_flush_itlb_primary_page(unsigned long page)
{
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* No outputs */
: "r" (page), "i" (ASI_IMMU_DEMAP));
}
static __inline__ void spitfire_flush_dtlb_secondary_page(unsigned long page)
{
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* No outputs */
: "r" (page | 0x10), "i" (ASI_DMMU_DEMAP));
}
static __inline__ void spitfire_flush_itlb_secondary_page(unsigned long page)
{
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* No outputs */
: "r" (page | 0x10), "i" (ASI_IMMU_DEMAP));
}
static __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page) static __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page)
{ {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t" __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
......
...@@ -190,24 +190,23 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \ ...@@ -190,24 +190,23 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
"wrpr %%g1, %%cwp\n\t" \ "wrpr %%g1, %%cwp\n\t" \
"ldx [%%g6 + %3], %%o6\n\t" \ "ldx [%%g6 + %3], %%o6\n\t" \
"ldub [%%g6 + %2], %%o5\n\t" \ "ldub [%%g6 + %2], %%o5\n\t" \
"ldx [%%g6 + %4], %%o7\n\t" \ "ldub [%%g6 + %4], %%o7\n\t" \
"mov %%g6, %%l2\n\t" \ "mov %%g6, %%l2\n\t" \
"wrpr %%o5, 0x0, %%wstate\n\t" \ "wrpr %%o5, 0x0, %%wstate\n\t" \
"ldx [%%sp + 2047 + 0x70], %%i6\n\t" \ "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \
"ldx [%%sp + 2047 + 0x78], %%i7\n\t" \ "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \
"wrpr %%g0, 0x94, %%pstate\n\t" \ "wrpr %%g0, 0x94, %%pstate\n\t" \
"mov %%l2, %%g6\n\t" \ "mov %%l2, %%g6\n\t" \
"ldx [%%g6 + %7], %%g4\n\t" \ "ldx [%%g6 + %6], %%g4\n\t" \
"wrpr %%g0, 0x96, %%pstate\n\t" \ "wrpr %%g0, 0x96, %%pstate\n\t" \
"andcc %%o7, %6, %%g0\n\t" \ "brz,pt %%o7, 1f\n\t" \
"beq,pt %%icc, 1f\n\t" \
" mov %%g7, %0\n\t" \ " mov %%g7, %0\n\t" \
"b,a ret_from_syscall\n\t" \ "b,a ret_from_syscall\n\t" \
"1:\n\t" \ "1:\n\t" \
: "=&r" (last) \ : "=&r" (last) \
: "0" (next->thread_info), \ : "0" (next->thread_info), \
"i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_FLAGS), "i" (TI_CWP), \ "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \
"i" (_TIF_NEWCHILD), "i" (TI_TASK) \ "i" (TI_CWP), "i" (TI_TASK) \
: "cc", \ : "cc", \
"g1", "g2", "g3", "g7", \ "g1", "g2", "g3", "g7", \
"l2", "l3", "l4", "l5", "l6", "l7", \ "l2", "l3", "l4", "l5", "l6", "l7", \
......
...@@ -47,7 +47,9 @@ struct thread_info { ...@@ -47,7 +47,9 @@ struct thread_info {
struct pt_regs *kregs; struct pt_regs *kregs;
struct exec_domain *exec_domain; struct exec_domain *exec_domain;
int preempt_count; /* 0 => preemptable, <0 => BUG */ int preempt_count; /* 0 => preemptable, <0 => BUG */
int __pad; __u8 new_child;
__u8 syscall_noerror;
__u16 __pad;
unsigned long *utraps; unsigned long *utraps;
...@@ -87,6 +89,8 @@ struct thread_info { ...@@ -87,6 +89,8 @@ struct thread_info {
#define TI_KREGS 0x00000028 #define TI_KREGS 0x00000028
#define TI_EXEC_DOMAIN 0x00000030 #define TI_EXEC_DOMAIN 0x00000030
#define TI_PRE_COUNT 0x00000038 #define TI_PRE_COUNT 0x00000038
#define TI_NEW_CHILD 0x0000003c
#define TI_SYS_NOERROR 0x0000003d
#define TI_UTRAPS 0x00000040 #define TI_UTRAPS 0x00000040
#define TI_REG_WINDOW 0x00000048 #define TI_REG_WINDOW 0x00000048
#define TI_RWIN_SPTRS 0x000003c8 #define TI_RWIN_SPTRS 0x000003c8
...@@ -219,10 +223,10 @@ register struct thread_info *current_thread_info_reg asm("g6"); ...@@ -219,10 +223,10 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */ #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
#define TIF_NEWSIGNALS 6 /* wants new-style signals */ #define TIF_NEWSIGNALS 6 /* wants new-style signals */
#define TIF_32BIT 7 /* 32-bit binary */ #define TIF_32BIT 7 /* 32-bit binary */
#define TIF_NEWCHILD 8 /* just-spawned child process */ /* flag bit 8 is available */
#define TIF_SECCOMP 9 /* secure computing */ #define TIF_SECCOMP 9 /* secure computing */
#define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */ #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
#define TIF_SYSCALL_SUCCESS 11 /* flag bit 11 is available */
/* NOTE: Thread flags >= 12 should be ones we have no interest /* NOTE: Thread flags >= 12 should be ones we have no interest
* in using in assembly, else we can't use the mask as * in using in assembly, else we can't use the mask as
* an immediate value in instructions such as andcc. * an immediate value in instructions such as andcc.
...@@ -239,10 +243,8 @@ register struct thread_info *current_thread_info_reg asm("g6"); ...@@ -239,10 +243,8 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define _TIF_UNALIGNED (1<<TIF_UNALIGNED) #define _TIF_UNALIGNED (1<<TIF_UNALIGNED)
#define _TIF_NEWSIGNALS (1<<TIF_NEWSIGNALS) #define _TIF_NEWSIGNALS (1<<TIF_NEWSIGNALS)
#define _TIF_32BIT (1<<TIF_32BIT) #define _TIF_32BIT (1<<TIF_32BIT)
#define _TIF_NEWCHILD (1<<TIF_NEWCHILD)
#define _TIF_SECCOMP (1<<TIF_SECCOMP) #define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_SUCCESS (1<<TIF_SYSCALL_SUCCESS)
#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
......
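The retired TIF_NEWCHILD and TIF_SYSCALL_SUCCESS bits become the new_child and syscall_noerror bytes added to struct thread_info above, which the switch_to() hunk earlier now reads with a single ldub/brz instead of masking TI_FLAGS. Because the TI_* offsets are maintained by hand, they must track the C layout: preempt_count (an int) sits at 0x38, the two new bytes land at 0x3c and 0x3d, and the __u16 pad keeps utraps aligned at 0x40. A hedged sketch of how that correspondence could be asserted; the helper name is illustrative and not part of the patch:

/* Sketch only: make the offset bookkeeping explicit. BUILD_BUG_ON
 * (linux/kernel.h) and offsetof are assumed available here. */
static inline void thread_info_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct thread_info, new_child)       != TI_NEW_CHILD);
	BUILD_BUG_ON(offsetof(struct thread_info, syscall_noerror) != TI_SYS_NOERROR);
	BUILD_BUG_ON(offsetof(struct thread_info, utraps)          != TI_UTRAPS);
}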
...@@ -9,49 +9,8 @@ ...@@ -9,49 +9,8 @@
#include <linux/types.h> #include <linux/types.h>
/* How timers work:
*
* On uniprocessors we just use counter zero for the system wide
* ticker, this performs thread scheduling, clock book keeping,
* and runs timer based events. Previously we used the Ultra
* %tick interrupt for this purpose.
*
* On multiprocessors we pick one cpu as the master level 10 tick
* processor. Here this counter zero tick handles clock book
* keeping and timer events only. Each Ultra has it's level
* 14 %tick interrupt set to fire off as well, even the master
* tick cpu runs this locally. This ticker performs thread
* scheduling, system/user tick counting for the current thread,
* and also profiling if enabled.
*/
#include <linux/config.h> #include <linux/config.h>
/* Two timers, traditionally steered to PIL's 10 and 14 respectively.
* But since INO packets are used on sun5, we could use any PIL level
* we like, however for now we use the normal ones.
*
* The 'reg' and 'interrupts' properties for these live in nodes named
* 'counter-timer'. The first of three 'reg' properties describe where
* the sun5_timer registers are. The other two I have no idea. (XXX)
*/
struct sun5_timer {
u64 count0;
u64 limit0;
u64 count1;
u64 limit1;
};
#define SUN5_LIMIT_ENABLE 0x80000000
#define SUN5_LIMIT_TOZERO 0x40000000
#define SUN5_LIMIT_ZRESTART 0x20000000
#define SUN5_LIMIT_CMASK 0x1fffffff
/* Given a HZ value, set the limit register to so that the timer IRQ
* gets delivered that often.
*/
#define SUN5_HZ_TO_LIMIT(__hz) (1000000/(__hz))
struct sparc64_tick_ops { struct sparc64_tick_ops {
void (*init_tick)(unsigned long); void (*init_tick)(unsigned long);
unsigned long (*get_tick)(void); unsigned long (*get_tick)(void);
......
...@@ -197,6 +197,9 @@ struct ip_conntrack_expect ...@@ -197,6 +197,9 @@ struct ip_conntrack_expect
/* Timer function; deletes the expectation. */ /* Timer function; deletes the expectation. */
struct timer_list timeout; struct timer_list timeout;
/* Usage count. */
atomic_t use;
#ifdef CONFIG_IP_NF_NAT_NEEDED #ifdef CONFIG_IP_NF_NAT_NEEDED
/* This is the original per-proto part, used to map the /* This is the original per-proto part, used to map the
* expected connection the way the recipient expects. */ * expected connection the way the recipient expects. */
......
...@@ -30,9 +30,10 @@ extern int ip_conntrack_helper_register(struct ip_conntrack_helper *); ...@@ -30,9 +30,10 @@ extern int ip_conntrack_helper_register(struct ip_conntrack_helper *);
extern void ip_conntrack_helper_unregister(struct ip_conntrack_helper *); extern void ip_conntrack_helper_unregister(struct ip_conntrack_helper *);
/* Allocate space for an expectation: this is mandatory before calling /* Allocate space for an expectation: this is mandatory before calling
ip_conntrack_expect_related. */ ip_conntrack_expect_related. You will have to call put afterwards. */
extern struct ip_conntrack_expect *ip_conntrack_expect_alloc(void); extern struct ip_conntrack_expect *
extern void ip_conntrack_expect_free(struct ip_conntrack_expect *exp); ip_conntrack_expect_alloc(struct ip_conntrack *master);
extern void ip_conntrack_expect_put(struct ip_conntrack_expect *exp);
/* Add an expected connection: can have more than one per connection */ /* Add an expected connection: can have more than one per connection */
extern int ip_conntrack_expect_related(struct ip_conntrack_expect *exp); extern int ip_conntrack_expect_related(struct ip_conntrack_expect *exp);
......
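With the refcounted API above, a helper's life cycle changes from alloc()/free() to alloc(master)/put(): the expectation is created against its master conntrack (taking a reference on it), handed to ip_conntrack_expect_related() or a NAT hook, and then unconditionally released with _put(); the core keeps its own reference if the expectation was inserted. The amanda, FTP, IRC and TFTP hunks later in the commit all follow this shape; a condensed sketch with the tuple setup elided:

/* Condensed sketch of the new helper pattern; mirrors the conntrack
 * helper hunks later in this commit. Tuple/mask setup is elided. */
static int expect_sketch(struct ip_conntrack *ct)
{
	struct ip_conntrack_expect *exp;
	int ret = NF_ACCEPT;

	exp = ip_conntrack_expect_alloc(ct);	/* takes a reference on ct */
	if (exp == NULL)
		return NF_DROP;

	/* ... fill in exp->tuple, exp->mask, exp->expectfn ... */

	if (ip_conntrack_expect_related(exp) != 0)
		ret = NF_DROP;			/* no separate free on failure */
	ip_conntrack_expect_put(exp);		/* always drop our reference   */
	return ret;
}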
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
#include <linux/types.h> #include <linux/types.h>
#define NETLINK_ROUTE 0 /* Routing/device hook */ #define NETLINK_ROUTE 0 /* Routing/device hook */
#define NETLINK_SKIP 1 /* Reserved for ENskip */ #define NETLINK_W1 1 /* 1-wire subsystem */
#define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */ #define NETLINK_USERSOCK 2 /* Reserved for user mode socket protocols */
#define NETLINK_FIREWALL 3 /* Firewalling hook */ #define NETLINK_FIREWALL 3 /* Firewalling hook */
#define NETLINK_TCPDIAG 4 /* TCP socket monitoring */ #define NETLINK_TCPDIAG 4 /* TCP socket monitoring */
......
...@@ -1872,6 +1872,7 @@ ...@@ -1872,6 +1872,7 @@
#define PCI_DEVICE_ID_CBOARDS_DAS1602_16 0x0001 #define PCI_DEVICE_ID_CBOARDS_DAS1602_16 0x0001
#define PCI_VENDOR_ID_SIIG 0x131f #define PCI_VENDOR_ID_SIIG 0x131f
#define PCI_SUBVENDOR_ID_SIIG 0x131f
#define PCI_DEVICE_ID_SIIG_1S_10x_550 0x1000 #define PCI_DEVICE_ID_SIIG_1S_10x_550 0x1000
#define PCI_DEVICE_ID_SIIG_1S_10x_650 0x1001 #define PCI_DEVICE_ID_SIIG_1S_10x_650 0x1001
#define PCI_DEVICE_ID_SIIG_1S_10x_850 0x1002 #define PCI_DEVICE_ID_SIIG_1S_10x_850 0x1002
...@@ -1909,6 +1910,7 @@ ...@@ -1909,6 +1910,7 @@
#define PCI_DEVICE_ID_SIIG_2S1P_20x_550 0x2060 #define PCI_DEVICE_ID_SIIG_2S1P_20x_550 0x2060
#define PCI_DEVICE_ID_SIIG_2S1P_20x_650 0x2061 #define PCI_DEVICE_ID_SIIG_2S1P_20x_650 0x2061
#define PCI_DEVICE_ID_SIIG_2S1P_20x_850 0x2062 #define PCI_DEVICE_ID_SIIG_2S1P_20x_850 0x2062
#define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL 0x2050
#define PCI_VENDOR_ID_RADISYS 0x1331 #define PCI_VENDOR_ID_RADISYS 0x1331
#define PCI_DEVICE_ID_RADISYS_ENP2611 0x0030 #define PCI_DEVICE_ID_RADISYS_ENP2611 0x0030
......
...@@ -502,7 +502,8 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, ...@@ -502,7 +502,8 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
* *
* %NULL is returned on a memory allocation failure. * %NULL is returned on a memory allocation failure.
*/ */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri) static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
unsigned int __nocast pri)
{ {
might_sleep_if(pri & __GFP_WAIT); might_sleep_if(pri & __GFP_WAIT);
if (skb_cloned(skb)) { if (skb_cloned(skb)) {
......
...@@ -41,19 +41,14 @@ enum ...@@ -41,19 +41,14 @@ enum
TCF_META_ID_LOADAVG_1, TCF_META_ID_LOADAVG_1,
TCF_META_ID_LOADAVG_2, TCF_META_ID_LOADAVG_2,
TCF_META_ID_DEV, TCF_META_ID_DEV,
TCF_META_ID_INDEV,
TCF_META_ID_REALDEV,
TCF_META_ID_PRIORITY, TCF_META_ID_PRIORITY,
TCF_META_ID_PROTOCOL, TCF_META_ID_PROTOCOL,
TCF_META_ID_SECURITY, /* obsolete */
TCF_META_ID_PKTTYPE, TCF_META_ID_PKTTYPE,
TCF_META_ID_PKTLEN, TCF_META_ID_PKTLEN,
TCF_META_ID_DATALEN, TCF_META_ID_DATALEN,
TCF_META_ID_MACLEN, TCF_META_ID_MACLEN,
TCF_META_ID_NFMARK, TCF_META_ID_NFMARK,
TCF_META_ID_TCINDEX, TCF_META_ID_TCINDEX,
TCF_META_ID_TCVERDICT,
TCF_META_ID_TCCLASSID,
TCF_META_ID_RTCLASSID, TCF_META_ID_RTCLASSID,
TCF_META_ID_RTIIF, TCF_META_ID_RTIIF,
TCF_META_ID_SK_FAMILY, TCF_META_ID_SK_FAMILY,
......
...@@ -167,15 +167,12 @@ void sctp_unhash_established(struct sctp_association *); ...@@ -167,15 +167,12 @@ void sctp_unhash_established(struct sctp_association *);
void sctp_hash_endpoint(struct sctp_endpoint *); void sctp_hash_endpoint(struct sctp_endpoint *);
void sctp_unhash_endpoint(struct sctp_endpoint *); void sctp_unhash_endpoint(struct sctp_endpoint *);
struct sock *sctp_err_lookup(int family, struct sk_buff *, struct sock *sctp_err_lookup(int family, struct sk_buff *,
struct sctphdr *, struct sctp_endpoint **, struct sctphdr *, struct sctp_association **,
struct sctp_association **,
struct sctp_transport **); struct sctp_transport **);
void sctp_err_finish(struct sock *, struct sctp_endpoint *, void sctp_err_finish(struct sock *, struct sctp_association *);
struct sctp_association *);
void sctp_icmp_frag_needed(struct sock *, struct sctp_association *, void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
struct sctp_transport *t, __u32 pmtu); struct sctp_transport *t, __u32 pmtu);
void sctp_icmp_proto_unreachable(struct sock *sk, void sctp_icmp_proto_unreachable(struct sock *sk,
struct sctp_endpoint *ep,
struct sctp_association *asoc, struct sctp_association *asoc,
struct sctp_transport *t); struct sctp_transport *t);
......
...@@ -803,7 +803,7 @@ struct xfrm_algo_desc { ...@@ -803,7 +803,7 @@ struct xfrm_algo_desc {
/* XFRM tunnel handlers. */ /* XFRM tunnel handlers. */
struct xfrm_tunnel { struct xfrm_tunnel {
int (*handler)(struct sk_buff *skb); int (*handler)(struct sk_buff *skb);
void (*err_handler)(struct sk_buff *skb, void *info); void (*err_handler)(struct sk_buff *skb, __u32 info);
}; };
struct xfrm6_tunnel { struct xfrm6_tunnel {
......
...@@ -209,22 +209,6 @@ endmenu ...@@ -209,22 +209,6 @@ endmenu
endmenu endmenu
config NETPOLL
def_bool NETCONSOLE
config NETPOLL_RX
bool "Netpoll support for trapping incoming packets"
default n
depends on NETPOLL
config NETPOLL_TRAP
bool "Netpoll traffic trapping"
default n
depends on NETPOLL
config NET_POLL_CONTROLLER
def_bool NETPOLL
source "net/ax25/Kconfig" source "net/ax25/Kconfig"
source "net/irda/Kconfig" source "net/irda/Kconfig"
source "net/bluetooth/Kconfig" source "net/bluetooth/Kconfig"
......
...@@ -60,7 +60,7 @@ config ATM_BR2684 ...@@ -60,7 +60,7 @@ config ATM_BR2684
tristate "RFC1483/2684 Bridged protocols" tristate "RFC1483/2684 Bridged protocols"
depends on ATM && INET depends on ATM && INET
help help
ATM PVCs can carry ethernet PDUs according to rfc2684 (formerly 1483) ATM PVCs can carry ethernet PDUs according to RFC2684 (formerly 1483)
This device will act like an ethernet from the kernels point of view, This device will act like an ethernet from the kernels point of view,
with the traffic being carried by ATM PVCs (currently 1 PVC/device). with the traffic being carried by ATM PVCs (currently 1 PVC/device).
This is sometimes used over DSL lines. If in doubt, say N. This is sometimes used over DSL lines. If in doubt, say N.
...@@ -69,6 +69,6 @@ config ATM_BR2684_IPFILTER ...@@ -69,6 +69,6 @@ config ATM_BR2684_IPFILTER
bool "Per-VC IP filter kludge" bool "Per-VC IP filter kludge"
depends on ATM_BR2684 depends on ATM_BR2684
help help
This is an experimental mechanism for users who need to terminating a This is an experimental mechanism for users who need to terminate a
large number of IP-only vcc's. Do not enable this unless you are sure large number of IP-only vcc's. Do not enable this unless you are sure
you know what you are doing. you know what you are doing.
...@@ -118,10 +118,6 @@ static int svc_bind(struct socket *sock,struct sockaddr *sockaddr, ...@@ -118,10 +118,6 @@ static int svc_bind(struct socket *sock,struct sockaddr *sockaddr,
goto out; goto out;
} }
vcc = ATM_SD(sock); vcc = ATM_SD(sock);
if (test_bit(ATM_VF_SESSION, &vcc->flags)) {
error = -EINVAL;
goto out;
}
addr = (struct sockaddr_atmsvc *) sockaddr; addr = (struct sockaddr_atmsvc *) sockaddr;
if (addr->sas_family != AF_ATMSVC) { if (addr->sas_family != AF_ATMSVC) {
error = -EAFNOSUPPORT; error = -EAFNOSUPPORT;
......
...@@ -138,7 +138,7 @@ config BRIDGE_EBT_VLAN ...@@ -138,7 +138,7 @@ config BRIDGE_EBT_VLAN
# #
config BRIDGE_EBT_ARPREPLY config BRIDGE_EBT_ARPREPLY
tristate "ebt: arp reply target support" tristate "ebt: arp reply target support"
depends on BRIDGE_NF_EBTABLES depends on BRIDGE_NF_EBTABLES && INET
help help
This option adds the arp reply target, which allows This option adds the arp reply target, which allows
automatically sending arp replies to arp requests. automatically sending arp replies to arp requests.
......
...@@ -7,9 +7,10 @@ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \ ...@@ -7,9 +7,10 @@ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
obj-$(CONFIG_SYSCTL) += sysctl_net_core.o obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
obj-y += flow.o dev.o ethtool.o dev_mcast.o dst.o \ obj-y += dev.o ethtool.o dev_mcast.o dst.o \
neighbour.o rtnetlink.o utils.o link_watch.o filter.o neighbour.o rtnetlink.o utils.o link_watch.o filter.o
obj-$(CONFIG_XFRM) += flow.o
obj-$(CONFIG_SYSFS) += net-sysfs.o obj-$(CONFIG_SYSFS) += net-sysfs.o
obj-$(CONFIG_NETFILTER) += netfilter.o obj-$(CONFIG_NETFILTER) += netfilter.o
obj-$(CONFIG_NET_DIVERT) += dv.o obj-$(CONFIG_NET_DIVERT) += dv.o
......
...@@ -377,8 +377,8 @@ struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask) ...@@ -377,8 +377,8 @@ struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
C(tc_index); C(tc_index);
#ifdef CONFIG_NET_CLS_ACT #ifdef CONFIG_NET_CLS_ACT
n->tc_verd = SET_TC_VERD(skb->tc_verd,0); n->tc_verd = SET_TC_VERD(skb->tc_verd,0);
n->tc_verd = CLR_TC_OK2MUNGE(skb->tc_verd); n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
n->tc_verd = CLR_TC_MUNGED(skb->tc_verd); n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
C(input_dev); C(input_dev);
C(tc_classid); C(tc_classid);
#endif #endif
......
...@@ -54,9 +54,9 @@ config IP_ADVANCED_ROUTER ...@@ -54,9 +54,9 @@ config IP_ADVANCED_ROUTER
choice choice
prompt "Choose IP: FIB lookup algorithm (choose FIB_HASH if unsure)" prompt "Choose IP: FIB lookup algorithm (choose FIB_HASH if unsure)"
depends on IP_ADVANCED_ROUTER depends on IP_ADVANCED_ROUTER
default IP_FIB_HASH default ASK_IP_FIB_HASH
config IP_FIB_HASH config ASK_IP_FIB_HASH
bool "FIB_HASH" bool "FIB_HASH"
---help--- ---help---
Current FIB is very proven and good enough for most users. Current FIB is very proven and good enough for most users.
...@@ -82,12 +82,8 @@ config IP_FIB_TRIE ...@@ -82,12 +82,8 @@ config IP_FIB_TRIE
endchoice endchoice
# If the user does not enable advanced routing, he gets the safe
# default of the fib-hash algorithm.
config IP_FIB_HASH config IP_FIB_HASH
bool def_bool ASK_IP_FIB_HASH || !IP_ADVANCED_ROUTER
depends on !IP_ADVANCED_ROUTER
default y
config IP_MULTIPLE_TABLES config IP_MULTIPLE_TABLES
bool "IP: policy routing" bool "IP: policy routing"
...@@ -239,7 +235,6 @@ config IP_PNP_RARP ...@@ -239,7 +235,6 @@ config IP_PNP_RARP
# bool ' IP: ARP support' CONFIG_IP_PNP_ARP # bool ' IP: ARP support' CONFIG_IP_PNP_ARP
config NET_IPIP config NET_IPIP
tristate "IP: tunneling" tristate "IP: tunneling"
select INET_TUNNEL
---help--- ---help---
Tunneling means encapsulating data of one protocol type within Tunneling means encapsulating data of one protocol type within
another protocol and sending it over a channel that understands the another protocol and sending it over a channel that understands the
...@@ -256,7 +251,6 @@ config NET_IPIP ...@@ -256,7 +251,6 @@ config NET_IPIP
config NET_IPGRE config NET_IPGRE
tristate "IP: GRE tunnels over IP" tristate "IP: GRE tunnels over IP"
select XFRM
help help
Tunneling means encapsulating data of one protocol type within Tunneling means encapsulating data of one protocol type within
another protocol and sending it over a channel that understands the another protocol and sending it over a channel that understands the
......
...@@ -1157,7 +1157,7 @@ static int __init ipv4_proc_init(void) ...@@ -1157,7 +1157,7 @@ static int __init ipv4_proc_init(void)
#ifdef CONFIG_IP_FIB_TRIE #ifdef CONFIG_IP_FIB_TRIE
if (fib_stat_proc_init()) if (fib_stat_proc_init())
goto out_fib_stat; goto out_fib_stat;
#endif #endif
if (ip_misc_proc_init()) if (ip_misc_proc_init())
goto out_misc; goto out_misc;
out: out:
......
This diff has been collapsed (contents not shown).
...@@ -273,7 +273,7 @@ static void ipip_tunnel_uninit(struct net_device *dev) ...@@ -273,7 +273,7 @@ static void ipip_tunnel_uninit(struct net_device *dev)
dev_put(dev); dev_put(dev);
} }
static void ipip_err(struct sk_buff *skb, void *__unused) static void ipip_err(struct sk_buff *skb, u32 info)
{ {
#ifndef I_WISH_WORLD_WERE_PERFECT #ifndef I_WISH_WORLD_WERE_PERFECT
...@@ -852,11 +852,39 @@ static int __init ipip_fb_tunnel_init(struct net_device *dev) ...@@ -852,11 +852,39 @@ static int __init ipip_fb_tunnel_init(struct net_device *dev)
return 0; return 0;
} }
#ifdef CONFIG_INET_TUNNEL
static struct xfrm_tunnel ipip_handler = { static struct xfrm_tunnel ipip_handler = {
.handler = ipip_rcv, .handler = ipip_rcv,
.err_handler = ipip_err, .err_handler = ipip_err,
}; };
static inline int ipip_register(void)
{
return xfrm4_tunnel_register(&ipip_handler);
}
static inline int ipip_unregister(void)
{
return xfrm4_tunnel_deregister(&ipip_handler);
}
#else
static struct net_protocol ipip_protocol = {
.handler = ipip_rcv,
.err_handler = ipip_err,
.no_policy = 1,
};
static inline int ipip_register(void)
{
return inet_add_protocol(&ipip_protocol, IPPROTO_IPIP);
}
static inline int ipip_unregister(void)
{
return inet_del_protocol(&ipip_protocol, IPPROTO_IPIP);
}
#endif
static char banner[] __initdata = static char banner[] __initdata =
KERN_INFO "IPv4 over IPv4 tunneling driver\n"; KERN_INFO "IPv4 over IPv4 tunneling driver\n";
...@@ -866,7 +894,7 @@ static int __init ipip_init(void) ...@@ -866,7 +894,7 @@ static int __init ipip_init(void)
printk(banner); printk(banner);
if (xfrm4_tunnel_register(&ipip_handler) < 0) { if (ipip_register() < 0) {
printk(KERN_INFO "ipip init: can't register tunnel\n"); printk(KERN_INFO "ipip init: can't register tunnel\n");
return -EAGAIN; return -EAGAIN;
} }
...@@ -888,13 +916,13 @@ static int __init ipip_init(void) ...@@ -888,13 +916,13 @@ static int __init ipip_init(void)
err2: err2:
free_netdev(ipip_fb_tunnel_dev); free_netdev(ipip_fb_tunnel_dev);
err1: err1:
xfrm4_tunnel_deregister(&ipip_handler); ipip_unregister();
goto out; goto out;
} }
static void __exit ipip_fini(void) static void __exit ipip_fini(void)
{ {
if (xfrm4_tunnel_deregister(&ipip_handler) < 0) if (ipip_unregister() < 0)
printk(KERN_INFO "ipip close: can't deregister tunnel\n"); printk(KERN_INFO "ipip close: can't deregister tunnel\n");
unregister_netdev(ipip_fb_tunnel_dev); unregister_netdev(ipip_fb_tunnel_dev);
......
...@@ -101,14 +101,13 @@ static int help(struct sk_buff **pskb, ...@@ -101,14 +101,13 @@ static int help(struct sk_buff **pskb,
if (port == 0 || len > 5) if (port == 0 || len > 5)
break; break;
exp = ip_conntrack_expect_alloc(); exp = ip_conntrack_expect_alloc(ct);
if (exp == NULL) { if (exp == NULL) {
ret = NF_DROP; ret = NF_DROP;
goto out; goto out;
} }
exp->expectfn = NULL; exp->expectfn = NULL;
exp->master = ct;
exp->tuple.src.ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip; exp->tuple.src.ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip;
exp->tuple.src.u.tcp.port = 0; exp->tuple.src.u.tcp.port = 0;
...@@ -126,10 +125,9 @@ static int help(struct sk_buff **pskb, ...@@ -126,10 +125,9 @@ static int help(struct sk_buff **pskb,
ret = ip_nat_amanda_hook(pskb, ctinfo, ret = ip_nat_amanda_hook(pskb, ctinfo,
tmp - amanda_buffer, tmp - amanda_buffer,
len, exp); len, exp);
else if (ip_conntrack_expect_related(exp) != 0) { else if (ip_conntrack_expect_related(exp) != 0)
ip_conntrack_expect_free(exp);
ret = NF_DROP; ret = NF_DROP;
} ip_conntrack_expect_put(exp);
} }
out: out:
......
...@@ -137,19 +137,12 @@ ip_ct_invert_tuple(struct ip_conntrack_tuple *inverse, ...@@ -137,19 +137,12 @@ ip_ct_invert_tuple(struct ip_conntrack_tuple *inverse,
/* ip_conntrack_expect helper functions */ /* ip_conntrack_expect helper functions */
static void destroy_expect(struct ip_conntrack_expect *exp)
{
ip_conntrack_put(exp->master);
IP_NF_ASSERT(!timer_pending(&exp->timeout));
kmem_cache_free(ip_conntrack_expect_cachep, exp);
CONNTRACK_STAT_INC(expect_delete);
}
static void unlink_expect(struct ip_conntrack_expect *exp) static void unlink_expect(struct ip_conntrack_expect *exp)
{ {
ASSERT_WRITE_LOCK(&ip_conntrack_lock); ASSERT_WRITE_LOCK(&ip_conntrack_lock);
IP_NF_ASSERT(!timer_pending(&exp->timeout));
list_del(&exp->list); list_del(&exp->list);
/* Logically in destroy_expect, but we hold the lock here. */ CONNTRACK_STAT_INC(expect_delete);
exp->master->expecting--; exp->master->expecting--;
} }
...@@ -160,7 +153,7 @@ static void expectation_timed_out(unsigned long ul_expect) ...@@ -160,7 +153,7 @@ static void expectation_timed_out(unsigned long ul_expect)
write_lock_bh(&ip_conntrack_lock); write_lock_bh(&ip_conntrack_lock);
unlink_expect(exp); unlink_expect(exp);
write_unlock_bh(&ip_conntrack_lock); write_unlock_bh(&ip_conntrack_lock);
destroy_expect(exp); ip_conntrack_expect_put(exp);
} }
/* If an expectation for this connection is found, it gets delete from /* If an expectation for this connection is found, it gets delete from
...@@ -198,7 +191,7 @@ static void remove_expectations(struct ip_conntrack *ct) ...@@ -198,7 +191,7 @@ static void remove_expectations(struct ip_conntrack *ct)
list_for_each_entry_safe(i, tmp, &ip_conntrack_expect_list, list) { list_for_each_entry_safe(i, tmp, &ip_conntrack_expect_list, list) {
if (i->master == ct && del_timer(&i->timeout)) { if (i->master == ct && del_timer(&i->timeout)) {
unlink_expect(i); unlink_expect(i);
destroy_expect(i); ip_conntrack_expect_put(i);
} }
} }
} }
...@@ -537,7 +530,7 @@ init_conntrack(const struct ip_conntrack_tuple *tuple, ...@@ -537,7 +530,7 @@ init_conntrack(const struct ip_conntrack_tuple *tuple,
if (exp) { if (exp) {
if (exp->expectfn) if (exp->expectfn)
exp->expectfn(conntrack, exp); exp->expectfn(conntrack, exp);
destroy_expect(exp); ip_conntrack_expect_put(exp);
} }
return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL]; return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
...@@ -729,14 +722,14 @@ void ip_conntrack_unexpect_related(struct ip_conntrack_expect *exp) ...@@ -729,14 +722,14 @@ void ip_conntrack_unexpect_related(struct ip_conntrack_expect *exp)
if (expect_matches(i, exp) && del_timer(&i->timeout)) { if (expect_matches(i, exp) && del_timer(&i->timeout)) {
unlink_expect(i); unlink_expect(i);
write_unlock_bh(&ip_conntrack_lock); write_unlock_bh(&ip_conntrack_lock);
destroy_expect(i); ip_conntrack_expect_put(i);
return; return;
} }
} }
write_unlock_bh(&ip_conntrack_lock); write_unlock_bh(&ip_conntrack_lock);
} }
struct ip_conntrack_expect *ip_conntrack_expect_alloc(void) struct ip_conntrack_expect *ip_conntrack_expect_alloc(struct ip_conntrack *me)
{ {
struct ip_conntrack_expect *new; struct ip_conntrack_expect *new;
...@@ -745,18 +738,23 @@ struct ip_conntrack_expect *ip_conntrack_expect_alloc(void) ...@@ -745,18 +738,23 @@ struct ip_conntrack_expect *ip_conntrack_expect_alloc(void)
DEBUGP("expect_related: OOM allocating expect\n"); DEBUGP("expect_related: OOM allocating expect\n");
return NULL; return NULL;
} }
new->master = NULL; new->master = me;
atomic_inc(&new->master->ct_general.use);
atomic_set(&new->use, 1);
return new; return new;
} }
void ip_conntrack_expect_free(struct ip_conntrack_expect *expect) void ip_conntrack_expect_put(struct ip_conntrack_expect *exp)
{ {
kmem_cache_free(ip_conntrack_expect_cachep, expect); if (atomic_dec_and_test(&exp->use)) {
ip_conntrack_put(exp->master);
kmem_cache_free(ip_conntrack_expect_cachep, exp);
}
} }
static void ip_conntrack_expect_insert(struct ip_conntrack_expect *exp) static void ip_conntrack_expect_insert(struct ip_conntrack_expect *exp)
{ {
atomic_inc(&exp->master->ct_general.use); atomic_inc(&exp->use);
exp->master->expecting++; exp->master->expecting++;
list_add(&exp->list, &ip_conntrack_expect_list); list_add(&exp->list, &ip_conntrack_expect_list);
...@@ -778,7 +776,7 @@ static void evict_oldest_expect(struct ip_conntrack *master) ...@@ -778,7 +776,7 @@ static void evict_oldest_expect(struct ip_conntrack *master)
if (i->master == master) { if (i->master == master) {
if (del_timer(&i->timeout)) { if (del_timer(&i->timeout)) {
unlink_expect(i); unlink_expect(i);
destroy_expect(i); ip_conntrack_expect_put(i);
} }
break; break;
} }
...@@ -810,8 +808,6 @@ int ip_conntrack_expect_related(struct ip_conntrack_expect *expect) ...@@ -810,8 +808,6 @@ int ip_conntrack_expect_related(struct ip_conntrack_expect *expect)
/* Refresh timer: if it's dying, ignore.. */ /* Refresh timer: if it's dying, ignore.. */
if (refresh_timer(i)) { if (refresh_timer(i)) {
ret = 0; ret = 0;
/* We don't need the one they've given us. */
ip_conntrack_expect_free(expect);
goto out; goto out;
} }
} else if (expect_clash(i, expect)) { } else if (expect_clash(i, expect)) {
...@@ -881,7 +877,7 @@ void ip_conntrack_helper_unregister(struct ip_conntrack_helper *me) ...@@ -881,7 +877,7 @@ void ip_conntrack_helper_unregister(struct ip_conntrack_helper *me)
list_for_each_entry_safe(exp, tmp, &ip_conntrack_expect_list, list) { list_for_each_entry_safe(exp, tmp, &ip_conntrack_expect_list, list) {
if (exp->master->helper == me && del_timer(&exp->timeout)) { if (exp->master->helper == me && del_timer(&exp->timeout)) {
unlink_expect(exp); unlink_expect(exp);
destroy_expect(exp); ip_conntrack_expect_put(exp);
} }
} }
/* Get rid of expecteds, set helpers to NULL. */ /* Get rid of expecteds, set helpers to NULL. */
...@@ -1111,6 +1107,9 @@ void ip_conntrack_cleanup(void) ...@@ -1111,6 +1107,9 @@ void ip_conntrack_cleanup(void)
schedule(); schedule();
goto i_see_dead_people; goto i_see_dead_people;
} }
/* wait until all references to ip_conntrack_untracked are dropped */
while (atomic_read(&ip_conntrack_untracked.ct_general.use) > 1)
schedule();
kmem_cache_destroy(ip_conntrack_cachep); kmem_cache_destroy(ip_conntrack_cachep);
kmem_cache_destroy(ip_conntrack_expect_cachep); kmem_cache_destroy(ip_conntrack_expect_cachep);
......
...@@ -376,7 +376,7 @@ static int help(struct sk_buff **pskb, ...@@ -376,7 +376,7 @@ static int help(struct sk_buff **pskb,
fb_ptr + matchoff, matchlen, ntohl(th->seq) + matchoff); fb_ptr + matchoff, matchlen, ntohl(th->seq) + matchoff);
/* Allocate expectation which will be inserted */ /* Allocate expectation which will be inserted */
exp = ip_conntrack_expect_alloc(); exp = ip_conntrack_expect_alloc(ct);
if (exp == NULL) { if (exp == NULL) {
ret = NF_DROP; ret = NF_DROP;
goto out; goto out;
...@@ -403,8 +403,7 @@ static int help(struct sk_buff **pskb, ...@@ -403,8 +403,7 @@ static int help(struct sk_buff **pskb,
networks, or the packet filter itself). */ networks, or the packet filter itself). */
if (!loose) { if (!loose) {
ret = NF_ACCEPT; ret = NF_ACCEPT;
ip_conntrack_expect_free(exp); goto out_put_expect;
goto out_update_nl;
} }
exp->tuple.dst.ip = htonl((array[0] << 24) | (array[1] << 16) exp->tuple.dst.ip = htonl((array[0] << 24) | (array[1] << 16)
| (array[2] << 8) | array[3]); | (array[2] << 8) | array[3]);
...@@ -419,7 +418,6 @@ static int help(struct sk_buff **pskb, ...@@ -419,7 +418,6 @@ static int help(struct sk_buff **pskb,
{ 0xFFFFFFFF, { .tcp = { 0xFFFF } }, 0xFF }}); { 0xFFFFFFFF, { .tcp = { 0xFFFF } }, 0xFF }});
exp->expectfn = NULL; exp->expectfn = NULL;
exp->master = ct;
/* Now, NAT might want to mangle the packet, and register the /* Now, NAT might want to mangle the packet, and register the
* (possibly changed) expectation itself. */ * (possibly changed) expectation itself. */
...@@ -428,13 +426,15 @@ static int help(struct sk_buff **pskb, ...@@ -428,13 +426,15 @@ static int help(struct sk_buff **pskb,
matchoff, matchlen, exp, &seq); matchoff, matchlen, exp, &seq);
else { else {
/* Can't expect this? Best to drop packet now. */ /* Can't expect this? Best to drop packet now. */
if (ip_conntrack_expect_related(exp) != 0) { if (ip_conntrack_expect_related(exp) != 0)
ip_conntrack_expect_free(exp);
ret = NF_DROP; ret = NF_DROP;
} else else
ret = NF_ACCEPT; ret = NF_ACCEPT;
} }
out_put_expect:
ip_conntrack_expect_put(exp);
out_update_nl: out_update_nl:
/* Now if this ends in \n, update ftp info. Seq may have been /* Now if this ends in \n, update ftp info. Seq may have been
* adjusted by NAT code. */ * adjusted by NAT code. */
......
...@@ -197,7 +197,7 @@ static int help(struct sk_buff **pskb, ...@@ -197,7 +197,7 @@ static int help(struct sk_buff **pskb,
continue; continue;
} }
exp = ip_conntrack_expect_alloc(); exp = ip_conntrack_expect_alloc(ct);
if (exp == NULL) { if (exp == NULL) {
ret = NF_DROP; ret = NF_DROP;
goto out; goto out;
...@@ -221,16 +221,14 @@ static int help(struct sk_buff **pskb, ...@@ -221,16 +221,14 @@ static int help(struct sk_buff **pskb,
{ { 0, { 0 } }, { { 0, { 0 } },
{ 0xFFFFFFFF, { .tcp = { 0xFFFF } }, 0xFF }}); { 0xFFFFFFFF, { .tcp = { 0xFFFF } }, 0xFF }});
exp->expectfn = NULL; exp->expectfn = NULL;
exp->master = ct;
if (ip_nat_irc_hook) if (ip_nat_irc_hook)
ret = ip_nat_irc_hook(pskb, ctinfo, ret = ip_nat_irc_hook(pskb, ctinfo,
addr_beg_p - ib_ptr, addr_beg_p - ib_ptr,
addr_end_p - addr_beg_p, addr_end_p - addr_beg_p,
exp); exp);
else if (ip_conntrack_expect_related(exp) != 0) { else if (ip_conntrack_expect_related(exp) != 0)
ip_conntrack_expect_free(exp);
ret = NF_DROP; ret = NF_DROP;
} ip_conntrack_expect_put(exp);
goto out; goto out;
} /* for .. NUM_DCCPROTO */ } /* for .. NUM_DCCPROTO */
} /* while data < ... */ } /* while data < ... */
......
...@@ -985,7 +985,7 @@ EXPORT_SYMBOL(ip_ct_refresh_acct); ...@@ -985,7 +985,7 @@ EXPORT_SYMBOL(ip_ct_refresh_acct);
EXPORT_SYMBOL(ip_ct_protos); EXPORT_SYMBOL(ip_ct_protos);
EXPORT_SYMBOL(ip_ct_find_proto); EXPORT_SYMBOL(ip_ct_find_proto);
EXPORT_SYMBOL(ip_conntrack_expect_alloc); EXPORT_SYMBOL(ip_conntrack_expect_alloc);
EXPORT_SYMBOL(ip_conntrack_expect_free); EXPORT_SYMBOL(ip_conntrack_expect_put);
EXPORT_SYMBOL(ip_conntrack_expect_related); EXPORT_SYMBOL(ip_conntrack_expect_related);
EXPORT_SYMBOL(ip_conntrack_unexpect_related); EXPORT_SYMBOL(ip_conntrack_unexpect_related);
EXPORT_SYMBOL(ip_conntrack_tuple_taken); EXPORT_SYMBOL(ip_conntrack_tuple_taken);
......
...@@ -65,7 +65,7 @@ static int tftp_help(struct sk_buff **pskb, ...@@ -65,7 +65,7 @@ static int tftp_help(struct sk_buff **pskb,
DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
exp = ip_conntrack_expect_alloc(); exp = ip_conntrack_expect_alloc(ct);
if (exp == NULL) if (exp == NULL)
return NF_DROP; return NF_DROP;
...@@ -75,17 +75,15 @@ static int tftp_help(struct sk_buff **pskb, ...@@ -75,17 +75,15 @@ static int tftp_help(struct sk_buff **pskb,
exp->mask.dst.u.udp.port = 0xffff; exp->mask.dst.u.udp.port = 0xffff;
exp->mask.dst.protonum = 0xff; exp->mask.dst.protonum = 0xff;
exp->expectfn = NULL; exp->expectfn = NULL;
exp->master = ct;
DEBUGP("expect: "); DEBUGP("expect: ");
DUMP_TUPLE(&exp->tuple); DUMP_TUPLE(&exp->tuple);
DUMP_TUPLE(&exp->mask); DUMP_TUPLE(&exp->mask);
if (ip_nat_tftp_hook) if (ip_nat_tftp_hook)
ret = ip_nat_tftp_hook(pskb, ctinfo, exp); ret = ip_nat_tftp_hook(pskb, ctinfo, exp);
else if (ip_conntrack_expect_related(exp) != 0) { else if (ip_conntrack_expect_related(exp) != 0)
ip_conntrack_expect_free(exp);
ret = NF_DROP; ret = NF_DROP;
} ip_conntrack_expect_put(exp);
break; break;
case TFTP_OPCODE_DATA: case TFTP_OPCODE_DATA:
case TFTP_OPCODE_ACK: case TFTP_OPCODE_ACK:
......
...@@ -56,10 +56,8 @@ static unsigned int help(struct sk_buff **pskb, ...@@ -56,10 +56,8 @@ static unsigned int help(struct sk_buff **pskb,
break; break;
} }
if (port == 0) { if (port == 0)
ip_conntrack_expect_free(exp);
return NF_DROP; return NF_DROP;
}
sprintf(buffer, "%u", port); sprintf(buffer, "%u", port);
ret = ip_nat_mangle_udp_packet(pskb, exp->master, ctinfo, ret = ip_nat_mangle_udp_packet(pskb, exp->master, ctinfo,
......
...@@ -143,10 +143,8 @@ static unsigned int ip_nat_ftp(struct sk_buff **pskb, ...@@ -143,10 +143,8 @@ static unsigned int ip_nat_ftp(struct sk_buff **pskb,
break; break;
} }
if (port == 0) { if (port == 0)
ip_conntrack_expect_free(exp);
return NF_DROP; return NF_DROP;
}
if (!mangle[type](pskb, newip, port, matchoff, matchlen, ct, ctinfo, if (!mangle[type](pskb, newip, port, matchoff, matchlen, ct, ctinfo,
seq)) { seq)) {
......
...@@ -65,10 +65,8 @@ static unsigned int help(struct sk_buff **pskb, ...@@ -65,10 +65,8 @@ static unsigned int help(struct sk_buff **pskb,
break; break;
} }
if (port == 0) { if (port == 0)
ip_conntrack_expect_free(exp);
return NF_DROP; return NF_DROP;
}
/* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27 /* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27
* strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28 * strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28
......
...@@ -35,16 +35,17 @@ icmp_unique_tuple(struct ip_conntrack_tuple *tuple, ...@@ -35,16 +35,17 @@ icmp_unique_tuple(struct ip_conntrack_tuple *tuple,
const struct ip_conntrack *conntrack) const struct ip_conntrack *conntrack)
{ {
static u_int16_t id; static u_int16_t id;
unsigned int range_size unsigned int range_size;
= (unsigned int)range->max.icmp.id - range->min.icmp.id + 1;
unsigned int i; unsigned int i;
range_size = ntohs(range->max.icmp.id) - ntohs(range->min.icmp.id) + 1;
/* If no range specified... */ /* If no range specified... */
if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED))
range_size = 0xFFFF; range_size = 0xFFFF;
for (i = 0; i < range_size; i++, id++) { for (i = 0; i < range_size; i++, id++) {
tuple->src.u.icmp.id = range->min.icmp.id + (id % range_size); tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) +
(id % range_size));
if (!ip_nat_used_tuple(tuple, conntrack)) if (!ip_nat_used_tuple(tuple, conntrack))
return 1; return 1;
} }
......
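
Note: the icmp NAT hunk above does the id-range arithmetic in host byte order (ntohs()) and converts the candidate back with htons(). A small standalone sketch of the same arithmetic, with arbitrary example values:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
    uint16_t min_id = htons(1000);      /* ids as they appear on the wire */
    uint16_t max_id = htons(1010);
    static uint16_t id;                 /* rolling counter, as in the kernel loop */
    unsigned int range_size;
    unsigned int i;

    range_size = ntohs(max_id) - ntohs(min_id) + 1;
    for (i = 0; i < 3; i++, id++) {
        uint16_t candidate = htons(ntohs(min_id) + (id % range_size));
        printf("candidate id (host order): %u\n", (unsigned)ntohs(candidate));
    }
    return 0;
}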
...@@ -40,7 +40,8 @@ tcp_unique_tuple(struct ip_conntrack_tuple *tuple, ...@@ -40,7 +40,8 @@ tcp_unique_tuple(struct ip_conntrack_tuple *tuple,
enum ip_nat_manip_type maniptype, enum ip_nat_manip_type maniptype,
const struct ip_conntrack *conntrack) const struct ip_conntrack *conntrack)
{ {
static u_int16_t port, *portptr; static u_int16_t port;
u_int16_t *portptr;
unsigned int range_size, min, i; unsigned int range_size, min, i;
if (maniptype == IP_NAT_MANIP_SRC) if (maniptype == IP_NAT_MANIP_SRC)
......
...@@ -41,7 +41,8 @@ udp_unique_tuple(struct ip_conntrack_tuple *tuple, ...@@ -41,7 +41,8 @@ udp_unique_tuple(struct ip_conntrack_tuple *tuple,
enum ip_nat_manip_type maniptype, enum ip_nat_manip_type maniptype,
const struct ip_conntrack *conntrack) const struct ip_conntrack *conntrack)
{ {
static u_int16_t port, *portptr; static u_int16_t port;
u_int16_t *portptr;
unsigned int range_size, min, i; unsigned int range_size, min, i;
if (maniptype == IP_NAT_MANIP_SRC) if (maniptype == IP_NAT_MANIP_SRC)
......
...@@ -45,10 +45,8 @@ static unsigned int help(struct sk_buff **pskb, ...@@ -45,10 +45,8 @@ static unsigned int help(struct sk_buff **pskb,
exp->saved_proto.udp.port = exp->tuple.dst.u.tcp.port; exp->saved_proto.udp.port = exp->tuple.dst.u.tcp.port;
exp->dir = IP_CT_DIR_REPLY; exp->dir = IP_CT_DIR_REPLY;
exp->expectfn = ip_nat_follow_master; exp->expectfn = ip_nat_follow_master;
if (ip_conntrack_expect_related(exp) != 0) { if (ip_conntrack_expect_related(exp) != 0)
ip_conntrack_expect_free(exp);
return NF_DROP; return NF_DROP;
}
return NF_ACCEPT; return NF_ACCEPT;
} }
......
...@@ -78,10 +78,9 @@ static int ipip_rcv(struct sk_buff *skb) ...@@ -78,10 +78,9 @@ static int ipip_rcv(struct sk_buff *skb)
static void ipip_err(struct sk_buff *skb, u32 info) static void ipip_err(struct sk_buff *skb, u32 info)
{ {
struct xfrm_tunnel *handler = ipip_handler; struct xfrm_tunnel *handler = ipip_handler;
u32 arg = info;
if (handler) if (handler)
handler->err_handler(skb, &arg); handler->err_handler(skb, info);
} }
static int ipip_init_state(struct xfrm_state *x) static int ipip_init_state(struct xfrm_state *x)
......
...@@ -91,7 +91,6 @@ config INET6_TUNNEL ...@@ -91,7 +91,6 @@ config INET6_TUNNEL
config IPV6_TUNNEL config IPV6_TUNNEL
tristate "IPv6: IPv6-in-IPv6 tunnel" tristate "IPv6: IPv6-in-IPv6 tunnel"
depends on IPV6 depends on IPV6
select INET6_TUNNEL
---help--- ---help---
Support for IPv6-in-IPv6 tunnels described in RFC 2473. Support for IPv6-in-IPv6 tunnels described in RFC 2473.
......
...@@ -1110,11 +1110,39 @@ ip6ip6_fb_tnl_dev_init(struct net_device *dev) ...@@ -1110,11 +1110,39 @@ ip6ip6_fb_tnl_dev_init(struct net_device *dev)
return 0; return 0;
} }
#ifdef CONFIG_INET6_TUNNEL
static struct xfrm6_tunnel ip6ip6_handler = { static struct xfrm6_tunnel ip6ip6_handler = {
.handler = ip6ip6_rcv, .handler = ip6ip6_rcv,
.err_handler = ip6ip6_err, .err_handler = ip6ip6_err,
}; };
static inline int ip6ip6_register(void)
{
return xfrm6_tunnel_register(&ip6ip6_handler);
}
static inline int ip6ip6_unregister(void)
{
return xfrm6_tunnel_deregister(&ip6ip6_handler);
}
#else
static struct inet6_protocol xfrm6_tunnel_protocol = {
.handler = ip6ip6_rcv,
.err_handler = ip6ip6_err,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
static inline int ip6ip6_register(void)
{
return inet6_add_protocol(&xfrm6_tunnel_protocol, IPPROTO_IPV6);
}
static inline int ip6ip6_unregister(void)
{
return inet6_del_protocol(&xfrm6_tunnel_protocol, IPPROTO_IPV6);
}
#endif
/** /**
* ip6_tunnel_init - register protocol and reserve needed resources * ip6_tunnel_init - register protocol and reserve needed resources
* *
...@@ -1125,7 +1153,7 @@ static int __init ip6_tunnel_init(void) ...@@ -1125,7 +1153,7 @@ static int __init ip6_tunnel_init(void)
{ {
int err; int err;
if (xfrm6_tunnel_register(&ip6ip6_handler) < 0) { if (ip6ip6_register() < 0) {
printk(KERN_ERR "ip6ip6 init: can't register tunnel\n"); printk(KERN_ERR "ip6ip6 init: can't register tunnel\n");
return -EAGAIN; return -EAGAIN;
} }
...@@ -1144,7 +1172,7 @@ static int __init ip6_tunnel_init(void) ...@@ -1144,7 +1172,7 @@ static int __init ip6_tunnel_init(void)
} }
return 0; return 0;
fail: fail:
xfrm6_tunnel_deregister(&ip6ip6_handler); ip6ip6_unregister();
return err; return err;
} }
...@@ -1154,7 +1182,7 @@ static int __init ip6_tunnel_init(void) ...@@ -1154,7 +1182,7 @@ static int __init ip6_tunnel_init(void)
static void __exit ip6_tunnel_cleanup(void) static void __exit ip6_tunnel_cleanup(void)
{ {
if (xfrm6_tunnel_deregister(&ip6ip6_handler) < 0) if (ip6ip6_unregister() < 0)
printk(KERN_INFO "ip6ip6 close: can't deregister tunnel\n"); printk(KERN_INFO "ip6ip6 close: can't deregister tunnel\n");
unregister_netdev(ip6ip6_fb_tnl_dev); unregister_netdev(ip6ip6_fb_tnl_dev);
......
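
Note: the ip6_tunnel.c hunks above hide the two possible registration paths behind a pair of inline wrappers chosen at compile time. A standalone sketch of that pattern (backend names are made up for illustration):

#include <stdio.h>

#ifdef USE_BACKEND_A
static inline int backend_register(void)   { printf("backend A registered\n"); return 0; }
static inline int backend_unregister(void) { printf("backend A unregistered\n"); return 0; }
#else
static inline int backend_register(void)   { printf("backend B registered\n"); return 0; }
static inline int backend_unregister(void) { printf("backend B unregistered\n"); return 0; }
#endif

int main(void)
{
    if (backend_register() < 0) {
        fprintf(stderr, "init: can't register\n");
        return 1;
    }
    backend_unregister();
    return 0;
}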
...@@ -76,7 +76,9 @@ static DECLARE_MUTEX(ipqnl_sem); ...@@ -76,7 +76,9 @@ static DECLARE_MUTEX(ipqnl_sem);
static void static void
ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict) ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
{ {
local_bh_disable();
nf_reinject(entry->skb, entry->info, verdict); nf_reinject(entry->skb, entry->info, verdict);
local_bh_enable();
kfree(entry); kfree(entry);
} }
......
...@@ -373,9 +373,10 @@ ip6t_log_packet(unsigned int hooknum, ...@@ -373,9 +373,10 @@ ip6t_log_packet(unsigned int hooknum,
in ? in->name : "", in ? in->name : "",
out ? out->name : ""); out ? out->name : "");
if (in && !out) { if (in && !out) {
unsigned int len;
/* MAC logging for input chain only. */ /* MAC logging for input chain only. */
printk("MAC="); printk("MAC=");
if (skb->dev && skb->dev->hard_header_len && if (skb->dev && (len = skb->dev->hard_header_len) &&
skb->mac.raw != skb->nh.raw) { skb->mac.raw != skb->nh.raw) {
unsigned char *p = skb->mac.raw; unsigned char *p = skb->mac.raw;
int i; int i;
...@@ -384,9 +385,11 @@ ip6t_log_packet(unsigned int hooknum, ...@@ -384,9 +385,11 @@ ip6t_log_packet(unsigned int hooknum,
(p -= ETH_HLEN) < skb->head) (p -= ETH_HLEN) < skb->head)
p = NULL; p = NULL;
if (p != NULL) if (p != NULL) {
for (i = 0; i < skb->dev->hard_header_len; i++) for (i = 0; i < len; i++)
printk("%02x", p[i]); printk("%02x%s", p[i],
i == len - 1 ? "" : ":");
}
printk(" "); printk(" ");
if (skb->dev->type == ARPHRD_SIT) { if (skb->dev->type == ARPHRD_SIT) {
......
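
Note: the ip6t_LOG hunk above caches the header length and prints the MAC bytes colon-separated. A runnable standalone sketch of that formatting loop, using a sample address:

#include <stdio.h>

int main(void)
{
    unsigned char mac[] = { 0x00, 0x16, 0x3e, 0x12, 0x34, 0x56 };
    unsigned int len = sizeof(mac);     /* stands in for dev->hard_header_len */
    unsigned int i;

    printf("MAC=");
    for (i = 0; i < len; i++)
        printf("%02x%s", mac[i], i == len - 1 ? "" : ":");
    printf("\n");
    return 0;
}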
...@@ -648,7 +648,8 @@ void netlink_detachskb(struct sock *sk, struct sk_buff *skb) ...@@ -648,7 +648,8 @@ void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
sock_put(sk); sock_put(sk);
} }
static inline struct sk_buff *netlink_trim(struct sk_buff *skb, int allocation) static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
unsigned int __nocast allocation)
{ {
int delta; int delta;
...@@ -717,7 +718,7 @@ struct netlink_broadcast_data { ...@@ -717,7 +718,7 @@ struct netlink_broadcast_data {
int failure; int failure;
int congested; int congested;
int delivered; int delivered;
int allocation; unsigned int allocation;
struct sk_buff *skb, *skb2; struct sk_buff *skb, *skb2;
}; };
......
...@@ -27,17 +27,17 @@ ...@@ -27,17 +27,17 @@
* lvalue rvalue * lvalue rvalue
* +-----------+ +-----------+ * +-----------+ +-----------+
* | type: INT | | type: INT | * | type: INT | | type: INT |
* def | id: INDEV | | id: VALUE | * def | id: DEV | | id: VALUE |
* | data: | | data: 3 | * | data: | | data: 3 |
* +-----------+ +-----------+ * +-----------+ +-----------+
* | | * | |
* ---> meta_ops[INT][INDEV](...) | * ---> meta_ops[INT][DEV](...) |
* | | * | |
* ----------- | * ----------- |
* V V * V V
* +-----------+ +-----------+ * +-----------+ +-----------+
* | type: INT | | type: INT | * | type: INT | | type: INT |
* obj | id: INDEV | | id: VALUE | * obj | id: DEV | | id: VALUE |
* | data: 2 |<--data got filled out | data: 3 | * | data: 2 |<--data got filled out | data: 3 |
* +-----------+ +-----------+ * +-----------+ +-----------+
* | | * | |
...@@ -170,26 +170,6 @@ META_COLLECTOR(var_dev) ...@@ -170,26 +170,6 @@ META_COLLECTOR(var_dev)
*err = var_dev(skb->dev, dst); *err = var_dev(skb->dev, dst);
} }
META_COLLECTOR(int_indev)
{
*err = int_dev(skb->input_dev, dst);
}
META_COLLECTOR(var_indev)
{
*err = var_dev(skb->input_dev, dst);
}
META_COLLECTOR(int_realdev)
{
*err = int_dev(skb->real_dev, dst);
}
META_COLLECTOR(var_realdev)
{
*err = var_dev(skb->real_dev, dst);
}
/************************************************************************** /**************************************************************************
* skb attributes * skb attributes
**************************************************************************/ **************************************************************************/
...@@ -229,12 +209,14 @@ META_COLLECTOR(int_maclen) ...@@ -229,12 +209,14 @@ META_COLLECTOR(int_maclen)
* Netfilter * Netfilter
**************************************************************************/ **************************************************************************/
#ifdef CONFIG_NETFILTER
META_COLLECTOR(int_nfmark) META_COLLECTOR(int_nfmark)
{ {
#ifdef CONFIG_NETFILTER
dst->value = skb->nfmark; dst->value = skb->nfmark;
} #else
dst->value = 0;
#endif #endif
}
/************************************************************************** /**************************************************************************
* Traffic Control * Traffic Control
...@@ -245,31 +227,21 @@ META_COLLECTOR(int_tcindex) ...@@ -245,31 +227,21 @@ META_COLLECTOR(int_tcindex)
dst->value = skb->tc_index; dst->value = skb->tc_index;
} }
#ifdef CONFIG_NET_CLS_ACT
META_COLLECTOR(int_tcverd)
{
dst->value = skb->tc_verd;
}
META_COLLECTOR(int_tcclassid)
{
dst->value = skb->tc_classid;
}
#endif
/************************************************************************** /**************************************************************************
* Routing * Routing
**************************************************************************/ **************************************************************************/
#ifdef CONFIG_NET_CLS_ROUTE
META_COLLECTOR(int_rtclassid) META_COLLECTOR(int_rtclassid)
{ {
if (unlikely(skb->dst == NULL)) if (unlikely(skb->dst == NULL))
*err = -1; *err = -1;
else else
#ifdef CONFIG_NET_CLS_ROUTE
dst->value = skb->dst->tclassid; dst->value = skb->dst->tclassid;
} #else
dst->value = 0;
#endif #endif
}
META_COLLECTOR(int_rtiif) META_COLLECTOR(int_rtiif)
{ {
...@@ -505,8 +477,6 @@ struct meta_ops ...@@ -505,8 +477,6 @@ struct meta_ops
static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = { static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
[TCF_META_TYPE_VAR] = { [TCF_META_TYPE_VAR] = {
[META_ID(DEV)] = META_FUNC(var_dev), [META_ID(DEV)] = META_FUNC(var_dev),
[META_ID(INDEV)] = META_FUNC(var_indev),
[META_ID(REALDEV)] = META_FUNC(var_realdev),
[META_ID(SK_BOUND_IF)] = META_FUNC(var_sk_bound_if), [META_ID(SK_BOUND_IF)] = META_FUNC(var_sk_bound_if),
}, },
[TCF_META_TYPE_INT] = { [TCF_META_TYPE_INT] = {
...@@ -515,25 +485,15 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = { ...@@ -515,25 +485,15 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
[META_ID(LOADAVG_1)] = META_FUNC(int_loadavg_1), [META_ID(LOADAVG_1)] = META_FUNC(int_loadavg_1),
[META_ID(LOADAVG_2)] = META_FUNC(int_loadavg_2), [META_ID(LOADAVG_2)] = META_FUNC(int_loadavg_2),
[META_ID(DEV)] = META_FUNC(int_dev), [META_ID(DEV)] = META_FUNC(int_dev),
[META_ID(INDEV)] = META_FUNC(int_indev),
[META_ID(REALDEV)] = META_FUNC(int_realdev),
[META_ID(PRIORITY)] = META_FUNC(int_priority), [META_ID(PRIORITY)] = META_FUNC(int_priority),
[META_ID(PROTOCOL)] = META_FUNC(int_protocol), [META_ID(PROTOCOL)] = META_FUNC(int_protocol),
[META_ID(PKTTYPE)] = META_FUNC(int_pkttype), [META_ID(PKTTYPE)] = META_FUNC(int_pkttype),
[META_ID(PKTLEN)] = META_FUNC(int_pktlen), [META_ID(PKTLEN)] = META_FUNC(int_pktlen),
[META_ID(DATALEN)] = META_FUNC(int_datalen), [META_ID(DATALEN)] = META_FUNC(int_datalen),
[META_ID(MACLEN)] = META_FUNC(int_maclen), [META_ID(MACLEN)] = META_FUNC(int_maclen),
#ifdef CONFIG_NETFILTER
[META_ID(NFMARK)] = META_FUNC(int_nfmark), [META_ID(NFMARK)] = META_FUNC(int_nfmark),
#endif
[META_ID(TCINDEX)] = META_FUNC(int_tcindex), [META_ID(TCINDEX)] = META_FUNC(int_tcindex),
#ifdef CONFIG_NET_CLS_ACT
[META_ID(TCVERDICT)] = META_FUNC(int_tcverd),
[META_ID(TCCLASSID)] = META_FUNC(int_tcclassid),
#endif
#ifdef CONFIG_NET_CLS_ROUTE
[META_ID(RTCLASSID)] = META_FUNC(int_rtclassid), [META_ID(RTCLASSID)] = META_FUNC(int_rtclassid),
#endif
[META_ID(RTIIF)] = META_FUNC(int_rtiif), [META_ID(RTIIF)] = META_FUNC(int_rtiif),
[META_ID(SK_FAMILY)] = META_FUNC(int_sk_family), [META_ID(SK_FAMILY)] = META_FUNC(int_sk_family),
[META_ID(SK_STATE)] = META_FUNC(int_sk_state), [META_ID(SK_STATE)] = META_FUNC(int_sk_state),
......
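
Note: the em_meta hunks above move the config #ifdef inside the collector body so the function is always compiled and simply reports 0 when the field is compiled out. A standalone sketch of that pattern (names are illustrative):

#include <stdio.h>

static unsigned long collect_mark(unsigned long real_mark)
{
    unsigned long value;
#ifdef HAVE_MARK_FIELD
    value = real_mark;      /* field exists in this configuration */
#else
    (void)real_mark;
    value = 0;              /* field compiled out: report a neutral value */
#endif
    return value;
}

int main(void)
{
    printf("mark=%lu\n", collect_mark(42));
    return 0;
}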
...@@ -55,9 +55,6 @@ static int em_text_change(struct tcf_proto *tp, void *data, int len, ...@@ -55,9 +55,6 @@ static int em_text_change(struct tcf_proto *tp, void *data, int len,
struct ts_config *ts_conf; struct ts_config *ts_conf;
int flags = 0; int flags = 0;
printk("Configuring text: %s from %d:%d to %d:%d len %d\n", conf->algo, conf->from_offset,
conf->from_layer, conf->to_offset, conf->to_layer, conf->pattern_len);
if (len < sizeof(*conf) || len < (sizeof(*conf) + conf->pattern_len)) if (len < sizeof(*conf) || len < (sizeof(*conf) + conf->pattern_len))
return -EINVAL; return -EINVAL;
......
...@@ -331,11 +331,10 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc) ...@@ -331,11 +331,10 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
int prio; int prio;
struct sk_buff_head *list = qdisc_priv(qdisc); struct sk_buff_head *list = qdisc_priv(qdisc);
for (prio = 0; prio < PFIFO_FAST_BANDS; prio++, list++) { for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list); if (!skb_queue_empty(list + prio)) {
if (skb) {
qdisc->q.qlen--; qdisc->q.qlen--;
return skb; return __qdisc_dequeue_head(qdisc, list + prio);
} }
} }
......
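
Note: the pfifo_fast hunk above checks each band for emptiness before touching qlen and dequeuing. A simplified standalone sketch of scanning priority bands and taking from the first non-empty one (each band here is just a fixed array used as a stack; the real code uses FIFO skb lists):

#include <stdio.h>

#define BANDS 3
#define SLOTS 4

struct band {
    int items[SLOTS];
    int count;
};

static int dequeue_highest(struct band *bands, int nbands, int *out)
{
    int prio;

    for (prio = 0; prio < nbands; prio++) {
        if (bands[prio].count > 0) {    /* check emptiness first, as the patch does */
            *out = bands[prio].items[--bands[prio].count];
            return 1;
        }
    }
    return 0;                           /* all bands empty */
}

int main(void)
{
    struct band bands[BANDS] = { { {0}, 0 }, { {7, 8}, 2 }, { {9}, 1 } };
    int v;

    while (dequeue_highest(bands, BANDS, &v))
        printf("dequeued %d\n", v);
    return 0;
}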
...@@ -351,7 +351,6 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc, ...@@ -351,7 +351,6 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
* *
*/ */
void sctp_icmp_proto_unreachable(struct sock *sk, void sctp_icmp_proto_unreachable(struct sock *sk,
struct sctp_endpoint *ep,
struct sctp_association *asoc, struct sctp_association *asoc,
struct sctp_transport *t) struct sctp_transport *t)
{ {
...@@ -367,7 +366,6 @@ void sctp_icmp_proto_unreachable(struct sock *sk, ...@@ -367,7 +366,6 @@ void sctp_icmp_proto_unreachable(struct sock *sk,
/* Common lookup code for icmp/icmpv6 error handler. */ /* Common lookup code for icmp/icmpv6 error handler. */
struct sock *sctp_err_lookup(int family, struct sk_buff *skb, struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
struct sctphdr *sctphdr, struct sctphdr *sctphdr,
struct sctp_endpoint **epp,
struct sctp_association **app, struct sctp_association **app,
struct sctp_transport **tpp) struct sctp_transport **tpp)
{ {
...@@ -375,11 +373,10 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb, ...@@ -375,11 +373,10 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
union sctp_addr daddr; union sctp_addr daddr;
struct sctp_af *af; struct sctp_af *af;
struct sock *sk = NULL; struct sock *sk = NULL;
struct sctp_endpoint *ep = NULL;
struct sctp_association *asoc = NULL; struct sctp_association *asoc = NULL;
struct sctp_transport *transport = NULL; struct sctp_transport *transport = NULL;
*app = NULL; *epp = NULL; *tpp = NULL; *app = NULL; *tpp = NULL;
af = sctp_get_af_specific(family); af = sctp_get_af_specific(family);
if (unlikely(!af)) { if (unlikely(!af)) {
...@@ -394,26 +391,15 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb, ...@@ -394,26 +391,15 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
* packet. * packet.
*/ */
asoc = __sctp_lookup_association(&saddr, &daddr, &transport); asoc = __sctp_lookup_association(&saddr, &daddr, &transport);
if (!asoc) { if (!asoc)
/* If there is no matching association, see if it matches any return NULL;
* endpoint. This may happen for an ICMP error generated in
* response to an INIT_ACK.
*/
ep = __sctp_rcv_lookup_endpoint(&daddr);
if (!ep) {
return NULL;
}
}
if (asoc) { sk = asoc->base.sk;
sk = asoc->base.sk;
if (ntohl(sctphdr->vtag) != asoc->c.peer_vtag) { if (ntohl(sctphdr->vtag) != asoc->c.peer_vtag) {
ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); ICMP_INC_STATS_BH(ICMP_MIB_INERRORS);
goto out; goto out;
} }
} else
sk = ep->base.sk;
sctp_bh_lock_sock(sk); sctp_bh_lock_sock(sk);
...@@ -423,7 +409,6 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb, ...@@ -423,7 +409,6 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
if (sock_owned_by_user(sk)) if (sock_owned_by_user(sk))
NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS); NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);
*epp = ep;
*app = asoc; *app = asoc;
*tpp = transport; *tpp = transport;
return sk; return sk;
...@@ -432,21 +417,16 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb, ...@@ -432,21 +417,16 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
sock_put(sk); sock_put(sk);
if (asoc) if (asoc)
sctp_association_put(asoc); sctp_association_put(asoc);
if (ep)
sctp_endpoint_put(ep);
return NULL; return NULL;
} }
/* Common cleanup code for icmp/icmpv6 error handler. */ /* Common cleanup code for icmp/icmpv6 error handler. */
void sctp_err_finish(struct sock *sk, struct sctp_endpoint *ep, void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
struct sctp_association *asoc)
{ {
sctp_bh_unlock_sock(sk); sctp_bh_unlock_sock(sk);
sock_put(sk); sock_put(sk);
if (asoc) if (asoc)
sctp_association_put(asoc); sctp_association_put(asoc);
if (ep)
sctp_endpoint_put(ep);
} }
/* /*
...@@ -471,7 +451,6 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) ...@@ -471,7 +451,6 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
int type = skb->h.icmph->type; int type = skb->h.icmph->type;
int code = skb->h.icmph->code; int code = skb->h.icmph->code;
struct sock *sk; struct sock *sk;
struct sctp_endpoint *ep;
struct sctp_association *asoc; struct sctp_association *asoc;
struct sctp_transport *transport; struct sctp_transport *transport;
struct inet_sock *inet; struct inet_sock *inet;
...@@ -488,7 +467,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) ...@@ -488,7 +467,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
savesctp = skb->h.raw; savesctp = skb->h.raw;
skb->nh.iph = iph; skb->nh.iph = iph;
skb->h.raw = (char *)sh; skb->h.raw = (char *)sh;
sk = sctp_err_lookup(AF_INET, skb, sh, &ep, &asoc, &transport); sk = sctp_err_lookup(AF_INET, skb, sh, &asoc, &transport);
/* Put back, the original pointers. */ /* Put back, the original pointers. */
skb->nh.raw = saveip; skb->nh.raw = saveip;
skb->h.raw = savesctp; skb->h.raw = savesctp;
...@@ -515,7 +494,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) ...@@ -515,7 +494,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
} }
else { else {
if (ICMP_PROT_UNREACH == code) { if (ICMP_PROT_UNREACH == code) {
sctp_icmp_proto_unreachable(sk, ep, asoc, sctp_icmp_proto_unreachable(sk, asoc,
transport); transport);
goto out_unlock; goto out_unlock;
} }
...@@ -544,7 +523,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) ...@@ -544,7 +523,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
} }
out_unlock: out_unlock:
sctp_err_finish(sk, ep, asoc); sctp_err_finish(sk, asoc);
} }
/* /*
......
This diff is collapsed.
This diff is collapsed.