MIPS: Make irqflags.h functions preempt-safe for non-mipsr2 cpus
For non-MIPSr2 processors, such as the BMIPS 5000, calls to
arch_local_irq_disable() and others may be preempted, and in doing
so a stale value may be restored to c0_status.  This fix disables
preemption for such processors prior to the call and enables it
after the call.

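As a rough illustration of the race window (a hypothetical interleaving;
the task names are made up and not taken from the trace described below):

    /*
     * Unpatched non-MIPSR2 path of arch_local_irq_disable():
     *
     *   task A:  mfc0  $1, $12     # read c0_status (IE = 1)
     *     <-- preempted here; task B runs and modifies c0_status -->
     *   task A:  mtc0  $1, $12     # write back the stale, pre-switch value
     *
     * Whatever task B left in c0_status is silently overwritten.
     */
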
Those functions that needed this fix have been "outlined" to
mips-atomic.c, as they are no longer good candidates for inlining.

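As a minimal sketch of that pattern (assuming the plain non-SMTC,
non-MIPSR2 path; the real mips-atomic.c also covers the SMTC variant and
exports the symbols), the outlined arch_local_irq_disable() looks roughly
like this:

    #include <linux/preempt.h>
    #include <asm/hazards.h>

    /* Sketch only: preemption is held off across the read-modify-write
     * of c0_status so a stale value cannot be written back. */
    void arch_local_irq_disable(void)
    {
            preempt_disable();
            __asm__ __volatile__(
            "       .set    push                    \n"
            "       .set    noat                    \n"
            "       mfc0    $1, $12                 \n" /* read c0_status */
            "       ori     $1, 0x1f                \n"
            "       xori    $1, 0x1f                \n" /* clear Status[4:0] => IE = 0 */
            "       .set    noreorder               \n"
            "       mtc0    $1, $12                 \n" /* write it back */
            "       irq_disable_hazard              \n"
            "       .set    pop                     \n"
            : /* no outputs */
            : /* no inputs */
            : "memory");
            preempt_enable();
    }
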
This bug was observed on a BMIPS 5000, occurring once every few hours
in a continuous reboot test.  It was traced to the write_lock_irq()
function which was being invoked in release_task() in exit.c.
By placing a number of "nops" in between the mfc0/mtc0 pair in
arch_local_irq_disable(), which is called by write_lock_irq(), we
were able to greatly increase the occurrence of this bug.  Conversely,
the application of this commit silenced the bug.

Signed-off-by: Jim Quinlan <jim2101024@gmail.com>
Cc: linux-mips@linux-mips.org
Cc: David Daney <ddaney.cavm@gmail.com>
Cc: Kevin Cernekee <cernekee@gmail.com>
Cc: Jim Quinlan <jim2101024@gmail.com>
Patchwork: https://patchwork.linux-mips.org/patch/4321/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
jim2101024 authored and ralfbaechle committed Nov 9, 2012
1 parent 92d1159 commit e97c5b6
Showing 3 changed files with 253 additions and 133 deletions.
207 changes: 75 additions & 132 deletions arch/mips/include/asm/irqflags.h
@@ -16,83 +16,13 @@
#include <linux/compiler.h>
#include <asm/hazards.h>

__asm__(
" .macro arch_local_irq_enable \n"
" .set push \n"
" .set reorder \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
" mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n"
" ori $1, 0x400 \n"
" xori $1, 0x400 \n"
" mtc0 $1, $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2)
" ei \n"
#else
" mfc0 $1,$12 \n"
" ori $1,0x1f \n"
" xori $1,0x1e \n"
" mtc0 $1,$12 \n"
#endif
" irq_enable_hazard \n"
" .set pop \n"
" .endm");
#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)

extern void smtc_ipi_replay(void);

static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC kernel needs to do a software replay of queued
* IPIs, at the cost of call overhead on each local_irq_enable()
*/
smtc_ipi_replay();
#endif
__asm__ __volatile__(
"arch_local_irq_enable"
: /* no outputs */
: /* no inputs */
: "memory");
}


/*
* For cli() we have to insert nops to make sure that the new value
* has actually arrived in the status register before the end of this
* macro.
* R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
* no nops at all.
*/
/*
* For TX49, operating only IE bit is not enough.
*
* If mfc0 $12 follows store and the mfc0 is last instruction of a
* page and fetching the next instruction causes TLB miss, the result
* of the mfc0 might wrongly contain EXL bit.
*
* ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
*
* Workaround: mask EXL bit of the result or place a nop before mfc0.
*/
__asm__(
" .macro arch_local_irq_disable\n"
" .set push \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
" mfc0 $1, $2, 1 \n"
" ori $1, 0x400 \n"
" .set noreorder \n"
" mtc0 $1, $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2)
" di \n"
#else
" mfc0 $1,$12 \n"
" ori $1,0x1f \n"
" xori $1,0x1f \n"
" .set noreorder \n"
" mtc0 $1,$12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
@@ -106,46 +36,14 @@ static inline void arch_local_irq_disable(void)
: "memory");
}

__asm__(
" .macro arch_local_save_flags flags \n"
" .set push \n"
" .set reorder \n"
#ifdef CONFIG_MIPS_MT_SMTC
" mfc0 \\flags, $2, 1 \n"
#else
" mfc0 \\flags, $12 \n"
#endif
" .set pop \n"
" .endm \n");

static inline unsigned long arch_local_save_flags(void)
{
unsigned long flags;
asm volatile("arch_local_save_flags %0" : "=r" (flags));
return flags;
}

__asm__(
" .macro arch_local_irq_save result \n"
" .set push \n"
" .set reorder \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
" mfc0 \\result, $2, 1 \n"
" ori $1, \\result, 0x400 \n"
" .set noreorder \n"
" mtc0 $1, $2, 1 \n"
" andi \\result, \\result, 0x400 \n"
#elif defined(CONFIG_CPU_MIPSR2)
" di \\result \n"
" andi \\result, 1 \n"
#else
" mfc0 \\result, $12 \n"
" ori $1, \\result, 0x1f \n"
" xori $1, 0x1f \n"
" .set noreorder \n"
" mtc0 $1, $12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
@@ -160,61 +58,37 @@ static inline unsigned long arch_local_irq_save(void)
return flags;
}


__asm__(
" .macro arch_local_irq_restore flags \n"
" .set push \n"
" .set noreorder \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
"mfc0 $1, $2, 1 \n"
"andi \\flags, 0x400 \n"
"ori $1, 0x400 \n"
"xori $1, 0x400 \n"
"or \\flags, $1 \n"
"mtc0 \\flags, $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
#if defined(CONFIG_IRQ_CPU)
/*
* Slow, but doesn't suffer from a relatively unlikely race
* condition we're having since days 1.
*/
" beqz \\flags, 1f \n"
" di \n"
" di \n"
" ei \n"
"1: \n"
#elif defined(CONFIG_CPU_MIPSR2)
#else
/*
* Fast, dangerous. Life is fun, life is good.
*/
" mfc0 $1, $12 \n"
" ins $1, \\flags, 0, 1 \n"
" mtc0 $1, $12 \n"
#else
" mfc0 $1, $12 \n"
" andi \\flags, 1 \n"
" ori $1, 0x1f \n"
" xori $1, 0x1f \n"
" or \\flags, $1 \n"
" mtc0 \\flags, $12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");


static inline void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;

#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC kernel needs to do a software replay of queued
* IPIs, at the cost of branch and call overhead on each
* local_irq_restore()
*/
if (unlikely(!(flags & 0x0400)))
smtc_ipi_replay();
#endif

__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
@@ -232,6 +106,75 @@ static inline void __arch_local_irq_restore(unsigned long flags)
: "0" (flags)
: "memory");
}
#else
/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
void arch_local_irq_disable(void);
unsigned long arch_local_irq_save(void);
void arch_local_irq_restore(unsigned long flags);
void __arch_local_irq_restore(unsigned long flags);
#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */


__asm__(
" .macro arch_local_irq_enable \n"
" .set push \n"
" .set reorder \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
" mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n"
" ori $1, 0x400 \n"
" xori $1, 0x400 \n"
" mtc0 $1, $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2)
" ei \n"
#else
" mfc0 $1,$12 \n"
" ori $1,0x1f \n"
" xori $1,0x1e \n"
" mtc0 $1,$12 \n"
#endif
" irq_enable_hazard \n"
" .set pop \n"
" .endm");

extern void smtc_ipi_replay(void);

static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC kernel needs to do a software replay of queued
* IPIs, at the cost of call overhead on each local_irq_enable()
*/
smtc_ipi_replay();
#endif
__asm__ __volatile__(
"arch_local_irq_enable"
: /* no outputs */
: /* no inputs */
: "memory");
}


__asm__(
" .macro arch_local_save_flags flags \n"
" .set push \n"
" .set reorder \n"
#ifdef CONFIG_MIPS_MT_SMTC
" mfc0 \\flags, $2, 1 \n"
#else
" mfc0 \\flags, $12 \n"
#endif
" .set pop \n"
" .endm \n");

static inline unsigned long arch_local_save_flags(void)
{
unsigned long flags;
asm volatile("arch_local_save_flags %0" : "=r" (flags));
return flags;
}


static inline int arch_irqs_disabled_flags(unsigned long flags)
{
@@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
#endif
}

#endif
#endif /* #ifndef __ASSEMBLY__ */

/*
* Do the CPU's IRQ-state tracing from assembly code.
3 changes: 2 additions & 1 deletion arch/mips/lib/Makefile
@@ -3,7 +3,8 @@
#

lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \
strlen_user.o strncpy_user.o strnlen_user.o uncached.o
mips-atomic.o strlen_user.o strncpy_user.o \
strnlen_user.o uncached.o

obj-y += iomap.o
obj-$(CONFIG_PCI) += iomap-pci.o
