提交 7170bdc7 编写于 作者: James Hogan

MIPS: Add return errors to protected cache ops

The protected cache ops contain no out of line fixup code to return an
error code in the event of a fault, with the cache op being skipped in
that case. For KVM however we'd like to detect this case as page
faulting will be disabled so it could happen during normal operation if
the GVA page tables were flushed, and need to be handled by the caller.

Add the out-of-line fixup code to load the error value -EFAULT into the
return variable, and adapt the protected cache line functions to pass
the error back to the caller.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
上级 722b4544
...@@ -147,49 +147,64 @@ static inline void flush_scache_line(unsigned long addr) ...@@ -147,49 +147,64 @@ static inline void flush_scache_line(unsigned long addr)
} }
#define protected_cache_op(op,addr) \ #define protected_cache_op(op,addr) \
({ \
int __err = 0; \
__asm__ __volatile__( \ __asm__ __volatile__( \
" .set push \n" \ " .set push \n" \
" .set noreorder \n" \ " .set noreorder \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \ " .set "MIPS_ISA_ARCH_LEVEL" \n" \
"1: cache %0, (%1) \n" \ "1: cache %1, (%2) \n" \
"2: .set pop \n" \ "2: .set pop \n" \
" .section .fixup,\"ax\" \n" \
"3: li %0, %3 \n" \
" j 2b \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \ " .section __ex_table,\"a\" \n" \
" "STR(PTR)" 1b, 2b \n" \ " "STR(PTR)" 1b, 3b \n" \
" .previous" \ " .previous" \
: \ : "+r" (__err) \
: "i" (op), "r" (addr)) : "i" (op), "r" (addr), "i" (-EFAULT)); \
__err; \
})
#define protected_cachee_op(op,addr) \ #define protected_cachee_op(op,addr) \
({ \
int __err = 0; \
__asm__ __volatile__( \ __asm__ __volatile__( \
" .set push \n" \ " .set push \n" \
" .set noreorder \n" \ " .set noreorder \n" \
" .set mips0 \n" \ " .set mips0 \n" \
" .set eva \n" \ " .set eva \n" \
"1: cachee %0, (%1) \n" \ "1: cachee %1, (%2) \n" \
"2: .set pop \n" \ "2: .set pop \n" \
" .section .fixup,\"ax\" \n" \
"3: li %0, %3 \n" \
" j 2b \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \ " .section __ex_table,\"a\" \n" \
" "STR(PTR)" 1b, 2b \n" \ " "STR(PTR)" 1b, 3b \n" \
" .previous" \ " .previous" \
: \ : "+r" (__err) \
: "i" (op), "r" (addr)) : "i" (op), "r" (addr), "i" (-EFAULT)); \
__err; \
})
/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr) static inline int protected_flush_icache_line(unsigned long addr)
{ {
switch (boot_cpu_type()) { switch (boot_cpu_type()) {
case CPU_LOONGSON2: case CPU_LOONGSON2:
protected_cache_op(Hit_Invalidate_I_Loongson2, addr); return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
break;
default: default:
#ifdef CONFIG_EVA #ifdef CONFIG_EVA
protected_cachee_op(Hit_Invalidate_I, addr); return protected_cachee_op(Hit_Invalidate_I, addr);
#else #else
protected_cache_op(Hit_Invalidate_I, addr); return protected_cache_op(Hit_Invalidate_I, addr);
#endif #endif
break;
} }
} }
...@@ -199,21 +214,21 @@ static inline void protected_flush_icache_line(unsigned long addr) ...@@ -199,21 +214,21 @@ static inline void protected_flush_icache_line(unsigned long addr)
 * caches. We're talking about one cacheline unnecessarily getting invalidated
 * here so the penalty isn't overly hard.
 */
static inline void protected_writeback_dcache_line(unsigned long addr) static inline int protected_writeback_dcache_line(unsigned long addr)
{ {
#ifdef CONFIG_EVA #ifdef CONFIG_EVA
protected_cachee_op(Hit_Writeback_Inv_D, addr); return protected_cachee_op(Hit_Writeback_Inv_D, addr);
#else #else
protected_cache_op(Hit_Writeback_Inv_D, addr); return protected_cache_op(Hit_Writeback_Inv_D, addr);
#endif #endif
} }
static inline void protected_writeback_scache_line(unsigned long addr) static inline int protected_writeback_scache_line(unsigned long addr)
{ {
#ifdef CONFIG_EVA #ifdef CONFIG_EVA
protected_cachee_op(Hit_Writeback_Inv_SD, addr); return protected_cachee_op(Hit_Writeback_Inv_SD, addr);
#else #else
protected_cache_op(Hit_Writeback_Inv_SD, addr); return protected_cache_op(Hit_Writeback_Inv_SD, addr);
#endif #endif
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册