Commit ee0a54d7 authored by Paul Mackerras, committed by Michael Ellerman

powerpc: Don't check MSR FP/VMX/VSX enable bits in analyse_instr()

This removes the checks for the FP/VMX/VSX enable bits in the MSR
from analyse_instr() and adds them to emulate_step() instead.

The reason for this is that we may want to use analyse_instr() in
a situation where the FP/VMX/VSX register values are stored in the
current thread_struct and the FP/VMX/VSX enable bits in the MSR
image in the pt_regs are zero.  Since analyse_instr() doesn't make
any changes to register state, it is reasonable for it to indicate
what the effect of an instruction would be even though the relevant
enable bit is off.
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Parent 3cdfcbfd
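To make the new division of labour concrete, here is a minimal sketch of a caller, where try_emulate_insn() is purely hypothetical; analyse_instr(), emulate_step(), struct instruction_op, INSTR_TYPE_MASK, LOAD_FP and the MSR bits are the kernel interfaces this patch touches:

	#include <linux/printk.h>	/* pr_debug() */
	#include <asm/reg.h>		/* MSR_FP, MSR_VEC, MSR_VSX */
	#include <asm/ptrace.h>		/* struct pt_regs */
	#include <asm/sstep.h>		/* analyse_instr(), emulate_step() */

	/* Hypothetical fixup path; a sketch, not part of this patch. */
	static int try_emulate_insn(struct pt_regs *regs, unsigned int instr)
	{
		struct instruction_op op;

		/*
		 * Inspection: analyse_instr() fills in op without changing
		 * *regs and, after this patch, does so even when the
		 * FP/VMX/VSX enable bit for the operation is clear in
		 * regs->msr.
		 */
		if (analyse_instr(&op, regs, instr) == 0 &&
		    (op.type & INSTR_TYPE_MASK) == LOAD_FP)
			pr_debug("FP load from ea 0x%lx\n", op.ea);

		/*
		 * Execution: emulate_step() now does the MSR check itself
		 * and returns 0 when the facility is disabled, so the
		 * caller falls back to running the real instruction, which
		 * raises the normal facility-unavailable interrupt.
		 */
		return emulate_step(regs, instr);
	}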
arch/powerpc/lib/sstep.c
@@ -1505,15 +1505,11 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
 #ifdef CONFIG_ALTIVEC
 	case 103:	/* lvx */
 	case 359:	/* lvxl */
-		if (!(regs->msr & MSR_VEC))
-			goto vecunavail;
 		op->type = MKOP(LOAD_VMX, 0, 16);
 		break;

 	case 231:	/* stvx */
 	case 487:	/* stvxl */
-		if (!(regs->msr & MSR_VEC))
-			goto vecunavail;
 		op->type = MKOP(STORE_VMX, 0, 16);
 		break;
 #endif /* CONFIG_ALTIVEC */
@@ -1584,29 +1580,21 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
 #ifdef CONFIG_PPC_FPU
 	case 535:	/* lfsx */
 	case 567:	/* lfsux */
-		if (!(regs->msr & MSR_FP))
-			goto fpunavail;
 		op->type = MKOP(LOAD_FP, u, 4);
 		break;

 	case 599:	/* lfdx */
 	case 631:	/* lfdux */
-		if (!(regs->msr & MSR_FP))
-			goto fpunavail;
 		op->type = MKOP(LOAD_FP, u, 8);
 		break;

 	case 663:	/* stfsx */
 	case 695:	/* stfsux */
-		if (!(regs->msr & MSR_FP))
-			goto fpunavail;
 		op->type = MKOP(STORE_FP, u, 4);
 		break;

 	case 727:	/* stfdx */
 	case 759:	/* stfdux */
-		if (!(regs->msr & MSR_FP))
-			goto fpunavail;
 		op->type = MKOP(STORE_FP, u, 8);
 		break;
 #endif
@@ -1649,16 +1637,12 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
 #ifdef CONFIG_VSX
 	case 844:	/* lxvd2x */
 	case 876:	/* lxvd2ux */
-		if (!(regs->msr & MSR_VSX))
-			goto vsxunavail;
 		op->reg = rd | ((instr & 1) << 5);
 		op->type = MKOP(LOAD_VSX, u, 16);
 		break;

 	case 972:	/* stxvd2x */
 	case 1004:	/* stxvd2ux */
-		if (!(regs->msr & MSR_VSX))
-			goto vsxunavail;
 		op->reg = rd | ((instr & 1) << 5);
 		op->type = MKOP(STORE_VSX, u, 16);
 		break;
@@ -1724,32 +1708,24 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
 #ifdef CONFIG_PPC_FPU
 	case 48:	/* lfs */
 	case 49:	/* lfsu */
-		if (!(regs->msr & MSR_FP))
-			goto fpunavail;
 		op->type = MKOP(LOAD_FP, u, 4);
 		op->ea = dform_ea(instr, regs);
 		break;

 	case 50:	/* lfd */
 	case 51:	/* lfdu */
-		if (!(regs->msr & MSR_FP))
-			goto fpunavail;
 		op->type = MKOP(LOAD_FP, u, 8);
 		op->ea = dform_ea(instr, regs);
 		break;

 	case 52:	/* stfs */
 	case 53:	/* stfsu */
-		if (!(regs->msr & MSR_FP))
-			goto fpunavail;
 		op->type = MKOP(STORE_FP, u, 4);
 		op->ea = dform_ea(instr, regs);
 		break;

 	case 54:	/* stfd */
 	case 55:	/* stfdu */
-		if (!(regs->msr & MSR_FP))
-			goto fpunavail;
 		op->type = MKOP(STORE_FP, u, 8);
 		op->ea = dform_ea(instr, regs);
 		break;
@@ -1812,24 +1788,6 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
 		op->type = INTERRUPT | 0x700;
 		op->val = SRR1_PROGTRAP;
 		return 0;
-
-#ifdef CONFIG_PPC_FPU
- fpunavail:
-	op->type = INTERRUPT | 0x800;
-	return 0;
-#endif
-
-#ifdef CONFIG_ALTIVEC
- vecunavail:
-	op->type = INTERRUPT | 0xf20;
-	return 0;
-#endif
-
-#ifdef CONFIG_VSX
- vsxunavail:
-	op->type = INTERRUPT | 0xf40;
-	return 0;
-#endif
 }
 EXPORT_SYMBOL_GPL(analyse_instr);
 NOKPROBE_SYMBOL(analyse_instr);
@@ -2087,6 +2045,8 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
 #ifdef CONFIG_PPC_FPU
 	case LOAD_FP:
+		if (!(regs->msr & MSR_FP))
+			return 0;
 		if (size == 4)
 			err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
 		else
@@ -2095,11 +2055,15 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
 #endif
 #ifdef CONFIG_ALTIVEC
 	case LOAD_VMX:
+		if (!(regs->msr & MSR_VEC))
+			return 0;
 		err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
 		goto ldst_done;
 #endif
 #ifdef CONFIG_VSX
 	case LOAD_VSX:
+		if (!(regs->msr & MSR_VSX))
+			return 0;
 		err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
 		goto ldst_done;
 #endif
@@ -2134,6 +2098,8 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
 #ifdef CONFIG_PPC_FPU
 	case STORE_FP:
+		if (!(regs->msr & MSR_FP))
+			return 0;
 		if (size == 4)
 			err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
 		else
@@ -2142,11 +2108,15 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
 #endif
 #ifdef CONFIG_ALTIVEC
 	case STORE_VMX:
+		if (!(regs->msr & MSR_VEC))
+			return 0;
 		err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
 		goto ldst_done;
 #endif
 #ifdef CONFIG_VSX
 	case STORE_VSX:
+		if (!(regs->msr & MSR_VSX))
+			return 0;
 		err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
 		goto ldst_done;
 #endif
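With the fpunavail/vecunavail/vsxunavail exits gone, analyse_instr() no longer synthesizes the 0x800/0xf20/0xf40 interrupts itself. A caller that wants the old behaviour can rebuild the check from the decoded op; in this sketch msr_bits_for() is a hypothetical helper, while the type codes and masks come from asm/sstep.h and asm/reg.h:

	#include <asm/reg.h>	/* MSR_FP, MSR_VEC, MSR_VSX */
	#include <asm/sstep.h>	/* LOAD_FP ... STORE_VSX, INSTR_TYPE_MASK */

	/* Hypothetical helper: which MSR enable bit the decoded op needs. */
	static unsigned long msr_bits_for(const struct instruction_op *op)
	{
		switch (op->type & INSTR_TYPE_MASK) {
		case LOAD_FP:
		case STORE_FP:
			return MSR_FP;	/* otherwise interrupt vector 0x800 */
		case LOAD_VMX:
		case STORE_VMX:
			return MSR_VEC;	/* otherwise 0xf20 */
		case LOAD_VSX:
		case STORE_VSX:
			return MSR_VSX;	/* otherwise 0xf40 */
		default:
			return 0;	/* no facility bit required */
		}
	}

Testing regs->msr against msr_bits_for(&op) before acting on the decode restores the pre-patch semantics without re-running analyse_instr().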