提交 19cbd87c 编写于 作者: E Eduardo Habkost

target-i386: Rename XMM_[BWLSDQ] helpers to ZMM_*

They are helpers for the ZMMReg fields, so name them accordingly.

This is just a global search+replace, no other changes are being
introduced.
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
上级 fa451874
...@@ -753,24 +753,24 @@ typedef struct BNDCSReg { ...@@ -753,24 +753,24 @@ typedef struct BNDCSReg {
} BNDCSReg; } BNDCSReg;
#ifdef HOST_WORDS_BIGENDIAN #ifdef HOST_WORDS_BIGENDIAN
#define XMM_B(n) _b[63 - (n)] #define ZMM_B(n) _b[63 - (n)]
#define XMM_W(n) _w[31 - (n)] #define ZMM_W(n) _w[31 - (n)]
#define XMM_L(n) _l[15 - (n)] #define ZMM_L(n) _l[15 - (n)]
#define XMM_S(n) _s[15 - (n)] #define ZMM_S(n) _s[15 - (n)]
#define XMM_Q(n) _q[7 - (n)] #define ZMM_Q(n) _q[7 - (n)]
#define XMM_D(n) _d[7 - (n)] #define ZMM_D(n) _d[7 - (n)]
#define MMX_B(n) _b[7 - (n)] #define MMX_B(n) _b[7 - (n)]
#define MMX_W(n) _w[3 - (n)] #define MMX_W(n) _w[3 - (n)]
#define MMX_L(n) _l[1 - (n)] #define MMX_L(n) _l[1 - (n)]
#define MMX_S(n) _s[1 - (n)] #define MMX_S(n) _s[1 - (n)]
#else #else
#define XMM_B(n) _b[n] #define ZMM_B(n) _b[n]
#define XMM_W(n) _w[n] #define ZMM_W(n) _w[n]
#define XMM_L(n) _l[n] #define ZMM_L(n) _l[n]
#define XMM_S(n) _s[n] #define ZMM_S(n) _s[n]
#define XMM_Q(n) _q[n] #define ZMM_Q(n) _q[n]
#define XMM_D(n) _d[n] #define ZMM_D(n) _d[n]
#define MMX_B(n) _b[n] #define MMX_B(n) _b[n]
#define MMX_W(n) _w[n] #define MMX_W(n) _w[n]
......
...@@ -1169,8 +1169,8 @@ static void do_fxsave(CPUX86State *env, target_ulong ptr, int data64, ...@@ -1169,8 +1169,8 @@ static void do_fxsave(CPUX86State *env, target_ulong ptr, int data64,
|| (env->hflags & HF_CPL_MASK) || (env->hflags & HF_CPL_MASK)
|| !(env->hflags & HF_LMA_MASK)) { || !(env->hflags & HF_LMA_MASK)) {
for (i = 0; i < nb_xmm_regs; i++) { for (i = 0; i < nb_xmm_regs; i++) {
cpu_stq_data_ra(env, addr, env->xmm_regs[i].XMM_Q(0), retaddr); cpu_stq_data_ra(env, addr, env->xmm_regs[i].ZMM_Q(0), retaddr);
cpu_stq_data_ra(env, addr + 8, env->xmm_regs[i].XMM_Q(1), retaddr); cpu_stq_data_ra(env, addr + 8, env->xmm_regs[i].ZMM_Q(1), retaddr);
addr += 16; addr += 16;
} }
} }
...@@ -1226,8 +1226,8 @@ static void do_fxrstor(CPUX86State *env, target_ulong ptr, int data64, ...@@ -1226,8 +1226,8 @@ static void do_fxrstor(CPUX86State *env, target_ulong ptr, int data64,
|| (env->hflags & HF_CPL_MASK) || (env->hflags & HF_CPL_MASK)
|| !(env->hflags & HF_LMA_MASK)) { || !(env->hflags & HF_LMA_MASK)) {
for (i = 0; i < nb_xmm_regs; i++) { for (i = 0; i < nb_xmm_regs; i++) {
env->xmm_regs[i].XMM_Q(0) = cpu_ldq_data_ra(env, addr, retaddr); env->xmm_regs[i].ZMM_Q(0) = cpu_ldq_data_ra(env, addr, retaddr);
env->xmm_regs[i].XMM_Q(1) = cpu_ldq_data_ra(env, addr + 8, retaddr); env->xmm_regs[i].ZMM_Q(1) = cpu_ldq_data_ra(env, addr + 8, retaddr);
addr += 16; addr += 16;
} }
} }
......
...@@ -61,8 +61,8 @@ int x86_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n) ...@@ -61,8 +61,8 @@ int x86_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
n -= IDX_XMM_REGS; n -= IDX_XMM_REGS;
if (n < CPU_NB_REGS32 || if (n < CPU_NB_REGS32 ||
(TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) { (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0)); stq_p(mem_buf, env->xmm_regs[n].ZMM_Q(0));
stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1)); stq_p(mem_buf + 8, env->xmm_regs[n].ZMM_Q(1));
return 16; return 16;
} }
} else { } else {
...@@ -170,8 +170,8 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) ...@@ -170,8 +170,8 @@ int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
n -= IDX_XMM_REGS; n -= IDX_XMM_REGS;
if (n < CPU_NB_REGS32 || if (n < CPU_NB_REGS32 ||
(TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) { (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf); env->xmm_regs[n].ZMM_Q(0) = ldq_p(mem_buf);
env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8); env->xmm_regs[n].ZMM_Q(1) = ldq_p(mem_buf + 8);
return 16; return 16;
} }
} else { } else {
......
...@@ -535,10 +535,10 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, ...@@ -535,10 +535,10 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
for(i=0;i<nb;i++) { for(i=0;i<nb;i++) {
cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x", cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
i, i,
env->xmm_regs[i].XMM_L(3), env->xmm_regs[i].ZMM_L(3),
env->xmm_regs[i].XMM_L(2), env->xmm_regs[i].ZMM_L(2),
env->xmm_regs[i].XMM_L(1), env->xmm_regs[i].ZMM_L(1),
env->xmm_regs[i].XMM_L(0)); env->xmm_regs[i].ZMM_L(0));
if ((i & 1) == 1) if ((i & 1) == 1)
cpu_fprintf(f, "\n"); cpu_fprintf(f, "\n");
else else
......
...@@ -1237,8 +1237,8 @@ static int kvm_put_fpu(X86CPU *cpu) ...@@ -1237,8 +1237,8 @@ static int kvm_put_fpu(X86CPU *cpu)
} }
memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs); memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
for (i = 0; i < CPU_NB_REGS; i++) { for (i = 0; i < CPU_NB_REGS; i++) {
stq_p(&fpu.xmm[i][0], env->xmm_regs[i].XMM_Q(0)); stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
stq_p(&fpu.xmm[i][8], env->xmm_regs[i].XMM_Q(1)); stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
} }
fpu.mxcsr = env->mxcsr; fpu.mxcsr = env->mxcsr;
...@@ -1299,14 +1299,14 @@ static int kvm_put_xsave(X86CPU *cpu) ...@@ -1299,14 +1299,14 @@ static int kvm_put_xsave(X86CPU *cpu)
ymmh = (uint8_t *)&xsave->region[XSAVE_YMMH_SPACE]; ymmh = (uint8_t *)&xsave->region[XSAVE_YMMH_SPACE];
zmmh = (uint8_t *)&xsave->region[XSAVE_ZMM_Hi256]; zmmh = (uint8_t *)&xsave->region[XSAVE_ZMM_Hi256];
for (i = 0; i < CPU_NB_REGS; i++, xmm += 16, ymmh += 16, zmmh += 32) { for (i = 0; i < CPU_NB_REGS; i++, xmm += 16, ymmh += 16, zmmh += 32) {
stq_p(xmm, env->xmm_regs[i].XMM_Q(0)); stq_p(xmm, env->xmm_regs[i].ZMM_Q(0));
stq_p(xmm+8, env->xmm_regs[i].XMM_Q(1)); stq_p(xmm+8, env->xmm_regs[i].ZMM_Q(1));
stq_p(ymmh, env->xmm_regs[i].XMM_Q(2)); stq_p(ymmh, env->xmm_regs[i].ZMM_Q(2));
stq_p(ymmh+8, env->xmm_regs[i].XMM_Q(3)); stq_p(ymmh+8, env->xmm_regs[i].ZMM_Q(3));
stq_p(zmmh, env->xmm_regs[i].XMM_Q(4)); stq_p(zmmh, env->xmm_regs[i].ZMM_Q(4));
stq_p(zmmh+8, env->xmm_regs[i].XMM_Q(5)); stq_p(zmmh+8, env->xmm_regs[i].ZMM_Q(5));
stq_p(zmmh+16, env->xmm_regs[i].XMM_Q(6)); stq_p(zmmh+16, env->xmm_regs[i].ZMM_Q(6));
stq_p(zmmh+24, env->xmm_regs[i].XMM_Q(7)); stq_p(zmmh+24, env->xmm_regs[i].ZMM_Q(7));
} }
#ifdef TARGET_X86_64 #ifdef TARGET_X86_64
...@@ -1665,8 +1665,8 @@ static int kvm_get_fpu(X86CPU *cpu) ...@@ -1665,8 +1665,8 @@ static int kvm_get_fpu(X86CPU *cpu)
} }
memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs); memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
for (i = 0; i < CPU_NB_REGS; i++) { for (i = 0; i < CPU_NB_REGS; i++) {
env->xmm_regs[i].XMM_Q(0) = ldq_p(&fpu.xmm[i][0]); env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
env->xmm_regs[i].XMM_Q(1) = ldq_p(&fpu.xmm[i][8]); env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
} }
env->mxcsr = fpu.mxcsr; env->mxcsr = fpu.mxcsr;
...@@ -1717,14 +1717,14 @@ static int kvm_get_xsave(X86CPU *cpu) ...@@ -1717,14 +1717,14 @@ static int kvm_get_xsave(X86CPU *cpu)
ymmh = (const uint8_t *)&xsave->region[XSAVE_YMMH_SPACE]; ymmh = (const uint8_t *)&xsave->region[XSAVE_YMMH_SPACE];
zmmh = (const uint8_t *)&xsave->region[XSAVE_ZMM_Hi256]; zmmh = (const uint8_t *)&xsave->region[XSAVE_ZMM_Hi256];
for (i = 0; i < CPU_NB_REGS; i++, xmm += 16, ymmh += 16, zmmh += 32) { for (i = 0; i < CPU_NB_REGS; i++, xmm += 16, ymmh += 16, zmmh += 32) {
env->xmm_regs[i].XMM_Q(0) = ldq_p(xmm); env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
env->xmm_regs[i].XMM_Q(1) = ldq_p(xmm+8); env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm+8);
env->xmm_regs[i].XMM_Q(2) = ldq_p(ymmh); env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
env->xmm_regs[i].XMM_Q(3) = ldq_p(ymmh+8); env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh+8);
env->xmm_regs[i].XMM_Q(4) = ldq_p(zmmh); env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
env->xmm_regs[i].XMM_Q(5) = ldq_p(zmmh+8); env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh+8);
env->xmm_regs[i].XMM_Q(6) = ldq_p(zmmh+16); env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh+16);
env->xmm_regs[i].XMM_Q(7) = ldq_p(zmmh+24); env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh+24);
} }
#ifdef TARGET_X86_64 #ifdef TARGET_X86_64
......
...@@ -36,8 +36,8 @@ static const VMStateDescription vmstate_xmm_reg = { ...@@ -36,8 +36,8 @@ static const VMStateDescription vmstate_xmm_reg = {
.version_id = 1, .version_id = 1,
.minimum_version_id = 1, .minimum_version_id = 1,
.fields = (VMStateField[]) { .fields = (VMStateField[]) {
VMSTATE_UINT64(XMM_Q(0), ZMMReg), VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
VMSTATE_UINT64(XMM_Q(1), ZMMReg), VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
VMSTATE_END_OF_LIST() VMSTATE_END_OF_LIST()
} }
}; };
...@@ -52,8 +52,8 @@ static const VMStateDescription vmstate_ymmh_reg = { ...@@ -52,8 +52,8 @@ static const VMStateDescription vmstate_ymmh_reg = {
.version_id = 1, .version_id = 1,
.minimum_version_id = 1, .minimum_version_id = 1,
.fields = (VMStateField[]) { .fields = (VMStateField[]) {
VMSTATE_UINT64(XMM_Q(2), ZMMReg), VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
VMSTATE_UINT64(XMM_Q(3), ZMMReg), VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
VMSTATE_END_OF_LIST() VMSTATE_END_OF_LIST()
} }
}; };
...@@ -67,10 +67,10 @@ static const VMStateDescription vmstate_zmmh_reg = { ...@@ -67,10 +67,10 @@ static const VMStateDescription vmstate_zmmh_reg = {
.version_id = 1, .version_id = 1,
.minimum_version_id = 1, .minimum_version_id = 1,
.fields = (VMStateField[]) { .fields = (VMStateField[]) {
VMSTATE_UINT64(XMM_Q(4), ZMMReg), VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
VMSTATE_UINT64(XMM_Q(5), ZMMReg), VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
VMSTATE_UINT64(XMM_Q(6), ZMMReg), VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
VMSTATE_UINT64(XMM_Q(7), ZMMReg), VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
VMSTATE_END_OF_LIST() VMSTATE_END_OF_LIST()
} }
}; };
...@@ -85,14 +85,14 @@ static const VMStateDescription vmstate_hi16_zmm_reg = { ...@@ -85,14 +85,14 @@ static const VMStateDescription vmstate_hi16_zmm_reg = {
.version_id = 1, .version_id = 1,
.minimum_version_id = 1, .minimum_version_id = 1,
.fields = (VMStateField[]) { .fields = (VMStateField[]) {
VMSTATE_UINT64(XMM_Q(0), ZMMReg), VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
VMSTATE_UINT64(XMM_Q(1), ZMMReg), VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
VMSTATE_UINT64(XMM_Q(2), ZMMReg), VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
VMSTATE_UINT64(XMM_Q(3), ZMMReg), VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
VMSTATE_UINT64(XMM_Q(4), ZMMReg), VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
VMSTATE_UINT64(XMM_Q(5), ZMMReg), VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
VMSTATE_UINT64(XMM_Q(6), ZMMReg), VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
VMSTATE_UINT64(XMM_Q(7), ZMMReg), VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
VMSTATE_END_OF_LIST() VMSTATE_END_OF_LIST()
} }
}; };
...@@ -787,7 +787,7 @@ static bool avx512_needed(void *opaque) ...@@ -787,7 +787,7 @@ static bool avx512_needed(void *opaque)
} }
for (i = 0; i < CPU_NB_REGS; i++) { for (i = 0; i < CPU_NB_REGS; i++) {
#define ENV_XMM(reg, field) (env->xmm_regs[reg].XMM_Q(field)) #define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field))
if (ENV_XMM(i, 4) || ENV_XMM(i, 6) || if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
ENV_XMM(i, 5) || ENV_XMM(i, 7)) { ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
return true; return true;
......
此差异已折叠。
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册