/*
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
9 void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen)
11 CPUX86State *env = &cpu->env;
12 const ExtSaveArea *e, *f;
15 X86LegacyXSaveArea *legacy;
16 X86XSaveHeader *header;
17 uint16_t cwd, swd, twd;
19 memset(buf, 0, buflen);
21 e = &x86_ext_save_areas[XSTATE_FP_BIT];
23 legacy = buf + e->offset;
24 header = buf + e->offset + sizeof(*legacy);
27 swd = env->fpus & ~(7 << 11);
28 swd |= (env->fpstt & 7) << 11;
30 for (i = 0; i < 8; ++i) {
31 twd |= (!env->fptags[i]) << i;
36 legacy->fpop = env->fpop;
37 legacy->fpip = env->fpip;
38 legacy->fpdp = env->fpdp;
39 memcpy(&legacy->fpregs, env->fpregs,
41 legacy->mxcsr = env->mxcsr;
43 for (i = 0; i < CPU_NB_REGS; i++) {
44 uint8_t *xmm = legacy->xmm_regs[i];
46 stq_p(xmm, env->xmm_regs[i].ZMM_Q(0));
47 stq_p(xmm + 8, env->xmm_regs[i].ZMM_Q(1));
50 header->xstate_bv = env->xstate_bv;
52 e = &x86_ext_save_areas[XSTATE_YMM_BIT];
53 if (e->size && e->offset) {
56 avx = buf + e->offset;
58 for (i = 0; i < CPU_NB_REGS; i++) {
59 uint8_t *ymmh = avx->ymmh[i];
61 stq_p(ymmh, env->xmm_regs[i].ZMM_Q(2));
62 stq_p(ymmh + 8, env->xmm_regs[i].ZMM_Q(3));
66 e = &x86_ext_save_areas[XSTATE_BNDREGS_BIT];
67 if (e->size && e->offset) {
71 f = &x86_ext_save_areas[XSTATE_BNDCSR_BIT];
75 bndreg = buf + e->offset;
76 bndcsr = buf + f->offset;
78 memcpy(&bndreg->bnd_regs, env->bnd_regs,
79 sizeof(env->bnd_regs));
80 bndcsr->bndcsr = env->bndcs_regs;
83 e = &x86_ext_save_areas[XSTATE_OPMASK_BIT];
84 if (e->size && e->offset) {
86 XSaveZMM_Hi256 *zmm_hi256;
88 XSaveHi16_ZMM *hi16_zmm;
91 f = &x86_ext_save_areas[XSTATE_ZMM_Hi256_BIT];
95 opmask = buf + e->offset;
96 zmm_hi256 = buf + f->offset;
98 memcpy(&opmask->opmask_regs, env->opmask_regs,
99 sizeof(env->opmask_regs));
101 for (i = 0; i < CPU_NB_REGS; i++) {
102 uint8_t *zmmh = zmm_hi256->zmm_hi256[i];
104 stq_p(zmmh, env->xmm_regs[i].ZMM_Q(4));
105 stq_p(zmmh + 8, env->xmm_regs[i].ZMM_Q(5));
106 stq_p(zmmh + 16, env->xmm_regs[i].ZMM_Q(6));
107 stq_p(zmmh + 24, env->xmm_regs[i].ZMM_Q(7));
111 f = &x86_ext_save_areas[XSTATE_Hi16_ZMM_BIT];
115 hi16_zmm = buf + f->offset;
117 memcpy(&hi16_zmm->hi16_zmm, &env->xmm_regs[16],
118 16 * sizeof(env->xmm_regs[16]));
123 e = &x86_ext_save_areas[XSTATE_PKRU_BIT];
124 if (e->size && e->offset) {
125 XSavePKRU *pkru = buf + e->offset;
127 memcpy(pkru, &env->pkru, sizeof(env->pkru));
132 void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen)
134 CPUX86State *env = &cpu->env;
135 const ExtSaveArea *e, *f, *g;
138 const X86LegacyXSaveArea *legacy;
139 const X86XSaveHeader *header;
140 uint16_t cwd, swd, twd;
142 e = &x86_ext_save_areas[XSTATE_FP_BIT];
144 legacy = buf + e->offset;
145 header = buf + e->offset + sizeof(*legacy);
150 env->fpop = legacy->fpop;
151 env->fpstt = (swd >> 11) & 7;
154 for (i = 0; i < 8; ++i) {
155 env->fptags[i] = !((twd >> i) & 1);
157 env->fpip = legacy->fpip;
158 env->fpdp = legacy->fpdp;
159 env->mxcsr = legacy->mxcsr;
160 memcpy(env->fpregs, &legacy->fpregs,
161 sizeof(env->fpregs));
163 for (i = 0; i < CPU_NB_REGS; i++) {
164 const uint8_t *xmm = legacy->xmm_regs[i];
166 env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
167 env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm + 8);
170 env->xstate_bv = header->xstate_bv;
172 e = &x86_ext_save_areas[XSTATE_YMM_BIT];
173 if (e->size && e->offset) {
176 avx = buf + e->offset;
177 for (i = 0; i < CPU_NB_REGS; i++) {
178 const uint8_t *ymmh = avx->ymmh[i];
180 env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
181 env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh + 8);
185 e = &x86_ext_save_areas[XSTATE_BNDREGS_BIT];
186 if (e->size && e->offset) {
187 const XSaveBNDREG *bndreg;
188 const XSaveBNDCSR *bndcsr;
190 f = &x86_ext_save_areas[XSTATE_BNDCSR_BIT];
194 bndreg = buf + e->offset;
195 bndcsr = buf + f->offset;
197 memcpy(env->bnd_regs, &bndreg->bnd_regs,
198 sizeof(env->bnd_regs));
199 env->bndcs_regs = bndcsr->bndcsr;
202 e = &x86_ext_save_areas[XSTATE_OPMASK_BIT];
203 if (e->size && e->offset) {
204 const XSaveOpmask *opmask;
205 const XSaveZMM_Hi256 *zmm_hi256;
207 const XSaveHi16_ZMM *hi16_zmm;
210 f = &x86_ext_save_areas[XSTATE_ZMM_Hi256_BIT];
214 g = &x86_ext_save_areas[XSTATE_Hi16_ZMM_BIT];
218 opmask = buf + e->offset;
219 zmm_hi256 = buf + f->offset;
221 hi16_zmm = buf + g->offset;
224 memcpy(env->opmask_regs, &opmask->opmask_regs,
225 sizeof(env->opmask_regs));
227 for (i = 0; i < CPU_NB_REGS; i++) {
228 const uint8_t *zmmh = zmm_hi256->zmm_hi256[i];
230 env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
231 env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh + 8);
232 env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh + 16);
233 env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh + 24);
237 memcpy(&env->xmm_regs[16], &hi16_zmm->hi16_zmm,
238 16 * sizeof(env->xmm_regs[16]));
243 e = &x86_ext_save_areas[XSTATE_PKRU_BIT];
244 if (e->size && e->offset) {
245 const XSavePKRU *pkru;
247 pkru = buf + e->offset;
248 memcpy(&env->pkru, pkru, sizeof(env->pkru));