Lines matching refs:rsp — references to the %rsp stack pointer in glibc's x86-64 lazy-binding PLT trampolines (_dl_runtime_resolve and _dl_runtime_profile); the number prefixed to each match is its line number in the source file.

54 #  define BASE			rsp
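BASE names the register through which the save/restore template addresses the register-save area; it is %rsp in the variant that reuses the incoming stack directly, while the stack-realigning variant of _dl_runtime_resolve appears to use a different base register (whose lines naturally do not show up in an rsp search).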
93 movq %rax, REGISTER_SAVE_RAX(%rsp)
94 movq %rcx, REGISTER_SAVE_RCX(%rsp)
95 movq %rdx, REGISTER_SAVE_RDX(%rsp)
96 movq %rsi, REGISTER_SAVE_RSI(%rsp)
97 movq %rdi, REGISTER_SAVE_RDI(%rsp)
98 movq %r8, REGISTER_SAVE_R8(%rsp)
99 movq %r9, REGISTER_SAVE_R9(%rsp)
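Lines 93-99 preserve every integer register the System V AMD64 ABI uses for argument passing (%rdi, %rsi, %rdx, %rcx, %r8, %r9) plus %rax, which carries the count of vector-register arguments for variadic callees. _dl_runtime_resolve runs between caller and callee on the first call through a PLT slot, so clobbering any of these would corrupt the interrupted call. A minimal C illustration of why %rax matters (the choice of printf is just an example):

    #include <stdio.h>

    int
    main (void)
    {
      /* Under lazy binding, the first printf call enters the PLT and
         bounces through _dl_runtime_resolve.  The ABI passes the
         number of vector-register arguments to a variadic function
         in %al, so %rax is live across the resolver - hence the
         REGISTER_SAVE_RAX store above.  */
      printf ("%f\n", 1.0);        /* compiled with %al = 1 */
      return 0;
    }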
101 fxsave STATE_SAVE_OFFSET(%rsp)
107 movq %rdx, (STATE_SAVE_OFFSET + 512)(%rsp)
108 movq %rdx, (STATE_SAVE_OFFSET + 512 + 8)(%rsp)
110 movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 2)(%rsp)
111 movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 3)(%rsp)
112 movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 4)(%rsp)
113 movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 5)(%rsp)
114 movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 6)(%rsp)
115 movq %rdx, (STATE_SAVE_OFFSET + 512 + 8 * 7)(%rsp)
117 xsave STATE_SAVE_OFFSET(%rsp)
119 xsavec STATE_SAVE_OFFSET(%rsp)
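Lines 101-119 save the SIMD/FPU state with whichever of fxsave, xsave, or xsavec was selected at startup. The eight movq stores on lines 107-115 spill a zeroed %rdx over the 64 bytes immediately following the 512-byte legacy FXSAVE region, i.e. the XSAVE header, which must be zero-initialized before xsave/xsavec can be used on a fresh buffer (%rdx also supplies the high half of the EDX:EAX save mask). The same clearing in C, as a sketch (the function name is illustrative):

    #include <string.h>

    #define XSAVE_LEGACY_BYTES 512   /* FXSAVE-compatible legacy region */
    #define XSAVE_HEADER_BYTES 64    /* XSTATE_BV, XCOMP_BV, reserved */

    /* Equivalent of the eight quadword stores at STATE_SAVE_OFFSET +
       512 .. + 512 + 8*7: give xsave/xsavec a well-defined header.  */
    static void
    clear_xsave_header (unsigned char *state_save_area)
    {
      memset (state_save_area + XSAVE_LEGACY_BYTES, 0, XSAVE_HEADER_BYTES);
    }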
130 fxrstor STATE_SAVE_OFFSET(%rsp)
134 xrstor STATE_SAVE_OFFSET(%rsp)
136 movq REGISTER_SAVE_R9(%rsp), %r9
137 movq REGISTER_SAVE_R8(%rsp), %r8
138 movq REGISTER_SAVE_RDI(%rsp), %rdi
139 movq REGISTER_SAVE_RSI(%rsp), %rsi
140 movq REGISTER_SAVE_RDX(%rsp), %rdx
141 movq REGISTER_SAVE_RCX(%rsp), %rcx
142 movq REGISTER_SAVE_RAX(%rsp), %rax
145 cfi_def_cfa_register(%rsp)
146 movq (%rsp), %rbx
179 movq %rbx, (%rsp)
195 movq %rax, 8(%rsp)
205 movq %rsp, 24(%rbx)
208 movq %rdx, LR_RDX_OFFSET(%rsp)
209 movq %r8, LR_R8_OFFSET(%rsp)
210 movq %r9, LR_R9_OFFSET(%rsp)
211 movq %rcx, LR_RCX_OFFSET(%rsp)
212 movq %rsi, LR_RSI_OFFSET(%rsp)
213 movq %rdi, LR_RDI_OFFSET(%rsp)
214 movq %rbp, LR_RBP_OFFSET(%rsp)
217 movq %rax, LR_RSP_OFFSET(%rsp)
222 movaps %xmm0, (LR_XMM_OFFSET)(%rsp)
223 movaps %xmm1, (LR_XMM_OFFSET + XMM_SIZE)(%rsp)
224 movaps %xmm2, (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp)
225 movaps %xmm3, (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp)
226 movaps %xmm4, (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp)
227 movaps %xmm5, (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp)
228 movaps %xmm6, (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp)
229 movaps %xmm7, (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp)
233 VMOVA %VEC(0), (LR_VECTOR_OFFSET)(%rsp)
234 VMOVA %VEC(1), (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp)
235 VMOVA %VEC(2), (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
236 VMOVA %VEC(3), (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
237 VMOVA %VEC(4), (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
238 VMOVA %VEC(5), (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
239 VMOVA %VEC(6), (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp)
240 VMOVA %VEC(7), (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp)
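In _dl_runtime_profile (lines 208 onward) the LR_* offsets lay the saved state out as a La_x86_64_regs, the structure handed to an LD_AUDIT module's la_x86_64_gnu_pltenter hook. Note the save order, rdx, r8, r9, rcx, rsi, rdi, rbp, rsp, then the xmm bank and the full-width vector bank; it matches the member order in <bits/link.h>, abridged below (the real La_x86_64_xmm is a 16-byte vector type, and lr_vector is ymm- or zmm-sized depending on the build):

    #include <stdint.h>

    typedef struct { float vec[4]; } La_x86_64_xmm;   /* 16-byte stand-in */

    /* Abridged from glibc's <bits/link.h>.  */
    typedef struct La_x86_64_regs
    {
      uint64_t lr_rdx;              /* LR_RDX_OFFSET */
      uint64_t lr_r8;
      uint64_t lr_r9;
      uint64_t lr_rcx;
      uint64_t lr_rsi;
      uint64_t lr_rdi;
      uint64_t lr_rbp;
      uint64_t lr_rsp;              /* caller's stack pointer */
      La_x86_64_xmm lr_xmm[8];      /* LR_XMM_OFFSET + i*XMM_SIZE */
      /* lr_vector[8] follows at LR_VECTOR_OFFSET, full vector width.  */
    } La_x86_64_regs;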
244 vmovdqa %xmm0, (LR_SIZE)(%rsp)
245 vmovdqa %xmm1, (LR_SIZE + XMM_SIZE)(%rsp)
246 vmovdqa %xmm2, (LR_SIZE + XMM_SIZE*2)(%rsp)
247 vmovdqa %xmm3, (LR_SIZE + XMM_SIZE*3)(%rsp)
248 vmovdqa %xmm4, (LR_SIZE + XMM_SIZE*4)(%rsp)
249 vmovdqa %xmm5, (LR_SIZE + XMM_SIZE*5)(%rsp)
250 vmovdqa %xmm6, (LR_SIZE + XMM_SIZE*6)(%rsp)
251 vmovdqa %xmm7, (LR_SIZE + XMM_SIZE*7)(%rsp)
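Lines 244-251 additionally park an untouched shadow copy of xmm0-xmm7 just past the end of that structure (at offset LR_SIZE). The audit hook is only shown the LR_XMM copies; comparing those against this shadow afterwards reveals whether the hook rewrote an argument register, as the sketch after line 350 shows.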
264 movq LR_RDX_OFFSET(%rsp), %rdx
265 movq LR_R8_OFFSET(%rsp), %r8
266 movq LR_R9_OFFSET(%rsp), %r9
268 movaps (LR_XMM_OFFSET)(%rsp), %xmm0
269 movaps (LR_XMM_OFFSET + XMM_SIZE)(%rsp), %xmm1
270 movaps (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp), %xmm2
271 movaps (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp), %xmm3
272 movaps (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp), %xmm4
273 movaps (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp), %xmm5
274 movaps (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp), %xmm6
275 movaps (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp), %xmm7
280 vpcmpeqq (LR_SIZE)(%rsp), %xmm0, %xmm8
284 vmovdqa %xmm0, (LR_VECTOR_OFFSET)(%rsp)
286 2: VMOVA (LR_VECTOR_OFFSET)(%rsp), %VEC(0)
287 vmovdqa %xmm0, (LR_XMM_OFFSET)(%rsp)
289 1: vpcmpeqq (LR_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm8
293 vmovdqa %xmm1, (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp)
295 2: VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE)(%rsp), %VEC(1)
296 vmovdqa %xmm1, (LR_XMM_OFFSET + XMM_SIZE)(%rsp)
298 1: vpcmpeqq (LR_SIZE + XMM_SIZE*2)(%rsp), %xmm2, %xmm8
302 vmovdqa %xmm2, (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp)
304 2: VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*2)(%rsp), %VEC(2)
305 vmovdqa %xmm2, (LR_XMM_OFFSET + XMM_SIZE*2)(%rsp)
307 1: vpcmpeqq (LR_SIZE + XMM_SIZE*3)(%rsp), %xmm3, %xmm8
311 vmovdqa %xmm3, (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp)
313 2: VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*3)(%rsp), %VEC(3)
314 vmovdqa %xmm3, (LR_XMM_OFFSET + XMM_SIZE*3)(%rsp)
316 1: vpcmpeqq (LR_SIZE + XMM_SIZE*4)(%rsp), %xmm4, %xmm8
320 vmovdqa %xmm4, (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp)
322 2: VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*4)(%rsp), %VEC(4)
323 vmovdqa %xmm4, (LR_XMM_OFFSET + XMM_SIZE*4)(%rsp)
325 1: vpcmpeqq (LR_SIZE + XMM_SIZE*5)(%rsp), %xmm5, %xmm8
329 vmovdqa %xmm5, (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp)
331 2: VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*5)(%rsp), %VEC(5)
332 vmovdqa %xmm5, (LR_XMM_OFFSET + XMM_SIZE*5)(%rsp)
334 1: vpcmpeqq (LR_SIZE + XMM_SIZE*6)(%rsp), %xmm6, %xmm8
338 vmovdqa %xmm6, (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp)
340 2: VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*6)(%rsp), %VEC(6)
341 vmovdqa %xmm6, (LR_XMM_OFFSET + XMM_SIZE*6)(%rsp)
343 1: vpcmpeqq (LR_SIZE + XMM_SIZE*7)(%rsp), %xmm7, %xmm8
347 vmovdqa %xmm7, (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp)
349 2: VMOVA (LR_VECTOR_OFFSET + VECTOR_SIZE*7)(%rsp), %VEC(7)
350 vmovdqa %xmm7, (LR_XMM_OFFSET + XMM_SIZE*7)(%rsp)
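Each vpcmpeqq/VMOVA/vmovdqa triplet on lines 280-350 then merges, per register, the hook's view with the saved state: vpcmpeqq compares the 128-bit value coming back from the hook against the shadow copy at LR_SIZE. If they are equal, the hook left the register alone and the full-width vector saved at LR_VECTOR_OFFSET is reloaded (the "2:" branches); if they differ, the hook's modified xmm value wins and is written into the vector slot. A C sketch of that decision (types and names are illustrative, and only the low 128-bit lane is modeled):

    #include <string.h>

    typedef struct { unsigned char b[16]; } xmm_reg;   /* 128-bit lane */
    typedef struct { unsigned char b[64]; } vec_reg;   /* full-width slot */

    /* seen:   lr_xmm[i] as possibly rewritten by the pltenter hook
       shadow: pre-hook copy kept at LR_SIZE + i*XMM_SIZE
       full:   pre-hook full-width register from LR_VECTOR_OFFSET  */
    static vec_reg
    merge_vector_arg (const xmm_reg *seen, const xmm_reg *shadow,
                      vec_reg full)
    {
      if (memcmp (seen, shadow, sizeof *seen) != 0)
        /* Hook changed the register: keep its 128-bit value (the
           vmovdqa store to LR_VECTOR_OFFSET in the fall-through).  */
        memcpy (full.b, seen->b, sizeof *seen);
      /* Otherwise the wide save is still authoritative (the VMOVA
         reload at the "2:" labels).  */
      return full;
    }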
363 movq LR_RCX_OFFSET(%rsp), %rcx
364 movq LR_RSI_OFFSET(%rsp), %rsi
365 movq LR_RDI_OFFSET(%rsp), %rdi
368 movq (%rsp), %rbx
370 cfi_def_cfa_register(%rsp)
454 movq LRV_RAX_OFFSET(%rsp), %rax
455 movq LRV_RDX_OFFSET(%rsp), %rdx
457 movaps LRV_XMM0_OFFSET(%rsp), %xmm0
458 movaps LRV_XMM1_OFFSET(%rsp), %xmm1
462 vpcmpeqq (LRV_SIZE)(%rsp), %xmm0, %xmm2
466 VMOVA LRV_VECTOR0_OFFSET(%rsp), %VEC(0)
468 1: vpcmpeqq (LRV_SIZE + XMM_SIZE)(%rsp), %xmm1, %xmm2
472 VMOVA LRV_VECTOR1_OFFSET(%rsp), %VEC(1)
477 fldt LRV_ST1_OFFSET(%rsp)
478 fldt LRV_ST0_OFFSET(%rsp)
481 movq (%rsp), %rbx
483 cfi_def_cfa_register(%rsp)
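The LRV_* block (lines 454-478) is the return-value counterpart, executed around the la_x86_64_gnu_pltexit hook: the same compare-against-shadow trick on lines 462-472 chooses between the hook's xmm0/xmm1 and the saved full-width vector0/vector1, and the two fldt loads push st1 before st0 so that st0 ends up on top of the x87 stack. The offsets mirror La_x86_64_retval, abridged from <bits/link.h>:

    #include <stdint.h>

    typedef struct { float vec[4]; } La_x86_64_xmm;   /* 16-byte stand-in */

    /* What la_x86_64_gnu_pltexit receives and lines 454-478 restore.  */
    typedef struct La_x86_64_retval
    {
      uint64_t lrv_rax;            /* LRV_RAX_OFFSET */
      uint64_t lrv_rdx;            /* LRV_RDX_OFFSET */
      La_x86_64_xmm lrv_xmm0;      /* LRV_XMM0_OFFSET */
      La_x86_64_xmm lrv_xmm1;
      long double lrv_st0;         /* LRV_ST0_OFFSET, 80-bit x87 */
      long double lrv_st1;
      /* lrv_vector0/lrv_vector1 follow at LRV_VECTOR0_OFFSET.  */
    } La_x86_64_retval;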