Lines Matching refs:r8
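/* Forward copy: round the destination pointer (%rdi) up to the next 64-byte boundary. */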
168 lea 64(%rdi), %r8
169 and $-64, %r8 /* r8 now aligned to the next 64-byte boundary */
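/* First aligned 64-byte block: unaligned loads through (%r8, %rsi) -- %rsi evidently holds the
   src - dst difference here, as %r8 does on the backward path at line 394 -- followed by aligned
   16-byte stores to the 64-byte-aligned destination, after which %r8 advances by 64. */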
172 movdqu (%r8, %rsi), %xmm4
173 movdqu 16(%r8, %rsi), %xmm5
174 movdqu 32(%r8, %rsi), %xmm6
175 movdqu 48(%r8, %rsi), %xmm7
181 movdqa %xmm4, (%r8)
182 movaps %xmm5, 16(%r8)
183 movaps %xmm6, 32(%r8)
184 movaps %xmm7, 48(%r8)
185 add $64, %r8
189 cmp %r8, %rbx
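/* Main forward loop: prefetch 128 bytes ahead of the source, copy 64 bytes per iteration
   (unaligned loads, aligned stores), advance %r8, and keep looping while the bound in %rbx
   (apparently the 64-byte-aligned end of the destination) is still ahead. */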
198 prefetcht0 128(%r8, %rsi)
200 movdqu (%r8, %rsi), %xmm0
201 movdqu 16(%r8, %rsi), %xmm1
202 movdqu 32(%r8, %rsi), %xmm2
203 movdqu 48(%r8, %rsi), %xmm3
204 movdqa %xmm0, (%r8)
205 movaps %xmm1, 16(%r8)
206 movaps %xmm2, 32(%r8)
207 movaps %xmm3, 48(%r8)
208 lea 64(%r8), %r8
209 cmp %r8, %rbx
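/* After the loop: %rdx evidently ends up holding the number of bytes still to copy, and %r9
   the source address corresponding to the destination cursor %r8. */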
214 sub %r8, %rdx
218 lea (%r8, %rsi), %r9
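/* Remainder of up to 64 bytes: each size class below stores one chunk at the start, (%r8), and a
   same-sized chunk at the end, -N(%r8, %rdx) -- byte, word, dword, qword and 16/32-byte variants.
   The two stores may overlap, which avoids a byte-by-byte tail loop. */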
239 movb %sil, -1(%r8,%rdx)
240 movb %bl, (%r8)
248 movdqu %xmm0, (%r8)
249 movdqu %xmm1, 16(%r8)
250 movdqu %xmm2, -32(%r8, %rdx)
251 movdqu %xmm3, -16(%r8, %rdx)
257 movdqu %xmm0, (%r8)
258 movdqu %xmm1, -16(%r8, %rdx)
264 movl %esi, (%r8)
265 movl %ebx, -4(%r8,%rdx)
271 mov %rsi, (%r8)
272 mov %rbx, -8(%r8, %rdx)
278 movw %si, -2(%r8,%rdx)
279 movw %bx, (%r8)
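/* Backward (overlapping) copy: %r8 is set to the src - dst difference, and the last 64 source
   bytes are loaded relative to %r9, which apparently holds the 64-byte-aligned end of the
   destination region. */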
393 mov %rsi, %r8
394 sub %rdi, %r8 /* r8 = src - dst, i.e. the pointer difference */
396 movdqu -16(%r9, %r8), %xmm4
397 movdqu -32(%r9, %r8), %xmm5
398 movdqu -48(%r9, %r8), %xmm6
399 movdqu -64(%r9, %r8), %xmm7
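/* Main backward loop body: prefetch 128 bytes below the cursor and load the next 64 source bytes;
   the matching aligned stores presumably address (%r9) offsets only, so they do not show up in
   this r8 listing. */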
423 prefetcht0 -128(%r9, %r8)
425 movdqu -64(%r9, %r8), %xmm0
426 movdqu -48(%r9, %r8), %xmm1
427 movdqu -32(%r9, %r8), %xmm2
428 movdqu -16(%r9, %r8), %xmm3
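/* Large-copy forward loop: same 64-byte structure, but with non-temporal stores (movntdq) that
   bypass the cache for copies too big to benefit from it; such stores are weakly ordered and are
   normally followed by an sfence, which is not among the matched lines. */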
485 movdqu (%r8, %rsi), %xmm0
486 movdqu 16(%r8, %rsi), %xmm1
487 movdqu 32(%r8, %rsi), %xmm2
488 movdqu 48(%r8, %rsi), %xmm3
489 movntdq %xmm0, (%r8)
490 movntdq %xmm1, 16(%r8)
491 movntdq %xmm2, 32(%r8)
492 movntdq %xmm3, 48(%r8)
493 lea 64(%r8), %r8
494 cmp %r8, %rbx
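/* Large-copy backward loop: the unaligned loads for the non-temporal variant of the backward
   loop; its movntdq stores presumably reference %r9 rather than %r8, like the aligned stores
   above. */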
502 movdqu -64(%r9, %r8), %xmm0
503 movdqu -48(%r9, %r8), %xmm1
504 movdqu -32(%r9, %r8), %xmm2
505 movdqu -16(%r9, %r8), %xmm3
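Read in order, the matched lines trace a forward path (align the destination, stream 64-byte blocks, finish with overlapping tail stores), a backward path for overlapping moves, and non-temporal variants for very large copies. The C below is a minimal sketch of the forward strategy only, assuming non-overlapping buffers and a length of at least 128 bytes; copy_chunk and copy_forward_sketch are illustrative names, not part of the routine, and the real code additionally orders its loads ahead of its stores so that overlapping regions still copy correctly.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Load up to 64 bytes into a temporary, then store them, mirroring the
   xmm load/store pairs in the assembly. */
static void copy_chunk(char *dst, const char *src, size_t n)
{
    char tmp[64];
    memcpy(tmp, src, n);
    memcpy(dst, tmp, n);
}

/* Forward-copy sketch: unaligned 64-byte head, 64-byte-aligned main loop,
   overlapping front/back stores for the tail. Assumes len >= 128 and
   non-overlapping buffers. */
static void copy_forward_sketch(char *dst, const char *src, size_t len)
{
    ptrdiff_t diff = src - dst;   /* the src - dst difference the asm keeps in a register */
    char *end = dst + len;

    copy_chunk(dst, src, 64);     /* unaligned head */

    /* lines 168-169: destination cursor rounded up to a 64-byte boundary */
    char *d = (char *)(((uintptr_t)dst + 64) & ~(uintptr_t)63);
    /* loop bound: the copy end rounded down to 64 bytes (apparently %rbx) */
    char *bound = (char *)((uintptr_t)end & ~(uintptr_t)63);

    /* lines 198-209: 64 bytes per iteration, source found via the difference */
    while (d < bound) {
        copy_chunk(d, d + diff, 64);
        d += 64;
    }

    /* lines 214-279: 0..63 bytes remain; pick a chunk size c with
       c <= rem <= 2*c, then store one chunk at the front and one at the
       back, letting them overlap instead of looping byte by byte */
    size_t rem = (size_t)(end - d);
    if (rem) {
        size_t c = 32;
        while (c > rem)
            c >>= 1;
        copy_chunk(d, d + diff, c);
        copy_chunk(end - c, end - c + diff, c);
    }
}

The overlapping front/back stores are what lines 239-279 implement per size class: they trade a few redundantly written bytes for a tail that needs no loop and only a short chain of size comparisons.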