| author | Rodrigo Vivi <[email protected]> | 2018-07-23 09:13:12 -0700 | 
|---|---|---|
| committer | Rodrigo Vivi <[email protected]> | 2018-07-23 09:13:12 -0700 | 
| commit | c74a7469f97c0f40b46e82ee979f9fb1bb6e847c (patch) | |
| tree | f2690a1a916b73ef94657fbf0e0141ae57701825 /arch/x86/lib/memcpy_64.S | |
| parent | 6f15a7de86c8cf2dc09fc9e6d07047efa40ef809 (diff) | |
| parent | 500775074f88d9cf5416bed2ca19592812d62c41 (diff) | |
Merge drm/drm-next into drm-intel-next-queued
We need a backmerge to get DP_DPCD_REV_14 before we push other
i915 changes to dinq that could break compilation.
Signed-off-by: Rodrigo Vivi <[email protected]>
Diffstat (limited to 'arch/x86/lib/memcpy_64.S')
| -rw-r--r-- | arch/x86/lib/memcpy_64.S | 112 | 
1 file changed, 54 insertions, 58 deletions
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 9a53a06e5a3e..298ef1479240 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -3,6 +3,7 @@
 #include <linux/linkage.h>
 #include <asm/errno.h>
 #include <asm/cpufeatures.h>
+#include <asm/mcsafe_test.h>
 #include <asm/alternative-asm.h>
 #include <asm/export.h>
 
@@ -183,12 +184,15 @@ ENTRY(memcpy_orig)
 ENDPROC(memcpy_orig)
 
 #ifndef CONFIG_UML
+
+MCSAFE_TEST_CTL
+
 /*
- * memcpy_mcsafe_unrolled - memory copy with machine check exception handling
+ * __memcpy_mcsafe - memory copy with machine check exception handling
  * Note that we only catch machine checks when reading the source addresses.
  * Writes to target are posted and don't generate machine checks.
  */
-ENTRY(memcpy_mcsafe_unrolled)
+ENTRY(__memcpy_mcsafe)
 	cmpl $8, %edx
 	/* Less than 8 bytes? Go to byte copy loop */
 	jb .L_no_whole_words
@@ -204,58 +208,33 @@ ENTRY(memcpy_mcsafe_unrolled)
 	subl $8, %ecx
 	negl %ecx
 	subl %ecx, %edx
-.L_copy_leading_bytes:
+.L_read_leading_bytes:
 	movb (%rsi), %al
+	MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes
+	MCSAFE_TEST_DST %rdi 1 .E_leading_bytes
+.L_write_leading_bytes:
 	movb %al, (%rdi)
 	incq %rsi
 	incq %rdi
 	decl %ecx
-	jnz .L_copy_leading_bytes
+	jnz .L_read_leading_bytes
 
 .L_8byte_aligned:
-	/* Figure out how many whole cache lines (64-bytes) to copy */
-	movl %edx, %ecx
-	andl $63, %edx
-	shrl $6, %ecx
-	jz .L_no_whole_cache_lines
-
-	/* Loop copying whole cache lines */
-.L_cache_w0: movq (%rsi), %r8
-.L_cache_w1: movq 1*8(%rsi), %r9
-.L_cache_w2: movq 2*8(%rsi), %r10
-.L_cache_w3: movq 3*8(%rsi), %r11
-	movq %r8, (%rdi)
-	movq %r9, 1*8(%rdi)
-	movq %r10, 2*8(%rdi)
-	movq %r11, 3*8(%rdi)
-.L_cache_w4: movq 4*8(%rsi), %r8
-.L_cache_w5: movq 5*8(%rsi), %r9
-.L_cache_w6: movq 6*8(%rsi), %r10
-.L_cache_w7: movq 7*8(%rsi), %r11
-	movq %r8, 4*8(%rdi)
-	movq %r9, 5*8(%rdi)
-	movq %r10, 6*8(%rdi)
-	movq %r11, 7*8(%rdi)
-	leaq 64(%rsi), %rsi
-	leaq 64(%rdi), %rdi
-	decl %ecx
-	jnz .L_cache_w0
-
-	/* Are there any trailing 8-byte words? */
-.L_no_whole_cache_lines:
 	movl %edx, %ecx
 	andl $7, %edx
 	shrl $3, %ecx
 	jz .L_no_whole_words
 
-	/* Copy trailing words */
-.L_copy_trailing_words:
+.L_read_words:
 	movq (%rsi), %r8
-	mov %r8, (%rdi)
-	leaq 8(%rsi), %rsi
-	leaq 8(%rdi), %rdi
+	MCSAFE_TEST_SRC %rsi 8 .E_read_words
+	MCSAFE_TEST_DST %rdi 8 .E_write_words
+.L_write_words:
+	movq %r8, (%rdi)
+	addq $8, %rsi
+	addq $8, %rdi
 	decl %ecx
-	jnz .L_copy_trailing_words
+	jnz .L_read_words
 
 	/* Any trailing bytes? */
 .L_no_whole_words:
@@ -264,38 +243,55 @@ ENTRY(memcpy_mcsafe_unrolled)
 
 	/* Copy trailing bytes */
 	movl %edx, %ecx
-.L_copy_trailing_bytes:
+.L_read_trailing_bytes:
 	movb (%rsi), %al
+	MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes
+	MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes
+.L_write_trailing_bytes:
 	movb %al, (%rdi)
 	incq %rsi
 	incq %rdi
 	decl %ecx
-	jnz .L_copy_trailing_bytes
+	jnz .L_read_trailing_bytes
 
 	/* Copy successful. Return zero */
 .L_done_memcpy_trap:
 	xorq %rax, %rax
 	ret
-ENDPROC(memcpy_mcsafe_unrolled)
-EXPORT_SYMBOL_GPL(memcpy_mcsafe_unrolled)
+ENDPROC(__memcpy_mcsafe)
+EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
 
 	.section .fixup, "ax"
-	/* Return -EFAULT for any failure */
-.L_memcpy_mcsafe_fail:
-	mov	$-EFAULT, %rax
+	/*
+	 * Return number of bytes not copied for any failure. Note that
+	 * there is no "tail" handling since the source buffer is 8-byte
+	 * aligned and poison is cacheline aligned.
+	 */
+.E_read_words:
+	shll	$3, %ecx
+.E_leading_bytes:
+	addl	%edx, %ecx
+.E_trailing_bytes:
+	mov	%ecx, %eax
 	ret
 
+	/*
+	 * For write fault handling, given the destination is unaligned,
+	 * we handle faults on multi-byte writes with a byte-by-byte
+	 * copy up to the write-protected page.
+	 */
+.E_write_words:
+	shll	$3, %ecx
+	addl	%edx, %ecx
+	movl	%ecx, %edx
+	jmp mcsafe_handle_tail
+
 	.previous
 
-	_ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail)
-	_ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail)
-	_ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail)
-	_ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail)
-	_ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
-	_ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail)
-	_ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
-	_ASM_EXTABLE_FAULT(.L_cache_w6, .L_memcpy_mcsafe_fail)
-	_ASM_EXTABLE_FAULT(.L_cache_w7, .L_memcpy_mcsafe_fail)
-	_ASM_EXTABLE_FAULT(.L_copy_trailing_words, .L_memcpy_mcsafe_fail)
-	_ASM_EXTABLE_FAULT(.L_copy_trailing_bytes, .L_memcpy_mcsafe_fail)
+	_ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
+	_ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
+	_ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
+	_ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
+	_ASM_EXTABLE(.L_write_words, .E_write_words)
+	_ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
 #endif
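The fixup comments above describe the new calling convention this diff brings in via drm-next: on a machine check on the source or a write fault on the destination, __memcpy_mcsafe now returns the number of bytes *not* copied instead of a bare -EFAULT. Below is a minimal sketch of how a caller might consume that value, assuming the memcpy_mcsafe() wrapper from <linux/string.h> that dispatches to __memcpy_mcsafe(); the helper name and the error policy are illustrative, not taken from this commit.

```c
/*
 * Hypothetical caller of the mcsafe copy: treat a non-zero return as a
 * short copy and decide locally whether to report partial progress or
 * fail the whole transfer.
 */
#include <linux/kernel.h>
#include <linux/string.h>	/* memcpy_mcsafe() wrapper around __memcpy_mcsafe() */
#include <linux/errno.h>

static int copy_from_pmem_example(void *dst, const void *src, size_t len)
{
	unsigned long rem;	/* bytes not copied; 0 means full success */

	rem = memcpy_mcsafe(dst, src, len);
	if (rem) {
		/*
		 * The trailing 'rem' bytes were skipped: either a machine
		 * check fired on a poisoned source cacheline, or a write
		 * fault hit the destination and mcsafe_handle_tail()
		 * could only advance byte-by-byte up to the bad page.
		 */
		return -EIO;
	}
	return 0;
}
```

Returning a remainder rather than an error code is what lets pmem/dax-style readers account for how much data actually made it across before the poisoned cacheline, instead of discarding the entire buffer.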