Diffstat (limited to 'arch/powerpc/lib')
-rw-r--r--  arch/powerpc/lib/alloc.c            |   2
-rw-r--r--  arch/powerpc/lib/copy_32.S          | 127
-rw-r--r--  arch/powerpc/lib/copypage_power7.S  |  32
-rw-r--r--  arch/powerpc/lib/copyuser_power7.S  | 226
-rw-r--r--  arch/powerpc/lib/crtsavres.S        |  96
-rw-r--r--  arch/powerpc/lib/ldstfp.S           |  32
-rw-r--r--  arch/powerpc/lib/locks.c            |   1
-rw-r--r--  arch/powerpc/lib/memcpy_power7.S    | 226
-rw-r--r--  arch/powerpc/lib/ppc_ksyms.c        |   4
-rw-r--r--  arch/powerpc/lib/rheap.c            |   2
10 files changed, 309 insertions, 439 deletions
| diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c index 4a6c2cf890d9..60b0b3fc8fc1 100644 --- a/arch/powerpc/lib/alloc.c +++ b/arch/powerpc/lib/alloc.c @@ -10,7 +10,7 @@ void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)  {  	void *p; -	if (mem_init_done) +	if (slab_is_available())  		p = kzalloc(size, mask);  	else {  		p = memblock_virt_alloc(size, 0); diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S index 55f19f9fd708..6813f80d1eec 100644 --- a/arch/powerpc/lib/copy_32.S +++ b/arch/powerpc/lib/copy_32.S @@ -69,54 +69,6 @@ CACHELINE_BYTES = L1_CACHE_BYTES  LG_CACHELINE_BYTES = L1_CACHE_SHIFT  CACHELINE_MASK = (L1_CACHE_BYTES-1) -/* - * Use dcbz on the complete cache lines in the destination - * to set them to zero.  This requires that the destination - * area is cacheable.  -- paulus - */ -_GLOBAL(cacheable_memzero) -	mr	r5,r4 -	li	r4,0 -	addi	r6,r3,-4 -	cmplwi	0,r5,4 -	blt	7f -	stwu	r4,4(r6) -	beqlr -	andi.	r0,r6,3 -	add	r5,r0,r5 -	subf	r6,r0,r6 -	clrlwi	r7,r6,32-LG_CACHELINE_BYTES -	add	r8,r7,r5 -	srwi	r9,r8,LG_CACHELINE_BYTES -	addic.	r9,r9,-1	/* total number of complete cachelines */ -	ble	2f -	xori	r0,r7,CACHELINE_MASK & ~3 -	srwi.	r0,r0,2 -	beq	3f -	mtctr	r0 -4:	stwu	r4,4(r6) -	bdnz	4b -3:	mtctr	r9 -	li	r7,4 -10:	dcbz	r7,r6 -	addi	r6,r6,CACHELINE_BYTES -	bdnz	10b -	clrlwi	r5,r8,32-LG_CACHELINE_BYTES -	addi	r5,r5,4 -2:	srwi	r0,r5,2 -	mtctr	r0 -	bdz	6f -1:	stwu	r4,4(r6) -	bdnz	1b -6:	andi.	r5,r5,3 -7:	cmpwi	0,r5,0 -	beqlr -	mtctr	r5 -	addi	r6,r6,3 -8:	stbu	r4,1(r6) -	bdnz	8b -	blr -  _GLOBAL(memset)  	rlwimi	r4,r4,8,16,23  	rlwimi	r4,r4,16,0,15 @@ -142,85 +94,6 @@ _GLOBAL(memset)  	bdnz	8b  	blr -/* - * This version uses dcbz on the complete cache lines in the - * destination area to reduce memory traffic.  This requires that - * the destination area is cacheable. - * We only use this version if the source and dest don't overlap. - * -- paulus. - */ -_GLOBAL(cacheable_memcpy) -	add	r7,r3,r5		/* test if the src & dst overlap */ -	add	r8,r4,r5 -	cmplw	0,r4,r7 -	cmplw	1,r3,r8 -	crand	0,0,4			/* cr0.lt &= cr1.lt */ -	blt	memcpy			/* if regions overlap */ - -	addi	r4,r4,-4 -	addi	r6,r3,-4 -	neg	r0,r3 -	andi.	r0,r0,CACHELINE_MASK	/* # bytes to start of cache line */ -	beq	58f - -	cmplw	0,r5,r0			/* is this more than total to do? */ -	blt	63f			/* if not much to do */ -	andi.	r8,r0,3			/* get it word-aligned first */ -	subf	r5,r0,r5 -	mtctr	r8 -	beq+	61f -70:	lbz	r9,4(r4)		/* do some bytes */ -	stb	r9,4(r6) -	addi	r4,r4,1 -	addi	r6,r6,1 -	bdnz	70b -61:	srwi.	r0,r0,2 -	mtctr	r0 -	beq	58f -72:	lwzu	r9,4(r4)		/* do some words */ -	stwu	r9,4(r6) -	bdnz	72b - -58:	srwi.	r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */ -	clrlwi	r5,r5,32-LG_CACHELINE_BYTES -	li	r11,4 -	mtctr	r0 -	beq	63f -53: -	dcbz	r11,r6 -	COPY_16_BYTES -#if L1_CACHE_BYTES >= 32 -	COPY_16_BYTES -#if L1_CACHE_BYTES >= 64 -	COPY_16_BYTES -	COPY_16_BYTES -#if L1_CACHE_BYTES >= 128 -	COPY_16_BYTES -	COPY_16_BYTES -	COPY_16_BYTES -	COPY_16_BYTES -#endif -#endif -#endif -	bdnz	53b - -63:	srwi.	r0,r5,2 -	mtctr	r0 -	beq	64f -30:	lwzu	r0,4(r4) -	stwu	r0,4(r6) -	bdnz	30b - -64:	andi.	
r0,r5,3 -	mtctr	r0 -	beq+	65f -40:	lbz	r0,4(r4) -	stb	r0,4(r6) -	addi	r4,r4,1 -	addi	r6,r6,1 -	bdnz	40b -65:	blr -  _GLOBAL(memmove)  	cmplw	0,r3,r4  	bgt	backwards_memcpy diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S index d7dafb3777ac..a84d333ecb09 100644 --- a/arch/powerpc/lib/copypage_power7.S +++ b/arch/powerpc/lib/copypage_power7.S @@ -83,23 +83,23 @@ _GLOBAL(copypage_power7)  	li	r12,112  	.align	5 -1:	lvx	vr7,r0,r4 -	lvx	vr6,r4,r6 -	lvx	vr5,r4,r7 -	lvx	vr4,r4,r8 -	lvx	vr3,r4,r9 -	lvx	vr2,r4,r10 -	lvx	vr1,r4,r11 -	lvx	vr0,r4,r12 +1:	lvx	v7,r0,r4 +	lvx	v6,r4,r6 +	lvx	v5,r4,r7 +	lvx	v4,r4,r8 +	lvx	v3,r4,r9 +	lvx	v2,r4,r10 +	lvx	v1,r4,r11 +	lvx	v0,r4,r12  	addi	r4,r4,128 -	stvx	vr7,r0,r3 -	stvx	vr6,r3,r6 -	stvx	vr5,r3,r7 -	stvx	vr4,r3,r8 -	stvx	vr3,r3,r9 -	stvx	vr2,r3,r10 -	stvx	vr1,r3,r11 -	stvx	vr0,r3,r12 +	stvx	v7,r0,r3 +	stvx	v6,r3,r6 +	stvx	v5,r3,r7 +	stvx	v4,r3,r8 +	stvx	v3,r3,r9 +	stvx	v2,r3,r10 +	stvx	v1,r3,r11 +	stvx	v0,r3,r12  	addi	r3,r3,128  	bdnz	1b diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S index 92ee840529bc..da0c568d18c4 100644 --- a/arch/powerpc/lib/copyuser_power7.S +++ b/arch/powerpc/lib/copyuser_power7.S @@ -388,29 +388,29 @@ err3;	std	r0,0(r3)  	li	r11,48  	bf	cr7*4+3,5f -err3;	lvx	vr1,r0,r4 +err3;	lvx	v1,r0,r4  	addi	r4,r4,16 -err3;	stvx	vr1,r0,r3 +err3;	stvx	v1,r0,r3  	addi	r3,r3,16  5:	bf	cr7*4+2,6f -err3;	lvx	vr1,r0,r4 -err3;	lvx	vr0,r4,r9 +err3;	lvx	v1,r0,r4 +err3;	lvx	v0,r4,r9  	addi	r4,r4,32 -err3;	stvx	vr1,r0,r3 -err3;	stvx	vr0,r3,r9 +err3;	stvx	v1,r0,r3 +err3;	stvx	v0,r3,r9  	addi	r3,r3,32  6:	bf	cr7*4+1,7f -err3;	lvx	vr3,r0,r4 -err3;	lvx	vr2,r4,r9 -err3;	lvx	vr1,r4,r10 -err3;	lvx	vr0,r4,r11 +err3;	lvx	v3,r0,r4 +err3;	lvx	v2,r4,r9 +err3;	lvx	v1,r4,r10 +err3;	lvx	v0,r4,r11  	addi	r4,r4,64 -err3;	stvx	vr3,r0,r3 -err3;	stvx	vr2,r3,r9 -err3;	stvx	vr1,r3,r10 -err3;	stvx	vr0,r3,r11 +err3;	stvx	v3,r0,r3 +err3;	stvx	v2,r3,r9 +err3;	stvx	v1,r3,r10 +err3;	stvx	v0,r3,r11  	addi	r3,r3,64  7:	sub	r5,r5,r6 @@ -433,23 +433,23 @@ err3;	stvx	vr0,r3,r11  	 */  	.align	5  8: -err4;	lvx	vr7,r0,r4 -err4;	lvx	vr6,r4,r9 -err4;	lvx	vr5,r4,r10 -err4;	lvx	vr4,r4,r11 -err4;	lvx	vr3,r4,r12 -err4;	lvx	vr2,r4,r14 -err4;	lvx	vr1,r4,r15 -err4;	lvx	vr0,r4,r16 +err4;	lvx	v7,r0,r4 +err4;	lvx	v6,r4,r9 +err4;	lvx	v5,r4,r10 +err4;	lvx	v4,r4,r11 +err4;	lvx	v3,r4,r12 +err4;	lvx	v2,r4,r14 +err4;	lvx	v1,r4,r15 +err4;	lvx	v0,r4,r16  	addi	r4,r4,128 -err4;	stvx	vr7,r0,r3 -err4;	stvx	vr6,r3,r9 -err4;	stvx	vr5,r3,r10 -err4;	stvx	vr4,r3,r11 -err4;	stvx	vr3,r3,r12 -err4;	stvx	vr2,r3,r14 -err4;	stvx	vr1,r3,r15 -err4;	stvx	vr0,r3,r16 +err4;	stvx	v7,r0,r3 +err4;	stvx	v6,r3,r9 +err4;	stvx	v5,r3,r10 +err4;	stvx	v4,r3,r11 +err4;	stvx	v3,r3,r12 +err4;	stvx	v2,r3,r14 +err4;	stvx	v1,r3,r15 +err4;	stvx	v0,r3,r16  	addi	r3,r3,128  	bdnz	8b @@ -463,29 +463,29 @@ err4;	stvx	vr0,r3,r16  	mtocrf	0x01,r6  	bf	cr7*4+1,9f -err3;	lvx	vr3,r0,r4 -err3;	lvx	vr2,r4,r9 -err3;	lvx	vr1,r4,r10 -err3;	lvx	vr0,r4,r11 +err3;	lvx	v3,r0,r4 +err3;	lvx	v2,r4,r9 +err3;	lvx	v1,r4,r10 +err3;	lvx	v0,r4,r11  	addi	r4,r4,64 -err3;	stvx	vr3,r0,r3 -err3;	stvx	vr2,r3,r9 -err3;	stvx	vr1,r3,r10 -err3;	stvx	vr0,r3,r11 +err3;	stvx	v3,r0,r3 +err3;	stvx	v2,r3,r9 +err3;	stvx	v1,r3,r10 +err3;	stvx	v0,r3,r11  	addi	r3,r3,64  9:	bf	cr7*4+2,10f -err3;	lvx	vr1,r0,r4 -err3;	lvx	vr0,r4,r9 +err3;	lvx	v1,r0,r4 +err3;	lvx	v0,r4,r9  	addi	r4,r4,32 -err3;	stvx	vr1,r0,r3 -err3;	stvx	vr0,r3,r9 +err3;	stvx	v1,r0,r3 +err3;	stvx	v0,r3,r9  	addi	r3,r3,32  10:	bf	cr7*4+3,11f -err3;	lvx	
vr1,r0,r4 +err3;	lvx	v1,r0,r4  	addi	r4,r4,16 -err3;	stvx	vr1,r0,r3 +err3;	stvx	v1,r0,r3  	addi	r3,r3,16  	/* Up to 15B to go */ @@ -560,42 +560,42 @@ err3;	stw	r7,4(r3)  	li	r10,32  	li	r11,48 -	LVS(vr16,0,r4)		/* Setup permute control vector */ -err3;	lvx	vr0,0,r4 +	LVS(v16,0,r4)		/* Setup permute control vector */ +err3;	lvx	v0,0,r4  	addi	r4,r4,16  	bf	cr7*4+3,5f -err3;	lvx	vr1,r0,r4 -	VPERM(vr8,vr0,vr1,vr16) +err3;	lvx	v1,r0,r4 +	VPERM(v8,v0,v1,v16)  	addi	r4,r4,16 -err3;	stvx	vr8,r0,r3 +err3;	stvx	v8,r0,r3  	addi	r3,r3,16 -	vor	vr0,vr1,vr1 +	vor	v0,v1,v1  5:	bf	cr7*4+2,6f -err3;	lvx	vr1,r0,r4 -	VPERM(vr8,vr0,vr1,vr16) -err3;	lvx	vr0,r4,r9 -	VPERM(vr9,vr1,vr0,vr16) +err3;	lvx	v1,r0,r4 +	VPERM(v8,v0,v1,v16) +err3;	lvx	v0,r4,r9 +	VPERM(v9,v1,v0,v16)  	addi	r4,r4,32 -err3;	stvx	vr8,r0,r3 -err3;	stvx	vr9,r3,r9 +err3;	stvx	v8,r0,r3 +err3;	stvx	v9,r3,r9  	addi	r3,r3,32  6:	bf	cr7*4+1,7f -err3;	lvx	vr3,r0,r4 -	VPERM(vr8,vr0,vr3,vr16) -err3;	lvx	vr2,r4,r9 -	VPERM(vr9,vr3,vr2,vr16) -err3;	lvx	vr1,r4,r10 -	VPERM(vr10,vr2,vr1,vr16) -err3;	lvx	vr0,r4,r11 -	VPERM(vr11,vr1,vr0,vr16) +err3;	lvx	v3,r0,r4 +	VPERM(v8,v0,v3,v16) +err3;	lvx	v2,r4,r9 +	VPERM(v9,v3,v2,v16) +err3;	lvx	v1,r4,r10 +	VPERM(v10,v2,v1,v16) +err3;	lvx	v0,r4,r11 +	VPERM(v11,v1,v0,v16)  	addi	r4,r4,64 -err3;	stvx	vr8,r0,r3 -err3;	stvx	vr9,r3,r9 -err3;	stvx	vr10,r3,r10 -err3;	stvx	vr11,r3,r11 +err3;	stvx	v8,r0,r3 +err3;	stvx	v9,r3,r9 +err3;	stvx	v10,r3,r10 +err3;	stvx	v11,r3,r11  	addi	r3,r3,64  7:	sub	r5,r5,r6 @@ -618,31 +618,31 @@ err3;	stvx	vr11,r3,r11  	 */  	.align	5  8: -err4;	lvx	vr7,r0,r4 -	VPERM(vr8,vr0,vr7,vr16) -err4;	lvx	vr6,r4,r9 -	VPERM(vr9,vr7,vr6,vr16) -err4;	lvx	vr5,r4,r10 -	VPERM(vr10,vr6,vr5,vr16) -err4;	lvx	vr4,r4,r11 -	VPERM(vr11,vr5,vr4,vr16) -err4;	lvx	vr3,r4,r12 -	VPERM(vr12,vr4,vr3,vr16) -err4;	lvx	vr2,r4,r14 -	VPERM(vr13,vr3,vr2,vr16) -err4;	lvx	vr1,r4,r15 -	VPERM(vr14,vr2,vr1,vr16) -err4;	lvx	vr0,r4,r16 -	VPERM(vr15,vr1,vr0,vr16) +err4;	lvx	v7,r0,r4 +	VPERM(v8,v0,v7,v16) +err4;	lvx	v6,r4,r9 +	VPERM(v9,v7,v6,v16) +err4;	lvx	v5,r4,r10 +	VPERM(v10,v6,v5,v16) +err4;	lvx	v4,r4,r11 +	VPERM(v11,v5,v4,v16) +err4;	lvx	v3,r4,r12 +	VPERM(v12,v4,v3,v16) +err4;	lvx	v2,r4,r14 +	VPERM(v13,v3,v2,v16) +err4;	lvx	v1,r4,r15 +	VPERM(v14,v2,v1,v16) +err4;	lvx	v0,r4,r16 +	VPERM(v15,v1,v0,v16)  	addi	r4,r4,128 -err4;	stvx	vr8,r0,r3 -err4;	stvx	vr9,r3,r9 -err4;	stvx	vr10,r3,r10 -err4;	stvx	vr11,r3,r11 -err4;	stvx	vr12,r3,r12 -err4;	stvx	vr13,r3,r14 -err4;	stvx	vr14,r3,r15 -err4;	stvx	vr15,r3,r16 +err4;	stvx	v8,r0,r3 +err4;	stvx	v9,r3,r9 +err4;	stvx	v10,r3,r10 +err4;	stvx	v11,r3,r11 +err4;	stvx	v12,r3,r12 +err4;	stvx	v13,r3,r14 +err4;	stvx	v14,r3,r15 +err4;	stvx	v15,r3,r16  	addi	r3,r3,128  	bdnz	8b @@ -656,36 +656,36 @@ err4;	stvx	vr15,r3,r16  	mtocrf	0x01,r6  	bf	cr7*4+1,9f -err3;	lvx	vr3,r0,r4 -	VPERM(vr8,vr0,vr3,vr16) -err3;	lvx	vr2,r4,r9 -	VPERM(vr9,vr3,vr2,vr16) -err3;	lvx	vr1,r4,r10 -	VPERM(vr10,vr2,vr1,vr16) -err3;	lvx	vr0,r4,r11 -	VPERM(vr11,vr1,vr0,vr16) +err3;	lvx	v3,r0,r4 +	VPERM(v8,v0,v3,v16) +err3;	lvx	v2,r4,r9 +	VPERM(v9,v3,v2,v16) +err3;	lvx	v1,r4,r10 +	VPERM(v10,v2,v1,v16) +err3;	lvx	v0,r4,r11 +	VPERM(v11,v1,v0,v16)  	addi	r4,r4,64 -err3;	stvx	vr8,r0,r3 -err3;	stvx	vr9,r3,r9 -err3;	stvx	vr10,r3,r10 -err3;	stvx	vr11,r3,r11 +err3;	stvx	v8,r0,r3 +err3;	stvx	v9,r3,r9 +err3;	stvx	v10,r3,r10 +err3;	stvx	v11,r3,r11  	addi	r3,r3,64  9:	bf	cr7*4+2,10f -err3;	lvx	vr1,r0,r4 -	VPERM(vr8,vr0,vr1,vr16) -err3;	lvx	vr0,r4,r9 -	VPERM(vr9,vr1,vr0,vr16) +err3;	lvx	v1,r0,r4 +	VPERM(v8,v0,v1,v16) +err3;	lvx	v0,r4,r9 +	
VPERM(v9,v1,v0,v16)  	addi	r4,r4,32 -err3;	stvx	vr8,r0,r3 -err3;	stvx	vr9,r3,r9 +err3;	stvx	v8,r0,r3 +err3;	stvx	v9,r3,r9  	addi	r3,r3,32  10:	bf	cr7*4+3,11f -err3;	lvx	vr1,r0,r4 -	VPERM(vr8,vr0,vr1,vr16) +err3;	lvx	v1,r0,r4 +	VPERM(v8,v0,v1,v16)  	addi	r4,r4,16 -err3;	stvx	vr8,r0,r3 +err3;	stvx	v8,r0,r3  	addi	r3,r3,16  	/* Up to 15B to go */ diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S index a5b30c71a8d3..18af0b3d3eb2 100644 --- a/arch/powerpc/lib/crtsavres.S +++ b/arch/powerpc/lib/crtsavres.S @@ -236,78 +236,78 @@ _GLOBAL(_rest32gpr_31_x)  _GLOBAL(_savevr_20)  	li	r11,-192 -	stvx	vr20,r11,r0 +	stvx	v20,r11,r0  _GLOBAL(_savevr_21)  	li	r11,-176 -	stvx	vr21,r11,r0 +	stvx	v21,r11,r0  _GLOBAL(_savevr_22)  	li	r11,-160 -	stvx	vr22,r11,r0 +	stvx	v22,r11,r0  _GLOBAL(_savevr_23)  	li	r11,-144 -	stvx	vr23,r11,r0 +	stvx	v23,r11,r0  _GLOBAL(_savevr_24)  	li	r11,-128 -	stvx	vr24,r11,r0 +	stvx	v24,r11,r0  _GLOBAL(_savevr_25)  	li	r11,-112 -	stvx	vr25,r11,r0 +	stvx	v25,r11,r0  _GLOBAL(_savevr_26)  	li	r11,-96 -	stvx	vr26,r11,r0 +	stvx	v26,r11,r0  _GLOBAL(_savevr_27)  	li	r11,-80 -	stvx	vr27,r11,r0 +	stvx	v27,r11,r0  _GLOBAL(_savevr_28)  	li	r11,-64 -	stvx	vr28,r11,r0 +	stvx	v28,r11,r0  _GLOBAL(_savevr_29)  	li	r11,-48 -	stvx	vr29,r11,r0 +	stvx	v29,r11,r0  _GLOBAL(_savevr_30)  	li	r11,-32 -	stvx	vr30,r11,r0 +	stvx	v30,r11,r0  _GLOBAL(_savevr_31)  	li	r11,-16 -	stvx	vr31,r11,r0 +	stvx	v31,r11,r0  	blr  _GLOBAL(_restvr_20)  	li	r11,-192 -	lvx	vr20,r11,r0 +	lvx	v20,r11,r0  _GLOBAL(_restvr_21)  	li	r11,-176 -	lvx	vr21,r11,r0 +	lvx	v21,r11,r0  _GLOBAL(_restvr_22)  	li	r11,-160 -	lvx	vr22,r11,r0 +	lvx	v22,r11,r0  _GLOBAL(_restvr_23)  	li	r11,-144 -	lvx	vr23,r11,r0 +	lvx	v23,r11,r0  _GLOBAL(_restvr_24)  	li	r11,-128 -	lvx	vr24,r11,r0 +	lvx	v24,r11,r0  _GLOBAL(_restvr_25)  	li	r11,-112 -	lvx	vr25,r11,r0 +	lvx	v25,r11,r0  _GLOBAL(_restvr_26)  	li	r11,-96 -	lvx	vr26,r11,r0 +	lvx	v26,r11,r0  _GLOBAL(_restvr_27)  	li	r11,-80 -	lvx	vr27,r11,r0 +	lvx	v27,r11,r0  _GLOBAL(_restvr_28)  	li	r11,-64 -	lvx	vr28,r11,r0 +	lvx	v28,r11,r0  _GLOBAL(_restvr_29)  	li	r11,-48 -	lvx	vr29,r11,r0 +	lvx	v29,r11,r0  _GLOBAL(_restvr_30)  	li	r11,-32 -	lvx	vr30,r11,r0 +	lvx	v30,r11,r0  _GLOBAL(_restvr_31)  	li	r11,-16 -	lvx	vr31,r11,r0 +	lvx	v31,r11,r0  	blr  #endif /* CONFIG_ALTIVEC */ @@ -443,101 +443,101 @@ _restgpr0_31:  .globl	_savevr_20  _savevr_20:  	li	r12,-192 -	stvx	vr20,r12,r0 +	stvx	v20,r12,r0  .globl	_savevr_21  _savevr_21:  	li	r12,-176 -	stvx	vr21,r12,r0 +	stvx	v21,r12,r0  .globl	_savevr_22  _savevr_22:  	li	r12,-160 -	stvx	vr22,r12,r0 +	stvx	v22,r12,r0  .globl	_savevr_23  _savevr_23:  	li	r12,-144 -	stvx	vr23,r12,r0 +	stvx	v23,r12,r0  .globl	_savevr_24  _savevr_24:  	li	r12,-128 -	stvx	vr24,r12,r0 +	stvx	v24,r12,r0  .globl	_savevr_25  _savevr_25:  	li	r12,-112 -	stvx	vr25,r12,r0 +	stvx	v25,r12,r0  .globl	_savevr_26  _savevr_26:  	li	r12,-96 -	stvx	vr26,r12,r0 +	stvx	v26,r12,r0  .globl	_savevr_27  _savevr_27:  	li	r12,-80 -	stvx	vr27,r12,r0 +	stvx	v27,r12,r0  .globl	_savevr_28  _savevr_28:  	li	r12,-64 -	stvx	vr28,r12,r0 +	stvx	v28,r12,r0  .globl	_savevr_29  _savevr_29:  	li	r12,-48 -	stvx	vr29,r12,r0 +	stvx	v29,r12,r0  .globl	_savevr_30  _savevr_30:  	li	r12,-32 -	stvx	vr30,r12,r0 +	stvx	v30,r12,r0  .globl	_savevr_31  _savevr_31:  	li	r12,-16 -	stvx	vr31,r12,r0 +	stvx	v31,r12,r0  	blr  .globl	_restvr_20  _restvr_20:  	li	r12,-192 -	lvx	vr20,r12,r0 +	lvx	v20,r12,r0  .globl	_restvr_21  _restvr_21:  	li	r12,-176 -	lvx	vr21,r12,r0 +	lvx	v21,r12,r0  .globl	_restvr_22  _restvr_22:  	li	r12,-160 -	
lvx	vr22,r12,r0 +	lvx	v22,r12,r0  .globl	_restvr_23  _restvr_23:  	li	r12,-144 -	lvx	vr23,r12,r0 +	lvx	v23,r12,r0  .globl	_restvr_24  _restvr_24:  	li	r12,-128 -	lvx	vr24,r12,r0 +	lvx	v24,r12,r0  .globl	_restvr_25  _restvr_25:  	li	r12,-112 -	lvx	vr25,r12,r0 +	lvx	v25,r12,r0  .globl	_restvr_26  _restvr_26:  	li	r12,-96 -	lvx	vr26,r12,r0 +	lvx	v26,r12,r0  .globl	_restvr_27  _restvr_27:  	li	r12,-80 -	lvx	vr27,r12,r0 +	lvx	v27,r12,r0  .globl	_restvr_28  _restvr_28:  	li	r12,-64 -	lvx	vr28,r12,r0 +	lvx	v28,r12,r0  .globl	_restvr_29  _restvr_29:  	li	r12,-48 -	lvx	vr29,r12,r0 +	lvx	v29,r12,r0  .globl	_restvr_30  _restvr_30:  	li	r12,-32 -	lvx	vr30,r12,r0 +	lvx	v30,r12,r0  .globl	_restvr_31  _restvr_31:  	li	r12,-16 -	lvx	vr31,r12,r0 +	lvx	v31,r12,r0  	blr  #endif /* CONFIG_ALTIVEC */ diff --git a/arch/powerpc/lib/ldstfp.S b/arch/powerpc/lib/ldstfp.S index 85aec08ab234..5d0cdbfbe3f2 100644 --- a/arch/powerpc/lib/ldstfp.S +++ b/arch/powerpc/lib/ldstfp.S @@ -184,16 +184,16 @@ _GLOBAL(do_stfd)  	extab	2b,3b  #ifdef CONFIG_ALTIVEC -/* Get the contents of vrN into vr0; N is in r3. */ +/* Get the contents of vrN into v0; N is in r3. */  _GLOBAL(get_vr)  	mflr	r0  	rlwinm	r3,r3,3,0xf8  	bcl	20,31,1f -	blr			/* vr0 is already in vr0 */ +	blr			/* v0 is already in v0 */  	nop  reg = 1  	.rept	31 -	vor	vr0,reg,reg	/* assembler doesn't know vmr? */ +	vor	v0,reg,reg	/* assembler doesn't know vmr? */  	blr  reg = reg + 1  	.endr @@ -203,16 +203,16 @@ reg = reg + 1  	mtlr	r0  	bctr -/* Put the contents of vr0 into vrN; N is in r3. */ +/* Put the contents of v0 into vrN; N is in r3. */  _GLOBAL(put_vr)  	mflr	r0  	rlwinm	r3,r3,3,0xf8  	bcl	20,31,1f -	blr			/* vr0 is already in vr0 */ +	blr			/* v0 is already in v0 */  	nop  reg = 1  	.rept	31 -	vor	reg,vr0,vr0 +	vor	reg,v0,v0  	blr  reg = reg + 1  	.endr @@ -234,13 +234,13 @@ _GLOBAL(do_lvx)  	MTMSRD(r7)  	isync  	beq	cr7,1f -	stvx	vr0,r1,r8 +	stvx	v0,r1,r8  1:	li	r9,-EFAULT -2:	lvx	vr0,0,r4 +2:	lvx	v0,0,r4  	li	r9,0  3:	beq	cr7,4f  	bl	put_vr -	lvx	vr0,r1,r8 +	lvx	v0,r1,r8  4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)  	mtlr	r0  	MTMSRD(r6) @@ -262,13 +262,13 @@ _GLOBAL(do_stvx)  	MTMSRD(r7)  	isync  	beq	cr7,1f -	stvx	vr0,r1,r8 +	stvx	v0,r1,r8  	bl	get_vr  1:	li	r9,-EFAULT -2:	stvx	vr0,0,r4 +2:	stvx	v0,0,r4  	li	r9,0  3:	beq	cr7,4f -	lvx	vr0,r1,r8 +	lvx	v0,r1,r8  4:	PPC_LL	r0,STKFRM+PPC_LR_STKOFF(r1)  	mtlr	r0  	MTMSRD(r6) @@ -280,12 +280,12 @@ _GLOBAL(do_stvx)  #endif /* CONFIG_ALTIVEC */  #ifdef CONFIG_VSX -/* Get the contents of vsrN into vsr0; N is in r3. */ +/* Get the contents of vsN into vs0; N is in r3. */  _GLOBAL(get_vsr)  	mflr	r0  	rlwinm	r3,r3,3,0x1f8  	bcl	20,31,1f -	blr			/* vsr0 is already in vsr0 */ +	blr			/* vs0 is already in vs0 */  	nop  reg = 1  	.rept	63 @@ -299,12 +299,12 @@ reg = reg + 1  	mtlr	r0  	bctr -/* Put the contents of vsr0 into vsrN; N is in r3. */ +/* Put the contents of vs0 into vsN; N is in r3. */  _GLOBAL(put_vsr)  	mflr	r0  	rlwinm	r3,r3,3,0x1f8  	bcl	20,31,1f -	blr			/* vr0 is already in vr0 */ +	blr			/* v0 is already in v0 */  	nop  reg = 1  	.rept	63 diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c index 170a0346f756..f7deebdf3365 100644 --- a/arch/powerpc/lib/locks.c +++ b/arch/powerpc/lib/locks.c @@ -41,6 +41,7 @@ void __spin_yield(arch_spinlock_t *lock)  	plpar_hcall_norets(H_CONFER,  		get_hard_smp_processor_id(holder_cpu), yield_count);  } +EXPORT_SYMBOL_GPL(__spin_yield);  /*   * Waiting for a read lock or a write lock on a rwlock... 
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S index 0830587df16e..786234fd4e91 100644 --- a/arch/powerpc/lib/memcpy_power7.S +++ b/arch/powerpc/lib/memcpy_power7.S @@ -321,29 +321,29 @@ _GLOBAL(memcpy_power7)  	li	r11,48  	bf	cr7*4+3,5f -	lvx	vr1,r0,r4 +	lvx	v1,r0,r4  	addi	r4,r4,16 -	stvx	vr1,r0,r3 +	stvx	v1,r0,r3  	addi	r3,r3,16  5:	bf	cr7*4+2,6f -	lvx	vr1,r0,r4 -	lvx	vr0,r4,r9 +	lvx	v1,r0,r4 +	lvx	v0,r4,r9  	addi	r4,r4,32 -	stvx	vr1,r0,r3 -	stvx	vr0,r3,r9 +	stvx	v1,r0,r3 +	stvx	v0,r3,r9  	addi	r3,r3,32  6:	bf	cr7*4+1,7f -	lvx	vr3,r0,r4 -	lvx	vr2,r4,r9 -	lvx	vr1,r4,r10 -	lvx	vr0,r4,r11 +	lvx	v3,r0,r4 +	lvx	v2,r4,r9 +	lvx	v1,r4,r10 +	lvx	v0,r4,r11  	addi	r4,r4,64 -	stvx	vr3,r0,r3 -	stvx	vr2,r3,r9 -	stvx	vr1,r3,r10 -	stvx	vr0,r3,r11 +	stvx	v3,r0,r3 +	stvx	v2,r3,r9 +	stvx	v1,r3,r10 +	stvx	v0,r3,r11  	addi	r3,r3,64  7:	sub	r5,r5,r6 @@ -366,23 +366,23 @@ _GLOBAL(memcpy_power7)  	 */  	.align	5  8: -	lvx	vr7,r0,r4 -	lvx	vr6,r4,r9 -	lvx	vr5,r4,r10 -	lvx	vr4,r4,r11 -	lvx	vr3,r4,r12 -	lvx	vr2,r4,r14 -	lvx	vr1,r4,r15 -	lvx	vr0,r4,r16 +	lvx	v7,r0,r4 +	lvx	v6,r4,r9 +	lvx	v5,r4,r10 +	lvx	v4,r4,r11 +	lvx	v3,r4,r12 +	lvx	v2,r4,r14 +	lvx	v1,r4,r15 +	lvx	v0,r4,r16  	addi	r4,r4,128 -	stvx	vr7,r0,r3 -	stvx	vr6,r3,r9 -	stvx	vr5,r3,r10 -	stvx	vr4,r3,r11 -	stvx	vr3,r3,r12 -	stvx	vr2,r3,r14 -	stvx	vr1,r3,r15 -	stvx	vr0,r3,r16 +	stvx	v7,r0,r3 +	stvx	v6,r3,r9 +	stvx	v5,r3,r10 +	stvx	v4,r3,r11 +	stvx	v3,r3,r12 +	stvx	v2,r3,r14 +	stvx	v1,r3,r15 +	stvx	v0,r3,r16  	addi	r3,r3,128  	bdnz	8b @@ -396,29 +396,29 @@ _GLOBAL(memcpy_power7)  	mtocrf	0x01,r6  	bf	cr7*4+1,9f -	lvx	vr3,r0,r4 -	lvx	vr2,r4,r9 -	lvx	vr1,r4,r10 -	lvx	vr0,r4,r11 +	lvx	v3,r0,r4 +	lvx	v2,r4,r9 +	lvx	v1,r4,r10 +	lvx	v0,r4,r11  	addi	r4,r4,64 -	stvx	vr3,r0,r3 -	stvx	vr2,r3,r9 -	stvx	vr1,r3,r10 -	stvx	vr0,r3,r11 +	stvx	v3,r0,r3 +	stvx	v2,r3,r9 +	stvx	v1,r3,r10 +	stvx	v0,r3,r11  	addi	r3,r3,64  9:	bf	cr7*4+2,10f -	lvx	vr1,r0,r4 -	lvx	vr0,r4,r9 +	lvx	v1,r0,r4 +	lvx	v0,r4,r9  	addi	r4,r4,32 -	stvx	vr1,r0,r3 -	stvx	vr0,r3,r9 +	stvx	v1,r0,r3 +	stvx	v0,r3,r9  	addi	r3,r3,32  10:	bf	cr7*4+3,11f -	lvx	vr1,r0,r4 +	lvx	v1,r0,r4  	addi	r4,r4,16 -	stvx	vr1,r0,r3 +	stvx	v1,r0,r3  	addi	r3,r3,16  	/* Up to 15B to go */ @@ -494,42 +494,42 @@ _GLOBAL(memcpy_power7)  	li	r10,32  	li	r11,48 -	LVS(vr16,0,r4)		/* Setup permute control vector */ -	lvx	vr0,0,r4 +	LVS(v16,0,r4)		/* Setup permute control vector */ +	lvx	v0,0,r4  	addi	r4,r4,16  	bf	cr7*4+3,5f -	lvx	vr1,r0,r4 -	VPERM(vr8,vr0,vr1,vr16) +	lvx	v1,r0,r4 +	VPERM(v8,v0,v1,v16)  	addi	r4,r4,16 -	stvx	vr8,r0,r3 +	stvx	v8,r0,r3  	addi	r3,r3,16 -	vor	vr0,vr1,vr1 +	vor	v0,v1,v1  5:	bf	cr7*4+2,6f -	lvx	vr1,r0,r4 -	VPERM(vr8,vr0,vr1,vr16) -	lvx	vr0,r4,r9 -	VPERM(vr9,vr1,vr0,vr16) +	lvx	v1,r0,r4 +	VPERM(v8,v0,v1,v16) +	lvx	v0,r4,r9 +	VPERM(v9,v1,v0,v16)  	addi	r4,r4,32 -	stvx	vr8,r0,r3 -	stvx	vr9,r3,r9 +	stvx	v8,r0,r3 +	stvx	v9,r3,r9  	addi	r3,r3,32  6:	bf	cr7*4+1,7f -	lvx	vr3,r0,r4 -	VPERM(vr8,vr0,vr3,vr16) -	lvx	vr2,r4,r9 -	VPERM(vr9,vr3,vr2,vr16) -	lvx	vr1,r4,r10 -	VPERM(vr10,vr2,vr1,vr16) -	lvx	vr0,r4,r11 -	VPERM(vr11,vr1,vr0,vr16) +	lvx	v3,r0,r4 +	VPERM(v8,v0,v3,v16) +	lvx	v2,r4,r9 +	VPERM(v9,v3,v2,v16) +	lvx	v1,r4,r10 +	VPERM(v10,v2,v1,v16) +	lvx	v0,r4,r11 +	VPERM(v11,v1,v0,v16)  	addi	r4,r4,64 -	stvx	vr8,r0,r3 -	stvx	vr9,r3,r9 -	stvx	vr10,r3,r10 -	stvx	vr11,r3,r11 +	stvx	v8,r0,r3 +	stvx	v9,r3,r9 +	stvx	v10,r3,r10 +	stvx	v11,r3,r11  	addi	r3,r3,64  7:	sub	r5,r5,r6 @@ -552,31 +552,31 @@ _GLOBAL(memcpy_power7)  	 */  	.align	5  8: -	lvx	vr7,r0,r4 -	VPERM(vr8,vr0,vr7,vr16) -	lvx	
vr6,r4,r9 -	VPERM(vr9,vr7,vr6,vr16) -	lvx	vr5,r4,r10 -	VPERM(vr10,vr6,vr5,vr16) -	lvx	vr4,r4,r11 -	VPERM(vr11,vr5,vr4,vr16) -	lvx	vr3,r4,r12 -	VPERM(vr12,vr4,vr3,vr16) -	lvx	vr2,r4,r14 -	VPERM(vr13,vr3,vr2,vr16) -	lvx	vr1,r4,r15 -	VPERM(vr14,vr2,vr1,vr16) -	lvx	vr0,r4,r16 -	VPERM(vr15,vr1,vr0,vr16) +	lvx	v7,r0,r4 +	VPERM(v8,v0,v7,v16) +	lvx	v6,r4,r9 +	VPERM(v9,v7,v6,v16) +	lvx	v5,r4,r10 +	VPERM(v10,v6,v5,v16) +	lvx	v4,r4,r11 +	VPERM(v11,v5,v4,v16) +	lvx	v3,r4,r12 +	VPERM(v12,v4,v3,v16) +	lvx	v2,r4,r14 +	VPERM(v13,v3,v2,v16) +	lvx	v1,r4,r15 +	VPERM(v14,v2,v1,v16) +	lvx	v0,r4,r16 +	VPERM(v15,v1,v0,v16)  	addi	r4,r4,128 -	stvx	vr8,r0,r3 -	stvx	vr9,r3,r9 -	stvx	vr10,r3,r10 -	stvx	vr11,r3,r11 -	stvx	vr12,r3,r12 -	stvx	vr13,r3,r14 -	stvx	vr14,r3,r15 -	stvx	vr15,r3,r16 +	stvx	v8,r0,r3 +	stvx	v9,r3,r9 +	stvx	v10,r3,r10 +	stvx	v11,r3,r11 +	stvx	v12,r3,r12 +	stvx	v13,r3,r14 +	stvx	v14,r3,r15 +	stvx	v15,r3,r16  	addi	r3,r3,128  	bdnz	8b @@ -590,36 +590,36 @@ _GLOBAL(memcpy_power7)  	mtocrf	0x01,r6  	bf	cr7*4+1,9f -	lvx	vr3,r0,r4 -	VPERM(vr8,vr0,vr3,vr16) -	lvx	vr2,r4,r9 -	VPERM(vr9,vr3,vr2,vr16) -	lvx	vr1,r4,r10 -	VPERM(vr10,vr2,vr1,vr16) -	lvx	vr0,r4,r11 -	VPERM(vr11,vr1,vr0,vr16) +	lvx	v3,r0,r4 +	VPERM(v8,v0,v3,v16) +	lvx	v2,r4,r9 +	VPERM(v9,v3,v2,v16) +	lvx	v1,r4,r10 +	VPERM(v10,v2,v1,v16) +	lvx	v0,r4,r11 +	VPERM(v11,v1,v0,v16)  	addi	r4,r4,64 -	stvx	vr8,r0,r3 -	stvx	vr9,r3,r9 -	stvx	vr10,r3,r10 -	stvx	vr11,r3,r11 +	stvx	v8,r0,r3 +	stvx	v9,r3,r9 +	stvx	v10,r3,r10 +	stvx	v11,r3,r11  	addi	r3,r3,64  9:	bf	cr7*4+2,10f -	lvx	vr1,r0,r4 -	VPERM(vr8,vr0,vr1,vr16) -	lvx	vr0,r4,r9 -	VPERM(vr9,vr1,vr0,vr16) +	lvx	v1,r0,r4 +	VPERM(v8,v0,v1,v16) +	lvx	v0,r4,r9 +	VPERM(v9,v1,v0,v16)  	addi	r4,r4,32 -	stvx	vr8,r0,r3 -	stvx	vr9,r3,r9 +	stvx	v8,r0,r3 +	stvx	v9,r3,r9  	addi	r3,r3,32  10:	bf	cr7*4+3,11f -	lvx	vr1,r0,r4 -	VPERM(vr8,vr0,vr1,vr16) +	lvx	v1,r0,r4 +	VPERM(v8,v0,v1,v16)  	addi	r4,r4,16 -	stvx	vr8,r0,r3 +	stvx	v8,r0,r3  	addi	r3,r3,16  	/* Up to 15B to go */ diff --git a/arch/powerpc/lib/ppc_ksyms.c b/arch/powerpc/lib/ppc_ksyms.c index f993959647b5..c7f8e9586316 100644 --- a/arch/powerpc/lib/ppc_ksyms.c +++ b/arch/powerpc/lib/ppc_ksyms.c @@ -8,10 +8,6 @@ EXPORT_SYMBOL(memset);  EXPORT_SYMBOL(memmove);  EXPORT_SYMBOL(memcmp);  EXPORT_SYMBOL(memchr); -#ifdef CONFIG_PPC32 -EXPORT_SYMBOL(cacheable_memcpy); -EXPORT_SYMBOL(cacheable_memzero); -#endif  EXPORT_SYMBOL(strcpy);  EXPORT_SYMBOL(strncpy); diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c index a1060a868e69..69abf844c2c3 100644 --- a/arch/powerpc/lib/rheap.c +++ b/arch/powerpc/lib/rheap.c @@ -284,7 +284,7 @@ EXPORT_SYMBOL_GPL(rh_create);   */  void rh_destroy(rh_info_t * info)  { -	if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL) +	if ((info->flags & RHIF_STATIC_BLOCK) == 0)  		kfree(info->block);  	if ((info->flags & RHIF_STATIC_INFO) == 0) |
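
For reference, after the alloc.c hunk above, zalloc_maybe_bootmem() keys off slab_is_available() instead of the powerpc-private mem_init_done flag: once the slab allocator is up it uses kzalloc(), otherwise it falls back to memblock. Below is a minimal sketch reconstructed from that hunk; the include lines and the trailing return are assumptions, not verbatim file contents.

	/* Sketch of zalloc_maybe_bootmem() after this change; reconstructed
	 * from the alloc.c hunk above, with includes and tail assumed. */
	#include <linux/slab.h>
	#include <linux/memblock.h>

	void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
	{
		void *p;

		if (slab_is_available())
			p = kzalloc(size, mask);          /* slab is up: normal allocation */
		else
			p = memblock_virt_alloc(size, 0); /* early boot: memblock fallback */

		return p;
	}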
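
Similarly, the rheap.c hunk drops the explicit info->block != NULL test because kfree() already treats a NULL pointer as a no-op. A sketch of the resulting rh_destroy(), again reconstructed from the hunk, with the final kfree of the descriptor assumed rather than shown in the diff:

	void rh_destroy(rh_info_t *info)
	{
		/* kfree(NULL) is a no-op, so no need to test info->block first */
		if ((info->flags & RHIF_STATIC_BLOCK) == 0)
			kfree(info->block);

		/* free the descriptor itself unless it was statically allocated */
		if ((info->flags & RHIF_STATIC_INFO) == 0)
			kfree(info);
	}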