powerpc: Restore registers on error exit from csum_partial_copy_generic()

The csum_partial_copy_generic() function saves the PowerPC non-volatile
r14, r15, and r16 registers for the main checksum-and-copy loop.
Unfortunately, it fails to restore them upon error exit from this loop,
which results in silent corruption of these registers in the presumably
rare event of an access exception within that loop.
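
For context, the pattern at issue looks roughly like this (a minimal sketch, inferred from the restore sequence this patch adds; the exact prologue in the function may differ in detail):

    stdu  r1,-STACKFRAMESIZE(r1)    /* allocate a stack frame */
    std   r14,STK_REG(R14)(r1)      /* save the non-volatiles */
    std   r15,STK_REG(R15)(r1)
    std   r16,STK_REG(R16)(r1)
    /* ... checksum-and-copy loop; its loads and stores may fault ... */
    ld    r14,STK_REG(R14)(r1)      /* every exit path, including the */
    ld    r15,STK_REG(R15)(r1)      /* exception fixup, must restore  */
    ld    r16,STK_REG(R16)(r1)      /* the non-volatiles and pop the  */
    addi  r1,r1,STACKFRAMESIZE      /* frame before returning         */
    blr

The patch below also splits the exception-table macros: source/dest keep their fixups pointed at the register-restoring .Lsrc_error/.Ldest_error paths (for accesses made while the stack frame is live), while the new srcnr/dstnr variants point at the .Lsrc_error_nr/.Ldest_error_nr labels used for copies made outside that frame.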

This commit therefore restores these registers on error exit from the loop.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Anton Blanchard <anton@samba.org>
Cc: stable@vger.kernel.org
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Authored by Paul E. McKenney on 2013-10-01 17:11:35 +10:00; committed by Benjamin Herrenschmidt
parent d9813c3681
commit 8f21bd0090
1 changed file with 40 additions and 14 deletions


@@ -226,19 +226,35 @@ _GLOBAL(csum_partial)
 blr

-.macro source
+.macro srcnr
 100:
 .section __ex_table,"a"
 .align 3
-.llong 100b,.Lsrc_error
+.llong 100b,.Lsrc_error_nr
 .previous
 .endm

+.macro source
+150:
+.section __ex_table,"a"
+.align 3
+.llong 150b,.Lsrc_error
+.previous
+.endm
+
+.macro dstnr
+200:
+.section __ex_table,"a"
+.align 3
+.llong 200b,.Ldest_error_nr
+.previous
+.endm
+
 .macro dest
-200:
+250:
 .section __ex_table,"a"
 .align 3
-.llong 200b,.Ldest_error
+.llong 250b,.Ldest_error
 .previous
 .endm
@@ -274,11 +290,11 @@ _GLOBAL(csum_partial_copy_generic)
 mtctr r6
 1:
-source; lhz r6,0(r3) /* align to doubleword */
+srcnr; lhz r6,0(r3) /* align to doubleword */
 subi r5,r5,2
 addi r3,r3,2
 adde r0,r0,r6
-dest; sth r6,0(r4)
+dstnr; sth r6,0(r4)
 addi r4,r4,2
 bdnz 1b
@@ -392,10 +408,10 @@ dest; std r16,56(r4)
 mtctr r6
 3:
-source; ld r6,0(r3)
+srcnr; ld r6,0(r3)
 addi r3,r3,8
 adde r0,r0,r6
-dest; std r6,0(r4)
+dstnr; std r6,0(r4)
 addi r4,r4,8
 bdnz 3b
@@ -405,10 +421,10 @@ dest; std r6,0(r4)
 srdi. r6,r5,2
 beq .Lcopy_tail_halfword
-source; lwz r6,0(r3)
+srcnr; lwz r6,0(r3)
 addi r3,r3,4
 adde r0,r0,r6
-dest; stw r6,0(r4)
+dstnr; stw r6,0(r4)
 addi r4,r4,4
 subi r5,r5,4
@@ -416,10 +432,10 @@ dest; stw r6,0(r4)
 srdi. r6,r5,1
 beq .Lcopy_tail_byte
-source; lhz r6,0(r3)
+srcnr; lhz r6,0(r3)
 addi r3,r3,2
 adde r0,r0,r6
-dest; sth r6,0(r4)
+dstnr; sth r6,0(r4)
 addi r4,r4,2
 subi r5,r5,2
@@ -427,10 +443,10 @@ dest; sth r6,0(r4)
 andi. r6,r5,1
 beq .Lcopy_finish
-source; lbz r6,0(r3)
+srcnr; lbz r6,0(r3)
 sldi r9,r6,8 /* Pad the byte out to 16 bits */
 adde r0,r0,r9
-dest; stb r6,0(r4)
+dstnr; stb r6,0(r4)

 .Lcopy_finish:
 addze r0,r0 /* add in final carry */
@@ -440,6 +456,11 @@ dest; stb r6,0(r4)
 blr

 .Lsrc_error:
+ld r14,STK_REG(R14)(r1)
+ld r15,STK_REG(R15)(r1)
+ld r16,STK_REG(R16)(r1)
+addi r1,r1,STACKFRAMESIZE
+.Lsrc_error_nr:
 cmpdi 0,r7,0
 beqlr
 li r6,-EFAULT
@@ -447,6 +468,11 @@ dest; stb r6,0(r4)
 blr

 .Ldest_error:
+ld r14,STK_REG(R14)(r1)
+ld r15,STK_REG(R15)(r1)
+ld r16,STK_REG(R16)(r1)
+addi r1,r1,STACKFRAMESIZE
+.Ldest_error_nr:
 cmpdi 0,r8,0
 beqlr
 li r6,-EFAULT