sparc64: Use indirect calls in hamming weight stubs

Otherwise, depending upon link order, the branch relocation
limits could be exceeded.

Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
This commit is contained in:
David S. Miller 2017-06-22 10:56:48 -04:00 committed by Masahiro Yamada
parent 3c2993b8c6
commit 9289ea7f95

View File

@@ -4,8 +4,8 @@
.text
.align 32
/*
 * __arch_hweight8: popcount stub that transfers control to the generic
 * C helper __sw_hweight8.  Diff view note: the ba,pt/nop pair is the
 * REMOVED direct branch; the sethi/jmpl/nop sequence is the ADDED
 * indirect call (the web renderer stripped the +/- diff markers).
 */
ENTRY(__arch_hweight8)
ba,pt %xcc, __sw_hweight8	/* removed: PC-relative branch — displacement can exceed the relocation range depending on link order (see commit message) */
nop	/* removed: branch delay slot */
sethi %hi(__sw_hweight8), %g1	/* added: load high bits of the helper's absolute address into %g1 */
jmpl %g1 + %lo(__sw_hweight8), %g0	/* added: indirect jump, no range limit; link reg %g0 discards the return address, making this a tail call */
nop	/* added: delay slot of jmpl */
ENDPROC(__arch_hweight8)
EXPORT_SYMBOL(__arch_hweight8)
@@ -17,8 +17,8 @@ EXPORT_SYMBOL(__arch_hweight8)
.previous
/*
 * __arch_hweight16: popcount stub tail-calling the generic helper
 * __sw_hweight16.  Diff view note: ba,pt/nop is the REMOVED direct
 * branch; sethi/jmpl/nop is the ADDED indirect call (+/- markers were
 * stripped by the renderer).
 */
ENTRY(__arch_hweight16)
ba,pt %xcc, __sw_hweight16	/* removed: direct branch, relocation-range limited */
nop	/* removed: delay slot */
sethi %hi(__sw_hweight16), %g1	/* added: high 22 bits of helper address -> %g1 */
jmpl %g1 + %lo(__sw_hweight16), %g0	/* added: range-unlimited indirect tail call; %g0 discards link address */
nop	/* added: jmpl delay slot */
ENDPROC(__arch_hweight16)
EXPORT_SYMBOL(__arch_hweight16)
@@ -30,8 +30,8 @@ EXPORT_SYMBOL(__arch_hweight16)
.previous
/*
 * __arch_hweight32: popcount stub tail-calling the generic helper
 * __sw_hweight32.  Diff view note: ba,pt/nop is the REMOVED direct
 * branch; sethi/jmpl/nop is the ADDED indirect call (+/- markers were
 * stripped by the renderer).
 */
ENTRY(__arch_hweight32)
ba,pt %xcc, __sw_hweight32	/* removed: direct branch, relocation-range limited */
nop	/* removed: delay slot */
sethi %hi(__sw_hweight32), %g1	/* added: high 22 bits of helper address -> %g1 */
jmpl %g1 + %lo(__sw_hweight32), %g0	/* added: range-unlimited indirect tail call; %g0 discards link address */
nop	/* added: jmpl delay slot */
ENDPROC(__arch_hweight32)
EXPORT_SYMBOL(__arch_hweight32)
@@ -43,8 +43,8 @@ EXPORT_SYMBOL(__arch_hweight32)
.previous
/*
 * __arch_hweight64: popcount stub tail-calling the generic helper
 * __sw_hweight64.  Diff view note: ba,pt/nop is the REMOVED direct
 * branch; sethi/jmpl/nop is the ADDED indirect call (+/- markers were
 * stripped by the renderer).
 *
 * FIX: the added sethi/jmpl sequence referenced __sw_hweight16 — a
 * copy/paste error that would make the 64-bit stub invoke the 16-bit
 * popcount helper.  It must target __sw_hweight64, matching both the
 * removed branch target and the pattern of the other three stubs.
 */
ENTRY(__arch_hweight64)
ba,pt %xcc, __sw_hweight64	/* removed: direct branch, relocation-range limited */
nop	/* removed: delay slot */
sethi %hi(__sw_hweight64), %g1	/* added: high 22 bits of helper address -> %g1 (was __sw_hweight16: wrong helper) */
jmpl %g1 + %lo(__sw_hweight64), %g0	/* added: range-unlimited indirect tail call; %g0 discards link address (was __sw_hweight16) */
nop	/* added: jmpl delay slot */
ENDPROC(__arch_hweight64)
EXPORT_SYMBOL(__arch_hweight64)