
 ; Three functions from LAME that have been tweaked after compiling.

 AREA |__lame_tweaks|, CODE, READONLY

 align

 ;-----------------------------------------------------------------------
 ; count_bits -- fast range pre-check before the generic bit counter.
 ;
 ; Computes threshold = 8206 / ipow20[word at (r3+12)] with a soft-float
 ; divide, then scans 576 two-word float values starting at r2.  If any
 ; value compares greater than the threshold it returns 100000
 ; (presumably LARGE_BITS, LAME's "cannot code this" sentinel -- TODO
 ; confirm against the C source).  Otherwise it tail-calls
 ; count_bits_long with arguments 2 and 3 swapped.
 ;
 ; Float format: {exponent word, top 32 mantissa bits}, x87-extended
 ; style -- NOTE(review): inferred from the pool constants; confirm
 ; against the soft-float library that provides __divdf3 here.
 ;-----------------------------------------------------------------------
 EXPORT |count_bits|
|count_bits|
 stmfd r13!, {r4 - r9, r14}
 ldr r4, |L..515|       ; r4 = address of the ipow20 table
 mov r6, r0             ; preserve all four arguments across the divide
 mov r7, r1
 mov r8, r2             ; r8 = base of the 576-entry float array
 mov r9, r3
 ldr r12, [r3, #12]     ; table index (field at offset 12 of arg 3)
 adr r0, |L..515|+4
 ldmia r0, {r0, r1}     ; r0:r1 = constant 8206 (see literal pool)
 add r12, r4, r12, lsl #3 ; 8 bytes per ipow20 entry
 ldmia r12, {r2, r3}    ; r2:r3 = ipow20[index]
 mov r5, #0
 bl |__divdf3|          ; r0:r1 = 8206 / ipow20[index]  (the threshold)
 mov r3, r8             ; r3 = scan pointer
 mov r5, #576           ; 576 values, two checked per iteration

|L..150|                ; range check loop
 ldmia r3!, {r2, r12}   ; r2:r12 = next value {exponent, mantissa}
 cmp r2, r0             ; quick comparison (no negative numbers allowed)
 cmpeq r12, r1          ; exponents equal -> decide on the mantissas
 bgt |L..514|           ; value > threshold: bail out
 ldmia r3!, {r2, r12}
 cmp r2, r0             ; quick comparison (no negative numbers allowed)
 cmpeq r12, r1
 bgt |L..514|
 subs r5, r5, #2
 bgt |L..150|

 mov r0, r6             ; all in range: hand over to the generic
 mov r1, r8             ; counter with args 2 and 3 swapped
 mov r2, r7
 mov r3, r9
 ldmfd r13!, {r4 - r9, r14}
 b |count_bits_long|    ; tail-call (r14 already restored above)

|L..514|
 ldr r0, |L..515|+12    ; out of range: return the 100000 sentinel
 ldmfd r13!, {r4 - r9, r15}


 ; Literal pool shared by count_bits / quantize_xrpow.  Two-word floats
 ; are stored as {15-bit biased exponent word, top 32 mantissa bits with
 ; explicit leading 1}: &400C,&80380000 = 1.0017... * 2^13 = 8206.
|L..515|
 DCD |ipow20|           ; address of the ipow20 power table
 dcd &400C, &80380000   ; 8206
 dcd 100000             ; bit-count sentinel returned when out of range

|L..272|
 DCD |adj43|            ; address of the adj43 rounding-addend table


 ;-----------------------------------------------------------------------
 ; quantize_xrpow -- quantize 576 spectral values with adj43 rounding:
 ;   ix[i] = (int)(xr34[i]*istep + adj43[(int)(xr34[i]*istep)])
 ;
 ; In:  r0 = source array of 576 two-word floats {exponent, mantissa}
 ;      r1 = destination array of 576 ints
 ;      r2/r3 = step size, apparently split as exponent word / 32-bit
 ;              mantissa -- NOTE(review): confirm against the caller
 ; The float multiply, normalize, add and truncate are done inline on
 ; the {exponent, mantissa} pair; flag carries (subcc/adc/rrx) are part
 ; of the arithmetic, so statement order here is load-bearing.
 ;-----------------------------------------------------------------------
 EXPORT |quantize_xrpow|
|quantize_xrpow|
 stmfd r13!, {r4 - r8, r14}
 mov r14, #0x4000
 add r14, r14, #0x1E    ; r14 = 0x401E, truncation bias (shift = r14-exp)
 mov r8, r3             ; r8 = step mantissa
 sub r7, r2, #0x3f00
 sub r7, r7, #0xfe      ; r7 = step exponent - 0x3FFE (product rebias)
 mov r6, #576           ; loop counter
 mov r5, r0             ; r5 = source pointer
 mov r4, r1             ; r4 = destination pointer

|L..260|
 ldmia r5!, {r0, r1}    ; r0:r1 = xr34[i] {exponent, mantissa}
 add r0, r0, r7         ; product exponent
 umull r12, r1, r8, r1  ; 32x32 mantissa product, r1 = high word
 ldr r2, |L..272|       ; r2 = adj43 table base
 movs r3, r1, lsl #1    ; zero *hopefully* never happens
 subcc r0, r0, #1       ; product < 1.0: renormalize --
 orrcc r1, r3, r12, lsr #31 ; shift mantissa left, drop exponent
 sub r12, r14, r0       ; right-shift count for the integer part
 movs r3, r12, lsr #5   ; eq iff shift < 32
 moveq r12, r1, lsr r12 ; r12 = (int)(xr34[i]*istep)
 addeq r2, r2, r12, lsl #3 ; index adj43 (8 bytes/entry; else entry 0)
 ldmia r2, {r2, r3}     ; r2:r3 = adj43 addend {exponent, mantissa}
 subs r12, r0, r2       ; which exponent was smaller?
 bgt xrpow_Ba303        ; branch if second exponent was smaller
 rsb r12, r12, #0       ; make r12 positive (difference between exponents)
 adds r1, r3, r1, lsr r12 ; shift the fraction right and add
 adc r0, r2, #0         ; if the result carried, increment the exponent
 movcs r1, r1, rrx      ; bring any carried bit back into the fraction
 sub r12, r14, r0       ; truncate the sum to an integer:
 movs r3, r12, lsr #5   ; eq iff shift < 32
 moveq r12, r1, lsr r12 ; integer part
 movne r12, #0          ; too small: quantizes to 0
 subs r6, r6, #1
 str r12, [r4], #4
 bgt |L..260|

 ldmfd r13!, {r4 - r8, r15}

 ; Duplicate tail for the case where the product's exponent was the
 ; larger one: align the adj43 addend instead, then truncate as above.
xrpow_Ba303
 adds r1, r1, r3, lsr r12 ; shift the fraction right and add
 addcs r0, r0, #1       ; if result carried, increment the exponent
 movcs r1, r1, rrx      ; bring any carried bit back into the fraction
 sub r12, r14, r0
 movs r3, r12, lsr #5
 moveq r12, r1, lsr r12
 movne r12, #0
 subs r6, r6, #1
 str r12, [r4], #4
 bgt |L..260|

 ldmfd r13!, {r4 - r8, r15}


 ;-----------------------------------------------------------------------
 ; quantize_xrpow_ISO -- quantize 576 spectral values with ISO rounding:
 ;   ix[i] = (int)(xr34[i]*istep + 0.4054)
 ;
 ; In:  r4 (from r0) = source array of 576 two-word floats
 ;      r7 (from r1) = destination array of 576 ints
 ;      r5:r6 (from r2:r3) = step size as exponent word / mantissa --
 ;            NOTE(review): confirm the split against the caller
 ; First computes threshold = 0.5946 / istep (r9:r10); any value whose
 ; magnitude is below it quantizes to 0 without the full multiply/add,
 ; since (int)(x*istep + 0.4054) == 0 when x*istep < 0.5946.
 ;-----------------------------------------------------------------------
 EXPORT |quantize_xrpow_ISO|
|quantize_xrpow_ISO| ; executed when q = 9
 stmfd r13!, {r4 - r10, r14}
 mov r4, r0             ; r4 = source pointer
 mov r7, r1             ; r7 = destination pointer
 adr r0, |L..274|
 ldmia r0, {r0, r1}     ; r0:r1 = constant 0.5946
 mov r6, r3             ; preserve step across the call
 mov r5, r2
 mov r8, #576           ; loop counter
 bl |__divdf3|          ; r0:r1 = 0.5946 / istep  (r2:r3 still = istep)
 mov r9, r0             ; r9:r10 = zero threshold
 mov r10, r1

|L..266|
 ldmia r4, {r2, r3}     ; r2:r3 = xr34[i] (pointer not yet advanced)
 mov r0, r9
 mov r12, r9, lsl #1    ; drop the sign bits before comparing
 cmp r12, r2, lsl #1    ; compare magnitudes via the exponent words
 cmpeq r10, r3          ; ... then the mantissas
 mvncc r0, r2           ; value above threshold -> r0 negative
 subeq r0, r0, r2       ; exact tie handling
 cmp r0, #0
 ble |L..267|           ; at/above threshold: do the full quantize
 add r4, r4, #8         ; below threshold: ix[i] = 0
 mov r0, #0
 subs r8, r8, #1
 str r0, [r7], #4
 bgt |L..266|

 ldmia r13!, {r4 - r10, r15}

|L..267|
 ldmia r4!, {r2, r3}    ; reload the value, advancing the pointer
 add r0, r5, r2         ; add the exponents and eor the sign
 umull r12, r1, r3, r6  ; 32x32 mantissa product, r1 = high word
 adr r2, |L..274|+8     ; r2 -> the 0.4054 rounding constant
 sub r0, r0, #0x3FC0    ; rebias the product exponent; the sbc below
 movs r12, r1, lsl #1   ; folds in the renormalization step
 sbc r0, r0, #0x3E      ; (subtracts 0x3FFE, or 0x3FFF if unnormalized)
 movcc r1, r12          ; product < 1.0: shift the mantissa left
 andcc r0, r0, r1, asr #31 ; =0
 ldmia r2, {r2, r3}     ; r2:r3 = 0.4054
 bl |__adddf3|          ; r0:r1 = xr34[i]*istep + 0.4054
 bic r3, r0, #1<<31     ; strip the sign from the exponent word
 rsb r12, r3, #0x4000   ; truncate to integer:
 add r12, r12, #0x1E    ; shift = 0x401E - exponent
 movs r3, r12, lsr #5   ; eq iff shift < 32
 bic r12, r12, r3, lsl #5
 moveq r12, r1, lsr r12 ; integer magnitude
 movne r12, #0          ; too small: quantizes to 0
 cmp r0, #0
 rsbmi r12, r12, #0     ; reapply the sign
 subs r8, r8, #1
 str r12, [r7], #4
 bgt |L..266|

 ldmia r13!, {r4 - r10, r15}

 ; ISO rounding constants for quantize_xrpow_ISO, stored as
 ; {exponent word, top 32 mantissa bits}.  Note 0.5946 = 1 - 0.4054.
|L..274|
 dcd &3FFE, &9837B4A2   ; 5.94600000000000017408e-1
 dcd &3FFD, &CF9096BC   ; 4.05399999999999982592e-1

 END
