
; Generated by gcc 2.95.2 19991024 (release) for ARM/RISC OS

 get doopermacros.s

 AREA |C$$code1|, CODE, READONLY

 ALIGN

 ; Padding/guard words ahead of the table (keeps costab clear of the
 ; preceding literal pool; not referenced by the code visible here).
 dcd 0,0,0,0,0

|costab|
 KEEP |costab|
 ; Seed twiddle pairs for |fht|, one (cos, sin) pair per outer pass,
 ; in the packed two-word soft-float format used throughout this file
 ; (first word appears to be the exponent, second the mantissa fraction;
 ; e.g. 1.0 is &3FFF, &80000000 — confirm against doopermacros.s).
 ; Values (from the generated decimal comments):
 ;   cos(pi/8),   sin(pi/8)
 ;   cos(pi/32),  sin(pi/32)
 ;   cos(pi/128), sin(pi/128)
 ;   cos(pi/512), sin(pi/512)
 dcd &3FFE, &EC835E7A ; double 9.23879532511286738483e-1
 dcd &3FFD, &C3EF1535 ; double 3.82683432365089781779e-1
 dcd &3FFE, &FEC46D1F ; double 9.95184726672196928732e-1
 dcd &3FFB, &C8BD35E1 ; double 9.80171403295606036288e-2
 dcd &3FFE, &FFEC4304 ; double 9.99698818696204249967e-1
 dcd &3FF9, &C90AAFBD ; double 2.45412285229122915930e-2
 dcd &3FFE, &FFFEC42C ; double 9.99981175282601109089e-1
 dcd &3FF7, &C90F87F3 ; double 6.13588464915447526909e-3

|L..21|
 ; Mantissa word of sqrt(2): &B504F334 / 2^32 = 0.70710678...
 ; Used by fht's umull sequence to scale a mantissa by sqrt(2)/2.
 dcd &B504F334 ; sqrt(2) fraction


;-----------------------------------------------------------------------
; fht — in-place fast Hartley transform over an array of packed
; two-word soft-float doubles (the format consumed by __adddf3 /
; __subdf3 and the muldf3 macro from doopermacros.s).
;
; In:   r0 = base address of the data array
;       r1 = transform length in elements (callers pass 256 and 1024;
;            presumably a power of two — confirm against the C source
;            this was generated from)
; APCS: r10 = stack limit (sl), r11 = frame pointer (fp); r10/r11 are
;       additionally spilled below the frame and reloaded before exit.
;
; Stack frame (260 bytes of locals at [r13, #ofs]):
;   #0   data base            #4   length (elements)
;   #8   costab cursor (+16 per outer pass, see |L..11|)
;   #12  butterfly span in elements (starts at 4, x4 each pass)
;   #16  inner data cursor     #20  end pointer (base + length*8)
;   #28..#43  running (cos, sin) twiddle pair for the |L..13| loop
;   remaining offsets hold spilled double temporaries.
;
; FIX(review): at the second sqrt(2)-scaling sequence the original had
;   andcc r4, r4, r1, asr #31
; i.e. it tested r1 — the mantissa high word of the *previous* product
; (L150/L154, already stored to [r13,#100]) — instead of r5, the high
; word of the product just computed. The first copy of the identical
; sequence tests its own result register; the rename r1->r5 / r0->r4
; missed this one operand. Changed to r5 below.
;-----------------------------------------------------------------------
|fht|
 ; frame = 252
 mov r12, r13
 stmfd r13!, {r4 - r9, r11, r12, r14, r15}
 sub r11, r12, #4
 cmp r13, r10
 bllt |__rt_stkovf_split_small|
 stmdb r13, {r10, r11}             ; spill sl/fp below the locals
 sub r13, r13, #260
 stmia r13, {r0, r1} ; phole stm   ; [sp]=base, [sp+4]=length
 mov r2, #4
 str r2, [r13, #12]                ; initial butterfly span = 4
 add r0, r0, r1, lsl #3
 adr r1, |costab|
 str r0, [r13, #20]                ; end pointer = base + length*8
 str r1, [r13, #8]                 ; twiddle-seed cursor

; Outer pass: derive byte strides from the current span, then quadruple
; the span for the next pass.
|L..19|
 ldr r12, [r13, #12]
 ldr r0, [r13]
 mov r3, r12, asr #1
 str r0, [r13, #16]                ; inner cursor := data base
 mov r2, r12, lsl #1
 str r3, [r13, #44]                ; span/2 (element count)
 add r3, r2, r12
 mov r14, r12, lsl #2
 str r14, [r13, #12]               ; span *= 4 for next pass
 ldr r1, [r13, #44]
 mov r12, r12, lsl #3
 str r12, [r13, #48]               ; span*8   (bytes)
 mov r2, r2, lsl #3
 str r2, [r13, #72]                ; span*16  (bytes)
 str r2, [r13, #236]
 mov r3, r3, lsl #3
 str r3, [r13, #76]                ; span*24  (bytes)
 str r3, [r13, #240]
 add r1, r0, r1, lsl #3
 ldr r0, [r13, #12]
 str r1, [r13, #24]                ; base + (span/2)*8
 mov r0, r0, lsl #3
 str r0, [r13, #224]               ; new span in bytes (stride per group)

; Trivial-twiddle butterflies (factors 1 and sqrt(2)/2) over each group.
|L..9|
 ldr r1, [r13, #16]
 ldr r2, [r13, #48]
 ldmia r1, {r6, r7}
 add r2, r2, r1
 str r2, [r13, #52]
 ldmia r2, {r4, r5}
 mov r0, r6
 mov r1, r7
 mov r2, r4
 mov r3, r5
 bl |__adddf3|    ; preserves r2, r3
 add r12, r13, #64
 stmia r12, {r0, r1}
 mov r1, r7
 mov r0, r6
 bl |__subdf3|
 add r3, r13, #56
 stmia r3, {r0, r1}
 ldr r0, [r13, #16]
 ldr r1, [r13, #76]
 ldr r14, [r13, #72]
 add r9, r1, r0
 ldmia r9, {r4, r5}
 add r8, r14, r0
 ldmia r8, {r6, r7}
 mov r2, r6
 mov r3, r7
 eor r0, r4, #1<<31               ; negate by flipping the sign bit
 mov r1, r5
 bl |__adddf3|    ; -a + b    preserves r2, r3
 add r12, r13, #80
 stmia r12, {r0, r1}
 mov r0, r4
 mov r1, r5
 bl |__adddf3|
 mov r2, r0
 mov r3, r1
 add r12, r13, #64
 ldmia r12, {r0, r1}
 bl |__adddf3|    ; preserves r2, r3
 ldr r12, [r13, #16]
 stmia r12, {r0, r1}
 add r12, r13, #64
 ldmia r12, {r0, r1}
 bl |__subdf3|
 stmia r8, {r0, r1}
 add r12, r13, #56
 ldmia r12, {r0, r1}
 add r12, r13, #80
 ldmia r12, {r2, r3}
 bl |__adddf3|    ; preserves r2, r3
 ldr r12, [r13, #52]
 stmia r12, {r0, r1}
 add r12, r13, #56
 ldmia r12, {r0, r1}
 bl |__subdf3|
 stmia r9, {r0, r1}
 ldr r0, [r13, #24]
 ldr r1, [r13, #48]
 ldmia r0, {r6, r7}
 add r1, r1, r0
 str r1, [r13, #88]
 ldmia r1, {r4, r5}
 mov r2, r6
 mov r3, r7
 eor r0, r4, #1<<31
 mov r1, r5
 bl |__adddf3|          ; -a + b   preserves r2, r3
 add r12, r13, #92
 stmia r12, {r0, r1}
 mov r0, r4
 mov r1, r5
 bl |__adddf3|
 ldr r5, [r13, #76]
 mov r6, r0
 ldr r12, [r13, #24]
 mov r7, r1
 ldr r3, |L..21|                   ; sqrt(2) mantissa fraction
 add r9, r5, r12
 ldr r1, [r9, #4]
 umull r12, r1, r3, r1  ; times root2
 ldr r0, [r9]
 movs r2, r1, lsl #1    ; test for normalisation
 adc r0, r0, #0
 orrcc r1, r2, r12, lsr #31 ; no point wasting the msb of r12 if we have to normalise
 ldr r5, [r13, #24]
 andcc r0, r0, r1, asr #31 ; clear exponent if = 0
 ldr r4, [r13, #72]
 add r12, r13, #100
 add r8, r4, r5
 ldr r2, [r8, #4]
 stmia r12, {r0, r1}
 umull r12, r5, r3, r2  ; times root2
 ldr r0, [r8]
 movs r2, r5, lsl #1    ; test for normalisation
 adc r4, r0, #0
 orrcc r5, r2, r12, lsr #31 ; no point wasting the msb of r12 if we have to normalise
 andcc r4, r4, r5, asr #31 ; clear exponent if = 0 (FIXED: was r1 — stale
                           ; mantissa of the previous product; must test
                           ; r5, this product's mantissa, to mirror the
                           ; first copy of this sequence above)
 mov r2, r6
 mov r3, r7
 eor r0, r4, #1<<31
 mov r1, r5
 bl |__adddf3|          ; -a + b   preserves r2, r3
 stmia r8, {r0, r1}
 mov r0, r4
 mov r1, r5
 bl |__adddf3|
 ldr r14, [r13, #24]
 add r12, r13, #96
 stmia r14, {r0, r1}
 ldmda r12, {r0, r1}
 ldmib r12, {r2, r3}
 bl |__adddf3|    ; preserves r2, r3
 ldr r12, [r13, #88]
 add r14, r13, #92
 stmia r12, {r0, r1}
 ldmia r14, {r0, r1}
 bl |__subdf3|
 ldr r14, [r13, #12]
 ldr r2, [r13, #24]
 stmia r9, {r0, r1}
 ldr r3, [r13, #16]
 mov r12, r14, lsl #3
 add r2, r2, r12                   ; advance both cursors by the new span
 str r2, [r13, #24]
 add r3, r3, r12
 ldr r12, [r13, #20]
 str r3, [r13, #16]
 cmp r3, r12
 bcc |L..9|                        ; until the end pointer is reached

 ; Load the pass's seed (cos, sin) pair from costab into [sp+#28..#43]
 ; and run the general-twiddle loop for r9 = 1 .. span/2 - 1.
 ldr r12, [r13, #8]
 ldmia r12!, {r2, r3}
 add r14, r13, #32
 stmib r14, {r2, r3}               ; [sp+#36..#43] = first seed double
 ldr r0, [r13, #44]
 mov r9, #1
 ldmia r12, {r2, r3}
 cmp r9, r0
 stmda r14, {r2, r3}               ; [sp+#28..#35] = second seed double
 bge |L..11|                       ; span/2 <= 1: nothing to do

; Per-twiddle loop: from the running pair, form
;   [sp+#108] = 1 - 2*s^2   and   [sp+#116] = 2*s*c
; (the "*2" is done on the packed value by bumping the exponent word),
; then butterfly four mirrored positions per inner |L..17| iteration.
|L..13|
 add r2, r13, #28
 ldmia r2, {r0, r1}
 mov r3, r1
 muldf3        r2, r3,    r0, r1,    r0, r3
  orrs r1, r3, r2, lsl #1  ; *2
  addne r2, r2, #1
 mov r0, #&ff
 add r0, r0, #&3f00                ; r0:r1 = packed constant 1.0
 mov r1, #1<<31
 add r12, r9, #1
 str r12, [r13, #220]              ; next twiddle index
 bl |__subdf3|
 add r12, r13, #108
 stmia r12, {r0, r1}
 add r12, r13, #28
 ldmia r12!, {r0, r1}
 ldmia r12, {r2, r3}
 muldf3        r0, r1,    r0, r1,    r2, r3
  orrs r3, r1, r0, lsl #1  ; *2
  addne r0, r0, #1
 add r12, r13, #116
 stmia r12, {r0, r1}
 ldr r14, [r13]
 mov r2, r9, lsl #3
 ldr r0, [r13, #48]

 add r10, r14, r2                  ; r10 = base + k*8 (walks forward)
 add r12, r14, r0
 sub r11, r12, r2                  ; r11 = mirrored partner (walks back)

|L..17|
 ldr r8, [r13, #48]
 add r12, r8, r10
 ldmia r12, {r0, r1}
 str r12, [r13, #124]
 add r12, r13, #244
 stmia r12, {r0, r1}
 add r12, r13, #116
 ldmia r12, {r2, r3}
   muldf3            r0, r1,    r0, r1,    r2, r3
 add r12, r8, r11
 ldmia r12, {r6, r7}
 add r12, r13, #108
 ldmia r12, {r4, r5}
   muldf3            r2, r3,    r4, r5,    r6, r7
 bl |__subdf3|
 mov r8, r0
 mov r9, r1
 add r12, r13, #244
 ldmia r12, {r2, r3}
   muldf3            r0, r1,    r4, r5,    r2, r3
 add r12, r13, #116
 ldmia r12, {r2, r3}
   muldf3            r2, r3,    r2, r3,    r6, r7
 bl |__adddf3|
 mov r2, r0
 mov r3, r1
 ldmia r10, {r4, r5}
 mov r0, r4
 mov r1, r5
 bl |__adddf3|    ; preserves r2, r3
 add r12, r13, #140
 stmia r12, {r0, r1}
 mov r0, r4
 mov r1, r5
 bl |__subdf3|
 mov r2, r8
 mov r3, r9
 ldmia r11, {r4, r5}
 add r12, r13, #132
 stmia r12, {r0, r1}
 mov r0, r4
 mov r1, r5
 bl |__adddf3|    ; preserves r2, r3
 add r12, r13, #156
 stmia r12, {r0, r1}
 mov r1, r5
 mov r0, r4
 bl |__subdf3|
 add r12, r13, #148
 stmia r12, {r0, r1}
 ldr r12, [r13, #240]
 add r12, r12, r10
 str r12, [r13, #164]
 ldmia r12, {r8, r9}
 add r14, r13, #112
 ldmib r14, {r4, r5}
   muldf3            r0, r1,    r4, r5,    r8, r9
 ldr r3, [r13, #240]
 add r3, r3, r11
 str r3, [r13, #168]
 ldmia r3, {r6, r7}
 ldmda r14, {r2, r3}
   muldf3            r2, r3,    r2, r3,    r6, r7
 bl |__subdf3|
 add r2, r13, #176
 stmda r2, {r0, r1}
 add r12, r13, #112
 ldmda r12, {r0, r1}
   muldf3            r0, r1,    r0, r1,    r8, r9
   muldf3            r2, r3,    r4, r5,    r6, r7
 bl |__adddf3|
 ldr r9, [r13, #236]
 mov r2, r0
 mov r3, r1
 add r12, r9, r10
 ldmia r12, {r4, r5}
 mov r0, r4
 mov r1, r5
 bl |__adddf3|    ; preserves r2, r3
 mov r6, r0
 mov r7, r1
 mov r0, r4
 mov r1, r5
 bl |__subdf3|
 add r12, r13, #172
 ldmia r12!, {r2, r3}
 stmia r12, {r0, r1}
 add r12, r9, r11
 ldmia r12, {r4, r5}
 mov r0, r4
 mov r1, r5
 bl |__adddf3|    ; preserves r2, r3
 add r12, r13, #196
 stmia r12, {r0, r1}
 mov r0, r4
 mov r1, r5
 bl |__subdf3|
 mov r2, r0
 mov r3, r1
 add r12, r13, #188
 stmia r12, {r2, r3}
 add r12, r13, #28
 ldmia r12, {r4, r5, r8, r9}      ; running (cos, sin) pair
   muldf3            r0, r1,    r4, r5,    r6, r7
   muldf3            r2, r3,    r2, r3,    r8, r9
 bl |__subdf3|
 add r12, r13, #204
 stmia r12, {r0, r1}
  muldf3            r0, r1,    r8, r9,    r6, r7
 add r12, r13, #188
 ldmia r12, {r2, r3}
   muldf3           r2, r3,    r2, r3,    r4, r5
 bl |__adddf3|
 mov r4, r0
 mov r5, r1
 add r12, r13, #140
 ldmia r12, {r2, r3}
 bl |__adddf3|
 stmia r10, {r0, r1}
 mov r1, r5
 eor r0, r4, #1<<31
 bl |__adddf3|       ; -a + b
 ldr r12, [r13, #236]
 add r12, r12, r10
 stmia r12, {r0, r1}
 ldr r12, [r13, #224]
 add r10, r10, r12                 ; step forward by one group stride
 add r12, r13, #148
 ldmia r12, {r0, r1}
 add r12, r13, #204
 ldmia r12, {r2, r3}
 bl |__adddf3|    ; preserves r2, r3
 ldr r12, [r13, #48]
 add r12, r12, r11
 stmia r12, {r0, r1}
 add r12, r13, #148
 ldmia r12, {r0, r1}
 bl |__subdf3|
 ldr r12, [r13, #168]
 stmia r12, {r0, r1}
 add r12, r13, #28
 ldmia r12, {r4, r5, r6, r7}
 add r12, r13, #196
 ldmia r12, {r2, r3}
  muldf3            r0, r1,    r6, r7,    r2, r3
 add r12, r13, #180
 ldmia r12, {r2, r3}
  muldf3            r2, r3,    r2, r3,    r4, r5
 bl |__subdf3|
 mov r8, r0
 mov r9, r1
 add r12, r13, #196
 ldmia r12, {r2, r3}
  muldf3            r0, r1,    r4, r5,    r2, r3
 add r12, r13, #180
 ldmia r12, {r2, r3}
  muldf3            r2, r3,    r2, r3,    r6, r7
 bl |__adddf3|
 add r12, r13, #156
 ldmia r12, {r4, r5}
 mov r2, r0
 mov r3, r1
 mov r0, r4
 mov r1, r5
 bl |__adddf3|    ; preserves r2, r3
 stmia r11, {r0, r1}
 mov r0, r4
 mov r1, r5
 bl |__subdf3|
 ldr r12, [r13, #236]
 add r12, r12, r11
 stmia r12, {r0, r1}
 add r12, r13, #132
 ldmia r12, {r0, r1}
 ldr r12, [r13, #224]
 mov r2, r8
 mov r3, r9
 add r11, r11, r12                 ; mirrored cursor steps the same way
 bl |__adddf3|    ; preserves r2, r3
 ldr r12, [r13, #124]
 stmia r12, {r0, r1}
 add r12, r13, #132
 ldmia r12, {r0, r1}
 bl |__subdf3|
 ldr r14, [r13, #164]
 ldr r12, [r13, #20]
 stmia r14, {r0, r1}
 cmp r10, r12
 bcc |L..17|                       ; until forward cursor passes the end

 ; Advance the running twiddle pair by the seed angle (angle-addition:
 ; new = old rotated by the costab seed pair at [sp+#8]).
 add r0, r13, #36
 ldmia r0, {r0, r1}
 ldr r8, [r13, #8]
 add r12, r13, #208
 ldmia r8, {r2, r3, r4, r5}
 stmib r12, {r2, r3}
 muldf3        r6, r7,    r0, r1,    r2, r3
 add r14, r13, #28
 ldmia r14, {r0, r1}
 muldf3        r2, r3,    r0, r1,    r4, r5
 mov r1, r7
 mov r0, r6
 bl |__subdf3|
 add r12, r13, #36
 ldmia r12, {r2, r3}
 stmia r12, {r0, r1}
 muldf3        r0, r1,    r2, r3,    r4, r5
 add r12, r13, #32
 ldmda r12, {r4, r5}
 add r14, r13, #208
 ldmib r14, {r2, r3}
 muldf3        r2, r3,    r4, r5,    r2, r3
 ldr r9, [r13, #220]
 bl |__adddf3|
 add r2, r13, #32
 ldr r3, [r13, #44]
 stmda r2, {r0, r1}
 cmp r9, r3
 blt |L..13|                       ; next twiddle index, until span/2

; End of pass: step the costab cursor to the next seed pair (16 bytes)
; and loop while the (already-quadrupled) span is below the length.
|L..11|
 ldr r12, [r13, #8]
 ldr r0, [r13, #4]
 add r12, r12, #16
 ldr r14, [r13, #12]
 str r12, [r13, #8]
 cmp r14, r0
 blt |L..19|

 ; Restore the spilled sl/fp, then the APCS frame, and return.
 add r12, r13, #252
 ldmia r12, {r10, r11}

 ldmea r11, {r4 - r9, r11, r13, r15}


|rv_tbl|
 KEEP |rv_tbl|
 ; Bit-reversal table: byte i (i = 0..127) holds the bit-reversal of i
 ; taken as an 8-bit value (e.g. entry 1 = 128, entry 2 = 64, entry 3
 ; = 192).  fft_long indexes it directly (stride 1, indices 0..127);
 ; fft_short indexes with a stride of 4 (indices 0, 4, 8, ... 124).
 DCB 0
 DCB 128
 DCB 64
 DCB 192
 DCB 32
 DCB 160
 DCB 96
 DCB 224
 DCB 16
 DCB 144
 DCB 80
 DCB 208
 DCB 48
 DCB 176
 DCB 112
 DCB 240
 DCB 8
 DCB 136
 DCB 72
 DCB 200
 DCB 40
 DCB 168
 DCB 104
 DCB 232
 DCB 24
 DCB 152
 DCB 88
 DCB 216
 DCB 56
 DCB 184
 DCB 120
 DCB 248
 DCB 4
 DCB 132
 DCB 68
 DCB 196
 DCB 36
 DCB 164
 DCB 100
 DCB 228
 DCB 20
 DCB 148
 DCB 84
 DCB 212
 DCB 52
 DCB 180
 DCB 116
 DCB 244
 DCB 12
 DCB 140
 DCB 76
 DCB 204
 DCB 44
 DCB 172
 DCB 108
 DCB 236
 DCB 28
 DCB 156
 DCB 92
 DCB 220
 DCB 60
 DCB 188
 DCB 124
 DCB 252
 DCB 2
 DCB 130
 DCB 66
 DCB 194
 DCB 34
 DCB 162
 DCB 98
 DCB 226
 DCB 18
 DCB 146
 DCB 82
 DCB 210
 DCB 50
 DCB 178
 DCB 114
 DCB 242
 DCB 10
 DCB 138
 DCB 74
 DCB 202
 DCB 42
 DCB 170
 DCB 106
 DCB 234
 DCB 26
 DCB 154
 DCB 90
 DCB 218
 DCB 58
 DCB 186
 DCB 122
 DCB 250
 DCB 6
 DCB 134
 DCB 70
 DCB 198
 DCB 38
 DCB 166
 DCB 102
 DCB 230
 DCB 22
 DCB 150
 DCB 86
 DCB 214
 DCB 54
 DCB 182
 DCB 118
 DCB 246
 DCB 14
 DCB 142
 DCB 78
 DCB 206
 DCB 46
 DCB 174
 DCB 110
 DCB 238
 DCB 30
 DCB 158
 DCB 94
 DCB 222
 DCB 62
 DCB 190
 DCB 126
 DCB 254
 ALIGN


 EXPORT |fft_short|
;-----------------------------------------------------------------------
; fft_short — builds three 256-element blocks of windowed, bit-reversed
; input (packed soft-float doubles) and runs |fht| on each.
; In (as used by the code; exact C signature not visible here — confirm
; against the C source):
;   r0 = 128-entry table of doubles ([sp+#12]); read both forward
;        (index rv_tbl[4i]) and mirrored (offset #127<<3 - idx, etc.),
;        so presumably a window-coefficient table
;   r1 = output base ([sp]); block b is written at r1 + b*2048 bytes
;   r2 = small index ([sp+#4]) used to select a pointer from the table
;        at r3 — presumably a channel number; verify against callers
;   r3 = table of input base pointers ([sp+#8])
; Each of the 3 outer iterations steps the input by 1536 bytes
; (r2*3*64 doubles scaled at |L..27|) and runs 32 inner iterations,
; each producing 8 outputs (4 at r4.., 4 at r4+1024..).
; Clobbers per APCS; frame restored via ldmea at exit.
;-----------------------------------------------------------------------
|fft_short|
 ; frame = 84
 mov r12, r13
 stmfd r13!, {r4 - r9, r11, r12, r14, r15}
 sub r11, r12, #4
 cmp r13, r10
 bllt |__rt_stkovf_split_small|
 sub r13, r13, #84
 str r1, [r13]
; add r0, r0, #258048
 str r2, [r13, #4]
; add r0, r0, #480
 str r3, [r13, #8]
 mov r2, #0
 str r0, [r13, #12]

; Outer loop: one 256-double block per iteration (3 total, see the
; cmp r2, #2 / ble at the bottom).
|L..27|
 ldr r3, [r13]
 add r12, r3, r2, lsl #11         ; block output base = r1 + b*2048
 add r4, r12, #1024               ; write cursor starts at mid-block,
 add r2, r2, #1                   ; walks down 32 bytes per iteration
 str r2, [r13, #80]
 add r12, r2, r2, lsl #1          ; r12 = 3*b ...
 mov r12, r12, lsl #22
 mov r12, r12, asr #16            ; ... * 64 = input offset in doubles
 str r12, [r13, #28]
 mov r2, #31                      ; 32 inner iterations
 str r2, [r13, #20]

; Inner loop: fetch window coefficients via the bit-reversal table and
; multiply against four mirrored input positions, combining the results
; into 8 outputs with __adddf3/__subdf3 butterflies.
|L..31|
 ldr r8, [r13, #12]
 adr r12, |rv_tbl|
 ldrb r5, [r12, r2, lsl #2] ; zero_extendqisi2  (rv_tbl[4*i])
 mov r5, r5, lsl #3               ; byte offset of the double
 add r8, r8, r5
 ldmia r8, {r0, r1}
 ldr r12, [r13, #28]
 str r8, [r13, #32]
 add r12, r5, r12, lsl #3
 ldmib r13, {r2, r3} ; phole ldm
 ldr r8, [r3, r2, lsl #2]         ; input base from pointer table
 str r12, [r13, #36]
 add r8, r8, r12
 ldmia r8, {r2, r3}
   muldf3          r6, r7,    r0, r1,    r2, r3
 ldr r3, [r13, #12]
 rsb r12, r5, #127<<3             ; mirrored table entry 127 - idx
 add r12, r12, r3
 ldmia r12, {r0, r1}
 add r12, r8, #1024
 ldmia r12, {r2, r3}
   muldf3          r2, r3,    r0, r1,    r2, r3
 mov r0, r6
 mov r1, r7
 bl |__adddf3|
 add r12, r13, #48
 stmia r12, {r0, r1}
 mov r0, r6
 mov r1, r7
 bl |__subdf3|
 add r2, r13, #40
 stmia r2, {r0, r1}
 ldr r3, [r13, #32]
 add r12, r3, #512
 ldmia r12, {r0, r1}
 add r12, r8, #512
 ldmia r12, {r2, r3}
   muldf3      r6, r7,    r0, r1,    r2, r3
 ldr r3, [r13, #12]
 rsb r12, r5, #63<<3              ; mirrored table entry 63 - idx
 add r12, r12, r3
 ldmia r12, {r0, r1}
 add r8, r8, #1536
 ldmia r8, {r2, r3}
 sub r4, r4, #32                  ; move write cursor down one slot
   muldf3      r2, r3,    r0, r1,    r2, r3
 mov r0, r6
 mov r1, r7
 bl |__subdf3|    ; preserves r3, inverts bit 31 of r2
 mov r8, r0
 mov r9, r1
 mov r0, r6
 mov r1, r7
 bl |__subdf3|    ; a - -b
 mov r2, r0
 mov r3, r1
 add r12, r13, #48
 ldmia r12, {r0, r1}
 bl |__adddf3|    ; preserves r2, r3
 stmia r4, {r0, r1}               ; outputs at r4 + {0, 8, 16, 24}
 add r12, r13, #48
 ldmia r12, {r0, r1}
 bl |__subdf3|
 add r12, r4, #16
 stmia r12, {r0, r1}
 add r12, r13, #40
 ldmia r12, {r0, r1}
 mov r2, r8
 mov r3, r9
 bl |__adddf3|    ; preserves r2, r3
 add r12, r4, #8
 stmia r12, {r0, r1}
 add r12, r13, #40
 ldmia r12, {r0, r1}
 bl |__subdf3|
 add r12, r4, #24
 stmia r12, {r0, r1}
 ldmib r13, {r3, r12} ; phole ldm
 ldr r2, [r12, r3, lsl #2]
 ldr r3, [r13, #32]
 add r12, r3, #8                  ; second half: same pattern, one
 ldmia r12, {r0, r1}              ; double further into the tables
 ldr r12, [r13, #36]
 add r9, r12, r2
 add r12, r9, #8
 ldmia r12, {r2, r3}
   muldf3      r6, r7,    r0, r1,    r2, r3
 ldr r3, [r13, #12]
 rsb r12, r5, #126<<3
 add r12, r12, r3
 ldmia r12, {r0, r1}
 add r12, r9, #1040
 ldmdb r12, {r2, r3}
   muldf3      r2, r3,    r0, r1,    r2, r3
 mov r0, r6
 mov r1, r7
 bl |__adddf3|    ; preserves r2, r3
 add r12, r13, #64
 stmia r12, {r0, r1}
 mov r0, r6
 mov r1, r7
 bl |__subdf3|
 add r12, r13, #56
 stmia r12, {r0, r1}
 ldr r6, [r13, #32]
 add r12, r9, #520
 ldmia r12, {r2, r3}
 add r12, r6, #520
 ldmia r12, {r0, r1}
   muldf3      r6, r7,    r0, r1,    r2, r3
 add r2, r9, #1536
 ldr r12, [r13, #12]
 rsb r0, r5, #62<<3
 add r0, r0, r12
 ldmia r0, {r0, r1}
 add r2, r2, #8
 ldmia r2, {r2, r3}
   muldf3      r2, r3,    r0, r1,    r2, r3
 mov r0, r6
 mov r1, r7
 bl |__subdf3|    ; preserves r3, inverts bit 31 of r2
 add r12, r13, #72
 stmia r12, {r0, r1}
 mov r0, r6
 mov r1, r7
 bl |__subdf3|    ; a - -b
 mov r2, r0
 mov r3, r1
 add r12, r13, #64
 ldmia r12, {r0, r1}
 bl |__adddf3|    ; preserves r2, r3
 add r12, r4, #1024               ; outputs at r4 + 1024 + {0, 8, 16, 24}
 stmia r12, {r0, r1}
 add r12, r13, #64
 ldmia r12, {r0, r1}
 bl |__subdf3|
 add r12, r4, #1040
 stmia r12, {r0, r1}
 add r12, r13, #56
 ldmia r12, {r0, r1}
 add r12, r13, #72
 ldmia r12, {r2, r3}
 bl |__adddf3|    ; preserves r2, r3
 add r12, r4, #1040
 stmdb r12, {r0, r1}
 add r12, r13, #56
 ldmia r12, {r0, r1}
 bl |__subdf3|
 ldr r2, [r13, #20]
 add r12, r4, #1056
 stmdb r12, {r0, r1}
 subs r2, r2, #1
 str r2, [r13, #20]
 bpl |L..31|

 ; Block complete (r4 is back at the block base): 256-element Hartley
 ; transform in place.
 mov r0, r4
 mov r1, #256
 bl |fht|
 ldr r2, [r13, #80]
 cmp r2, #2
 ble |L..27|                      ; three blocks in total

 ldmea r11, {r4 - r9, r11, r13, r15}



 EXPORT |fft_long|
;-----------------------------------------------------------------------
; fft_long — builds one 1024-element block of windowed, bit-reversed
; input (packed soft-float doubles) and tail-calls |fht| on it.
; In (as used by the code; exact C signature not visible — confirm
; against the C source):
;   r0 = 1024-entry table of doubles ([sp]); indexed by rv_tbl[i]*8 and
;        at fixed offsets +2048/+4096/+6144 — presumably window
;        coefficients
;   r1 = output buffer; outputs written at r1 + i*32 and r1 + 4096 + i*32
;   r2 = small index ([sp+#4]) selecting a pointer from the table at r3
;        — presumably a channel number; verify against callers
;   r3 = table of input base pointers ([sp+#8])
; Runs 128 iterations (r8 = 127..0), 8 outputs each, then branches to
; |fht| with r0 = buffer, r1 = 1024 AFTER restoring the frame (the
; ldmea loads r14, not r15, so the b |fht| is a proper tail call and
; fht returns directly to fft_long's caller).
;-----------------------------------------------------------------------
|fft_long|
 ; frame = 36
 mov r12, r13
 stmfd r13!, {r4 - r9, r11, r12, r14, r15}
 sub r11, r12, #4
 cmp r13, r10
 bllt |__rt_stkovf_split_small|
 sub r13, r13, #36
; add r0, r0, #249856
; add r0, r0, #480
 stmia r13, {r0, r2, r3}
 mov r8, #127                     ; 128 iterations
 add r4, r1, #4096                ; write cursor walks down 32 B/iter

|L..40|
 ldr r3, |L..42|                  ; address of the bit-reversal table
 ldrb r6, [r3, r8] ; zero_extendqisi2
 sub r4, r4, #32
 ldmia r13, {r0, r2, r3}
 mov r9, r6, lsl #3               ; bit-reversed offset in bytes
 ldr r6, [r3, r2, lsl #2]         ; input base from pointer table
 add r5, r9, r0                   ; r5 = window cursor
 ldmia r5, {r0, r1}
 add r9, r9, r6                   ; r9 = input cursor
 ldmia r9, {r2, r3}
   muldf3      r6, r7,    r0, r1,    r2, r3
 add r12, r5, #4096
 ldmia r12, {r0, r1}
 add r12, r9, #4096
 ldmia r12, {r2, r3}
   muldf3      r2, r3,    r0, r1,    r2, r3
 mov r0, r6
 mov r1, r7
 bl |__adddf3|
 add r12, r13, #20
 stmia r12, {r0, r1}
 mov r0, r6
 mov r1, r7
 bl |__subdf3|
 add r12, r13, #12
 stmia r12, {r0, r1}
 add r12, r5, #2048
 ldmia r12, {r0, r1}
 add r12, r9, #2048
 ldmia r12, {r2, r3}
   muldf3      r6, r7,    r0, r1,    r2, r3
 add r12, r5, #6144
 ldmia r12, {r0, r1}
 add r12, r9, #6144
 ldmia r12, {r2, r3}
   muldf3      r2, r3,    r0, r1,    r2, r3
 mov r0, r6
 mov r1, r7
 bl |__subdf3|      ; preserves r3, inverts bit 31 of r2
 add r12, r13, #28
 stmia r12, {r0, r1}
 mov r0, r6
 mov r1, r7
 bl |__subdf3|     ; a - -b
 add r12, r13, #20
 ldmia r12, {r2, r3}
 mov r6, r0
 mov r7, r1
 bl |__adddf3|     ; preserves r2, r3
 stmia r4, {r0, r1}               ; outputs at r4 + {0, 8, 16, 24}
 eor r0, r6, #1<<31
 mov r1, r7
 bl |__adddf3|     ; -a + b
 add r12, r4, #16
 stmia r12, {r0, r1}
 add r12, r13, #12
 ldmia r12, {r0, r1}
 add r12, r13, #28
 ldmia r12, {r2, r3}
 bl |__adddf3|     ; preserves r2, r3
 add r12, r4, #8
 stmia r12, {r0, r1}
 add r12, r13, #12
 ldmia r12, {r0, r1}
 bl |__subdf3|
 add r12, r4, #24
 stmia r12, {r0, r1}
 add r5, r5, #8                   ; second half: same pattern, one
 ldmia r5, {r0, r1}               ; double further into the tables
 add r9, r9, #8
 ldmia r9, {r2, r3}
   muldf3      r6, r7,    r0, r1,    r2, r3
 add r12, r5, #4096
 ldmia r12, {r0, r1}
 add r12, r9, #4096
 ldmia r12, {r2, r3}
   muldf3      r2, r3,    r0, r1,    r2, r3
 mov r0, r6
 mov r1, r7
 bl |__adddf3|     ; preserves r2, r3
 add r12, r13, #28
 stmia r12, {r0, r1}
 mov r0, r6
 mov r1, r7
 bl |__subdf3|
 add r12, r13, #12
 stmia r12, {r0, r1}
 add r12, r5, #2048
 ldmia r12, {r0, r1}
 add r12, r9, #2048
 ldmia r12, {r2, r3}
   muldf3      r6, r7,    r0, r1,    r2, r3
 add r12, r5, #6144
 ldmia r12, {r0, r1}
 add r12, r9, #6144
 ldmia r12, {r2, r3}
   muldf3      r2, r3,    r0, r1,    r2, r3
 mov r0, r6
 mov r1, r7
 bl |__subdf3|      ; preserves r3, inverts bit 31 of r2
 add r12, r13, #20
 stmia r12, {r0, r1}
 mov r0, r6
 mov r1, r7
 bl |__subdf3|     ; a - -b
 add r12, r13, #28
 ldmia r12, {r2, r3}
 mov r6, r0
 mov r7, r1
 bl |__adddf3|     ; preserves r2, r3
 add r12, r4, #4096               ; outputs at r4 + 4096 + {0, 8, 16, 24}
 stmia r12, {r0, r1}
 eor r0, r6, #1<<31
 mov r1, r7
 bl |__adddf3|     ; -a + b
 add r12, r4, #4096
 add r12, r12, #16
 stmia r12, {r0, r1}
 add r12, r13, #12
 ldmia r12, {r0, r1}
 add r12, r13, #20
 ldmia r12, {r2, r3}
 bl |__adddf3|     ; preserves r2, r3
 add r12, r4, #4096
 add r12, r12, #8
 stmia r12, {r0, r1}
 add r12, r13, #12
 ldmia r12, {r0, r1}
 bl |__subdf3|
 add r12, r4, #4096
 add r12, r12, #24
 stmia r12, {r0, r1}
 subs r8, r8, #1
 bpl |L..40|

 ; Restore the frame (r14, not r15 — keep lr) and tail-call the
 ; 1024-element Hartley transform; fht returns to our caller.
 mov r0, r4
 mov r1, #1024
 ldmea r11, {r4 - r9, r11, r13, r14}
 b |fht|

|L..42|
 ; Literal: address of the bit-reversal table (rv_tbl is out of adr
 ; range from here, unlike in fft_short).
 DCD |rv_tbl|

 END
