// Copyright (c) 2010-2010 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkern\arm\nklib.cia
//
//

#include <e32atomics.h>
#include <nklib.h>

#ifdef __SRATIO_MACHINE_CODED__
/**	Set this ratio to aInt * 2^(-aDivisorExp).

	The ratio is stored normalized: iM holds the mantissa with its most
	significant bit (bit 31) set and iX holds the signed base-2 exponent,
	so that the represented value is iM * 2^iX.  If aInt is zero, both
	words are stored as zero.  The iSpare fields packed above iX are
	cleared.

	On entry: r0 = this, r1 = aInt, r2 = aDivisorExp.
*/
__NAKED__ void SRatio::Set(TUint32 /*aInt*/, TInt /*aDivisorExp*/)
	{
#ifdef __CPU_ARM_HAS_CLZ
	CLZ( 3,1);							// r3=31-MSB(r1), 32 if r1=0
	asm("add r2, r2, r3 ");				// r2=shift+aDivisorExp
	asm("movs r1, r1, lsl r3 ");		// shift r1 left so bit 31=1 (Z set iff aInt=0)
	asm("rsb r2, r2, #0 ");				// r2 = -shift-aDivisorExp
	asm("moveq r2, #0 ");				// if aInt=0, r2=0
	asm("bicne r2, r2, #0xff000000 ");	// else clear iSpare fields
	asm("bicne r2, r2, #0x00ff0000 ");	//
#else
	// No CLZ instruction: normalize r1 with a 16/8/4/2/1 binary search,
	// accumulating the total left shift into the (negated) exponent.
	asm("rsb r2, r2, #0 ");				// r2 = -aDivisorExp
	asm("cmp r1, #0x00010000 ");		// if aInt top 16 bits clear ...
	asm("movcc r1, r1, lsl #16 ");		// ... shift 16 bits left ...
	asm("subcc r2, r2, #16 ");			// ... and subtract 16 from iX
	asm("cmp r1, #0x01000000 ");
	asm("movcc r1, r1, lsl #8 ");
	asm("subcc r2, r2, #8 ");
	asm("cmp r1, #0x10000000 ");
	asm("movcc r1, r1, lsl #4 ");
	asm("subcc r2, r2, #4 ");
	asm("cmp r1, #0x40000000 ");
	asm("movcc r1, r1, lsl #2 ");
	asm("subcc r2, r2, #2 ");
	asm("cmp r1, #0x80000000 ");
	asm("movcc r1, r1, lsl #1 ");		// final 1-bit shift so bit 31=1 (was missing: left iM
										// unnormalized while iX was still decremented, halving
										// the stored ratio whenever MSB(aInt) is even)
	asm("subcc r2, r2, #1 ");
	asm("cmp r1, #0 ");
	asm("moveq r2, #0 ");				// if aInt=0, r2=0
	asm("bicne r2, r2, #0xff000000 ");	// else clear iSpare fields
	asm("bicne r2, r2, #0x00ff0000 ");	//
#endif
	asm("stmia r0, {r1,r2} ");			// iM in r1, iX in bottom 16 bits of r2
	__JUMP(, lr);
	}
/**	Replace this ratio with its reciprocal.

	The ratio is held normalized: value = iM * 2^iX with bit 31 of iM set.
	The reciprocal mantissa is produced by a bit-at-a-time shift/subtract
	long division, rounded to nearest, and the new exponent is -63-iX
	(-62-iX when iM is exactly 2^31, i.e. the ratio is a power of two).

	@return	KErrNone if successful
			KErrDivideByZero if iM is zero (ratio left unchanged)
			KErrOverflow if the new iX would be >= 32768 (ratio left unchanged)
			KErrUnderflow if the new iX would be < -32768 (ratio left unchanged)
*/
__NAKED__ TInt SRatio::Reciprocal()
	{
	asm("ldr r1, [r0] ");				// r1 = iM
	asm("ldrsh r12, [r0, #4] ");		// r12 = iX (sign-extended 16 bits)
	asm("rsbs r2, r1, #0 ");			// Z=1 iff iM==0; V=1 iff iM==0x80000000
	asm("beq 0f ");						// divide by zero
	asm("add r12, r12, #63 ");
	asm("rsb r12, r12, #0 ");			// r12 = -63 - iX
	asm("addvs r12, r12, #1 ");			// if iM==0x80000000 r12 = -62 - iX (ratio = 2^(31+iX) so reciprocal = 2^(-31-iX) = 2^(31 + (-62-iX))
	asm("bvs 1f ");						// iM=0x80000000: mantissa unchanged, exponent only

	// 2^(32+iX) > r > 2^(31+iX)
	// 2^(-32-iX) < 1/r < 2^(-31-iX)
	// 2^(31+(-63-iX)) < 1/r < 2^(31+(-62-iX))
	asm("mov r2, #0 ");					// accumulates result
	asm("mov r3, #0x80000000 ");		// 33 bit accumulator in C:R3 initialised to 2^32
	asm("2: ");
	asm("adds r3, r3, r3 ");			// shift remainder left, top bit into C
	asm("cmpcc r3, r1 ");				// if no carry out, compare remainder with divisor
	asm("subcs r3, r3, r1 ");			// if C=1 or r3>=r1, r3-=r1
	asm("adcs r2, r2, r2 ");			// next result bit (C from the trial subtract)
	asm("bcc 2b ");						// finished when we have 33 bits (when top bit shifted off)
	asm("movs r2, r2, lsr #1 ");		// rounding bit into C
	asm("orr r2, r2, #0x80000000 ");	// top bit back
	asm("adcs r2, r2, #0 ");			// add rounding bit
	asm("movcs r2, #0x80000000 ");		// if carry, increment exponent
	asm("addcs r12, r12, #1 ");

	asm("1: ");							// r2 = new iM, r12 = new iX; range-check iX
	asm("cmp r12, #-32768 ");
	asm("blt 9f ");						// underflow
	asm("cmp r12, #32768 ");
	asm("bge 8f ");						// overflow
	asm("str r2, [r0] ");				// iM
	asm("strh r12, [r0, #4] ");			// iX
	asm("mov r0, #0 ");
	__JUMP(, lr);

	asm("0: ");
	asm("mov r0, #%a0" : : "i" ((TInt)KErrDivideByZero));
	__JUMP(, lr);

	asm("8: ");
	asm("mov r0, #%a0" : : "i" ((TInt)KErrOverflow));
	__JUMP(, lr);

	asm("9: ");
	asm("mov r0, #%a0" : : "i" ((TInt)KErrUnderflow));
	__JUMP(, lr);
	}
/**	Multiply a 32 bit unsigned integer by this ratio, rounding to nearest.

	On entry r0 = this, r1 = &aInt32.  aInt32 is replaced by
	aInt32 * iM * 2^iX rounded to the nearest integer; on overflow it is
	clamped to 0xFFFFFFFF and on underflow (result rounds to zero) it is
	set to 0.  Assumes iM is normalized (bit 31 set) unless zero.

	@return	KErrNone if successful (including a genuine zero operand)
			KErrOverflow if the true result exceeds 0xFFFFFFFF
			KErrUnderflow if a nonzero true result rounds to zero
*/
__NAKED__ TInt SRatio::Mult(TUint32& /*aInt32*/)
	{
	asm("ldr r3, [r0] ");				// r3 = iM
	asm("mov r12, r0 ");				// save this pointer; r0 is reused below
	asm("ldr r0, [r1] ");				// r0 = aInt32
	asm("cmp r3, #0 ");					// if iM==0 ...
	asm("cmpne r0, #0 ");				// ... or aInt32==0 ...
	asm("beq 0f ");						// result zero
	asm("umull r2, r3, r0, r3 ");		// r3:r2 = aInt32 * iM (lowest value 0x0000000080000000)
	asm("ldrsh r12, [r12, #4] ");		// r12 = iX
#ifdef __CPU_ARM_HAS_CLZ
	CLZ( 0, 3);							// r0 = number of leading zeros in r3:r2 (can't be >32)
#else
	// CLZ emulation: 16/8/4/2 binary search on r3 plus a final sign test.
	asm("str r12, [sp, #-4]! ");		// stash iX while r12 is used as scratch
	asm("movs r12, r3 ");
	asm("mov r0, #0 ");
	asm("cmp r12, #0x00010000 ");
	asm("movcc r12, r12, lsl #16 ");
	asm("addcc r0, r0, #16 ");
	asm("cmp r12, #0x01000000 ");
	asm("movcc r12, r12, lsl #8 ");
	asm("addcc r0, r0, #8 ");
	asm("cmp r12, #0x10000000 ");
	asm("movcc r12, r12, lsl #4 ");
	asm("addcc r0, r0, #4 ");
	asm("cmp r12, #0x40000000 ");
	asm("movcc r12, r12, lsl #2 ");
	asm("addcc r0, r0, #2 ");
	asm("cmp r12, #0 ");				// GT: bit 31 clear (one more zero); EQ: r3 was 0
	asm("ldr r12, [sp], #4 ");			// r12 = iX (ldr leaves the flags alone)
	asm("addgt r0, r0, #1 ");
	asm("moveq r0, #32 ");				// r0 = number of leading zeros in r3:r2 (can't be >32)
#endif
	asm("rsb r0, r0, #63 ");			// bit number of most significant bit
	asm("add r0, r0, r12 ");			// bit number of most significant bit after exponent shift
	asm("cmp r0, #32 ");
	asm("bge 8f ");						// overflow
	asm("cmp r0, #-1 ");				// -1 still allowed: can round up to 1
	asm("blt 9f ");						// underflow
	asm("adds r12, r12, #32 ");			// shift needed to get result into top 32 bits (>0 left, <0 right)
	asm("beq 1f ");						// no shift
	asm("blt 2f ");						// right shift
	asm("rsb r0, r12, #32 ");			// r0 = 32 - left shift count
	asm("mov r3, r3, lsl r12 ");
	asm("orr r3, r3, r2, lsr r0 ");
	asm("mov r2, r2, lsl r12 ");		// r3:r2 <<= r12
	asm("b 1f ");
	asm("2: ");
	asm("rsb r12, r12, #0 ");			// r12 = right shift count
	asm("rsb r0, r12, #32 ");
	asm("mov r2, r2, lsr r12 ");
	asm("orr r2, r2, r3, lsl r0 ");
	asm("mov r3, r3, lsr r12 ");		// r3:r2 >>= r12
	asm("1: ");							// integer part now in r3, fraction in r2
	asm("adds r2, r2, r2 ");			// rounding: fraction's top bit into C
	asm("adcs r3, r3, #0 ");			// round to nearest
	asm("bcs 8f ");						// overflow (rounding carried out of 32 bits)
	asm("beq 9f ");						// underflow (rounded result is zero)
	asm("mov r0, #0 ");
	asm("str r3, [r1] ");				// aInt32 = rounded result
	__JUMP(, lr);

	asm("0: ");							// zero operand: aInt32 = 0, return KErrNone
	asm("mov r0, #0 ");
	asm("str r0, [r1] ");
	__JUMP(, lr);

	asm("8: ");							// overflow: aInt32 = 0xFFFFFFFF
	asm("mvn r0, #0 ");
	asm("str r0, [r1] ");
	asm("mov r0, #%a0" : : "i" ((TInt)KErrOverflow));
	__JUMP(, lr);

	asm("9: ");							// underflow: aInt32 = 0
	asm("mov r0, #0 ");
	asm("str r0, [r1] ");
	asm("mov r0, #%a0" : : "i" ((TInt)KErrUnderflow));
	__JUMP(, lr);
	}

//TInt SRatio::Mult(TUint64& aInt64)
//	{
//	}

#endif