// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\arm\ncutils.cpp
//
//

#include <arm.h>
|
|
19 |
#include <arm_gic.h>
|
|
20 |
#include <arm_scu.h>
|
|
21 |
#include <arm_tmr.h>
|
|
22 |
#include <nk_irq.h>
|
|
23 |
|
|
24 |
extern "C" {
|
|
25 |
extern SVariantInterfaceBlock* VIB;
|
|
26 |
}
|
|
27 |
|
|
28 |
/******************************************************************************
 * Spin lock
 ******************************************************************************/

/** Create a spin lock

	@publishedPartner
	@released
*/
EXPORT_C TSpinLock::TSpinLock(TUint aOrder)
	{
	(void)aOrder;
	__NK_ASSERT_DEBUG( (aOrder==EOrderNone) || ((aOrder&0x7f)<0x20) );
	// Valid high orders are 0x80..0x9F (the assert allows (aOrder&0x7f)<0x20);
	// remap them down by 0x60 into the 0x20..0x3F range. EOrderNone is kept as-is.
	TUint enc = aOrder;
	if (enc!=EOrderNone && enc>=0x80)
		enc -= 0x60;
	// Byte 7 of the encoded word is 0xFF, marking the lock as not held.
	enc |= 0xFF00u;
	iLock = TUint64(enc)<<48;	// byte 6 = 00-1F for interrupt, 20-3F for preemption
								// byte 7 = FF if not held
	}
|
|
46 |
|
|
47 |
|
|
48 |
/******************************************************************************
 * Read/Write Spin lock
 ******************************************************************************/

/** Create a read/write spin lock

	@publishedPartner
	@released
*/
EXPORT_C TRWSpinLock::TRWSpinLock(TUint aOrder)
	{
	(void)aOrder;
	__NK_ASSERT_DEBUG( (aOrder==TSpinLock::EOrderNone) || ((aOrder&0x7f)<0x20) );
	// Same encoding as TSpinLock: high orders 0x80..0x9F are shifted down by
	// 0x60 into the 0x20..0x3F range; EOrderNone passes through untouched.
	TUint enc = aOrder;
	if (enc!=TSpinLock::EOrderNone && enc>=0x80)
		enc -= 0x60;
	enc |= 0xFF00u;				// byte 7 = FF marks the lock as not held
	iLock = TUint64(enc)<<48;	// byte 6 = 00-1F for interrupt, 20-3F for preemption
	}
|
|
66 |
|
|
67 |
|
|
68 |
|
|
69 |
#ifdef _DEBUG
|
|
70 |
// Debug-build diagnostic hook: faults the kernel immediately. The name
// indicates it is invoked on an attempt to nest fast mutexes (which is
// illegal) - confirm against the call sites in the scheduler.
void FastMutexNestAttempt()
	{
	FAULT();
	}
|
|
74 |
|
|
75 |
// Debug-build diagnostic hook: faults the kernel immediately. The name
// indicates it is invoked when a fast mutex is signalled incorrectly
// (e.g. not held by the signalling thread) - confirm against call sites.
void FastMutexSignalError()
	{
	FAULT();
	}
|
|
79 |
#endif
|
|
80 |
|
|
81 |
// First-phase nanokernel initialisation for the ARM SMP port.
// 'a' points to the variant's SVariantInterfaceBlock. The block is
// validated, its hardware addresses and timer frequency are copied into
// TheScheduler, per-CPU timer/CPU frequency scaling defaults are seeded,
// and the interrupt layer is bootstrapped.
void NKern::Init0(TAny* a)
	{
	__KTRACE_OPT(KBOOT,DEBUGPRINT("VIB=%08x", a));
	VIB = (SVariantInterfaceBlock*)a;
	// Refuse to boot against a variant interface block of the wrong
	// version or size - the layout must match exactly.
	__NK_ASSERT_ALWAYS(VIB && VIB->iVer==0 && VIB->iSize==sizeof(SVariantInterfaceBlock));
	__KTRACE_OPT(KBOOT,DEBUGPRINT("iVer=%d iSize=%d", VIB->iVer, VIB->iSize));
	__KTRACE_OPT(KBOOT,DEBUGPRINT("iMaxCpuClock=%08x %08x", I64HIGH(VIB->iMaxCpuClock), I64LOW(VIB->iMaxCpuClock)));
	__KTRACE_OPT(KBOOT,DEBUGPRINT("iMaxTimerClock=%u", VIB->iMaxTimerClock));
	__KTRACE_OPT(KBOOT,DEBUGPRINT("iScuAddr=%08x", VIB->iScuAddr));
	__KTRACE_OPT(KBOOT,DEBUGPRINT("iGicDistAddr=%08x", VIB->iGicDistAddr));
	__KTRACE_OPT(KBOOT,DEBUGPRINT("iGicCpuIfcAddr=%08x", VIB->iGicCpuIfcAddr));
	__KTRACE_OPT(KBOOT,DEBUGPRINT("iLocalTimerAddr=%08x", VIB->iLocalTimerAddr));

	// Cache the SCU/GIC/local-timer addresses and the timer frequency in
	// the scheduler so the rest of the nanokernel need not touch VIB.
	TScheduler& s = TheScheduler;
	s.i_ScuAddr = (TAny*)VIB->iScuAddr;
	s.i_GicDistAddr = (TAny*)VIB->iGicDistAddr;
	s.i_GicCpuIfcAddr = (TAny*)VIB->iGicCpuIfcAddr;
	s.i_LocalTimerAddr = (TAny*)VIB->iLocalTimerAddr;
	s.i_TimerMax = (TAny*)(VIB->iMaxTimerClock / 1); // use prescaler value of 1

	// Seed per-CPU timer/CPU scaling state, then publish the addresses of
	// the per-CPU multiplier fields back through VIB so the variant can
	// update them at runtime (e.g. on frequency changes).
	// NOTE(review): the seed constants look like fixed-point defaults
	// (KMaxTUint32 ~ ratio 1.0, 0x01000000 a unit multiplier) - confirm
	// against the code that consumes i_TimerMultF/i_TimerMultI/i_CpuMult.
	TInt i;
	for (i=0; i<KMaxCpus; ++i)
		{
		TSubScheduler& ss = TheSubSchedulers[i];
		ss.i_TimerMultF = (TAny*)KMaxTUint32;
		ss.i_TimerMultI = (TAny*)0x01000000u;
		ss.i_CpuMult = (TAny*)KMaxTUint32;
		ss.i_LastTimerSet = (TAny*)KMaxTInt32;
		ss.i_TimestampError = (TAny*)0;
		ss.i_TimerGap = (TAny*)16;
		ss.i_MaxCorrection = (TAny*)64;
		VIB->iTimerMult[i] = (volatile STimerMult*)&ss.i_TimerMultF;
		VIB->iCpuMult[i] = (volatile TUint32*)&ss.i_CpuMult;
		}
	// Bootstrap the interrupt subsystem.
	InterruptInit0();
	}
|
|
117 |
|
|
118 |
/** Register the global IRQ handler
	Called by the base port at boot time to bind the top level IRQ dispatcher
	to the ARM IRQ vector. Should not be called at any other time.

	The handler specified will be called in mode_irq with IRQs disabled and
	FIQs enabled. R0-R3, R12 and the return address from the interrupt will
	be on the top of the mode_irq stack. R14_irq will point to the kernel's
	IRQ postamble routine, which will run IDFCs and reschedule if necessary.
	R13_irq will point to the top of the mode_irq stack and will be 8-byte aligned.
	The handler should preserve all registers other than R0-R3, R12, R14_irq
	and should return to the address in R14_irq.

	@param aHandler The address of the top level IRQ dispatcher routine
*/
EXPORT_C void Arm::SetIrqHandler(TLinAddr aHandler)
	{
	// Just record the dispatcher address; the IRQ vector code (elsewhere)
	// reads this field when dispatching interrupts.
	ArmInterruptInfo.iIrqHandler=aHandler;
	}
|
|
136 |
|
|
137 |
/** Register the global FIQ handler
	Called by the base port at boot time to bind the top level FIQ dispatcher
	to the ARM FIQ vector. Should not be called at any other time.

	The handler specified will be called in mode_fiq with both IRQs and FIQs
	disabled. The return address from the interrupt will be on the top of the
	mode_fiq stack. R14_fiq will point to the kernel's FIQ postamble routine,
	which will run IDFCs and reschedule if necessary.
	R13_fiq will point to the top of the mode_fiq stack and will be 4 modulo 8.
	The handler should preserve all registers other than R8_fiq-R12_fiq and
	R14_fiq and should return to the address in R14_fiq.

	@param aHandler The address of the top level FIQ dispatcher routine
*/
EXPORT_C void Arm::SetFiqHandler(TLinAddr aHandler)
	{
	// Just record the dispatcher address; the FIQ vector code (elsewhere)
	// reads this field when dispatching fast interrupts.
	ArmInterruptInfo.iFiqHandler=aHandler;
	}
|
|
155 |
|
|
156 |
extern void initialiseState(TInt aCpu, TSubScheduler* aSS);
|
|
157 |
|
|
158 |
void Arm::Init1Interrupts()
//
// Initialise the interrupt and exception vector handlers.
//
	{
	__KTRACE_OPT(KBOOT,DEBUGPRINT(">Arm::Init1Interrupts()"));

	// Initialise the boot CPU's per-CPU scheduler state before any
	// interrupt can be taken.
	TSubScheduler* ss = &TheSubSchedulers[0];
	initialiseState(0, ss);

	// Quiesce this CPU's private timer and watchdog and acknowledge any
	// pending event flags. The two successive iWatchdogDisable writes
	// appear to be the MPCore magic-value handshake that takes the
	// watchdog out of watchdog mode - confirm against the ARM MPCore TRM.
	ArmLocalTimer& T = LOCAL_TIMER;
	T.iWatchdogDisable = E_ArmTmrWDD_1;
	T.iWatchdogDisable = E_ArmTmrWDD_2;
	T.iTimerCtrl = 0;
	T.iTimerIntStatus = E_ArmTmrIntStatus_Event;
	T.iWatchdogCtrl = 0;
	T.iWatchdogIntStatus = E_ArmTmrIntStatus_Event;

	// Hand over to the generic interrupt layer for controller setup.
	NIrq::HwInit1();

	__KTRACE_OPT(KBOOT,DEBUGPRINT("<Arm::Init1Interrupts()"));
	}
|
|
180 |
|
|
181 |
// C-level handler bound to the ARM reset vector: executing it at runtime
// should never happen, so it faults the kernel.
extern "C" void __ArmVectorReset()
	{
	FAULT();
	}
|
|
185 |
|
|
186 |
// C-level handler bound to the reserved ARM exception vector: reaching it
// indicates a serious fault, so the kernel is faulted immediately.
extern "C" void __ArmVectorReserved()
	{
	FAULT();
	}
|
|
190 |
|
|
191 |
|
|
192 |
// Default BTrace control function, installed by BTrace::SetHandlers() when
// the caller supplies a null control function: rejects every request.
TInt BTraceDefaultControl(BTrace::TControl /*aFunction*/, TAny* /*aArg1*/, TAny* /*aArg2*/)
	{
	return KErrNotSupported;
	}
|
|
196 |
|
|
197 |
|
|
198 |
EXPORT_C void BTrace::SetHandlers(BTrace::THandler aNewHandler, BTrace::TControlFunction aNewControl, BTrace::THandler& aOldHandler, BTrace::TControlFunction& aOldControl)
	{
	// Never install a null control function - substitute the default stub
	// so BTraceData.iControl is always callable.
	BTrace::TControlFunction newCtrl = aNewControl;
	if (!newCtrl)
		newCtrl = &BTraceDefaultControl;
	__ACQUIRE_BTRACE_LOCK();
	// Exchange both function pointers while holding the BTrace lock so the
	// handler/control pair is replaced together; the previous values are
	// captured for return to the caller.
	BTrace::THandler prevHandler = (BTrace::THandler)__e32_atomic_swp_ord_ptr(&BTraceData.iHandler, aNewHandler);
	BTrace::TControlFunction prevCtrl = (BTrace::TControlFunction)__e32_atomic_swp_ord_ptr(&BTraceData.iControl, newCtrl);
	__RELEASE_BTRACE_LOCK();
	aOldHandler = prevHandler;
	aOldControl = prevCtrl;
	}
|
|
208 |
|
|
209 |
|
|
210 |
EXPORT_C TInt BTrace::SetFilter(TUint aCategory, TInt aValue)
	{
	if(!IsSupported(aCategory))
		return KErrNotSupported;
	TUint8* const pFilter = BTraceData.iFilter+aCategory;
	TUint prev = *pFilter;
	// aValue of 0 or 1 installs a new filter setting; anything else
	// (e.g. -1) is a pure query and leaves the filter untouched.
	if(TUint(aValue)<=1u)
		{
		prev = __e32_atomic_swp_ord8(pFilter, (TUint8)aValue);
		// Emit a meta-trace record noting the filter change.
		BTraceContext4(BTrace::EMetaTrace, BTrace::EMetaTraceFilterChange, (TUint8)aCategory | (aValue<<8));
		}
	// Return the filter value that was in effect before any change.
	return prev;
	}
|
|
223 |
|
|
224 |
EXPORT_C SCpuIdleHandler* NKern::CpuIdleHandler()
	{
	// Expose the CPU idle handler block embedded in ArmInterruptInfo.
	return &ArmInterruptInfo.iCpuIdleHandler;
	}
|
|
228 |
|
|
229 |
// Return the idle generation count - incremented in NKern::Idle when the
// last CPU goes idle and queued idle DFCs are dispatched.
TUint32 NKern::IdleGenerationCount()
	{
	return TheScheduler.iIdleGenerationCount;
	}
|
|
233 |
|
|
234 |
// Idle entry point, run by a CPU's idle thread when it has no work.
// Clears this CPU's bit in the not-idle mask; if this makes it the last CPU
// to go idle and idle DFCs are queued, it cancels going idle and runs them
// instead. Otherwise it drops into the low-power idle hook with interrupts
// still disabled, and runs an optional postamble pass on the way out.
void NKern::Idle()
	{
	TScheduler& s = TheScheduler;
	TSubScheduler& ss = SubScheduler();	// OK since idle thread locked to CPU
	TUint32 m = ss.iCpuMask;
	s.iIdleSpinLock.LockIrq();
	// Clear our bit; the value before the AND tells us whether any other
	// CPU was still busy.
	TUint32 orig_cpus_not_idle = __e32_atomic_and_acq32(&s.iCpusNotIdle, ~m);
	if (orig_cpus_not_idle == m)
		{
		// all CPUs idle
		if (!s.iIdleDfcs.IsEmpty())
			{
			// Idle DFCs are pending: abandon idling, adopt the idle DFC
			// queue onto this CPU and run it via a lock/unlock cycle.
			__e32_atomic_ior_ord32(&s.iCpusNotIdle, m);	// we aren't idle after all
			s.iIdleGeneration ^= 1;
			++s.iIdleGenerationCount;
			s.iIdleSpillCpu = (TUint8)ss.iCpuNum;
			ss.iDfcs.MoveFrom(&s.iIdleDfcs);
			ss.iDfcPendingFlag = 1;
			s.iIdleSpinLock.UnlockIrq();
			NKern::Lock();
			NKern::Unlock();	// process idle DFCs here
			return;
			}
		}

	// postamble happens here - interrupts cannot be reenabled
	s.iIdleSpinLock.UnlockOnly();
	// Enter the idle hook. The argument is the mask of OTHER CPUs that were
	// still busy when we went idle (0 means system-wide idle).
	NKIdle(orig_cpus_not_idle & ~m);

	// interrupts have not been reenabled
	s.iIdleSpinLock.LockOnly();
	__e32_atomic_ior_ord32(&s.iCpusNotIdle, m);	// mark this CPU busy again
	if (ArmInterruptInfo.iCpuIdleHandler.iPostambleRequired)
		{
		// The idle handler requested a postamble pass; clear the flag and
		// call the hook once more with the all-ones marker value.
		ArmInterruptInfo.iCpuIdleHandler.iPostambleRequired = FALSE;
		NKIdle(-1);
		}
	s.iIdleSpinLock.UnlockIrq();	// reenables interrupts
	}
|
|
273 |
|
|
274 |
|
|
275 |
// Frequency of the CPU-time measurement counter. On this port it is the
// same counter used by NKern::Timestamp(), so simply delegate.
EXPORT_C TUint32 NKern::CpuTimeMeasFreq()
	{
	return NKern::TimestampFrequency();
	}
|
|
279 |
|
|
280 |
|
|
281 |
/** Converts a time interval in microseconds to thread timeslice ticks

	@param aMicroseconds time interval in microseconds.
	@return Number of thread timeslice ticks. Non-integral results are rounded up.

	@pre aMicroseconds should be nonnegative
	@pre any context
*/
EXPORT_C TInt NKern::TimesliceTicks(TUint32 aMicroseconds)
	{
	// ticks = ceil(aMicroseconds * timer_frequency / 1e6), computed in
	// 64 bits to avoid overflow, then clamped to KMaxTInt.
	TUint64 freq = (TUint32)TheScheduler.i_TimerMax;
	TUint64 t = freq * TUint64(aMicroseconds);
	t += UI64LIT(999999);		// round any fractional tick upwards
	t /= UI64LIT(1000000);
	return (t > TUint64(TInt(KMaxTInt))) ? KMaxTInt : (TInt)t;
	}
|
|
300 |
|
|
301 |
|
|
302 |
/** Get the frequency of counter queried by NKern::Timestamp().

	@publishedPartner
	@prototype
*/
EXPORT_C TUint32 NKern::TimestampFrequency()
	{
	// i_TimerMax holds the timer clock frequency, stored as a TAny* by
	// NKern::Init0 (iMaxTimerClock with a prescaler of 1).
	return (TUint32)TheScheduler.i_TimerMax;
	}
|
|
311 |
|