kernel/eka/drivers/power/smppower/idlehelper.cpp
branchRCL_3
changeset 257 3e88ff8f41d5
parent 256 c1f20ce4abcf
child 258 880ff05ad710
child 263 9e2d4f7f5028
// Copyright (c) 2010 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL " http://www.eclipse.org/legal/epl-v10.html ".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// os\kernelhwsrv\kernel\eka\drivers\power\smppower\idlehelper.cpp
// Implementation of helper classes required to implement CPU idle
// functionality in an SMP BSP.

/**
 @file
 @prototype
*/

#include <kernel/arm/arm.h>
#include <smppower/idlehelper.h>

#ifdef __SMP__

//-/-/-/-/-/-/-/-/-/ class TIdleSupport /-/-/-/-/-/-/-/-/-/

TUint TIdleSupport::iGlobalIntDistAddress=0;
TUint TIdleSupport::iBaseIntIfAddress=0;
volatile TUint32* TIdleSupport::iTimerCount=0;
volatile TUint32 TIdleSupport::iIdlingCpus=0;
volatile TUint32 TIdleSupport::iAllEngagedCpusMask=0;
volatile TUint32 TIdleSupport::iRousingCpus=0;
volatile TUint32 TIdleSupport::iExitRequired=EFalse;

/**
   Set up interrupt access for the static library by supplying the
   interrupt distributor and CPU interrupt interface addresses.
   aGlobalIntDistAddress = interrupt distributor base address
   aBaseIntIfAddress = CPU interrupt interface base address
   aTimerCount = optional pointer to a HW timer count register from the BSP (only used for btrace)
   @pre
 */

void TIdleSupport::SetupIdleSupport(TUint32 aGlobalIntDistAddress, TUint32 aBaseIntIfAddress, TUint32* aTimerCount)
	{
	iGlobalIntDistAddress=aGlobalIntDistAddress;
	iBaseIntIfAddress=aBaseIntIfAddress;
	iTimerCount=aTimerCount; /*NULL by default*/
	iAllEngagedCpusMask=AllCpusMask();
	}
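
// Illustrative usage sketch (not part of this file): a BSP's variant
// initialisation might wire the helper up roughly as follows, where
// KGicDistBase, KGicCpuIfBase and TimerCountReg() are hypothetical
// BSP-specific values.
//
//     TIdleSupport::SetupIdleSupport(KGicDistBase, KGicCpuIfBase,
//                                    TimerCountReg()); // timer ptr may be NULL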
       
/**
   Returns the current HW timer count register value if one was supplied
   to SetupIdleSupport; otherwise returns NKern::FastCounter().
   Only used for btrace.
*/

TUint32 TIdleSupport::GetTimerCount()
	{
    if(iTimerCount)
        return *iTimerCount;
    else
        return NKern::FastCounter();
	}

/**
   Returns ETrue if any interrupt is pending, EFalse otherwise
*/

TBool TIdleSupport::IsIntPending()
	{
	return ((TUint32)IntPending()!=KNoInterruptsPending);
	}

/**
   Set the priority of the Idle IPI to be the highest
   @pre
*/

void TIdleSupport::SetIdleIPIToHighestPriority()
	{
    // The GIC priority registers for the IPI vectors are banked per-CPU, so
    // migrate this thread to each CPU in turn to program all of them.
    NKern::ThreadEnterCS();
    TInt frz = NKern::FreezeCpu();
    __PM_IDLE_ASSERT_ALWAYS(!frz);
    TInt orig_cpu = NKern::CurrentCpu();
    TInt ncpu = NKern::NumberOfCpus();
    TInt cpu = orig_cpu;
    TUint32 orig_affinity = 0;
    do
        {
        TUint32 affinity = NKern::ThreadSetCpuAffinity(NKern::CurrentThread(), (TUint32)cpu);
        if (cpu == orig_cpu)
            {
            orig_affinity = affinity;
            NKern::EndFreezeCpu(frz);
            }
        TInt cpu_now = NKern::CurrentCpu();
        __PM_IDLE_ASSERT_ALWAYS(cpu_now == cpu);

        // here we can set the priority of the IPI vector for each CPU in turn
        GicDistributor* theGIC = (GicDistributor*) TIdleSupport::iGlobalIntDistAddress;
        TUint8* priorities = (TUint8*) &(theGIC->iPriority);
        priorities[IDLE_WAKEUP_IPI_VECTOR]=0x0;   // 0x0 = highest GIC priority
        __e32_io_completion_barrier();
        if (++cpu == ncpu)
            cpu = 0;
        } while (cpu != orig_cpu);
    NKern::ThreadSetCpuAffinity(NKern::CurrentThread(), orig_affinity);
    NKern::ThreadLeaveCS();
	}
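
// The rotation above is a reusable pattern: pin the calling thread to each
// CPU in turn to reach per-CPU (banked) registers. A minimal sketch of the
// pattern, with DoPerCpuWork() as a hypothetical per-CPU operation:
//
//     NKern::ThreadEnterCS();
//     TInt ncpu = NKern::NumberOfCpus();
//     TUint32 orig_affinity = 0;
//     for (TInt cpu = 0; cpu < ncpu; ++cpu)
//         {
//         TUint32 a = NKern::ThreadSetCpuAffinity(NKern::CurrentThread(), (TUint32)cpu);
//         if (cpu == 0)
//             orig_affinity = a;   // remember the original affinity once
//         DoPerCpuWork();          // runs with the thread bound to 'cpu'
//         }
//     NKern::ThreadSetCpuAffinity(NKern::CurrentThread(), orig_affinity);
//     NKern::ThreadLeaveCS();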
       
/**
   Atomically clears the current CPU's bit in the idle mask to indicate that
   the current core has woken up from an interrupt or IPI.
   Returns ETrue only if all other cores were idle and we were woken by the
   IPI sent by the last core going idle (EFalse otherwise).
   aCpuMask - bit mask with only the current CPU's bit set
   Normal usage: call in the idle handler after waking from the all-cores-down IPI

   @pre
 */
TBool TIdleSupport::ClearLocalAndCheckGlobalIdle(TUint32 aCpuMask)
    {
    return (__e32_atomic_and_ord32(&iIdlingCpus,~aCpuMask) & KGlobalIdleFlag);
    }
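
// Illustrative wake-path sketch (hypothetical idle handler, not part of
// this file), where KThisCpuMask stands for 1 << NKern::CurrentCpu():
//
//     if (TIdleSupport::ClearLocalAndCheckGlobalIdle(KThisCpuMask))
//         {
//         // All cores were down and this wake came from the idle IPI:
//         // take the all-cores-down exit path.
//         }
//     else
//         {
//         // Woken by an ordinary interrupt: take the shallow exit path.
//         }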
       
/**
   Atomically sets the current CPU's bit in the rousing mask to indicate
   that the current CPU has woken.
   Returns ETrue only if this is the first CPU awake (EFalse otherwise).
   aCMask - bit mask with only the current CPU's bit set
   Normal usage: call in the idle handler just after the core is woken

   @pre
 */

TBool TIdleSupport::FirstCoreAwake(TUint32 aCMask)
	{
	return (!__e32_atomic_ior_acq32(&iRousingCpus,aCMask));
	}
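
// Illustrative sketch (hypothetical idle handler): the first core to rouse
// typically performs the shared wake-up work exactly once. KThisCpuMask is
// hypothetical, as above.
//
//     if (TIdleSupport::FirstCoreAwake(KThisCpuMask))
//         {
//         // restore shared hardware state here; runs on exactly one core
//         }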
       
/**
   Sets the exit required flag in TIdleSupport. Exit required is normally
   set when an interrupt is pending on a core.
   aBreakSyncPoint - TBreakableSyncPoint* that all cores were waiting on
   before the interrupt occurred. Normal usage: after an interrupt pending check

   @pre
 */

void TIdleSupport::SetExitRequired(TBreakableSyncPoint* aBreakSyncPoint)
	{
	iExitRequired=ETrue;
	if(aBreakSyncPoint)
		aBreakSyncPoint->Break();
	}
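
// Illustrative sketch (hypothetical idle entry path): abort the power-down
// sequence if an interrupt arrives while cores are gathering at a breakable
// sync point. 'syncPoint' is a hypothetical TBreakableSyncPoint instance.
//
//     if (TIdleSupport::IsIntPending())
//         {
//         TIdleSupport::SetExitRequired(&syncPoint); // releases all waiters
//         }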
       
/**
   Returns the exit required flag set by SetExitRequired.

   @pre
 */

TBool TIdleSupport::GetExitRequired()
	{
	return iExitRequired;
	}
       
/**
   Resets all the control flags/sync points. This is normally done by the
   last core when all cores are confirmed to be idle.

   @pre
 */

void TIdleSupport::ResetLogic()
	{
    iIdlingCpus = 0;          // clear idle CPUs
    iRousingCpus = 0;         // clear rousing CPUs
    iExitRequired = EFalse;
	}
       

/**
   Marks a core as retired.

   @pre called by the idle handler as part of idle entry, before any
        sync point or calls to SetLocalAndCheckSetGlobalIdle
*/
void TIdleSupport::MarkCoreRetired(TUint32 aCpuMask)
    {
    __e32_atomic_and_rlx32(&iAllEngagedCpusMask,~aCpuMask);
    PMBTRACE4(KRetireCore,KRetireMarkCoreRetired,aCpuMask);
    }
       
/**
   Marks a core as engaged.
   @pre called outside the idle handler (can be called in idle entry before
        any sync point or calls to SetLocalAndCheckSetGlobalIdle)
 */
void TIdleSupport::MarkCoreEngaged(TUint32 aCpuMask)
    {
    __e32_atomic_ior_rlx32(&iAllEngagedCpusMask,aCpuMask);
    PMBTRACE4(KEngageCore,KEngageMarkCoreEngaged,aCpuMask);
    }
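
// Illustrative sketch (hypothetical core power-control code): retiring and
// engaging are paired around taking a core out of and back into service.
// KCpu1Mask is a hypothetical mask for the core being controlled.
//
//     TIdleSupport::MarkCoreRetired(KCpu1Mask);  // core leaves the idle protocol
//     // ... core is powered down, and later brought back up ...
//     TIdleSupport::MarkCoreEngaged(KCpu1Mask);  // core rejoins the idle protocol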
       
/**
   Returns the current CPU idling bit mask.
   @pre
 */

TUint32 TIdleSupport::GetCpusIdleMask()
	{
	return iIdlingCpus;
	}
       
/**
   Returns the address of the engaged CPUs mask; needed for sync point construction.
 */

volatile TUint32* TIdleSupport::EngagedCpusMaskAddr()
    {
    return &iAllEngagedCpusMask;
    }
       
/**
   Returns a mask with one bit set per CPU present, i.e. (1 << NKern::NumberOfCpus()) - 1
   (for example 0xF on a 4-core system).
 */

TUint32 TIdleSupport::AllCpusMask()
    {
    return ((0x1<<NKern::NumberOfCpus())-1);
    }
       
/**
   Clears the idle IPI; in debug builds, asserts that the interrupt being
   cleared really is the idle wakeup IPI.
   @pre
 */
#ifdef _DEBUG
void TIdleSupport::ClearIdleIPI()
    {
    __PM_IDLE_ASSERT_ALWAYS((DoClearIdleIPI()&0x1ff)==IDLE_WAKEUP_IPI_VECTOR);
    }
#endif
       

//-/-/-/-/-/-/-/-/-/ class TSyncPointBase /-/-/-/-/-/-/-/-/-/
TSyncPointBase::TSyncPointBase()
    :iStageAndCPUWaitingMask(0),
     iAllEnagedCpusMask(TIdleSupport::EngagedCpusMaskAddr()) // member name spelt as declared
    {
    }
       

#ifdef _DEBUG
void TSyncPointBase::SignalAndWait(TUint32 aStage)
    {
    PMBTRACE8(KSyncPoint,KSignalAndWaitEntry,aStage,*iAllEnagedCpusMask);
#else
void TSyncPointBase::SignalAndWait()
    {
#endif
    TInt c = NKern::CurrentCpu();
    DoSW(1<<c);   // signal this CPU's arrival and wait at the sync point
#ifdef _DEBUG
    PMBTRACE0(KSyncPoint,KSignalAndWaiteXit);
#endif
    }
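
// Illustrative sketch (hypothetical idle handler): each core signals the
// same sync point and proceeds only once every engaged core has arrived.
// 'syncPoint' is a hypothetical TSyncPoint; the aStage argument exists only
// in debug builds, where it tags the rendezvous for btrace.
//
//     #ifdef _DEBUG
//         syncPoint.SignalAndWait(1);
//     #else
//         syncPoint.SignalAndWait();
//     #endif
//     // all engaged cores have reached this point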
       

/**
   Resets a sync point.
   No barriers are used in this function, so add them if required. For breakable
   sync points this must be called before the sync point can be used again;
   for normal sync points this must be called whenever a CPU is engaged.
   @pre Should be called from one CPU only.
 */
void TSyncPointBase::Reset()
    {
    // Could assert it is already broken. Not using atomics, because this must
    // be called from only one CPU, before use, and be properly synchronised.
    iStageAndCPUWaitingMask = 0;
    }
       

//-/-/-/-/-/-/-/-/-/ class TBreakableSyncPoint /-/-/-/-/-/-/-/-/-/

/**
   Breaks the sync point until it is reset again. Any attempt to wait on the
   point will return immediately until the point is reset.
 */
void TBreakableSyncPoint::Break()
    {
    __e32_atomic_ior_ord32(&iStageAndCPUWaitingMask,0x80000000);
    }
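
// Illustrative sketch (hypothetical): the lifecycle of a breakable sync
// point, where 'syncPoint' is a hypothetical TBreakableSyncPoint instance.
//
//     syncPoint.Reset();   // arm the point before use (clears the broken bit)
//     // ... cores rendezvous on syncPoint.SignalAndWait() on the way down ...
//     syncPoint.Break();   // e.g. from SetExitRequired(): current and future
//                          //      waiters now return immediately until Reset()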
       

#endif //__SMP__