kernel/eka/drivers/power/smppower/idlehelper.cia
       
// Copyright (c) 2010 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL " http://www.eclipse.org/legal/epl-v10.html ".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// os\kernelhwsrv\kernel\eka\drivers\power\smppower\idlehelper.cia
// Implementation of helper classes required to implement CPU idle
// functionality in an SMP BSP.
       
/**
 @file
 @prototype
*/


#ifdef __SMP__

#include <smppower/idlehelper.h>

#ifndef DISABLE_TRACE

extern "C" void btrace_printfn(const TAny *aPtr,const TInt regNum)
    {
    PMBTRACE4(KPrintReg,regNum,aPtr);
    }

extern "C" void btrace_printfnId(const TAny *aPtr,const TInt regNum, TUint aId)
    {
    PMBTRACE8(KPrintReg,0xffu,regNum,aPtr);
    }

#define PRINTREG(Rn)                  \
        asm("stmfd sp!,{r0-r12,lr}"); \
        asm("mov r0,r"#Rn);           \
        asm("mov r1,#"#Rn);           \
        asm("bl btrace_printfn");     \
        asm("ldmfd sp!,{r0-r12,lr}");

#define PRINTREGID(Rn,n)              \
        asm("stmfd sp!,{r0-r12,lr}"); \
        asm("mov r0,r"#Rn);           \
        asm("mov r1,#"#Rn);           \
        asm("mov r2,#"#n);            \
        asm("bl btrace_printfnId");   \
        asm("ldmfd sp!,{r0-r12,lr}");

#else
#define PRINTREG(Rn)
#define PRINTREGID(Rn,n)
#endif
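
// Illustrative use of the tracing macros above (an assumption added for clarity,
// not part of the original driver): dropped into a __NAKED__ routine, PRINTREG
// dumps the current value of a register to the power-management BTrace stream,
// and PRINTREGID additionally tags the output with a caller-chosen id.
__NAKED__ void DbgPrintRegExample()
    {
    PRINTREG(0);        // emit the value currently held in r0 (register number 0)
    PRINTREGID(3,7);    // emit r3, tagged with id 7
    __JUMP(,lr);
    }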
       

/**
   Atomically does the following:
     sets the current CPU's bit in the idle mask to indicate that this core wants to idle;
     if all engaged cores have set their bit, the flag KGlobalIdleFlag is also
     ORed into the idle mask to indicate that all cores are going down. In that case
     the function returns ETrue, EFalse otherwise.

   aCpuMask - bit mask with only the current CPU's bit set
   Normal usage: call from the idle handler before waiting for the all-cores-down IPI

   @pre
 */

       
__NAKED__ TBool TIdleSupport::SetLocalAndCheckSetGlobalIdle(TUint32 /*aCpuMask*/)
    {
    asm("ldr    r1,__iAllEngagedCpusMask");                 // r1 = address of iAllEngagedCpusMask
    asm("ldr    r2, [r1]");                                 // r2 = iAllEngagedCpusMask
    asm("add    r1,r1,#4");                                 // r1 = address of iIdlingCpus
    __DATA_MEMORY_BARRIER_Z__(r12);
    asm("1: ");
    LDREX(3,1);                                             // r3 = iIdlingCpus
    asm("orr    r3,r0,r3");                                 // OR in the mask for this CPU
    asm("cmp    r3,r2");                                    // compare to iAllEngagedCpusMask
    asm("orreq  r3,r3,#%a0" : : "i" ((TInt)TIdleSupport::KGlobalIdleFlag)); // if equal, OR in KGlobalIdleFlag
    STREX(12,3,1);
    asm("cmp    r12, #0 ");
    asm("bne    1b ");                                      // write didn't succeed, try again
    __DATA_MEMORY_BARRIER__(r12);
    asm("and    r0,r3,#%a0" : : "i" ((TInt)TIdleSupport::KGlobalIdleFlag));
    __JUMP(,lr);
    asm("__iAllEngagedCpusMask:");
    asm(".word %a0" : : "i" ((TInt)&TIdleSupport::iAllEngagedCpusMask));
    }
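
// For reference, a minimal C++ sketch of the logic implemented by the assembler
// above (an illustration only, not the shipped implementation). It assumes the
// __e32_atomic_cas_ord32() primitive (declared in e32atomics.h) and that
// iIdlingCpus, iAllEngagedCpusMask and KGlobalIdleFlag are accessible under the
// names used in the comments above.
static TBool SetLocalAndCheckSetGlobalIdleSketch(TUint32 aCpuMask)
    {
    TUint32 allEngaged = TIdleSupport::iAllEngagedCpusMask;
    TUint32 oldVal, newVal;
    do
        {
        oldVal = TIdleSupport::iIdlingCpus;
        newVal = oldVal | aCpuMask;                          // mark this CPU as idling
        if (newVal == allEngaged)                            // last engaged CPU going down?
            newVal |= TIdleSupport::KGlobalIdleFlag;
        }
    while (!__e32_atomic_cas_ord32(&TIdleSupport::iIdlingCpus, &oldVal, newVal));
    return (newVal & TIdleSupport::KGlobalIdleFlag) != 0;    // ETrue if all cores are going down
    }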
       
/**

Wait for all CPUs to reach the sync point. A CPU will only exit this function when all other CPUs
have reached it.

Works like this:

cpuMask = 1 << NKern::CurrentCpu()
BEGIN_ATOMIC
    stage = iStageAndCPUWaitingMask >> 16
    waitingCpus = iStageAndCPUWaitingMask & iAllEngagedCpusMask
    oldstage = stage
    if (waitingCpus == iAllEngagedCpusMask) // previous stage completed, this is a new one
        waitingCpus = 0
    waitingCpus |= cpuMask
    if (waitingCpus == iAllEngagedCpusMask) stage++
    iStageAndCPUWaitingMask = (stage << 16) | waitingCpus
END_ATOMIC
FOREVER
    if (oldstage != stage) return
    stage = iStageAndCPUWaitingMask >> 16 // re-read stage
END_FOREVER

*/
       
__NAKED__ void TSyncPoint::DoSW(TUint32 /*aCpuMask*/)
    {
    asm("stmfd sp!, {r4-r5,lr} ");
    asm("add r0,r0,#%a0" : : "i"  _FOFF(TSyncPointBase, iStageAndCPUWaitingMask)); // skip vtable
    asm("ldr r4,[r0,#4]");
    asm("ldr r4,[r4]");
    __DATA_MEMORY_BARRIER_Z__(r12);
    asm("1: ");
    LDREX(2,0);                             // r2 = iStageAndCPUWaitingMask, r4 = iAllEngagedCpusMask
    asm("mov r5,r2,lsr #16");               // r5 = old stage value
    asm("and r2,r2,r4");                    // r2 = currently waiting CPUs
    asm("cmp r2,r4");                       // if r2 == r4 then all CPUs had synched previously
    asm("moveq r2,#0");                     // so reset for the new stage
    asm("orr r2, r2, r1");                  // OR in the mask for this CPU
    asm("mov r3,r5");                       // r3 will hold the new stage
    asm("cmp r2,r4");                       // if r2 == r4 then all CPUs have now arrived
    asm("addeq r3,r3,#1");                  // so increment the stage count
    asm("orr r2,r2,r3, lsl #16");
    STREX(12,2,0);                          // try to atomically update iStageAndCPUWaitingMask
    asm("cmp r12, #0 ");
    asm("bne 1b ");                         // write didn't succeed, try again
    __DATA_MEMORY_BARRIER__(r12);           // ensure the update is written
#ifdef SYNCPOINT_WFE
    asm("ands r2,r2,r4");
    asm("cmp r2,r4");                       // if r2 == r4 then all CPUs have arrived
    ARM_SEVcc(CC_EQ);
#endif
    asm("2: ");
    asm("cmp r3,r5");                       // has the stage advanced (old stage != new stage)?
    asm("ldmnefd sp!, {r4-r5,pc}");         // yes: return
#ifdef SYNCPOINT_WFE
    __DATA_MEMORY_BARRIER__(r12);
    ARM_WFE;
#endif
    asm("ldr r2,[r0]");                     // otherwise re-read iStageAndCPUWaitingMask
    __DATA_MEMORY_BARRIER__(r12);           // ensure the read is observed
    asm("mov r3,r2,lsr #16");               // re-read the new stage
    asm("b 2b");                            // and loop back
    }
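
// The same algorithm expressed in C++, for reference (an illustrative sketch,
// not the shipped implementation). The packed word layout follows the offsets
// used above; __e32_atomic_cas_ord32() (declared in e32atomics.h) stands in for
// the LDREX/STREX loop.
static void SyncPointWaitSketch(volatile TUint32& aStageAndWaiting, TUint32 aAllEngagedMask, TUint32 aCpuMask)
    {
    TUint32 oldVal, newVal, oldStage, newStage;
    do
        {
        oldVal = aStageAndWaiting;
        oldStage = oldVal >> 16;                // stage lives in the top 16 bits
        TUint32 waiting = oldVal & aAllEngagedMask;
        if (waiting == aAllEngagedMask)         // previous stage completed, start a new one
            waiting = 0;
        waiting |= aCpuMask;                    // this CPU has arrived
        newStage = oldStage;
        if (waiting == aAllEngagedMask)         // last CPU to arrive advances the stage
            newStage++;
        newVal = (newStage << 16) | waiting;
        }
    while (!__e32_atomic_cas_ord32((volatile TAny*)&aStageAndWaiting, &oldVal, newVal));
    while (newStage == oldStage)                // spin until some CPU advances the stage
        newStage = aStageAndWaiting >> 16;
    }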
       
/**

Wait for all CPUs to reach the sync point. A CPU will only exit this function when all other CPUs
have reached it or when another CPU has called the Break function. An attempt to wait on a broken
sync point returns immediately.

Works like this:

cpuMask = 1 << NKern::CurrentCpu()
BEGIN_ATOMIC
    waitingCpus = iStageAndCPUWaitingMask & iAllEngagedCpusMask
    if (iStageAndCPUWaitingMask & 0x80000000) // sync point is broken
        END_ATOMIC and return
    waitingCpus |= cpuMask
    if (waitingCpus == iAllEngagedCpusMask) waitingCpus |= 0x80000000
    iStageAndCPUWaitingMask = waitingCpus
END_ATOMIC
FOREVER
    if (iStageAndCPUWaitingMask & 0x80000000) break
END_FOREVER

*/
       
__NAKED__ void TBreakableSyncPoint::DoSW(TUint32 /*aCpuMask*/)
    {
    asm("stmfd sp!, {r4,lr} ");
    asm("add r0,r0,#%a0" : : "i"  _FOFF(TSyncPointBase, iStageAndCPUWaitingMask)); // skip vtable
    asm("ldr r4,[r0,#4]");
    asm("ldr r4,[r4]");
    __DATA_MEMORY_BARRIER_Z__(r12);
    asm("1: ");
    LDREX(2,0);                             // r2 = iStageAndCPUWaitingMask, r4 = iAllEngagedCpusMask
    asm("ands r3,r2,#0x80000000");
    asm("bne 3f");                          // sync point broken, so return
    asm("and r2,r2,r4");                    // r2 = currently waiting CPUs
    asm("orr r2, r2, r1");                  // OR in the mask for this CPU
    asm("cmp r2,r4");                       // if r2 == r4 then all CPUs have arrived
    asm("orreq r2,r2,#0x80000000");         // so set the MSB to release everyone
    STREX(12,2,0);                          // try to atomically update iStageAndCPUWaitingMask
    asm("cmp r12, #0 ");
    asm("bne 1b ");                         // write didn't succeed, try again
    __DATA_MEMORY_BARRIER__(r12);           // ensure the update is written
#ifdef SYNCPOINT_WFE
    asm("ands r3,r2,#0x80000000");          // MSB set?
    ARM_SEVcc(CC_NE);
#endif
    asm("2: ");
    asm("ands r3,r2,#0x80000000");          // MSB set?
    asm("ldmnefd sp!, {r4,pc}");            // yes: return
#ifdef SYNCPOINT_WFE
    __DATA_MEMORY_BARRIER__(r12);
    ARM_WFE;
#endif
    asm("ldr r2,[r0]");                     // otherwise re-read iStageAndCPUWaitingMask
    //__DATA_MEMORY_BARRIER_Z__(r12);       // ensure the read is observed
    asm("b 2b");                            // and loop back
    asm("3:");
    CLREX;
#ifdef SYNCPOINT_WFE
    __DATA_MEMORY_BARRIER__(r12);           // ensure that's written
    ARM_SEV;
#endif
    asm("ldmfd sp!, {r4,pc}");              // return (sync point broken)
    }
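
// The breakable variant in C++, for reference (again an illustrative sketch,
// under the same assumptions as SyncPointWaitSketch above).
static void BreakableSyncPointWaitSketch(volatile TUint32& aStageAndWaiting, TUint32 aAllEngagedMask, TUint32 aCpuMask)
    {
    TUint32 oldVal, newVal;
    do
        {
        oldVal = aStageAndWaiting;
        if (oldVal & 0x80000000u)               // already broken (or fully synchronised)
            return;
        newVal = (oldVal & aAllEngagedMask) | aCpuMask;
        if (newVal == aAllEngagedMask)          // last CPU to arrive
            newVal |= 0x80000000u;              // release everyone
        }
    while (!__e32_atomic_cas_ord32((volatile TAny*)&aStageAndWaiting, &oldVal, newVal));
    while (!(aStageAndWaiting & 0x80000000u))
        {
        // spin; the assembler version can additionally WFE here
        }
    }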
       
#ifdef PROPER_WFI
__NAKED__ void TIdleSupport::DoWFI()
    {
    __DATA_SYNC_BARRIER_Z__(r12);   // generally a good idea to issue a barrier before WFI
    ARM_WFI;
    __JUMP(,lr);
    }
#else
void TIdleSupport::DoWFI()
    {
    TInt c=NKern::CurrentCpu();
    FOREVER
        {
        TInt isr = Pending();
        if (isr!=1023)
            {
            BTRACE0(KIsrPendingCat,isr&0xff);
            break;
            }
        }
    }
#endif
       
__NAKED__ void TIdleSupport::DoIdleIPI(TUint32 /*aMask*/)
    {
    // r0 = cpu mask
    asm("ldr    r2,__EngagedCpusMask");     // only IPI engaged cores: r2 = address of the engaged cores mask
    asm("ldr    r2,[r2]");
    asm("and    r0,r0,r2");                 // mask out retired cores
    asm("ldr    r1,__KGICAddr");            // r1 = address of iGlobalIntDistAddress
    asm("ldr    r1, [r1]");                 // r1 = hardware GIC interrupt distributor base address
    __DATA_SYNC_BARRIER_Z__(r12);           // need DSB before sending any IPI
    asm("movs   r0, r0, lsl #16 ");         // CPU mask into bits 16-23 - any bits set in aMask?
    asm("orrne  r0, r0, #%a0" : : "i" ((TInt) IDLE_WAKEUP_IPI_VECTOR));
    asm("strne  r0, [r1, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));   // trigger IPIs if any
    __JUMP(,lr);
    asm("__KGICAddr:");
    asm(".word %a0" : : "i" ((TInt)&TIdleSupport::iGlobalIntDistAddress));
    asm("__EngagedCpusMask:");
    asm(".word %a0" : : "i" ((TInt)&TIdleSupport::iAllEngagedCpusMask));
    }
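
// What DoIdleIPI() amounts to, expressed in C++ (an illustrative sketch only;
// GicDistributor and IDLE_WAKEUP_IPI_VECTOR come from the existing headers, and
// the cast of iGlobalIntDistAddress to a GicDistributor pointer plus the
// omission of the DSB are simplifications made for readability):
static void DoIdleIPISketch(TUint32 aMask)
    {
    TUint32 targets = aMask & TIdleSupport::iAllEngagedCpusMask;    // drop retired cores
    if (targets)
        {
        GicDistributor* gic = (GicDistributor*)TIdleSupport::iGlobalIntDistAddress;
        // bits 16-23 select the target CPUs, the low bits carry the SGI vector
        gic->iSoftIrq = (targets << 16) | IDLE_WAKEUP_IPI_VECTOR;
        }
    }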
       
#ifdef _DEBUG
__NAKED__  TInt TIdleSupport::DoClearIdleIPI()
#else
__NAKED__  void TIdleSupport::ClearIdleIPI()
#endif
    {
    __DATA_SYNC_BARRIER_Z__(r12);           // DSB
    asm("ldr    r1,__KCPUIFAddr");          // r1 = address of iBaseIntIfAddress
    asm("ldr    r1, [r1]");                 // r1 = hardware GIC CPU interrupt interface base address
    asm("ldr    r0,[r1, #%a0]" : : "i" _FOFF(GicCpuIfc, iAck));
    // asm("mov    r0,#%a0" : : "i" ((TInt) IDLE_WAKEUP_IPI_VECTOR)); // has to be!
    asm("str    r0, [r1, #%a0]" : : "i" _FOFF(GicCpuIfc, iEoi));
    __JUMP(,lr);
    asm("__KCPUIFAddr:");
    asm(".word %a0" : : "i" ((TInt)&TIdleSupport::iBaseIntIfAddress));  // CPU interrupt interface base address
    }
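
// Equivalent C++ for the acknowledge/end-of-interrupt sequence above (a sketch
// only; the cast of iBaseIntIfAddress to a GicCpuIfc pointer is an assumption):
static TInt ClearIdleIPISketch()
    {
    GicCpuIfc* ifc = (GicCpuIfc*)TIdleSupport::iBaseIntIfAddress;
    TInt ack = ifc->iAck;       // read the interrupt acknowledge register
    ifc->iEoi = ack;            // signal end-of-interrupt for the acknowledged id
    return ack;                 // _DEBUG builds of DoClearIdleIPI() return this value
    }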
       
#ifndef _DEBUG
TInt TIdleSupport::DoClearIdleIPI()
    {
    return 0;
    }
#endif

__NAKED__  TInt TIdleSupport::IntPending()
    {
    asm("ldr    r1,__KCPUIFAddr");          // r1 = address of iBaseIntIfAddress
    asm("ldr    r1, [r1]");                 // r1 = hardware GIC CPU interrupt interface base address
    asm("ldr    r0, [r1, #%a0]" : : "i" _FOFF(GicCpuIfc, iHighestPending));
    __JUMP(,lr);
    }
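
// Putting the pieces together: a schematic of how a BSP idle handler might use
// these helpers (illustration only; the sync point object and the AllCpusMask()
// accessor are hypothetical, and the real sequencing is BSP specific):
//
//     TUint32 cpuMask = 1u << NKern::CurrentCpu();
//     if (TIdleSupport::SetLocalAndCheckSetGlobalIdle(cpuMask))
//         {
//         // last engaged core to go idle: wake the others so every core can
//         // take part in entering (or vetoing) the low power state
//         TIdleSupport::DoIdleIPI(TIdleSupport::AllCpusMask()); // hypothetical accessor
//         }
//     iSyncPoint.DoSW(cpuMask);      // rendezvous with the other engaged cores
//     TIdleSupport::DoWFI();         // sleep until an interrupt or the idle IPI arrives
//     TIdleSupport::ClearIdleIPI();  // acknowledge the idle IPI at the GIC CPU interface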
       

#endif // ifdef __SMP__