kernel/eka/drivers/power/smppower/idlehelper.cia
changeset 90 947f0dc9f7a8
       
/*
* Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
* All rights reserved.
* This component and the accompanying materials are made available
* under the terms of "Eclipse Public License v1.0"
* which accompanies this distribution, and is available
* at the URL "http://www.eclipse.org/legal/epl-v10.html".
*
* Initial Contributors:
* Nokia Corporation - initial contribution.
*
* Contributors:
*
* Description:
* os\kernelhwsrv\kernel\eka\drivers\power\smpidlehelper.cpp
* Implementation of the helper classes required to implement CPU idle
* functionality in an SMP BSP.
*
*/

       
/**
 @file
 @prototype
*/


#ifdef __SMP__

#include <smppower/idlehelper.h>

#ifndef DISABLE_TRACE

extern "C" void btrace_printfn(const TAny *aPtr, const TInt regNum)
    {
    PMBTRACE4(KPrintReg,regNum,aPtr);
    }

extern "C" void btrace_printfnId(const TAny *aPtr, const TInt regNum, TUint aId)
    {
    PMBTRACE8(KPrintReg,0xffu,regNum,aPtr);
    }

#define PRINTREG(Rn)                  \
        asm("stmfd sp!,{r0-r12,lr}"); \
        asm("mov r0,r"#Rn);           \
        asm("mov r1,#"#Rn);           \
        asm("bl btrace_printfn");     \
        asm("ldmfd sp!,{r0-r12,lr}");

#define PRINTREGID(Rn,n)              \
        asm("stmfd sp!,{r0-r12,lr}"); \
        asm("mov r0,r"#Rn);           \
        asm("mov r1,#"#Rn);           \
        asm("mov r2,#"#n);            \
        asm("bl btrace_printfnId");   \
        asm("ldmfd sp!,{r0-r12,lr}");

#else
#define PRINTREG(Rn)
#define PRINTREGID(Rn,n)
#endif
       

/**
   Atomically does the following:
     sets the current CPU's bit in the idle mask to indicate that this core wants to idle;
     if all engaged cores have set their bit, the flag KGlobalIdleFlag is also
     ORed into the idle mask to indicate that all cores are going down. In that case
     the function returns ETrue, otherwise EFalse.

   @param aCpuMask bit mask with only the current CPU's bit set
   Normal usage: call from the idle handler before waiting for the all-cores-down IPI.

   @pre
 */
       
__NAKED__ TBool TIdleSupport::SetLocalAndCheckSetGlobalIdle(TUint32 /*aCpuMask*/)
    {
    asm("ldr    r1,__iAllEngagedCpusMask");                           // r1 = address of iAllEngagedCpusMask
    asm("ldr    r2, [r1]");                                           // r2 = iAllEngagedCpusMask
    asm("add    r1,r1,#4");                                           // r1 = address of iIdlingCpus
    __DATA_MEMORY_BARRIER_Z__(r12);
    asm("1: ");
    LDREX(3,1);                                                       // r3 = iIdlingCpus
    asm("orr    r3,r0,r3");                                           // orr in mask for this CPU
    asm("cmp    r3,r2");                                              // compare to iAllEngagedCpusMask
    asm("orreq  r3,r3,#%a0" : : "i" (TIdleSupport::KGlobalIdleFlag)); // if equal orr in KGlobalIdleFlag
    STREX(12,3,1);
    asm("cmp    r12, #0 ");                                           // did the store succeed?
    asm("bne    1b ");                                                // write didn't succeed, try again
    __DATA_MEMORY_BARRIER__(r12);
    asm("and    r0,r3,#%a0" : : "i" (TIdleSupport::KGlobalIdleFlag)); // return KGlobalIdleFlag if all cores idling, 0 otherwise
    __JUMP(,lr);
    asm("__iAllEngagedCpusMask:");
    asm(".word %a0" : : "i" ((TInt)&TIdleSupport::iAllEngagedCpusMask));
    }
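
/*
   Illustrative usage only, not part of this file: a minimal sketch of how an idle
   handler might call SetLocalAndCheckSetGlobalIdle() together with DoIdleIPI() and
   DoWFI() (both defined further down), following the "Normal usage" note above.
   The handler signature and both mask parameters are hypothetical.

   void IdleHandlerSketch(TUint32 aCpuMask, TUint32 aOtherEngagedCpusMask)
       {
       if (TIdleSupport::SetLocalAndCheckSetGlobalIdle(aCpuMask))
           {
           // This core was the last one down: IPI the other engaged cores so
           // they all observe KGlobalIdleFlag and can enter the low power state.
           TIdleSupport::DoIdleIPI(aOtherEngagedCpusMask);
           }
       TIdleSupport::DoWFI();      // wait for a wakeup interrupt or the idle IPI
       }
*/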
       
/**

Wait for all CPUs to reach the sync point. A CPU will only exit this function when all other CPUs
have reached it.

Works like this:

cpuMask = 1 << NKern::CurrentCpu()
BEGIN_ATOMIC
    stage = iStageAndCPUWaitingMask >> 16
    waitingCpus = iStageAndCPUWaitingMask & iAllEngagedCpusMask
    oldstage = stage
    if (waitingCpus == iAllEngagedCpusMask)   // previous stage fully synced, this is a new one
        waitingCpus = 0
    waitingCpus |= cpuMask
    if (waitingCpus == iAllEngagedCpusMask) stage++
    iStageAndCPUWaitingMask = (stage << 16) | waitingCpus
END_ATOMIC
FOREVER
    if (oldstage != stage) return
    stage = iStageAndCPUWaitingMask >> 16     // re-read stage
END_FOREVER

*/
       
__NAKED__ void TSyncPoint::DoSW(TUint32 /*aCpuMask*/)
    {
    asm("stmfd sp!, {r4-r5,lr} ");
    asm("add r0,r0,#%a0" : : "i"  _FOFF(TSyncPointBase, iStageAndCPUWaitingMask)); // skip vt
    asm("ldr r4,[r0,#4]");
    asm("ldr r4,[r4]");                     // r4 = iAllEngagedCpusMask
    __DATA_MEMORY_BARRIER_Z__(r12);
    asm("1: ");
    LDREX(2,0);                             // r2 = iStageAndCPUWaitingMask
    asm("mov r5,r2,lsr #16");               // r5 has old stage value
    asm("and r2,r2,r4");                    // r2 has currently waiting cpus
    asm("cmp r2,r4");                       // if r2 == r4 then all cpus synced on the previous stage
    asm("moveq r2,#0");                     // reset
    asm("orr r2, r2, r1");                  // orr in mask for this CPU
    asm("mov r3,r5");                       // r3 will have new stage
    asm("cmp r2,r4");                       // if r2 == r4 then all cpus have set their bit
    asm("addeq r3,r3,#1");                  // increment new stage count
    asm("orr r2,r2,r3, lsl #16");
    STREX(12,2,0);                          // try to atomically update iStageAndCPUWaitingMask
    asm("cmp r12, #0 ");
    asm("bne 1b ");                         // write didn't succeed, try again
    __DATA_MEMORY_BARRIER__(r12);           // ensure that's written
#ifdef SYNCPOINT_WFE
    asm("ands r2,r2,r4");
    asm("cmp r2,r4");                       // if r2 == r4 then all cpus have set their bit
    ARM_SEVcc(CC_EQ);
#endif
    asm("2: ");
    asm("cmp r3,r5");                       // old stage does not equal new stage?
    asm("ldmfdne sp!, {r4-r5,pc}");         // yes, all cpus reached the sync point: return
#ifdef SYNCPOINT_WFE
    __DATA_MEMORY_BARRIER__(r12);
    ARM_WFE;
#endif
    asm("ldr r2,[r0]");                     // otherwise re-read iStageAndCPUWaitingMask into r2
    __DATA_MEMORY_BARRIER__(r12);           // ensure read is observed
    asm("mov r3,r2,lsr #16");               // re-read new stage
    asm("b 2b");                            // loop back
    }
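
/*
   Illustrative only: a portable C++11 rendering (an assumption, not built as part of
   this file or the Symbian kernel) of the stage/waiting-mask algorithm above, with
   std::atomic standing in for the LDREX/STREX loop and a plain spin in place of the
   optional WFE/SEV path.

   #include <atomic>
   #include <cstdint>

   void SyncPointSketch(std::atomic<std::uint32_t>& aStageAndWaiting,
                        std::uint32_t aAllEngagedCpusMask,
                        std::uint32_t aCpuMask)
       {
       std::uint32_t oldStage, newStage;
       std::uint32_t observed = aStageAndWaiting.load(std::memory_order_relaxed);
       for (;;)
           {
           oldStage = observed >> 16;                               // stage lives in the top half
           std::uint32_t waiting = observed & aAllEngagedCpusMask;  // waiting CPUs in the bottom half
           if (waiting == aAllEngagedCpusMask)
               waiting = 0;                                         // previous stage fully synced: start afresh
           waiting |= aCpuMask;                                     // mark this CPU as waiting
           newStage = oldStage + (waiting == aAllEngagedCpusMask ? 1 : 0);
           std::uint32_t desired = (newStage << 16) | waiting;
           if (aStageAndWaiting.compare_exchange_weak(observed, desired,
                                                      std::memory_order_acq_rel))
               break;                                               // store succeeded
           }
       // Spin until some CPU advances the stage (if this CPU was last in, it already did).
       while (newStage == oldStage)
           newStage = aStageAndWaiting.load(std::memory_order_acquire) >> 16;
       }
*/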
       
/**

Wait for all CPUs to reach the sync point. A CPU will only exit this function when all other CPUs
have reached it or when another CPU has called the Break function. An attempt to wait on a broken
sync point returns immediately.

Works like this:

cpuMask = 1 << NKern::CurrentCpu()
BEGIN_ATOMIC
    waitingCpus = iStageAndCPUWaitingMask & iAllEngagedCpusMask
    if (iStageAndCPUWaitingMask & 0x80000000)   // sync point is broken
        END_ATOMIC and return

    waitingCpus |= cpuMask
    if (waitingCpus == iAllEngagedCpusMask) waitingCpus |= 0x80000000
    iStageAndCPUWaitingMask = waitingCpus
END_ATOMIC
FOREVER
    if (iStageAndCPUWaitingMask & 0x80000000) break
END_FOREVER

*/
       
__NAKED__ void TBreakableSyncPoint::DoSW(TUint32 /*aCpuMask*/)
    {
    asm("stmfd sp!, {r4,lr} ");
    asm("add r0,r0,#%a0" : : "i"  _FOFF(TSyncPointBase, iStageAndCPUWaitingMask)); // skip vt
    asm("ldr r4,[r0,#4]");
    asm("ldr r4,[r4]");                     // r4 = iAllEngagedCpusMask
    __DATA_MEMORY_BARRIER_Z__(r12);
    asm("1: ");
    LDREX(2,0);                             // r2 = iStageAndCPUWaitingMask
    asm("ands r3,r2,#0x80000000");          // MSB set?
    asm("bne 3f");                          // sync point broken so return
    asm("and r2,r2,r4");                    // r2 has currently waiting cpus
    asm("orr r2, r2, r1");                  // orr in mask for this CPU
    asm("cmp r2,r4");                       // if r2 == r4 then all cpus have set their bit
    asm("orreq r2,r2,#0x80000000");         // set MSB
    STREX(12,2,0);                          // try to atomically update iStageAndCPUWaitingMask
    asm("cmp r12, #0 ");
    asm("bne 1b ");                         // write didn't succeed, try again
    __DATA_MEMORY_BARRIER__(r12);           // ensure that's written
#ifdef SYNCPOINT_WFE
    asm("ands r3,r2,#0x80000000");          // MSB set?
    ARM_SEVcc(CC_NE);
#endif
    asm("2: ");
    asm("ands r3,r2,#0x80000000");          // MSB set?
    asm("ldmfdne sp!, {r4,pc}");            // yes, sync point released: return
#ifdef SYNCPOINT_WFE
    __DATA_MEMORY_BARRIER__(r12);
    ARM_WFE;
#endif
    asm("ldr r2,[r0]");                     // otherwise re-read iStageAndCPUWaitingMask into r2
    //__DATA_MEMORY_BARRIER_Z__(r12);       // ensure read is observed
    asm("b 2b");                            // loop back
    asm("3:");
    CLREX;
#ifdef SYNCPOINT_WFE
    __DATA_MEMORY_BARRIER__(r12);           // ensure that's written
    ARM_SEV;
#endif
    asm("ldmfd sp!, {r4,pc}");              // sync point was already broken: return
    }
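
/*
   Illustrative only: a portable C++11 sketch (an assumption, not built as part of this
   file) of the breakable sync point above. Bit 31 marks the point as released/broken;
   a broken point is never re-armed here, matching the behaviour described in the
   comment block, and Break() is the releasing call mentioned there.

   #include <atomic>
   #include <cstdint>

   const std::uint32_t KBrokenFlag = 0x80000000u;

   void BreakableSyncPointSketch(std::atomic<std::uint32_t>& aStageAndWaiting,
                                 std::uint32_t aAllEngagedCpusMask,
                                 std::uint32_t aCpuMask)
       {
       std::uint32_t observed = aStageAndWaiting.load(std::memory_order_relaxed);
       for (;;)
           {
           if (observed & KBrokenFlag)
               return;                                              // already broken: wait returns at once
           std::uint32_t waiting = (observed & aAllEngagedCpusMask) | aCpuMask;
           std::uint32_t desired = waiting;
           if (waiting == aAllEngagedCpusMask)
               desired |= KBrokenFlag;                              // last CPU in releases everyone
           if (aStageAndWaiting.compare_exchange_weak(observed, desired,
                                                      std::memory_order_acq_rel))
               {
               observed = desired;
               break;                                               // store succeeded
               }
           }
       // Spin until the release/break flag appears (set above or by Break()).
       while (!(observed & KBrokenFlag))
           observed = aStageAndWaiting.load(std::memory_order_acquire);
       }
*/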
       

#ifdef PROPER_WFI
__NAKED__ void TIdleSupport::DoWFI()
    {
    __DATA_SYNC_BARRIER_Z__(r12);  // generally a good idea to issue a barrier before WFI
    ARM_WFI;
    __JUMP(,lr);
    }
#else
void TIdleSupport::DoWFI()
    {
    TInt c=NKern::CurrentCpu();
    FOREVER
        {
        TInt isr = Pending();
        if (isr!=1023)
            {
            BTRACE0(KIsrPendingCat,isr&0xff);
            break;
            }
        }
    }
#endif
       
__NAKED__ void TIdleSupport::DoIdleIPI(TUint32 /*aMask*/)
    {
    //r0 = cpu mask
    asm("ldr    r2,__EnagedCpusMask");      // only IPI engaged cores; r2 = address of engaged cores mask
    asm("ldr    r2,[r2]");
    asm("and    r0,r0,r2");                 // and out retired cores
    asm("ldr    r1,__KGICAddr");            // r1 = address of iGlobalIntDistAddress
    asm("ldr    r1, [r1]");                 // r1 = address of HW GIC interrupt distributor base
    __DATA_SYNC_BARRIER_Z__(r12);           // need DSB before sending any IPI
    asm("movs   r0, r0, lsl #16 ");         // CPU mask into bits 16-23 - any bits set in aMask?
    asm("orrne  r0, r0, #%a0" : : "i" ((TInt) IDLE_WAKEUP_IPI_VECTOR));
    asm("strne  r0, [r1, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq));   // trigger IPIs if any
    __JUMP(,lr);
    asm("__KGICAddr:");
    asm(".word %a0" : : "i" ((TInt)&TIdleSupport::iGlobalIntDistAddress));
    asm("__EnagedCpusMask:");
    asm(".word %a0" : : "i" ((TInt)&TIdleSupport::iAllEngagedCpusMask));
    }
       
#ifdef _DEBUG
__NAKED__  TInt TIdleSupport::DoClearIdleIPI()
#else
__NAKED__  void TIdleSupport::ClearIdleIPI()
#endif
    {
    __DATA_SYNC_BARRIER_Z__(r12);           // DSB
    asm("ldr    r1,__KCPUIFAddr");          // r1 = address of iBaseIntIfAddress
    asm("ldr    r1, [r1]");                 // r1 = address of HW GIC CPU interrupt interface base address
    asm("ldr    r0,[r1, #%a0]" : : "i" _FOFF(GicCpuIfc, iAck));
    // asm("mov    r0,#%a0" : : "i" ((TInt) IDLE_WAKEUP_IPI_VECTOR)); // has to be!
    asm("str    r0, [r1, #%a0]" : : "i" _FOFF(GicCpuIfc, iEoi));
    __JUMP(,lr);
    asm("__KCPUIFAddr:");
    asm(".word %a0" : : "i" ((TInt)&TIdleSupport::iBaseIntIfAddress)); // CPU interrupt interface base address
    }

#ifndef _DEBUG
TInt TIdleSupport::DoClearIdleIPI()
    {
    return 0;
    }
#endif

__NAKED__  TInt TIdleSupport::IntPending()
    {
    asm("ldr    r1,__KCPUIFAddr");          // r1 = address of iBaseIntIfAddress
    asm("ldr    r1, [r1]");                 // r1 = address of HW GIC CPU interrupt interface base address
    asm("ldr    r0, [r1, #%a0]" : : "i" _FOFF(GicCpuIfc, iHighestPending));
    __JUMP(,lr);
    }


#endif // ifdef __SMP__