kernel/eka/nkernsmp/arm/ncutils.cpp
changeset 90 947f0dc9f7a8
parent 0 a41df078684a
child 177 a232af6b0b1f
equal deleted inserted replaced
52:2d65c2f76d7b 90:947f0dc9f7a8
    21 #include <arm_tmr.h>
    21 #include <arm_tmr.h>
    22 #include <nk_irq.h>
    22 #include <nk_irq.h>
    23 
    23 
    24 extern "C" {
    24 extern "C" {
    25 extern SVariantInterfaceBlock* VIB;
    25 extern SVariantInterfaceBlock* VIB;
       
    26 
       
    27 extern TUint KernCoreStats_EnterIdle(TUint aCore);
       
    28 extern void KernCoreStats_LeaveIdle(TInt aCookie,TUint aCore);
       
    29 
       
    30 extern void DetachComplete();
       
    31 extern void send_irq_ipi(TSubScheduler*, TInt);
    26 }
    32 }
       
    33 
       
    34 
    27 
    35 
    28 /******************************************************************************
    36 /******************************************************************************
    29  * Spin lock
    37  * Spin lock
    30  ******************************************************************************/
    38  ******************************************************************************/
    31 /** Create a spin lock
    39 /** Create a spin lock
    88 	__KTRACE_OPT(KBOOT,DEBUGPRINT("iMaxTimerClock=%u", VIB->iMaxTimerClock));
    96 	__KTRACE_OPT(KBOOT,DEBUGPRINT("iMaxTimerClock=%u", VIB->iMaxTimerClock));
    89 	__KTRACE_OPT(KBOOT,DEBUGPRINT("iScuAddr=%08x", VIB->iScuAddr));
    97 	__KTRACE_OPT(KBOOT,DEBUGPRINT("iScuAddr=%08x", VIB->iScuAddr));
    90 	__KTRACE_OPT(KBOOT,DEBUGPRINT("iGicDistAddr=%08x", VIB->iGicDistAddr));
    98 	__KTRACE_OPT(KBOOT,DEBUGPRINT("iGicDistAddr=%08x", VIB->iGicDistAddr));
    91 	__KTRACE_OPT(KBOOT,DEBUGPRINT("iGicCpuIfcAddr=%08x", VIB->iGicCpuIfcAddr));
    99 	__KTRACE_OPT(KBOOT,DEBUGPRINT("iGicCpuIfcAddr=%08x", VIB->iGicCpuIfcAddr));
    92 	__KTRACE_OPT(KBOOT,DEBUGPRINT("iLocalTimerAddr=%08x", VIB->iLocalTimerAddr));
   100 	__KTRACE_OPT(KBOOT,DEBUGPRINT("iLocalTimerAddr=%08x", VIB->iLocalTimerAddr));
       
   101 	__KTRACE_OPT(KBOOT,DEBUGPRINT("iGlobalTimerAddr=%08x", VIB->iGlobalTimerAddr));
    93 
   102 
    94 	TScheduler& s = TheScheduler;
   103 	TScheduler& s = TheScheduler;
    95 	s.i_ScuAddr = (TAny*)VIB->iScuAddr;
   104 	s.iSX.iScuAddr = (ArmScu*)VIB->iScuAddr;
    96 	s.i_GicDistAddr = (TAny*)VIB->iGicDistAddr;
   105 	s.iSX.iGicDistAddr = (GicDistributor*)VIB->iGicDistAddr;
    97 	s.i_GicCpuIfcAddr = (TAny*)VIB->iGicCpuIfcAddr;
   106 	s.iSX.iGicCpuIfcAddr = (GicCpuIfc*)VIB->iGicCpuIfcAddr;
    98 	s.i_LocalTimerAddr = (TAny*)VIB->iLocalTimerAddr;
   107 	s.iSX.iLocalTimerAddr = (ArmLocalTimer*)VIB->iLocalTimerAddr;
    99 	s.i_TimerMax = (TAny*)(VIB->iMaxTimerClock / 1);		// use prescaler value of 1
   108 	s.iSX.iTimerMax = (VIB->iMaxTimerClock / 1);		// use prescaler value of 1
       
   109 #ifdef	__CPU_ARM_HAS_GLOBAL_TIMER_BLOCK
       
   110 	s.iSX.iGlobalTimerAddr = (ArmGlobalTimer*)VIB->iGlobalTimerAddr;
       
   111 #endif
   100 
   112 
   101 	TInt i;
   113 	TInt i;
   102 	for (i=0; i<KMaxCpus; ++i)
   114 	for (i=0; i<KMaxCpus; ++i)
   103 		{
   115 		{
   104 		TSubScheduler& ss = TheSubSchedulers[i];
   116 		TSubScheduler& ss = TheSubSchedulers[i];
   105 		ss.i_TimerMultF = (TAny*)KMaxTUint32;
   117 		ss.iSSX.iCpuFreqM = KMaxTUint32;
   106 		ss.i_TimerMultI = (TAny*)0x01000000u;
   118 		ss.iSSX.iCpuFreqS = 0;
   107 		ss.i_CpuMult = (TAny*)KMaxTUint32;
   119 		ss.iSSX.iCpuPeriodM = 0x80000000u;
   108 		ss.i_LastTimerSet = (TAny*)KMaxTInt32;
   120 		ss.iSSX.iCpuPeriodS = 31;
   109 		ss.i_TimestampError = (TAny*)0;
   121 		ss.iSSX.iNTimerFreqM = KMaxTUint32;
   110 		ss.i_TimerGap = (TAny*)16;
   122 		ss.iSSX.iNTimerFreqS = 0;
   111 		ss.i_MaxCorrection = (TAny*)64;
   123 		ss.iSSX.iNTimerPeriodM = 0x80000000u;
   112 		VIB->iTimerMult[i] = (volatile STimerMult*)&ss.i_TimerMultF;
   124 		ss.iSSX.iNTimerPeriodS = 31;
   113 		VIB->iCpuMult[i] = (volatile TUint32*)&ss.i_CpuMult;
   125 		ss.iSSX.iTimerFreqM = KMaxTUint32;
   114 		}
   126 		ss.iSSX.iTimerFreqS = 0;
       
   127 		ss.iSSX.iTimerPeriodM = 0x80000000u;
       
   128 		ss.iSSX.iTimerPeriodS = 31;
       
   129 		ss.iSSX.iLastSyncTime = 0;
       
   130 		ss.iSSX.iTicksSinceLastSync = 0;
       
   131 		ss.iSSX.iLastTimerSet = 0;
       
   132 		ss.iSSX.iGapEstimate = 10<<16;
       
   133 		ss.iSSX.iGapCount = 0;
       
   134 		ss.iSSX.iTotalTicks = 0;
       
   135 		ss.iSSX.iDitherer = 1;
       
   136 		ss.iSSX.iFreqErrorEstimate = 0;
       
   137 		ss.iSSX.iFreqErrorLimit = 0x00100000;
       
   138 		ss.iSSX.iErrorIntegrator = 0;
       
   139 		ss.iSSX.iRefAtLastCorrection = 0;
       
   140 		ss.iSSX.iM = 4;
       
   141 		ss.iSSX.iN = 18;
       
   142 		ss.iSSX.iD = 3;
       
   143 		VIB->iTimerMult[i] = 0;
       
   144 		VIB->iCpuMult[i] = 0;
       
   145 		UPerCpuUncached* u = VIB->iUncached[i];
       
   146 		ss.iUncached = u;
       
   147 		u->iU.iDetachCount = 0;
       
   148 		u->iU.iAttachCount = 0;
       
   149 		u->iU.iPowerOffReq = FALSE;
       
   150 		u->iU.iDetachCompleteFn = &DetachComplete;
       
   151 		}
       
   152 	__e32_io_completion_barrier();
   115 	InterruptInit0();
   153 	InterruptInit0();
   116 	}
   154 	}
   117 
   155 
   118 /** Register the global IRQ handler
   156 /** Register the global IRQ handler
   119 	Called by the base port at boot time to bind the top level IRQ dispatcher
   157 	Called by the base port at boot time to bind the top level IRQ dispatcher
   151 EXPORT_C void Arm::SetFiqHandler(TLinAddr aHandler)
   189 EXPORT_C void Arm::SetFiqHandler(TLinAddr aHandler)
   152 	{
   190 	{
   153 	ArmInterruptInfo.iFiqHandler=aHandler;
   191 	ArmInterruptInfo.iFiqHandler=aHandler;
   154 	}
   192 	}
   155 
   193 
       
   194 /** Register the global Idle handler
       
   195 	Called by the base port at boot time to register a handler containing a pointer to
       
   196 	a function that is called by the Kernel when each core reaches idle.
       
   197 	Should not be called at any other time.
       
   198 
       
   199 	@param	aHandler Pointer to idle handler function
       
   200 	@param	aPtr Idle handler function argument
       
   201  */
       
   202 EXPORT_C void Arm::SetIdleHandler(TCpuIdleHandlerFn aHandler, TAny* aPtr)
       
   203 	{
       
   204 	ArmInterruptInfo.iCpuIdleHandler.iHandler = aHandler;
       
   205 	ArmInterruptInfo.iCpuIdleHandler.iPtr = aPtr;
       
   206 	ArmInterruptInfo.iCpuIdleHandler.iPostambleRequired = EFalse;
       
   207 	}
       
   208 
   156 extern void initialiseState(TInt aCpu, TSubScheduler* aSS);
   209 extern void initialiseState(TInt aCpu, TSubScheduler* aSS);
   157 
   210 
   158 void Arm::Init1Interrupts()
   211 void Arm::Init1Interrupts()
   159 //
   212 //
   160 // Initialise the interrupt and exception vector handlers.
   213 // Initialise the interrupt and exception vector handlers.
   229 TUint32 NKern::IdleGenerationCount()
   282 TUint32 NKern::IdleGenerationCount()
   230 	{
   283 	{
   231 	return TheScheduler.iIdleGenerationCount;
   284 	return TheScheduler.iIdleGenerationCount;
   232 	}
   285 	}
   233 
   286 
   234 void NKern::Idle()
   287 void NKern::DoIdle()
   235 	{
   288 	{
   236 	TScheduler& s = TheScheduler;
   289 	TScheduler& s = TheScheduler;
   237 	TSubScheduler& ss = SubScheduler();	// OK since idle thread locked to CPU
   290 	TSubScheduler& ss = SubScheduler();	// OK since idle thread locked to CPU
       
   291 	SPerCpuUncached* u0 = &((UPerCpuUncached*)ss.iUncached)->iU;
   238 	TUint32 m = ss.iCpuMask;
   292 	TUint32 m = ss.iCpuMask;
       
   293 	TUint32 retire = 0;
       
   294 	TBool global_defer = FALSE;
       
   295 	TBool event_kick = FALSE;
   239 	s.iIdleSpinLock.LockIrq();
   296 	s.iIdleSpinLock.LockIrq();
   240 	TUint32 orig_cpus_not_idle = __e32_atomic_and_acq32(&s.iCpusNotIdle, ~m);
   297 	TUint32 orig_cpus_not_idle = __e32_atomic_and_acq32(&s.iCpusNotIdle, ~m);
   241 	if (orig_cpus_not_idle == m)
   298 	if (orig_cpus_not_idle == m)
   242 		{
   299 		{
   243 		// all CPUs idle
   300 		// all CPUs idle
   253 			NKern::Lock();
   310 			NKern::Lock();
   254 			NKern::Unlock();	// process idle DFCs here
   311 			NKern::Unlock();	// process idle DFCs here
   255 			return;
   312 			return;
   256 			}
   313 			}
   257 		}
   314 		}
       
   315 	TBool shutdown_check = !((s.iThreadAcceptCpus|s.iCCReactivateCpus) & m);
       
   316 	if (shutdown_check)
       
   317 		{
       
   318 		// check whether this CPU is ready to be powered off
       
   319 		s.iGenIPILock.LockOnly();
       
   320 		ss.iEventHandlerLock.LockOnly();
       
   321 		if ( !((s.iThreadAcceptCpus|s.iCCReactivateCpus) & m) && !ss.iDeferShutdown && !ss.iNextIPI && !ss.iEventHandlersPending)
       
   322 			{
       
   323 			for(;;)
       
   324 				{
       
   325 				if (s.iCCDeferCount)
       
   326 					{
       
   327 					global_defer = TRUE;
       
   328 					break;
       
   329 					}
       
   330 				if (s.iPoweringOff)
       
   331 					{
       
   332 					// another CPU might be in the process of powering off
       
   333 					SPerCpuUncached* u = &((UPerCpuUncached*)s.iPoweringOff->iUncached)->iU;
       
   334 					if (u->iDetachCount == s.iDetachCount)
       
   335 						{
       
   336 						// still powering off so we must wait
       
   337 						global_defer = TRUE;
       
   338 						break;
       
   339 						}
       
   340 					}
       
   341 				TUint32 more = s.CpuShuttingDown(ss);
       
   342 				retire = SCpuIdleHandler::ERetire;
       
   343 				if (more)
       
   344 					retire |= SCpuIdleHandler::EMore;
       
   345 				s.iPoweringOff = &ss;
       
   346 				s.iDetachCount = u0->iDetachCount;
       
   347 				break;
       
   348 				}
       
   349 			}
       
   350 		ss.iEventHandlerLock.UnlockOnly();
       
   351 		s.iGenIPILock.UnlockOnly();
       
   352 		}
       
   353 	if (!retire && ss.iCurrentThread->iSavedSP)
       
   354 		{
       
   355 		// rescheduled between entry to NKern::Idle() and here
       
   356 		// go round again to see if any more threads to pull from other CPUs
       
   357 		__e32_atomic_ior_ord32(&s.iCpusNotIdle, m);	// we aren't idle after all
       
   358 		s.iIdleSpinLock.UnlockIrq();
       
   359 		return;
       
   360 		}
       
   361 	if (global_defer)
       
   362 		{
       
   363 		// Don't WFI if we're only waiting for iCCDeferCount to reach zero or for
       
   364 		// another CPU to finish powering down since we might not get another IPI.
       
   365 		__e32_atomic_ior_ord32(&s.iCpusNotIdle, m);	// we aren't idle after all
       
   366 		s.iIdleSpinLock.UnlockIrq();
       
   367 		__snooze();
       
   368 		return;
       
   369 		}
   258 
   370 
   259 	// postamble happens here - interrupts cannot be reenabled
   371 	// postamble happens here - interrupts cannot be reenabled
       
   372 	TUint32 arg = orig_cpus_not_idle & ~m;
       
   373 	if (arg == 0)
       
   374 		s.AllCpusIdle();
   260 	s.iIdleSpinLock.UnlockOnly();
   375 	s.iIdleSpinLock.UnlockOnly();
   261 	NKIdle(orig_cpus_not_idle & ~m);
   376 
       
   377 	//TUint cookie = KernCoreStats::EnterIdle((TUint8)ss.iCpuNum);
       
   378 	TUint cookie = KernCoreStats_EnterIdle((TUint8)ss.iCpuNum);
       
   379 
       
   380 	arg |= retire;
       
   381 	NKIdle(arg);
       
   382 
       
   383 	//KernCoreStats::LeaveIdle(cookie, (TUint8)ss.iCpuNum);
       
   384 	KernCoreStats_LeaveIdle(cookie, (TUint8)ss.iCpuNum);
       
   385 
   262 
   386 
   263 	// interrupts have not been reenabled
   387 	// interrupts have not been reenabled
   264 	s.iIdleSpinLock.LockOnly();
   388 	s.iIdleSpinLock.LockOnly();
   265 	__e32_atomic_ior_ord32(&s.iCpusNotIdle, m);
   389 
       
   390 	if (retire)
       
   391 		{
       
   392 		// we just came back from power down
       
   393 		SPerCpuUncached* u = &((UPerCpuUncached*)ss.iUncached)->iU;
       
   394 		u->iPowerOnReq = 0;
       
   395 		__e32_io_completion_barrier();
       
   396 		s.iGenIPILock.LockOnly();
       
   397 		ss.iEventHandlerLock.LockOnly();
       
   398 		s.iIpiAcceptCpus |= m;
       
   399 		s.iCCReactivateCpus |= m;
       
   400 		s.iCpusGoingDown &= ~m;
       
   401 		if (s.iPoweringOff == &ss)
       
   402 			s.iPoweringOff = 0;
       
   403 		if (ss.iEventHandlersPending)
       
   404 			event_kick = TRUE;
       
   405 		ss.iEventHandlerLock.UnlockOnly();
       
   406 		s.iGenIPILock.UnlockOnly();
       
   407 		}
       
   408 
       
   409 	TUint32 ci = __e32_atomic_ior_ord32(&s.iCpusNotIdle, m);
   266 	if (ArmInterruptInfo.iCpuIdleHandler.iPostambleRequired)
   410 	if (ArmInterruptInfo.iCpuIdleHandler.iPostambleRequired)
   267 		{
   411 		{
   268 		ArmInterruptInfo.iCpuIdleHandler.iPostambleRequired = FALSE;
   412 		ArmInterruptInfo.iCpuIdleHandler.iPostambleRequired = FALSE;
   269 		NKIdle(-1);
   413 		NKIdle(ci|m|SCpuIdleHandler::EPostamble);
       
   414 		}
       
   415 	if (ci == 0)
       
   416 		s.FirstBackFromIdle();
       
   417 
       
   418 	if (retire)
       
   419 		{
       
   420 		s.iCCReactivateDfc.RawAdd();	// kick load balancer to give us some work
       
   421 		if (event_kick)
       
   422 			send_irq_ipi(&ss, EQueueEvent_Kick);	// so that we will process pending events
   270 		}
   423 		}
   271 	s.iIdleSpinLock.UnlockIrq();	// reenables interrupts
   424 	s.iIdleSpinLock.UnlockIrq();	// reenables interrupts
   272 	}
   425 	}
   273 
   426 
       
   427 TBool TSubScheduler::Detached()
       
   428 	{
       
   429 	SPerCpuUncached* u = &((UPerCpuUncached*)iUncached)->iU;
       
   430 	return u->iDetachCount != u->iAttachCount;
       
   431 	}
       
   432 
       
   433 TBool TScheduler::CoreControlSupported()
       
   434 	{
       
   435 	return VIB->iCpuPowerUpFn != 0;
       
   436 	}
       
   437 
       
void TScheduler::CCInitiatePowerUp(TUint32 aCores)
	{
	// Power up every core whose bit is set in aCores via the variant's
	// power-up callout, then spin-wait until each core has reattached.
	// No-op if the variant supplies no callout or the mask is empty.
	TCpuPowerUpFn pUp = VIB->iCpuPowerUpFn;
	if (pUp && aCores)
		{
		TInt i;
		for (i=0; i<KMaxCpus; ++i)
			{
			if (aCores & (1u<<i))
				{
				TSubScheduler& ss = TheSubSchedulers[i];
				SPerCpuUncached& u = ((UPerCpuUncached*)ss.iUncached)->iU;
				u.iPowerOnReq = TRUE;
				// barrier: make the power-on request visible before the callout runs
				__e32_io_completion_barrier();
				pUp(i, &u);

				// wait for core to reattach
				// (attach count catches up with detach count once the core is back)
				while (u.iDetachCount != u.iAttachCount)
					{
					__snooze();
					}
				}
			}
		}
	}
       
   463 
       
void TScheduler::CCIndirectPowerDown(TAny*)
	{
	// DFC body: run the variant's power-down callout for every core which has
	// flagged a power-off request, then clear the request flag.  The barriers
	// order the callout against the flag update, and the flag update against
	// whatever the caller does next.  No-op if the variant has no callout.
	TCpuPowerDownFn pDown = VIB->iCpuPowerDownFn;
	if (pDown)
		{
		TInt i;
		for (i=0; i<KMaxCpus; ++i)
			{
			TSubScheduler& ss = TheSubSchedulers[i];
			SPerCpuUncached& u = ((UPerCpuUncached*)ss.iUncached)->iU;
			if (u.iPowerOffReq)
				{
				pDown(i, &u);
				__e32_io_completion_barrier();
				u.iPowerOffReq = FALSE;
				__e32_io_completion_barrier();
				}
			}
		}
	}
       
   484 
       
   485 // Called on any CPU which receives an indirect power down IPI
       
   486 extern "C" void handle_indirect_powerdown_ipi()
       
   487 	{
       
   488 	TScheduler& s = TheScheduler;
       
   489 	TSubScheduler& ss = SubScheduler();
       
   490 	if (s.iIpiAcceptCpus & ss.iCpuMask)
       
   491 		s.iCCPowerDownDfc.Add();
       
   492 	}
   274 
   493 
   275 EXPORT_C TUint32 NKern::CpuTimeMeasFreq()
   494 EXPORT_C TUint32 NKern::CpuTimeMeasFreq()
   276 	{
   495 	{
   277 	return NKern::TimestampFrequency();
   496 	return NKern::TimestampFrequency();
   278 	}
   497 	}
   286  	@pre aMicroseconds should be nonnegative
   505  	@pre aMicroseconds should be nonnegative
   287 	@pre any context
   506 	@pre any context
   288  */
   507  */
   289 EXPORT_C TInt NKern::TimesliceTicks(TUint32 aMicroseconds)
   508 EXPORT_C TInt NKern::TimesliceTicks(TUint32 aMicroseconds)
   290 	{
   509 	{
   291 	TUint32 mf32 = (TUint32)TheScheduler.i_TimerMax;
   510 	TUint32 mf32 = TheScheduler.iSX.iTimerMax;
   292 	TUint64 mf(mf32);
   511 	TUint64 mf(mf32);
   293 	TUint64 ticks = mf*TUint64(aMicroseconds) + UI64LIT(999999);
   512 	TUint64 ticks = mf*TUint64(aMicroseconds) + UI64LIT(999999);
   294 	ticks /= UI64LIT(1000000);
   513 	ticks /= UI64LIT(1000000);
   295 	if (ticks > TUint64(TInt(KMaxTInt)))
   514 	if (ticks > TUint64(TInt(KMaxTInt)))
   296 		return KMaxTInt;
   515 		return KMaxTInt;
   297 	else
   516 	else
   298 		return (TInt)ticks;
   517 		return (TInt)ticks;
   299 	}
   518 	}
   300 
   519 
   301 
   520 
       
   521 #if defined(__NKERN_TIMESTAMP_USE_LOCAL_TIMER__)
       
   522 	// Assembler
       
   523 #elif defined(__NKERN_TIMESTAMP_USE_SCU_GLOBAL_TIMER__)
       
   524 	// Assembler
       
   525 #elif defined(__NKERN_TIMESTAMP_USE_INLINE_BSP_CODE__)
       
   526 #define __DEFINE_NKERN_TIMESTAMP_CPP__
       
   527 #include <variant_timestamp.h>
       
   528 #undef __DEFINE_NKERN_TIMESTAMP_CPP__
       
   529 #elif defined(__NKERN_TIMESTAMP_USE_BSP_CALLOUT__)
       
   530 	// Assembler
       
   531 #else
       
   532 #error No definition for NKern::Timestamp()
       
   533 #endif
       
   534 
/** Get the frequency of counter queried by NKern::Timestamp().

@publishedPartner
@prototype
*/
EXPORT_C TUint32 NKern::TimestampFrequency()
	{
#if defined(__NKERN_TIMESTAMP_USE_LOCAL_TIMER__)
	// Use per-CPU local timer in Cortex A9 or ARM11MP
	return TheScheduler.iSX.iTimerMax;
#elif defined(__NKERN_TIMESTAMP_USE_SCU_GLOBAL_TIMER__)
	// Use global timer in Cortex A9 r1p0
	return TheScheduler.iSX.iTimerMax;
#elif defined(__NKERN_TIMESTAMP_USE_INLINE_BSP_CODE__)
	// Use code in <variant_timestamp.h> supplied by BSP
	return KTimestampFrequency;
#elif defined(__NKERN_TIMESTAMP_USE_BSP_CALLOUT__)
	// Call function defined in variant
	// NOTE(review): no return statement is visible in this branch — presumably
	// the variant supplies the implementation elsewhere (cf. the assembler-
	// implemented branches above); confirm this configuration actually builds.
#else
#error No definition for NKern::TimestampFrequency()
#endif
	}
       
   557