--- a/kernel/eka/bmarm/ekernsmp.def Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/bmarm/ekernsmp.def Tue May 11 17:28:22 2010 +0300
@@ -1046,4 +1046,13 @@
RandomSalt__4KernUxUi @ 1045 NONAME ; Kern::RandomSalt(unsigned long long, unsigned int)
RandomSalt__4KernPCUcUiUi @ 1046 NONAME R3UNUSED ; Kern::RandomSalt(unsigned char const *, unsigned int, unsigned int)
SecureRandom__4KernR5TDes8 @ 1047 NONAME R3UNUSED ; Kern::SecureRandom(TDes8 &)
+ NotificationLock__13DPagingDevice @ 1048 NONAME R3UNUSED ; DPagingDevice::NotificationLock(void)
+ Configure__13KernCoreStatsUi @ 1049 NONAME R3UNUSED ABSENT ; KernCoreStats::Configure(unsigned int)
+ Engage__13KernCoreStatsi @ 1050 NONAME R3UNUSED ABSENT ; KernCoreStats::Engage(int)
+ Retire__13KernCoreStatsii @ 1051 NONAME R3UNUSED ABSENT ; KernCoreStats::Retire(int, int)
+ Stats__13KernCoreStatsPv @ 1052 NONAME R3UNUSED ABSENT ; KernCoreStats::Stats(void *)
+ SetNumberOfActiveCpus__5NKerni @ 1053 NONAME ABSENT
+ SetIdleHandler__3ArmPFPvUlPVv_vPv @ 1054 NONAME R3UNUSED ABSENT ; Arm::SetIdleHandler(void (*)(void *, unsigned long, void volatile *), void *)
+ FreeRamZone__4EpocUi @ 1055 NONAME R3UNUSED ABSENT ; Epoc::FreeRamZone(unsigned int)
+ SelectiveAlloc__16TBitMapAllocatorii @ 1056 NONAME R3UNUSED ; TBitMapAllocator::SelectiveAlloc(int, int)
--- a/kernel/eka/bmarm/ekernu.def Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/bmarm/ekernu.def Tue May 11 17:28:22 2010 +0300
@@ -1042,4 +1042,14 @@
RandomSalt__4KernUxUi @ 1041 NONAME ; Kern::RandomSalt(unsigned long long, unsigned int)
RandomSalt__4KernPCUcUiUi @ 1042 NONAME R3UNUSED ; Kern::RandomSalt(unsigned char const *, unsigned int, unsigned int)
SecureRandom__4KernR5TDes8 @ 1043 NONAME R3UNUSED ; Kern::SecureRandom(TDes8 &)
+ NotificationLock__13DPagingDevice @ 1044 NONAME R3UNUSED ; DPagingDevice::NotificationLock(void)
+ Configure__13KernCoreStatsUi @ 1045 NONAME R3UNUSED ABSENT ; KernCoreStats::Configure(unsigned int)
+ Engage__13KernCoreStatsi @ 1046 NONAME R3UNUSED ABSENT ; KernCoreStats::Engage(int)
+ EnterIdle__13KernCoreStats @ 1047 NONAME R3UNUSED ABSENT ; KernCoreStats::EnterIdle()
+ LeaveIdle__13KernCoreStatsUi @ 1048 NONAME R3UNUSED ABSENT ; KernCoreStats::LeaveIdle(unsigned int)
+ Retire__13KernCoreStatsii @ 1049 NONAME R3UNUSED ABSENT ; KernCoreStats::Retire(int, int)
+ Stats__13KernCoreStatsPv @ 1050 NONAME R3UNUSED ABSENT ; KernCoreStats::Stats(void *)
+ SetIdleHandler__3ArmPFPvUl_vPv @ 1051 NONAME R3UNUSED ABSENT ; Arm::SetIdleHandler(void (*)(void *, unsigned long), void *)
+ FreeRamZone__4EpocUi @ 1052 NONAME R3UNUSED ABSENT ; Epoc::FreeRamZone(unsigned int)
+ SelectiveAlloc__16TBitMapAllocatorii @ 1053 NONAME R3UNUSED ; TBitMapAllocator::SelectiveAlloc(int, int)
--- a/kernel/eka/bwins/ekernu.def Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/bwins/ekernu.def Tue May 11 17:28:22 2010 +0300
@@ -929,4 +929,10 @@
?RandomSalt@Kern@@SAX_KI@Z @ 928 NONAME ; public: static void __cdecl Kern::RandomSalt(unsigned __int64,unsigned int)
?RandomSalt@Kern@@SAXPBEII@Z @ 929 NONAME ; void Kern::RandomSalt(unsigned char const *, unsigned int, unsigned int)
?SecureRandom@Kern@@SAHAAVTDes8@@@Z @ 930 NONAME ; int Kern::SecureRandom(class TDes8 &)
+ ?NotificationLock@DPagingDevice@@QAEPAVNFastMutex@@XZ @ 931 NONAME ; public: class NFastMutex * __thiscall DPagingDevice::NotificationLock(void)
+ ?Configure@KernCoreStats@@SAHI@Z @ 932 NONAME ABSENT ; public: static int KernCoreStats::Configure(unsigned int)
+ ?Engage@KernCoreStats@@SAHH@Z @ 933 NONAME ABSENT ; public: static int KernCoreStats::Engage(int)
+ ?Retire@KernCoreStats@@SAHHH@Z @ 934 NONAME ABSENT ; public: static int KernCoreStats::Retire(int, int)
+ ?Stats@KernCoreStats@@SAHPAX@Z @ 935 NONAME ABSENT ; public: static int KernCoreStats::Stats(void *)
+ ?SelectiveAlloc@TBitMapAllocator@@QAEIHH@Z @ 936 NONAME ; public: unsigned int __thiscall TBitMapAllocator::SelectiveAlloc(int,int)
--- a/kernel/eka/bx86/ekernsmp.def Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/bx86/ekernsmp.def Tue May 11 17:28:22 2010 +0300
@@ -1004,4 +1004,12 @@
?RandomSalt@Kern@@SAX_KI@Z @ 1003 NONAME ; public: static void __cdecl Kern::RandomSalt(unsigned __int64,unsigned int)
?RandomSalt@Kern@@SAXPBEII@Z @ 1004 NONAME ; void Kern::RandomSalt(unsigned char const *, unsigned int, unsigned int)
?SecureRandom@Kern@@SAHAAVTDes8@@@Z @ 1005 NONAME ; int Kern::SecureRandom(class TDes8 &)
+ ?NotificationLock@DPagingDevice@@QAEPAVNFastMutex@@XZ @ 1006 NONAME ; public: class NFastMutex * __thiscall DPagingDevice::NotificationLock(void)
+ ?Configure@KernCoreStats@@SAHI@Z @ 1007 NONAME ABSENT ; public: static int KernCoreStats::Configure(unsigned int)
+ ?Engage@KernCoreStats@@SAHH@Z @ 1008 NONAME ABSENT ; public: static int KernCoreStats::Engage(int)
+ ?Retire@KernCoreStats@@SAHHH@Z @ 1009 NONAME ABSENT ; public: static int KernCoreStats::Retire(int, int)
+ ?Stats@KernCoreStats@@SAHPAX@Z @ 1010 NONAME ABSENT ; public: static int KernCoreStats::Stats(void *)
+ ?SetNumberOfActiveCpus@NKern@@SAXH@Z @ 1011 NONAME ABSENT ; public: static void __cdecl NKern::SetNumberOfActiveCpus(int)
+ ?FreeRamZone@Epoc@@SAHI@Z @ 1012 NONAME ABSENT ; public: static int Epoc::FreeRamZone(unsigned int)
+ ?SelectiveAlloc@TBitMapAllocator@@QAEIHH@Z @ 1013 NONAME ; public: unsigned int __thiscall TBitMapAllocator::SelectiveAlloc(int,int)
--- a/kernel/eka/bx86/ekernu.def Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/bx86/ekernu.def Tue May 11 17:28:22 2010 +0300
@@ -948,4 +948,13 @@
?RandomSalt@Kern@@SAX_KI@Z @ 947 NONAME ; public: static void __cdecl Kern::RandomSalt(unsigned __int64,unsigned int)
?RandomSalt@Kern@@SAXPBEII@Z @ 948 NONAME ; void Kern::RandomSalt(unsigned char const *, unsigned int, unsigned int)
?SecureRandom@Kern@@SAHAAVTDes8@@@Z @ 949 NONAME ; int Kern::SecureRandom(class TDes8 &)
+ ?NotificationLock@DPagingDevice@@QAEPAVNFastMutex@@XZ @ 950 NONAME ; public: class NFastMutex * __thiscall DPagingDevice::NotificationLock(void)
+ ?Configure@KernCoreStats@@SAHI@Z @ 951 NONAME ABSENT ; public: static int KernCoreStats::Configure(unsigned int)
+ ?Engage@KernCoreStats@@SAHH@Z @ 952 NONAME ABSENT ; public: static int KernCoreStats::Engage(int)
+ ?EnterIdle@KernCoreStats@@SAIXZ @ 953 NONAME ABSENT ; public: static unsigned int KernCoreStats::EnterIdle()
+ ?LeaveIdle@KernCoreStats@@SAXI@Z @ 954 NONAME ABSENT ; public: static void KernCoreStats::LeaveIdle(unsigned int)
+ ?Retire@KernCoreStats@@SAHHH@Z @ 955 NONAME ABSENT ; public: static int KernCoreStats::Retire(int, int)
+ ?Stats@KernCoreStats@@SAHPAX@Z @ 956 NONAME ABSENT ; public: static int KernCoreStats::Stats(void *)
+ ?FreeRamZone@Epoc@@SAHI@Z @ 957 NONAME ABSENT ; public: static int Epoc::FreeRamZone(unsigned int)
+ ?SelectiveAlloc@TBitMapAllocator@@QAEIHH@Z @ 958 NONAME ; public: unsigned int __thiscall TBitMapAllocator::SelectiveAlloc(int,int)
--- a/kernel/eka/bx86gcc/ekernsmp.def Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/bx86gcc/ekernsmp.def Tue May 11 17:28:22 2010 +0300
@@ -1092,4 +1092,12 @@
_ZN4Kern10RandomSaltEyj @ 1091 NONAME
_ZN4Kern10RandomSaltEPKhjj @ 1092 NONAME
_ZN4Kern12SecureRandomER5TDes8 @ 1093 NONAME
+ _ZN13DPagingDevice16NotificationLockEv @ 1094 NONAME
+ _ZN13KernCoreStats5StatsEPv @ 1095 NONAME ABSENT
+ _ZN13KernCoreStats6EngageEi @ 1096 NONAME ABSENT
+ _ZN13KernCoreStats6RetireEii @ 1097 NONAME ABSENT
+ _ZN13KernCoreStats9ConfigureEj @ 1098 NONAME ABSENT
+ _ZN5NKern21SetNumberOfActiveCpusEi @ 1099 NONAME ABSENT
+ _ZN4Epoc11FreeRamZoneEj @ 1100 NONAME ABSENT
+ _ZN16TBitMapAllocator14SelectiveAllocEii @ 1101 NONAME
--- a/kernel/eka/bx86gcc/ekernu.def Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/bx86gcc/ekernu.def Tue May 11 17:28:22 2010 +0300
@@ -1031,4 +1031,13 @@
_ZN4Kern10RandomSaltEyj @ 1030 NONAME
_ZN4Kern10RandomSaltEPKhjj @ 1031 NONAME
_ZN4Kern12SecureRandomER5TDes8 @ 1032 NONAME
+ _ZN13DPagingDevice16NotificationLockEv @ 1033 NONAME
+ _ZN13KernCoreStats5StatsEPv @ 1034 NONAME ABSENT
+ _ZN13KernCoreStats6EngageEi @ 1035 NONAME ABSENT
+ _ZN13KernCoreStats6RetireEii @ 1036 NONAME ABSENT
+ _ZN13KernCoreStats9ConfigureEj @ 1037 NONAME ABSENT
+ _ZN13KernCoreStats9EnterIdleEv @ 1038 NONAME ABSENT
+ _ZN13KernCoreStats9LeaveIdleEj @ 1039 NONAME ABSENT
+ _ZN4Epoc11FreeRamZoneEj @ 1040 NONAME ABSENT
+ _ZN16TBitMapAllocator14SelectiveAllocEii @ 1041 NONAME
--- a/kernel/eka/drivers/locmedia/locmedia.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/drivers/locmedia/locmedia.cpp Tue May 11 17:28:22 2010 +0300
@@ -3615,15 +3615,19 @@
void DPrimaryMediaBase::RequestCountInc()
{
__ASSERT_DEBUG(iBody, LOCM_FAULT());
- TInt oldVal = (TInt) __e32_atomic_add_ord32(&iBody->iRequestCount, (TUint) 1);
-//Kern::Printf("RCINC: this %x cnt %d, old %d", this, iBody->iRequestCount, oldVal);
-
- OstTraceDefExt2( OST_TRACE_CATEGORY_RND, TRACE_DEMANDPAGING, DPRIMARYMEDIABASE_REQUESTCOUNTINC, "new count=%d; old count=%d", iBody->iRequestCount, oldVal );
-
- if (oldVal == 0 && iBody->iPagingDevice)
+ if (iBody->iPagingDevice)
{
-//Kern::Printf("RCINC: NotifyBusy()");
- iBody->iPagingDevice->NotifyBusy();
+ NFastMutex* lock = iBody->iPagingDevice->NotificationLock();
+ NKern::FMWait(lock);
+ TInt oldVal = iBody->iRequestCount++;
+ //Kern::Printf("RCINC: this %x cnt %d, old %d", this, iBody->iRequestCount, oldVal);
+ OstTraceDefExt2( OST_TRACE_CATEGORY_RND, TRACE_DEMANDPAGING, DPRIMARYMEDIABASE_REQUESTCOUNTINC, "new count=%d; old count=%d", iBody->iRequestCount, oldVal );
+ if (oldVal == 0)
+ {
+ //Kern::Printf("RCINC: NotifyBusy()");
+ iBody->iPagingDevice->NotifyBusy();
+ }
+ NKern::FMSignal(lock);
}
}
@@ -3635,17 +3639,21 @@
void DPrimaryMediaBase::RequestCountDec()
{
__ASSERT_DEBUG(iBody, LOCM_FAULT());
- TInt oldVal = (TInt) __e32_atomic_add_ord32(&iBody->iRequestCount, (TUint) -1);
-//Kern::Printf("RCDEC: this %x cnt %d, old %d", this, iBody->iRequestCount, oldVal);
-
- OstTraceDefExt2( OST_TRACE_CATEGORY_RND, TRACE_DEMANDPAGING, DPRIMARYMEDIABASE_REQUESTCOUNTDEC, "new count=%d; old count=%d", iBody->iRequestCount, oldVal );
-
- if (oldVal == 1 && iBody->iPagingDevice)
+ if (iBody->iPagingDevice)
{
-//Kern::Printf("RCDEC: NotifyIdle()");
- iBody->iPagingDevice->NotifyIdle();
+ NFastMutex* lock = iBody->iPagingDevice->NotificationLock();
+ NKern::FMWait(lock);
+ TInt oldVal = iBody->iRequestCount--;
+ //Kern::Printf("RCDEC: this %x cnt %d, old %d", this, iBody->iRequestCount, oldVal);
+ OstTraceDefExt2( OST_TRACE_CATEGORY_RND, TRACE_DEMANDPAGING, DPRIMARYMEDIABASE_REQUESTCOUNTDEC, "new count=%d; old count=%d", iBody->iRequestCount, oldVal );
+ if (oldVal == 1)
+ {
+ //Kern::Printf("RCDEC: NotifyIdle()");
+ iBody->iPagingDevice->NotifyIdle();
+ }
+ NKern::FMSignal(lock);
+ __ASSERT_DEBUG(iBody->iRequestCount >= 0, LOCM_FAULT());
}
- __ASSERT_DEBUG(iBody->iRequestCount >= 0, LOCM_FAULT());
}
#endif // __DEMAND_PAGING__
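
A minimal sketch (not part of the patch; TBusyIdleTracker is a hypothetical illustration) of the notification pattern the locmedia change above adopts: instead of an atomic add, the request count is updated under the paging device's NotificationLock(), so the 0->1 and 1->0 transitions and the matching NotifyBusy()/NotifyIdle() calls cannot interleave between threads.

class TBusyIdleTracker		// hypothetical illustration only
	{
public:
	TBusyIdleTracker(DPagingDevice* aDevice) : iDevice(aDevice), iCount(0) {}
	void Inc()
		{
		NFastMutex* lock = iDevice->NotificationLock();
		NKern::FMWait(lock);
		if (iCount++ == 0)
			iDevice->NotifyBusy();		// first outstanding request - device becomes busy
		NKern::FMSignal(lock);
		}
	void Dec()
		{
		NFastMutex* lock = iDevice->NotificationLock();
		NKern::FMWait(lock);
		if (--iCount == 0)
			iDevice->NotifyIdle();		// last outstanding request done - device becomes idle
		NKern::FMSignal(lock);
		}
private:
	DPagingDevice* iDevice;
	TInt iCount;
	};
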
--- a/kernel/eka/drivers/pbus/mmc/stack.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/drivers/pbus/mmc/stack.cpp Tue May 11 17:28:22 2010 +0300
@@ -3517,13 +3517,10 @@
OstTrace0( TRACE_INTERNALS, DMMCSTACK_DETERMINEBUSWIDTHANDCLOCKSM3, "EStWritePowerClass" );
// Check the card type is valid
- // The only currently valid values for this field are 0x01 or 0x03
- TUint cardType = cardP->iExtendedCSD.CardType();
- if (cardType != (TExtendedCSD::EHighSpeedCard26Mhz) &&
- cardType != (TExtendedCSD::EHighSpeedCard26Mhz | TExtendedCSD::EHighSpeedCard52Mhz))
- {
- __KTRACE_OPT(KPBUS1, Kern::Printf("Unsupported card type %u", cardType));
- OstTrace1( TRACE_INTERNALS, DMMCSTACK_DETERMINEBUSWIDTHANDCLOCKSM4, "Unsupported card type=%u", cardType );
+ if (!(cardP->iExtendedCSD.IsSupportedCardType()))
+ {
+ __KTRACE_OPT(KPBUS1, Kern::Printf("Unsupported card type %u", cardP->iExtendedCSD.CardType()));
+ OstTrace1( TRACE_INTERNALS, DMMCSTACK_DETERMINEBUSWIDTHANDCLOCKSM4, "Unsupported card type=%u", cardP->iExtendedCSD.CardType() );
SMF_GOTOS(EStExit);
}
@@ -3618,13 +3615,10 @@
cardP->SetHighSpeedClock(0);
// Check the card type is valid
- // The only currently valid values for this field are 0x01 or 0x03
- TUint cardType = cardP->iExtendedCSD.CardType();
- if (cardType != (TExtendedCSD::EHighSpeedCard26Mhz) &&
- cardType != (TExtendedCSD::EHighSpeedCard26Mhz | TExtendedCSD::EHighSpeedCard52Mhz))
- {
- __KTRACE_OPT(KPBUS1, Kern::Printf("Unsupported card type %u", cardType));
- OstTrace1( TRACE_INTERNALS, DMMCSTACK_CONFIGUREHIGHSPEEDSM4, "Unsupported card type=%u", cardType );
+ if (!(cardP->iExtendedCSD.IsSupportedCardType()))
+ {
+ __KTRACE_OPT(KPBUS1, Kern::Printf("Unsupported card type %u", cardP->iExtendedCSD.CardType()));
+ OstTrace1( TRACE_INTERNALS, DMMCSTACK_CONFIGUREHIGHSPEEDSM4, "Unsupported card type=%u", cardP->iExtendedCSD.CardType() );
SMF_GOTOS(EStExit);
}
--- a/kernel/eka/drivers/usbcc/chapter9.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/drivers/usbcc/chapter9.cpp Tue May 11 17:28:22 2010 +0300
@@ -1136,8 +1136,19 @@
// New configuration is the same as the old one (but not 0)
if (iCurrentConfig == aValue)
{
- // no-op
__KTRACE_OPT(KUSB, Kern::Printf(" Configuration: New == Old == %d --> exiting", aValue));
+
+ // Per section 9.1.1.5 of the USB spec, the data toggle is reset to zero here
+ // when a SetConfiguration(x->x) (x != 0) request is received, even though we
+ // currently only support a single configuration.
+ TInt num = 0;
+ TInt ret = DoForEveryEndpointInUse(&DUsbClientController::ResetDataToggle, num);
+ if(ret != KErrNone)
+ {
+ __KTRACE_OPT(KPANIC, Kern::Printf(" Error: Endpoint data toggle reset failed"));
+ }
+ __KTRACE_OPT(KUSB, Kern::Printf(" Called ResetDataToggle() for %d endpoints", num));
+
return;
}
// Device is already configured
--- a/kernel/eka/eabi/ekernsmp.def Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/eabi/ekernsmp.def Tue May 11 17:28:22 2010 +0300
@@ -1180,4 +1180,13 @@
_ZN4Kern10RandomSaltEyj @ 1179 NONAME
_ZN4Kern10RandomSaltEPKhjj @ 1180 NONAME
_ZN4Kern12SecureRandomER5TDes8 @ 1181 NONAME
+ _ZN13DPagingDevice16NotificationLockEv @ 1182 NONAME
+ _ZN13KernCoreStats5StatsEPv @ 1183 NONAME ABSENT
+ _ZN13KernCoreStats6EngageEi @ 1184 NONAME ABSENT
+ _ZN13KernCoreStats6RetireEii @ 1185 NONAME ABSENT
+ _ZN13KernCoreStats9ConfigureEj @ 1186 NONAME ABSENT
+ _ZN5NKern21SetNumberOfActiveCpusEi @ 1187 NONAME ABSENT
+ _ZN3Arm14SetIdleHandlerEPFvPvmPVvES0_ @ 1188 NONAME ABSENT
+ _ZN4Epoc11FreeRamZoneEj @ 1189 NONAME ABSENT
+ _ZN16TBitMapAllocator14SelectiveAllocEii @ 1190 NONAME
--- a/kernel/eka/eabi/ekernu.def Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/eabi/ekernu.def Tue May 11 17:28:22 2010 +0300
@@ -1171,4 +1171,14 @@
_ZN4Kern10RandomSaltEyj @ 1170 NONAME
_ZN4Kern10RandomSaltEPKhjj @ 1171 NONAME
_ZN4Kern12SecureRandomER5TDes8 @ 1172 NONAME
+ _ZN13DPagingDevice16NotificationLockEv @ 1173 NONAME
+ _ZN13KernCoreStats5StatsEPv @ 1174 NONAME ABSENT
+ _ZN13KernCoreStats6EngageEi @ 1175 NONAME ABSENT
+ _ZN13KernCoreStats6RetireEii @ 1176 NONAME ABSENT
+ _ZN13KernCoreStats9ConfigureEj @ 1177 NONAME ABSENT
+ _ZN13KernCoreStats9EnterIdleEv @ 1178 NONAME ABSENT
+ _ZN13KernCoreStats9LeaveIdleEj @ 1179 NONAME ABSENT
+ _ZN3Arm14SetIdleHandlerEPFvPvmES0_ @ 1180 NONAME ABSENT
+ _ZN4Epoc11FreeRamZoneEj @ 1181 NONAME ABSENT
+ _ZN16TBitMapAllocator14SelectiveAllocEii @ 1182 NONAME
--- a/kernel/eka/include/d32usbdi_errors.h Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/include/d32usbdi_errors.h Tue May 11 17:28:22 2010 +0300
@@ -201,6 +201,7 @@
EUsbDevMonDeviceAttachDenied = 41,
EUsbHubDriverZeroInterfaceTokenProduced = 42,
EUsbInterfaceSuccessfulPipeOpenWithNoPipe = 43,
+ EFailedToLockHostStackInWaitDeviceStateMutex = 44,
};
_LIT(KUsbDescFaultCat, "USBDesc-Fault");
--- a/kernel/eka/include/drivers/mmc.h Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/include/drivers/mmc.h Tue May 11 17:28:22 2010 +0300
@@ -1540,7 +1540,8 @@
enum TCardTypes
{
EHighSpeedCard26Mhz = 0x01,
- EHighSpeedCard52Mhz = 0x02
+ EHighSpeedCard52Mhz = 0x02,
+ ECardTypeMsk = 0x03
};
/**
@@ -1714,6 +1715,9 @@
/** returns the contents of the S_A_TIMEOUT field */
inline TUint SleepAwakeTimeout() const;
+
+ /** Returns ETrue if the CARD_TYPE field contains a valid value */
+ inline TBool IsSupportedCardType() const;
private:
/**
--- a/kernel/eka/include/drivers/mmc.inl Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/include/drivers/mmc.inl Tue May 11 17:28:22 2010 +0300
@@ -190,6 +190,19 @@
inline TUint TExtendedCSD::BootBusWidth() const {return iData[EBootBusWidthIndex];}
inline TUint TExtendedCSD::EraseGroupDef() const {return iData[EEraseGroupDefIndex];}
+/*
+ * The MMC v4.3 specification states that the only valid values for the CARD_TYPE field are 0x01 and 0x03
+ */
+inline TBool TExtendedCSD::IsSupportedCardType() const
+ {
+ switch (CardType()&ECardTypeMsk)
+ {
+ case 0x01:
+ case 0x03: return ETrue;
+ default: return EFalse;
+ }
+ }
+
// -------- class TMMCStatus --------
/**
* Constructor for TMMCStatus.
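
A worked illustration of the new check (hypothetical values, not part of the patch; it assumes CardType() returns the raw CARD_TYPE byte, as the existing inline accessors do). IsSupportedCardType() masks with ECardTypeMsk (0x03) before switching, so:

//   CardType() == 0x01  ->  0x01 & 0x03 == 0x01  ->  ETrue   (26MHz high-speed only)
//   CardType() == 0x03  ->  0x03 & 0x03 == 0x03  ->  ETrue   (26MHz and 52MHz)
//   CardType() == 0x02  ->  0x02 & 0x03 == 0x02  ->  EFalse  (52MHz without 26MHz is not valid)
//   CardType() == 0x07  ->  0x07 & 0x03 == 0x03  ->  ETrue   (bits outside the mask are ignored)
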
--- a/kernel/eka/include/e32ver.h Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/include/e32ver.h Tue May 11 17:28:22 2010 +0300
@@ -28,7 +28,7 @@
const TInt KE32MajorVersionNumber=2;
const TInt KE32MinorVersionNumber=0;
-const TInt KE32BuildVersionNumber=2111;
+const TInt KE32BuildVersionNumber=2117;
const TInt KMachineConfigurationMajorVersionNumber=1;
const TInt KMachineConfigurationMinorVersionNumber=0;
--- a/kernel/eka/include/kernel/kbma.h Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/include/kernel/kbma.h Tue May 11 17:28:22 2010 +0300
@@ -39,6 +39,7 @@
IMPORT_C void Free(TInt aPos);
IMPORT_C void Alloc(TInt aStart, TInt aLength);
IMPORT_C void Free(TInt aStart, TInt aLength);
+ IMPORT_C TUint SelectiveAlloc(TInt aStart, TInt aLength);
IMPORT_C void SelectiveFree(TInt aStart, TInt aLength);
IMPORT_C TBool NotFree(TInt aStart, TInt aLength) const;
IMPORT_C TBool NotAllocated(TInt aStart, TInt aLength) const;
--- a/kernel/eka/include/kernel/kern_priv.h Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/include/kernel/kern_priv.h Tue May 11 17:28:22 2010 +0300
@@ -2804,6 +2804,7 @@
static TUint NumberOfFreeDpPages();
static TUint NumberOfDirtyDpPages();
static TInt MovePage(TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest);
+ static TInt MoveAndAllocPage(TPhysAddr aAddr, TZonePageType aPageType);
static TInt DiscardPage(TPhysAddr aAddr, TUint aBlockZoneId, TBool aBlockRest);
static void RamZoneClaimed(SZone* aZone);
static TInt RamDefragFault(TAny* aExceptionInfo);
--- a/kernel/eka/include/kernel/kernel.h Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/include/kernel/kernel.h Tue May 11 17:28:22 2010 +0300
@@ -257,8 +257,8 @@
const TUint8 KMutexOrdRamAlloc = 0x04; /**< @internalComponent */
#if defined(__MEMMODEL_FLEXIBLE__)
const TUint8 KMutexOrdSyncPhysMem = 0x03; /**< @internalComponent */
+const TUint8 KMutexOrdPageOut = 0x02; /**< @internalComponent */
#endif
-const TUint8 KMutexOrdPageOut = 0x02; /**< @internalComponent */
const TUint8 KMutexOrdResourceManager = 0x01; /**< @internalComponent */
@@ -2838,6 +2838,12 @@
inline virtual TInt DeleteNotify(TThreadMessage* aReq,TUint aOffset,TUint aSize);
/**
+ Return the lock that should be used to synchronise calculation of the idle/busy state and
+ subsequent calls to #NotifyIdle and #NotifyBusy.
+ */
+ IMPORT_C NFastMutex* NotificationLock();
+
+ /**
Called by the paging device to notify the kernel that the device has just become idle and is not
currently processing any requests.
--- a/kernel/eka/include/memmodel/epoc/mmubase/mmubase.h Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/include/memmodel/epoc/mmubase/mmubase.h Tue May 11 17:28:22 2010 +0300
@@ -525,8 +525,8 @@
public:
TInt AllocRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aPageType, TUint aBlockedZoneId=KRamZoneInvalidId, TBool aBlockRest=EFalse);
TInt ZoneAllocRamPages(TUint* aZoneIdList, TUint aZoneIdCount, TPhysAddr* aPageList, TInt aNumPages, TZonePageType aPageType);
- TInt AllocContiguousRam(TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aPageType, TInt aAlign, TUint aBlockedZoneId=KRamZoneInvalidId, TBool aBlockRest=EFalse);
- TInt ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aPageType, TInt aAlign);
+ TInt AllocContiguousRam(TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign);
+ TInt ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign);
public:
TInt iPageSize; // page size in bytes
--- a/kernel/eka/include/memmodel/epoc/mmubase/ramalloc.h Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/include/memmodel/epoc/mmubase/ramalloc.h Tue May 11 17:28:22 2010 +0300
@@ -142,8 +142,14 @@
void FreeRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType);
TInt AllocRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType, TUint aBlockedZoneId=KRamZoneInvalidId, TBool aBlockRest=EFalse);
TInt ZoneAllocRamPages(TUint* aZoneIdList, TUint aZoneIdCount, TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType);
- TInt AllocContiguousRam(TUint aNumPages, TPhysAddr& aPhysAddr, TZonePageType aType, TInt aAlign=0, TUint aBlockZoneId=KRamZoneInvalidId, TBool aBlockRest=EFalse);
- TInt ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aType, TInt aAlign);
+ TInt AllocContiguousRam(TUint aNumPages, TPhysAddr& aPhysAddr, TInt aAlign=0);
+#if !defined(__MEMMODEL_MULTIPLE__) && !defined(__MEMMODEL_MOVING__)
+ void BlockContiguousRegion(TPhysAddr aAddrBase, TUint aNumPages);
+ void UnblockSetAllocRuns(TUint& aOffset1, TUint& aOffset2, TUint aRunLength1, TUint aRunLength2, TUint& aAllocLength, TUint& aAllocStart);
+ void UnblockContiguousRegion(TPhysAddr aAddrBase, TUint aNumPages);
+ TBool ClearContiguousRegion(TPhysAddr aAddrBase, TPhysAddr aZoneBase, TUint aNumPages, TInt& aOffset);
+#endif
+ TInt ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign);
#ifdef _DEBUG
void DebugDump();
#endif
@@ -158,6 +164,7 @@
TInt GetZoneAddress(TUint aZoneId, TPhysAddr& aPhysBase, TUint& aNumPages);
TInt HalFunction(TInt aFunction, TAny* a1, TAny* a2);
TInt NextAllocatedPage(SZone* aZone, TUint& aOffset, TZonePageType aType) const;
+ TInt NextAllocatedRun(SZone* aZone, TUint& aOffset, TUint aEndOffset, TZonePageType aType) const;
TUint GenDefragFreePages(TZonePageType aType) const;
SZone* GeneralDefragStart0(TGenDefragStage& aStage, TUint& aRequiredToBeDiscarded);
SZone* GeneralDefragNextZone0();
@@ -205,9 +212,7 @@
SDblQueLink* iZoneGeneralPrefLink; /**< Link to the current RAM zone being defragged*/
SDblQueLink* iZoneGeneralTmpLink; /**< Link to the current RAM zone being defragged*/
TUint iZoneGeneralStage; /**< The current stage of any general defrag operation*/
-#ifdef _DEBUG
- TBool iAllowBmaVerify;
-#endif
+ TUint iContiguousReserved; /**< The number of separate contiguous allocations that have reserved pages*/
};
#endif
--- a/kernel/eka/include/u32hal.h Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/include/u32hal.h Tue May 11 17:28:22 2010 +0300
@@ -2868,6 +2868,7 @@
EPagingBmDeleteNotifyDataPage,
EPagingBmReadDataMedia,
EPagingBmWriteDataMedia,
+ EPagingBmRejuvenate, // only implemented on FMM
EMaxPagingBm
};
--- a/kernel/eka/kernel/arm/cipc.cia Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/kernel/arm/cipc.cia Tue May 11 17:28:22 2010 +0300
@@ -57,7 +57,7 @@
__NAKED__ void ExecHandler::MessageComplete(RMessageK* /*aMsg*/, TInt /*aReason*/)
{
- asm("ldr ip, [r0, #%a0]" : : "i" _FOFF(RMessageK, iFunction)); // get iFunction, as per preprocessor
+ asm("ldr ip, [r0, #%a0]" : : "i" _FOFF(RMessageK, iFunction)); // get iFunction, as per preprocessor
// Subroutine MessageComplete
// Complete an IPC message
@@ -69,65 +69,73 @@
#ifdef BTRACE_CLIENT_SERVER
asm("stmfd sp!,{r0,r1,ip,lr}");
- asm("mov r2,r1"); // arg2 = aReason
- asm("mov r1,r0"); // arg1 = aMsg
- asm("ldr r0,_messageCompleteTraceHeader"); // arg0 = header
+ asm("mov r2,r1"); // arg2 = aReason
+ asm("mov r1,r0"); // arg1 = aMsg
+ asm("ldr r0,_messageCompleteTraceHeader"); // arg0 = header
asm("bl " CSM_ZN6BTrace4OutXEmmmm);
asm("ldmfd sp!,{r0,r1,ip,lr}");
#endif
asm("cmp ip, #%a0" : : "i" (RMessage2::EDisConnect));
asm("ldreq r0, [r0, #%a0]" : : "i" _FOFF(RMessageK,iSession));
- asm("beq " CSM_ZN8DSession19CloseFromDisconnectEv ); // if disconnect, do it in C++
- asm("mov r2, r1 "); // r2=aReason
+ asm("beq " CSM_ZN8DSession19CloseFromDisconnectEv ); // if disconnect, do it in C++
+
+ asm("mov r2, r1 "); // r2 = aReason
ASM_DEBUG2(Complete,r0,r2);
- asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(RMessageK, iSession)); // r3=iSession
-
- asm("subs r1, ip, #%a0" : : "i" (RMessage2::EConnect)); // (m.iFunction == RMessage2::EConnect)?
- asm("streq r1, [r3, #%a0] " : : "i" _FOFF(DSession, iConnectMsgPtr)); // iSession->iConnectMsgPtr = NULL
-
- asm("ldr r1, [r3, #%a0]" : : "i" _FOFF(DSession,iAccessCount)); // r1=iSession->iAccessCount
- asm("cmp r1, #0 "); // iAccessCount = 0?
+ asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(RMessageK, iSession)); // r3 = iSession
+ asm("subs r1, ip, #%a0" : : "i" (RMessage2::EConnect)); // (m.iFunction == RMessage2::EConnect)?
+ asm("streq r1, [r3, #%a0] " : : "i" _FOFF(DSession, iConnectMsgPtr)); // iSession->iConnectMsgPtr = NULL
+ asm("ldr r1, [r3, #%a0]" : : "i" _FOFF(DSession,iAccessCount)); // r1 = iSession->iAccessCount
+ asm("cmp r1, #0 "); // iAccessCount = 0?
asm("beq 2f ");
- // if (!s->IsClosing())
- asm("mov r1, r0"); // r1 = RMessageK ptr
- asm("ldr r0, [r0, #%a0] " : : "i" _FOFF(RMessageK,iClient)); // r0=iClient
+ // !s->IsClosing()
+ asm("mov r1, r0"); // r1 = RMessageK ptr
+ asm("ldr r0, [r0, #%a0] " : : "i" _FOFF(RMessageK,iClient)); // r0 = iClient
+ asm("ldrb ip, [r0, #%a0] " : : "i" _FOFF(DThread,iMState)); // ip = iClient->iMState
+ asm("cmp ip, #%a0" : : "i" (DThread::EDead)); // (iMState == EDead)?
+ asm("beq 1f ");
+
+ // if (!s->IsClosing() && m.iClient->iMState != DThread::EDead)
asm("mov ip, #1");
- asm("str ip, [r1, #%a0]" : : "i" _FOFF(RMessageK, iServerLink.iNext)); // iServerLink.iNext=1
- asm("b " CSM_ZN4Kern20QueueRequestCompleteEP7DThreadP14TClientRequesti);
+ asm("str ip, [r1, #%a0]" : : "i" _FOFF(RMessageK, iServerLink.iNext)); // iServerLink.iNext=1
+ asm("b " CSM_ZN4Kern20QueueRequestCompleteEP7DThreadP14TClientRequesti); // tail call
- // if (s->IsClosing())
+ // m.iClient->iMState == DThread::EDead
+ asm("1: "); // shuffle RMessageK and iFunction back to expected registers
+ asm("ldr ip, [r1, #%a0]" : : "i" _FOFF(RMessageK, iFunction)); // refetch iFunction
+ asm("mov r0, r1"); // r0 = RMessageK ptr
+
+ // else (closing or dead)
asm("2: ");
- asm("cmp ip, #%a0" : : "i" (RMessage2::EConnect)); // (m.iFunction == RMessage2::EConnect)?
- asm("beq 4f ");
- asm("3: ");
- asm("stmfd sp!, {r0,lr} ");
- asm("bl " CSM_ZN14TClientRequest5ResetEv);
- asm("ldmfd sp!, {r0,lr} ");
- asm("b " CSM_ZN9RMessageK8CloseRefEv);
+ asm("cmp ip, #%a0" : : "i" (RMessage2::EConnect)); // (m.iFunction == RMessage2::EConnect)?
+ asm("bne 3f ");
- asm("4: ");
- // if closing & connect msg
- asm("ldr r2, [r3, #%a0] " : : "i" _FOFF(DSession, iSessionCookie)); // r2=iSession->iSessionCookie
+ // (closing or dead) and it's a connect msg
+ asm("ldr r2, [r3, #%a0] " : : "i" _FOFF(DSession, iSessionCookie)); // r2=iSession->iSessionCookie
asm("teq r2, #0");
#ifdef _DEBUG
asm("beq nosession ");
- asm("ldr r1, [r3, #%a0] " : : "i" _FOFF(DSession, iServer)); // r1=iSession->iServer
+ asm("ldr r1, [r3, #%a0] " : : "i" _FOFF(DSession, iServer)); // r1=iSession->iServer
asm("cmp r1, #0 ");
asm("beq noserver ");
- asm("ldr r2, [r3, #%a0] " : : "i" (_FOFF(DSession, iDisconnectMsgPtr))); // r2=iSession->iDisconnectMsgPtr
+ asm("ldr r2, [r3, #%a0] " : : "i" (_FOFF(DSession, iDisconnectMsgPtr))); // r2=iSession->iDisconnectMsgPtr
asm("ldr r2, [r2, #%a0] " : : "i" (_FOFF(RMessageK, iServerLink.iNext))); // r2=iDisconnectMsgPtr->iServerLink.iNext
asm("cmp r2, #0 ");
- asm("beq __FaultMsgCompleteDiscNotSent "); // die if a session has been created and no disc msg sent
- asm("ldr r2, [r3, #%a0] " : : "i" _FOFF(DSession, iSessionCookie)); // r2=iSession->iSessionCookie
+ asm("beq __FaultMsgCompleteDiscNotSent "); // tail call to die if a session has been created and no disc msg sent
+ asm("ldr r2, [r3, #%a0] " : : "i" _FOFF(DSession, iSessionCookie)); // r2=iSession->iSessionCookie
asm("noserver: ");
asm("teq r2, #0");
asm("nosession: ");
#endif //_DEBUG
asm("moveq r0, r3 ");
- asm("beq __SendDiscMsg "); // if no session object to clean up, send disc msg in C++
- asm("b 3b "); // return
+ asm("beq __SendDiscMsg "); // if no session object to clean up, tail call to send disc msg in C++
+
+ asm("3: ");
+ asm("stmfd sp!, {r0,lr} ");
+ asm("bl " CSM_ZN14TClientRequest5ResetEv);
+ asm("ldmfd sp!, {r0,lr} ");
+ asm("b " CSM_ZN9RMessageK8CloseRefEv); // tail call
#ifdef BTRACE_CLIENT_SERVER
asm("_messageCompleteTraceHeader:");
@@ -358,8 +366,8 @@
#endif
asm("ldr r1, [r1, #%a0]" : : "i" (_FOFF(DThread,iOwningProcess)-_FOFF(DThread,iNThread))); // r1->process to check
asm("bl do_messagek ");
+ asm("bcc 0f "); // if bad handle, panic
asm("ldr ip, [r0, #%a0]" : : "i" _FOFF(RMessageK,iFunction)); // ip = function
- asm("bcc 0f "); // if bad handle, panic
asm("cmp ip, #%a0" : : "i" ((TInt)RMessage2::EDisConnect)); // check iFunction != RMessage2::EDisConnect
asm("ldmnefd sp!, {r4,pc} "); // if not, return OK
asm("0: ");
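
A rough C++ rendering of the control flow the reworked ExecHandler::MessageComplete path now implements (a sketch only: the shipped code is the hand-written assembler above, debug-build assertions are omitted, and SendDisconnectMsgInCpp is a placeholder name for the __SendDiscMsg stub).

void MessageCompleteSketch(RMessageK* m, TInt aReason)
	{
	if (m->iFunction == RMessage2::EDisConnect)
		{
		m->iSession->CloseFromDisconnect();		// disconnect is handled in C++
		return;
		}
	DSession* s = m->iSession;
	if (m->iFunction == RMessage2::EConnect)
		s->iConnectMsgPtr = NULL;
	if (s->iAccessCount != 0 && m->iClient->iMState != DThread::EDead)
		{
		// Session is not closing and the client thread is still alive:
		// mark the message completed and queue the completion on the client.
		m->iServerLink.iNext = (SDblQueLink*)1;
		Kern::QueueRequestComplete(m->iClient, m, aReason);	// tail call in the assembler
		return;
		}
	// Session is closing or the client is dead.
	if (m->iFunction == RMessage2::EConnect && s->iSessionCookie == NULL)
		{
		SendDisconnectMsgInCpp(s);			// placeholder for the __SendDiscMsg stub
		return;
		}
	m->Reset();					// TClientRequest::Reset
	m->CloseRef();					// RMessageK::CloseRef, tail call in the assembler
	}
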
--- a/kernel/eka/klib/bma.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/klib/bma.cpp Tue May 11 17:28:22 2010 +0300
@@ -762,6 +762,49 @@
}
+/** Allocates a specific range of bit positions.
+
+ The specified range must lie within the total range for this allocator but it is
+ not necessary that all the positions are currently free.
+
+ @param aStart First position to allocate.
+ @param aLength Number of consecutive positions to allocate, must be >0.
+ @return The number of previously free positions that were allocated.
+ */
+EXPORT_C TUint TBitMapAllocator::SelectiveAlloc(TInt aStart, TInt aLength)
+ {
+ __ASSERT_ALWAYS(TUint(aStart) < TUint(iSize), TBMA_FAULT());
+ __ASSERT_ALWAYS(TUint(aStart + aLength) >= TUint(aStart), TBMA_FAULT());
+ __ASSERT_ALWAYS(TUint(aStart + aLength) <= TUint(iSize), TBMA_FAULT());
+ TInt wix = aStart >> 5;
+ TInt sbit = aStart & 31;
+ TUint32* pW = iMap + wix;
+ iAvail -= aLength; // update free count assuming no positions already allocated
+ TInt ebit = sbit + aLength;
+ if (ebit < 32)
+ {
+ TUint32 b = ((0xffffffffu >> aLength) >> sbit) | ~(0xffffffffu >> sbit);
+ TUint32 w = *pW;
+ *pW = w & b; // mark all positions allocated
+ TUint allocated = __e32_bit_count_32(~w & ~b);
+ iAvail += allocated; // increase free count by number of positions already allocated
+ return aLength - allocated;
+ }
+ TUint32 b = ~(0xffffffffu >> sbit);
+ while (ebit > 0)
+ {
+ TUint32 w = *pW;
+ *pW++ = w & b; // mark all positions allocated
+ TUint allocated = __e32_bit_count_32(~w & ~b);
+ iAvail += allocated; // increase free count by number of positions already allocated
+ aLength -= allocated;
+ ebit -= 32;
+ b = (ebit >= 32)? 0 : 0xffffffff >> ebit;
+ }
+ return aLength;
+ }
+
+
/** Copies a range from another allocator, mark remainder as occupied.
Values of bit positions from aFirst to aFirst+aLen-1 inclusive in allocator
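
A short usage sketch of the new API (hypothetical values, not part of the patch) showing the return-value semantics documented above:

TBitMapAllocator* bma = TBitMapAllocator::New(64, ETrue);	// 64 positions, all initially free
bma->Alloc(10, 4);						// positions 10..13 now allocated
TUint newlyAllocated = bma->SelectiveAlloc(8, 8);		// marks 8..15 allocated, returns 4
								// (only 8, 9, 14 and 15 were previously free)
bma->SelectiveFree(8, 8);					// release the range again
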
--- a/kernel/eka/memmodel/emul/win32/mutils.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/memmodel/emul/win32/mutils.cpp Tue May 11 17:28:22 2010 +0300
@@ -412,6 +412,12 @@
// Misc DPagingDevice methods
+EXPORT_C NFastMutex* DPagingDevice::NotificationLock()
+ {
+ // use the system lock
+ return &TheScheduler.iLock;
+ }
+
EXPORT_C void DPagingDevice::NotifyIdle()
{
// Not used on this memory model
--- a/kernel/eka/memmodel/epoc/direct/mutils.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/memmodel/epoc/direct/mutils.cpp Tue May 11 17:28:22 2010 +0300
@@ -673,6 +673,12 @@
// Misc DPagingDevice methods
+EXPORT_C NFastMutex* DPagingDevice::NotificationLock()
+ {
+ // use the system lock
+ return &TheScheduler.iLock;
+ }
+
EXPORT_C void DPagingDevice::NotifyIdle()
{
// Not used on this memory model
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mcodepaging.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mcodepaging.cpp Tue May 11 17:28:22 2010 +0300
@@ -239,7 +239,8 @@
{
TRACE2(("DCodePagedMemoryManager::ReadPage(0x%08x,0x%08x,0x%08x,?,?)",aMemory,aIndex,aCount));
- __NK_ASSERT_DEBUG(aRequest->CheckUse(aMemory,aIndex,aCount));
+ __NK_ASSERT_DEBUG(aRequest->CheckUseContiguous(aMemory,aIndex,aCount));
+ __ASSERT_CRITICAL;
START_PAGING_BENCHMARK;
@@ -256,6 +257,7 @@
TLinAddr linAddr = aRequest->MapPages(aIndex,aCount,aPages);
TInt r = KErrNone;
+ TThreadMessage message;
if(!info.iCodeSize)
{
@@ -292,7 +294,7 @@
device.iReadUnitShift,
ReadFunc,
(TAny*)info.iCodeLocalDrive,
- (TAny*)&aRequest->iMessage);
+ (TAny*)&message);
if(bufferStart<0)
{
@@ -332,7 +334,7 @@
pagedCodeInfo->AsyncClose();
- END_PAGING_BENCHMARK(EPagingBmReadCodePage);
+ END_PAGING_BENCHMARK_N(EPagingBmReadCodePage, aCount);
return r;
}
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mdatapaging.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mdatapaging.cpp Tue May 11 17:28:22 2010 +0300
@@ -31,17 +31,26 @@
{
public:
- enum TSwapFlags
+ /// The state of swap for a logical page in a memory object.
+ ///
+ /// Note that this does not always correspond to the state of the page in RAM - for example a
+ /// page can be dirty in RAM but blank in swap if it has never been written out.
+ enum TSwapState
{
- EAllocated = 1 << 0,
- EUninitialised = 1 << 1,
- ESaved = 1 << 2,
- ESwapFlagsMask = 0x7,
-
- ESwapIndexShift = 3,
- ESwapIndexMask = 0xffffffff << ESwapIndexShift,
+ EStateUnreserved = 0, ///< swap space not yet reserved, or page is being decommitted
+ EStateBlank = 1, ///< swap page has never been written
+ EStateWritten = 2, ///< swap page has been written out at least once
+ EStateWriting = 3 ///< swap page is in the process of being written out
+ };
+
+ enum
+ {
+ ESwapIndexShift = 2,
+ ESwapStateMask = (1 << ESwapIndexShift) - 1,
+ ESwapIndexMask = 0xffffffff & ~ESwapStateMask
};
+public:
TInt Create(DPagingDevice* aDevice);
TInt ReserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount);
@@ -49,21 +58,32 @@
TBool IsReserved(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount);
TInt ReadSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, DPageReadRequest* aRequest, TPhysAddr* aPhysAddrs);
- TInt WriteSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, DPageWriteRequest* aRequest);
- void DoDeleteNotify(TUint aSwapData);
+ TInt WriteSwapPages(DMemoryObject** aMemory, TUint* aIndex, TUint aCount, TLinAddr aLinAddr, TBool aBackground);
void GetSwapInfo(SVMSwapInfo& aInfoOut);
TInt SetSwapThresholds(const SVMSwapThresholds& aThresholds);
- void CheckSwapThresholds(TUint aInitial, TUint aFinal);
+
+private:
+ inline TSwapState SwapState(TUint aSwapData);
+ inline TInt SwapIndex(TUint aSwapData);
+ inline TUint SwapData(TSwapState aSwapState, TInt aSwapIndex);
+
+ TInt AllocSwapIndex(TInt aCount);
+ void FreeSwapIndex(TInt aSwapIndex);
+ void CheckSwapThresholdsAndUnlock(TUint aInitial);
-protected:
- DPagingDevice* iDevice;
- TBitMapAllocator* iBitMap;
- TUint iBitMapFree;
- TUint iAllocOffset;
+ void DoDeleteNotify(TUint aSwapIndex);
+ TInt DoWriteSwapPages(DMemoryObject** aMemory, TUint* aIndex, TUint aCount, TLinAddr aLinAddr, TInt aSwapIndex, TBool aBackground);
+
+private:
+ DPagingDevice* iDevice; ///< Paging device used to read and write swap pages
+
+ NFastMutex iSwapLock; ///< Fast mutex protecting access to all members below
+ TUint iFreePageCount; ///< Number of swap pages that have not been reserved
+ TBitMapAllocator* iBitMap; ///< Bitmap of swap pages that have been allocated
+ TUint iAllocOffset; ///< Next offset to try when allocating a swap page
TUint iSwapThesholdLow;
TUint iSwapThesholdGood;
- TThreadMessage iDelNotifyMsg;
};
@@ -81,15 +101,14 @@
virtual TInt Alloc(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
virtual void Free(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
virtual TInt Wipe(DMemoryObject* aMemory);
- virtual TInt CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& aPageArrayEntry);
+ virtual void CleanPages(TUint aPageCount, SPageInfo** aPageInfos, TBool aBackground);
// Methods inherited from DPagedMemoryManager
virtual void Init3();
virtual TInt InstallPagingDevice(DPagingDevice* aDevice);
virtual TInt AcquirePageReadRequest(DPageReadRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount);
- virtual TInt AcquirePageWriteRequest(DPageWriteRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount);
+ virtual TInt AcquirePageWriteRequest(DPageWriteRequest*& aRequest, DMemoryObject** aMemory, TUint* aIndex, TUint aCount);
virtual TInt ReadPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageReadRequest* aRequest);
- virtual TInt WritePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageWriteRequest* aRequest);
virtual TBool IsAllocated(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
public:
@@ -97,6 +116,9 @@
TInt SetSwapThresholds(const SVMSwapThresholds& aThresholds);
private:
+ TInt WritePages(DMemoryObject** aMemory, TUint* aIndex, TPhysAddr* aPages, TUint aCount, DPageWriteRequest *aRequest, TBool aAnyExecutable, TBool aBackground);
+
+private:
/**
The paging device used for accessing the backing store.
This is set by #InstallPagingDevice.
@@ -127,7 +149,7 @@
*/
TInt DSwapManager::Create(DPagingDevice* aDevice)
{
- __ASSERT_COMPILE(!(ESwapIndexMask & ESwapFlagsMask));
+ __ASSERT_COMPILE(!(ESwapIndexMask & ESwapStateMask));
__NK_ASSERT_DEBUG(iDevice == NULL);
iDevice = aDevice;
@@ -147,12 +169,88 @@
{// Not enough RAM to keep track of the swap.
return KErrNoMemory;
}
- iBitMapFree = swapPages;
+ iFreePageCount = swapPages;
iAllocOffset = 0;
return KErrNone;
}
+inline DSwapManager::TSwapState DSwapManager::SwapState(TUint aSwapData)
+ {
+ TSwapState state = (TSwapState)(aSwapData & ESwapStateMask);
+ __NK_ASSERT_DEBUG(state >= EStateWritten || (aSwapData & ~ESwapStateMask) == 0);
+ return state;
+ }
+
+
+inline TInt DSwapManager::SwapIndex(TUint aSwapData)
+ {
+ return aSwapData >> ESwapIndexShift;
+ }
+
+
+inline TUint DSwapManager::SwapData(TSwapState aSwapState, TInt aSwapIndex)
+ {
+ return (aSwapIndex << ESwapIndexShift) | aSwapState;
+ }
+
+
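// Worked example of the encoding above (illustration only, not part of the shipped code):
// with ESwapIndexShift == 2 the manager data word packs the swap index in the high bits
// and the TSwapState in the low two bits.  For a page written out at swap index 5:
//     SwapData(EStateWritten, 5)  ==  (5 << 2) | EStateWritten  ==  0x16
//     SwapState(0x16)             ==  0x16 & ESwapStateMask     ==  EStateWritten
//     SwapIndex(0x16)             ==  0x16 >> ESwapIndexShift   ==  5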
+/**
+Allocate one or more pages' worth of space within the swap area.
+
+The location is represented by a page-based index into the swap area.
+
+@param aCount The number of pages' worth of space to allocate.
+
+@return The swap index of the first location allocated.
+*/
+TInt DSwapManager::AllocSwapIndex(TInt aCount)
+ {
+ __NK_ASSERT_DEBUG(aCount > 0 && aCount <= KMaxPagesToClean);
+ NKern::FMWait(&iSwapLock);
+
+ // search for run of aCount from iAllocOffset to end
+ TInt carry = 0;
+ TInt l = KMaxTInt;
+ TInt swapIndex = iBitMap->AllocAligned(aCount, 0, 0, EFalse, carry, l, iAllocOffset);
+
+ // if search failed, retry from beginning
+ if (swapIndex < 0)
+ {
+ iAllocOffset = 0;
+ carry = 0;
+ swapIndex = iBitMap->AllocAligned(aCount, 0, 0, EFalse, carry, l, iAllocOffset);
+ }
+
+ // if we found one then mark it as allocated and update iAllocOffset
+ if (swapIndex >= 0)
+ {
+ __NK_ASSERT_DEBUG(swapIndex <= (iBitMap->iSize - aCount));
+ iBitMap->Alloc(swapIndex, aCount);
+ iAllocOffset = (swapIndex + aCount) % iBitMap->iSize;
+ }
+
+ NKern::FMSignal(&iSwapLock);
+ __NK_ASSERT_DEBUG(swapIndex >= 0 || aCount > 1); // can't fail to allocate single page
+ return swapIndex;
+ }
+
+
+/**
+Free one page's worth of space within the swap area.
+
+The index must have been previously allocated with AllocSwapIndex().
+*/
+void DSwapManager::FreeSwapIndex(TInt aSwapIndex)
+ {
+ __NK_ASSERT_DEBUG(aSwapIndex >= 0 && aSwapIndex < iBitMap->iSize);
+ DoDeleteNotify(aSwapIndex);
+ NKern::FMWait(&iSwapLock);
+ iBitMap->Free(aSwapIndex);
+ NKern::FMSignal(&iSwapLock);
+ }
+
+
/**
Reserve some swap pages for the requested region of the memory object
@@ -167,40 +265,29 @@
TInt DSwapManager::ReserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount)
{
__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
- __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
- const TUint indexEnd = aStartIndex + aPageCount;
- TUint index = aStartIndex;
-
-#ifdef _DEBUG
- for (; index < indexEnd; index++)
- {// This page shouldn't already be in use.
- MmuLock::Lock();
- __NK_ASSERT_DEBUG(!(aMemory->PagingManagerData(index) & ESwapFlagsMask));
- MmuLock::Unlock();
- }
-#endif
-
- if (iBitMapFree < aPageCount)
+ NKern::FMWait(&iSwapLock);
+ TUint initFree = iFreePageCount;
+ if (iFreePageCount < aPageCount)
{
+ NKern::FMSignal(&iSwapLock);
Kern::AsyncNotifyChanges(EChangesOutOfMemory);
return KErrNoMemory;
}
- // Reserve the required swap space and mark each page as allocated and uninitialised.
- TUint initFree = iBitMapFree;
- iBitMapFree -= aPageCount;
- for (index = aStartIndex; index < indexEnd; index++)
+ iFreePageCount -= aPageCount;
+ CheckSwapThresholdsAndUnlock(initFree);
+
+ // Mark each page as allocated and uninitialised.
+ const TUint indexEnd = aStartIndex + aPageCount;
+ for (TUint index = aStartIndex; index < indexEnd; index++)
{
// Grab MmuLock to stop manager data being accessed.
MmuLock::Lock();
- TUint swapData = aMemory->PagingManagerData(index);
- __NK_ASSERT_DEBUG(!(swapData & EAllocated));
- swapData = EAllocated | EUninitialised;
- aMemory->SetPagingManagerData(index, swapData);
+ __NK_ASSERT_DEBUG(SwapState(aMemory->PagingManagerData(index)) == EStateUnreserved);
+ aMemory->SetPagingManagerData(index, EStateBlank);
MmuLock::Unlock();
}
- CheckSwapThresholds(initFree, iBitMapFree);
return KErrNone;
}
@@ -219,9 +306,7 @@
TInt DSwapManager::UnreserveSwap(DMemoryObject* aMemory, TUint aStartIndex, TUint aPageCount)
{
__NK_ASSERT_DEBUG(MemoryObjectLock::IsHeld(aMemory));
- __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
- TUint initFree = iBitMapFree;
TUint freedPages = 0;
const TUint indexEnd = aStartIndex + aPageCount;
for (TUint index = aStartIndex; index < indexEnd; index++)
@@ -229,30 +314,35 @@
// Grab MmuLock to stop manager data being accessed.
MmuLock::Lock();
TUint swapData = aMemory->PagingManagerData(index);
- TUint swapIndex = swapData >> ESwapIndexShift;
- TBool notifyDelete = EFalse;
- if (swapData & EAllocated)
+ TSwapState state = SwapState(swapData);
+ if (state != EStateUnreserved)
{
- if (swapData & ESaved)
- {
- notifyDelete = ETrue;
- iBitMap->Free(swapIndex);
- }
freedPages++;
- aMemory->SetPagingManagerData(index, 0);
+ aMemory->SetPagingManagerData(index, EStateUnreserved);
}
-#ifdef _DEBUG
- else
- __NK_ASSERT_DEBUG(swapData == 0);
-#endif
-
MmuLock::Unlock();
- if (notifyDelete)
- DoDeleteNotify(swapIndex);
+ if (state == EStateWritten)
+ FreeSwapIndex(SwapIndex(swapData));
+ else if (state == EStateWriting)
+ {
+ // Wait for cleaning to finish before deallocating swap space
+ PageCleaningLock::Lock();
+ PageCleaningLock::Unlock();
+
+#ifdef _DEBUG
+ MmuLock::Lock();
+ __NK_ASSERT_DEBUG(SwapState(aMemory->PagingManagerData(index)) == EStateUnreserved);
+ MmuLock::Unlock();
+#endif
+ }
}
- iBitMapFree += freedPages;
- CheckSwapThresholds(initFree, iBitMapFree);
+
+ NKern::FMWait(&iSwapLock);
+ TUint initFree = iFreePageCount;
+ iFreePageCount += freedPages;
+ CheckSwapThresholdsAndUnlock(initFree);
+
return freedPages;
}
@@ -275,7 +365,7 @@
const TUint indexEnd = aStartIndex + aPageCount;
for (TUint index = aStartIndex; index < indexEnd; index++)
{
- if (!(aMemory->PagingManagerData(index) & DSwapManager::EAllocated))
+ if (SwapState(aMemory->PagingManagerData(index)) == EStateUnreserved)
{// This page is not allocated by swap manager.
return EFalse;
}
@@ -296,15 +386,12 @@
*/
TInt DSwapManager::ReadSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, DPageReadRequest* aRequest, TPhysAddr* aPhysAddrs)
{
+ __ASSERT_CRITICAL;
+
TInt r = KErrNone;
const TUint readUnitShift = iDevice->iReadUnitShift;
TUint readSize = KPageSize >> readUnitShift;
- TThreadMessage* msg = const_cast<TThreadMessage*>(&aRequest->iMessage);
-
- // Determine the wipe byte values for uninitialised pages.
- TUint allocFlags = aMemory->RamAllocFlags();
- TBool wipePages = !(allocFlags & Mmu::EAllocNoWipe);
- TUint8 wipeByte = (allocFlags & Mmu::EAllocUseCustomWipeByte) ? (allocFlags >> Mmu::EAllocWipeByteShift) & 0xff : 0x03;
+ TThreadMessage message;
const TUint indexEnd = aIndex + aCount;
for (TUint index = aIndex; index < indexEnd; index++, aLinAddr += KPageSize, aPhysAddrs++)
@@ -313,36 +400,43 @@
MmuLock::Lock(); // MmuLock required for atomic access to manager data.
TUint swapData = aMemory->PagingManagerData(index);
+ TSwapState state = SwapState(swapData);
- if (!(swapData & EAllocated))
+ if (state == EStateUnreserved)
{// This page is not committed to the memory object
MmuLock::Unlock();
return KErrNotFound;
}
- if (swapData & EUninitialised)
+ else if (state == EStateBlank)
{// This page has not been written to yet so don't read from swap
// just wipe it if required.
+ TUint allocFlags = aMemory->RamAllocFlags();
MmuLock::Unlock();
+ TBool wipePages = !(allocFlags & Mmu::EAllocNoWipe);
if (wipePages)
{
+ TUint8 wipeByte = (allocFlags & Mmu::EAllocUseCustomWipeByte) ?
+ (allocFlags >> Mmu::EAllocWipeByteShift) & 0xff :
+ 0x03;
memset((TAny*)aLinAddr, wipeByte, KPageSize);
}
}
else
{
- __NK_ASSERT_DEBUG(swapData & ESaved);
- TUint swapIndex = swapData >> ESwapIndexShift;
+ // It is not possible to get here if the page is in state EStateWriting, because in
+ // that case it must still be present in RAM and so will not need to be read in.
+ __NK_ASSERT_DEBUG(state == EStateWritten);
+
// OK to release as if the object's data is decommitted the pager
// will check that data is still valid before mapping it.
MmuLock::Unlock();
- TUint readStart = (swapIndex << KPageShift) >> readUnitShift;
+ TUint readStart = (SwapIndex(swapData) << KPageShift) >> readUnitShift;
START_PAGING_BENCHMARK;
- r = iDevice->Read(msg, aLinAddr, readStart, readSize, DPagingDevice::EDriveDataPaging);
+ r = iDevice->Read(&message, aLinAddr, readStart, readSize, DPagingDevice::EDriveDataPaging);
if (r != KErrNone)
__KTRACE_OPT(KPANIC, Kern::Printf("DSwapManager::ReadSwapPages: error reading media at %08x + %x: %d", readStart << readUnitShift, readSize << readUnitShift, r));
__NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
END_PAGING_BENCHMARK(EPagingBmReadDataMedia);
- // TODO: Work out what to do if page in fails, unmap all pages????
__NK_ASSERT_ALWAYS(r == KErrNone);
}
END_PAGING_BENCHMARK(EPagingBmReadDataPage);
@@ -359,102 +453,166 @@
@param aIndex The index within the memory object.
@param aCount The number of pages to write out.
@param aLinAddr The location of the pages to write out.
-@param aRequest The demand paging request to use.
+@param aBackground Whether this is being called in the background by the page cleaning thread
+ as opposed to on demand when a free page is required.
+@pre Called with page cleaning lock held
*/
-TInt DSwapManager::WriteSwapPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TLinAddr aLinAddr, DPageWriteRequest* aRequest)
- {// The RamAllocLock prevents the object's swap pages being reassigned.
- __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
-
- // Write the page out to the swap.
- TInt r = KErrNone;
- const TUint readUnitShift = iDevice->iReadUnitShift;
- TUint writeSize = KPageSize >> readUnitShift;
- TThreadMessage* msg = const_cast<TThreadMessage*>(&aRequest->iMessage);
+TInt DSwapManager::WriteSwapPages(DMemoryObject** aMemory, TUint* aIndex, TUint aCount, TLinAddr aLinAddr, TBool aBackground)
+ {
+ __ASSERT_CRITICAL; // so we can pass the paging device a stack-allocated TThreadMessage
+ __NK_ASSERT_DEBUG(PageCleaningLock::IsHeld());
- const TUint indexEnd = aIndex + aCount;
- for (TUint index = aIndex; index < indexEnd; index++)
+ START_PAGING_BENCHMARK;
+
+ TUint i;
+ TUint swapData[KMaxPagesToClean + 1];
+
+ MmuLock::Lock();
+ for (i = 0 ; i < aCount ; ++i)
{
- START_PAGING_BENCHMARK;
+ swapData[i] = aMemory[i]->PagingManagerData(aIndex[i]);
+ TSwapState s = SwapState(swapData[i]);
+ // It's not possible to write a page while it's already being written, because we always hold
+ // the PageCleaning mutex when we clean
+ __NK_ASSERT_DEBUG(s == EStateUnreserved || s == EStateBlank || s == EStateWritten);
+ if (s == EStateBlank || s == EStateWritten)
+ aMemory[i]->SetPagingManagerData(aIndex[i], SwapData(EStateWriting, 0));
+ }
+ MmuLock::Unlock();
- MmuLock::Lock();
- TUint swapData = aMemory->PagingManagerData(index);
- // OK to release as ram alloc lock prevents manager data being updated.
- MmuLock::Unlock();
- if (!(swapData & EAllocated))
- {// This page is being decommited from aMemory so it is clean/unrequired.
- continue;
+ // By the time we get here, some pages may have been decommitted, so write out only those runs
+ // of pages which are still committed.
+
+ TInt r = KErrNone;
+ TInt startIndex = -1;
+ swapData[aCount] = SwapData(EStateUnreserved, 0); // end of list marker
+ for (i = 0 ; i < (aCount + 1) ; ++i)
+ {
+ if (SwapState(swapData[i]) != EStateUnreserved)
+ {
+ if (startIndex == -1)
+ startIndex = i;
+
+ // Free swap page corresponding to old version of the pages we are going to write
+ if (SwapState(swapData[i]) == EStateWritten)
+ FreeSwapIndex(SwapIndex(swapData[i]));
}
- TInt swapIndex = swapData >> ESwapIndexShift;
- if (swapData & ESaved)
- {// An old version of this page has been saved to swap so free it now
- // as it will be out of date.
- iBitMap->Free(swapIndex);
- DoDeleteNotify(swapIndex);
- }
- // Get a new swap location for this page.
- swapIndex = iBitMap->AllocFrom(iAllocOffset);
- __NK_ASSERT_DEBUG(swapIndex != -1 && swapIndex < iBitMap->iSize);
- iAllocOffset = swapIndex + 1;
- if (iAllocOffset == (TUint)iBitMap->iSize)
- iAllocOffset = 0;
+ else
+ {
+ if (startIndex != -1)
+ {
+ // write pages from startIndex to i exclusive
+ TInt count = i - startIndex;
+ __NK_ASSERT_DEBUG(count > 0 && count <= KMaxPagesToClean);
- TUint writeOffset = (swapIndex << KPageShift) >> readUnitShift;
- {
- START_PAGING_BENCHMARK;
- r = iDevice->Write(msg, aLinAddr, writeOffset, writeSize, EFalse);
- if (r != KErrNone)
- __KTRACE_OPT(KPANIC, Kern::Printf("DSwapManager::WriteSwapPages: error writing media at %08x + %x: %d", writeOffset << readUnitShift, writeSize << readUnitShift, r));
- __NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
- END_PAGING_BENCHMARK(EPagingBmWriteDataMedia);
+ // Get a new swap location for these pages, writing them all together if possible
+ TInt swapIndex = AllocSwapIndex(count);
+ if (swapIndex >= 0)
+ r = DoWriteSwapPages(&aMemory[startIndex], &aIndex[startIndex], count, aLinAddr + (startIndex << KPageShift), swapIndex, aBackground);
+ else
+ {
+ // Otherwise, write them individually
+ for (TUint j = startIndex ; j < i ; ++j)
+ {
+ swapIndex = AllocSwapIndex(1);
+ __NK_ASSERT_DEBUG(swapIndex >= 0);
+ r = DoWriteSwapPages(&aMemory[j], &aIndex[j], 1, aLinAddr + (j << KPageShift), swapIndex, aBackground);
+ if (r != KErrNone)
+ break;
+ }
+ }
+
+ startIndex = -1;
+ }
+ }
}
- // TODO: Work out what to do if page out fails.
- __NK_ASSERT_ALWAYS(r == KErrNone);
- MmuLock::Lock();
- // The swap data should not have been modified.
- __NK_ASSERT_DEBUG(swapData == aMemory->PagingManagerData(index));
- // Store the new swap location and mark the page as saved.
- swapData &= ~(EUninitialised | ESwapIndexMask);
- swapData |= (swapIndex << ESwapIndexShift) | ESaved;
- aMemory->SetPagingManagerData(index, swapData);
- MmuLock::Unlock();
-
- END_PAGING_BENCHMARK(EPagingBmWriteDataPage);
- }
+
+ END_PAGING_BENCHMARK_N(EPagingBmWriteDataPage, aCount);
return r;
}
+TInt DSwapManager::DoWriteSwapPages(DMemoryObject** aMemory, TUint* aIndex, TUint aCount, TLinAddr aLinAddr, TInt aSwapIndex, TBool aBackground)
+ {
+
+ const TUint readUnitShift = iDevice->iReadUnitShift;
+ const TUint writeSize = aCount << (KPageShift - readUnitShift);
+ const TUint writeOffset = aSwapIndex << (KPageShift - readUnitShift);
+
+ TThreadMessage msg;
+ START_PAGING_BENCHMARK;
+ TInt r = iDevice->Write(&msg, aLinAddr, writeOffset, writeSize, aBackground);
+ if (r != KErrNone)
+ {
+ __KTRACE_OPT(KPANIC, Kern::Printf("DSwapManager::WriteSwapPages: error writing media from %08x to %08x + %x: %d", aLinAddr, writeOffset << readUnitShift, writeSize << readUnitShift, r));
+ }
+ __NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
+ __NK_ASSERT_ALWAYS(r == KErrNone);
+ END_PAGING_BENCHMARK(EPagingBmWriteDataMedia);
+
+ TUint i;
+ TUint swapData[KMaxPagesToClean];
+
+ MmuLock::Lock();
+ for (i = 0 ; i < aCount ; ++i)
+ {
+ // Re-check the swap state in case page was decommitted while we were writing
+ swapData[i] = aMemory[i]->PagingManagerData(aIndex[i]);
+ TSwapState s = SwapState(swapData[i]);
+ __NK_ASSERT_DEBUG(s == EStateUnreserved || s == EStateWriting);
+ if (s == EStateWriting)
+ {
+ // Store the new swap location and mark the page as saved.
+ aMemory[i]->SetPagingManagerData(aIndex[i], SwapData(EStateWritten, aSwapIndex + i));
+ }
+ }
+ MmuLock::Unlock();
+
+ for (i = 0 ; i < aCount ; ++i)
+ {
+ TSwapState s = SwapState(swapData[i]);
+ if (s == EStateUnreserved)
+ {
+ // The page was decommitted while we were cleaning it, so free the swap page we
+ // allocated and continue, leaving this page in the unreserved state.
+ FreeSwapIndex(aSwapIndex + i);
+ }
+ }
+
+ return KErrNone;
+ }
+
/**
Notify the media driver that the page written to swap is no longer required.
*/
void DSwapManager::DoDeleteNotify(TUint aSwapIndex)
{
- // Ram Alloc lock prevents the swap location being assigned to another page.
- __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
-
+ __ASSERT_CRITICAL; // so we can pass the paging device a stack-allocated TThreadMessage
#ifdef __PAGING_DELETE_NOTIFY_ENABLED
const TUint readUnitShift = iDevice->iReadUnitShift;
const TUint size = KPageSize >> readUnitShift;
TUint offset = (aSwapIndex << KPageShift) >> readUnitShift;
+ TThreadMessage msg;
START_PAGING_BENCHMARK;
// Ignore the return value as this is just an optimisation that is not supported on all media.
- (void)iDevice->DeleteNotify(&iDelNotifyMsg, offset, size);
+ (void)iDevice->DeleteNotify(&msg, offset, size);
END_PAGING_BENCHMARK(EPagingBmDeleteNotifyDataPage);
#endif
}
// Check swap thresholds and notify (see K::CheckFreeMemoryLevel)
-void DSwapManager::CheckSwapThresholds(TUint aInitial, TUint aFinal)
+void DSwapManager::CheckSwapThresholdsAndUnlock(TUint aInitial)
{
TUint changes = 0;
- if (aFinal < iSwapThesholdLow && aInitial >= iSwapThesholdLow)
+ if (iFreePageCount < iSwapThesholdLow && aInitial >= iSwapThesholdLow)
changes |= (EChangesFreeMemory | EChangesLowMemory);
- if (aFinal >= iSwapThesholdGood && aInitial < iSwapThesholdGood)
+ if (iFreePageCount >= iSwapThesholdGood && aInitial < iSwapThesholdGood)
changes |= EChangesFreeMemory;
+ NKern::FMSignal(&iSwapLock);
if (changes)
Kern::AsyncNotifyChanges(changes);
}
@@ -462,23 +620,25 @@
void DSwapManager::GetSwapInfo(SVMSwapInfo& aInfoOut)
{
- __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
aInfoOut.iSwapSize = iBitMap->iSize << KPageShift;
- aInfoOut.iSwapFree = iBitMapFree << KPageShift;
+ NKern::FMWait(&iSwapLock);
+ aInfoOut.iSwapFree = iFreePageCount << KPageShift;
+ NKern::FMSignal(&iSwapLock);
}
TInt DSwapManager::SetSwapThresholds(const SVMSwapThresholds& aThresholds)
{
- __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
if (aThresholds.iLowThreshold > aThresholds.iGoodThreshold)
return KErrArgument;
TInt low = (aThresholds.iLowThreshold + KPageSize - 1) >> KPageShift;
TInt good = (aThresholds.iGoodThreshold + KPageSize - 1) >> KPageShift;
if (good > iBitMap->iSize)
return KErrArgument;
+ NKern::FMWait(&iSwapLock);
iSwapThesholdLow = low;
iSwapThesholdGood = good;
+ NKern::FMSignal(&iSwapLock);
return KErrNone;
}
@@ -527,7 +687,7 @@
}
-TInt DDataPagedMemoryManager::AcquirePageWriteRequest(DPageWriteRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount)
+TInt DDataPagedMemoryManager::AcquirePageWriteRequest(DPageWriteRequest*& aRequest, DMemoryObject** aMemory, TUint* aIndex, TUint aCount)
{
aRequest = iDevice->iRequestPool->AcquirePageWriteRequest(aMemory,aIndex,aCount);
return KErrNone;
@@ -547,11 +707,7 @@
ReAllocDecommitted(aMemory,aIndex,aCount);
// Reserve the swap pages required.
- RamAllocLock::Lock();
- TInt r = iSwapManager->ReserveSwap(aMemory, aIndex, aCount);
- RamAllocLock::Unlock();
-
- return r;
+ return iSwapManager->ReserveSwap(aMemory, aIndex, aCount);
}
@@ -562,10 +718,8 @@
// Unreserve the swap pages associated with the memory object. Do this before
// removing the page array entries to prevent a page fault reallocating these pages.
- RamAllocLock::Lock();
TInt freed = iSwapManager->UnreserveSwap(aMemory, aIndex, aCount);
(void)freed;
- RamAllocLock::Unlock();
DoFree(aMemory,aIndex,aCount);
}
@@ -573,12 +727,16 @@
/**
@copydoc DMemoryManager::Wipe
-@todo Not yet implemented.
- Need to handle this smartly, e.g. throw RAM away and set to uninitialised
*/
TInt DDataPagedMemoryManager::Wipe(DMemoryObject* aMemory)
{
- __NK_ASSERT_ALWAYS(0); // not implemented yet
+ // This is not implemented
+ //
+ // It's possible to implement this by throwing away all pages that are paged in and just setting
+	// the backing store state to EStateBlank; however, there are currently no use cases which
+ // involve calling Wipe on paged memory.
+
+ __NK_ASSERT_ALWAYS(0);
return KErrNotSupported;
}
@@ -586,7 +744,7 @@
TInt DDataPagedMemoryManager::ReadPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageReadRequest* aRequest)
{
- __NK_ASSERT_DEBUG(aRequest->CheckUse(aMemory,aIndex,aCount));
+ __NK_ASSERT_DEBUG(aRequest->CheckUseContiguous(aMemory,aIndex,aCount));
// Map pages temporarily so that we can copy into them.
const TLinAddr linAddr = aRequest->MapPages(aIndex, aCount, aPages);
@@ -600,70 +758,78 @@
}
-TInt DDataPagedMemoryManager::WritePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageWriteRequest* aRequest)
+TInt DDataPagedMemoryManager::WritePages(DMemoryObject** aMemory, TUint* aIndex, TPhysAddr* aPages, TUint aCount, DPageWriteRequest* aRequest, TBool aAnyExecutable, TBool aBackground)
{
- __NK_ASSERT_DEBUG(aRequest->CheckUse(aMemory,aIndex,aCount));
+	// Map the pages temporarily so that their contents can be written out.
+ const TLinAddr linAddr = aRequest->MapPages(aIndex[0], aCount, aPages);
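+	// CleanPages passes pages that are sequential in page colour, so mapping them starting at the
+	// colour of aIndex[0] gives each page its correct virtual colour.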
- // Map pages temporarily so that we can copy into them.
- const TLinAddr linAddr = aRequest->MapPages(aIndex, aCount, aPages);
-
- TInt r = iSwapManager->WriteSwapPages(aMemory, aIndex, aCount, linAddr, aRequest);
+ TInt r = iSwapManager->WriteSwapPages(aMemory, aIndex, aCount, linAddr, aBackground);
	// If the memory object allows executable mappings then an IMB is needed.
- aRequest->UnmapPages(aMemory->IsExecutable());
+ aRequest->UnmapPages(aAnyExecutable);
return r;
}
-TInt DDataPagedMemoryManager::CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& aPageArrayEntry)
+void DDataPagedMemoryManager::CleanPages(TUint aPageCount, SPageInfo** aPageInfos, TBool aBackground)
{
- if(!aPageInfo->IsDirty())
- return KErrNone;
-
- // shouldn't be asked to clean a page which is writable...
- __NK_ASSERT_DEBUG(!aPageInfo->IsWritable());
+ __NK_ASSERT_DEBUG(PageCleaningLock::IsHeld());
+ __NK_ASSERT_DEBUG(MmuLock::IsHeld());
+ __NK_ASSERT_DEBUG(aPageCount <= (TUint)KMaxPagesToClean);
+
+ TUint i;
+ DMemoryObject* memory[KMaxPagesToClean];
+ TUint index[KMaxPagesToClean];
+ TPhysAddr physAddr[KMaxPagesToClean];
+ TBool anyExecutable = EFalse;
+
+ for (i = 0 ; i < aPageCount ; ++i)
+ {
+ SPageInfo* pi = aPageInfos[i];
- // mark page as being modified by us...
- TUint modifierInstance; // dummy variable used only for it's storage address on the stack
- aPageInfo->SetModifier(&modifierInstance);
+ __NK_ASSERT_DEBUG(!pi->IsWritable());
+ __NK_ASSERT_DEBUG(pi->IsDirty());
+
+ // mark page as being modified by us...
+ pi->SetModifier(&memory[0]);
+
+ // get info about page...
+ memory[i] = pi->Owner();
+ index[i] = pi->Index();
+ physAddr[i] = pi->PhysAddr();
+ anyExecutable = anyExecutable || memory[i]->IsExecutable();
+ }
- // get info about page...
- TUint index = aPageInfo->Index();
- TPhysAddr physAddr = aPageInfo->PhysAddr();
-
- // Release the mmu lock while we write out the page. This is safe as the
- // RamAllocLock stops the physical address being freed from this object.
MmuLock::Unlock();
// get paging request object...
DPageWriteRequest* req;
- TInt r = AcquirePageWriteRequest(req, aMemory, index, 1);
- __NK_ASSERT_DEBUG(r==KErrNone); // we should always get a write request because the previous function blocks until it gets one
- __NK_ASSERT_DEBUG(req); // we should always get a write request because the previous function blocks until it gets one
-
- r = WritePages(aMemory, index, 1, &physAddr, req);
+ TInt r = AcquirePageWriteRequest(req, memory, index, aPageCount);
+ __NK_ASSERT_DEBUG(r==KErrNone && req);
+
+ r = WritePages(memory, index, physAddr, aPageCount, req, anyExecutable, aBackground);
+ __NK_ASSERT_DEBUG(r == KErrNone); // this should never return an error
req->Release();
MmuLock::Lock();
- if(r!=KErrNone)
- return r;
-
- // check if page is clean...
- if(aPageInfo->CheckModified(&modifierInstance) || aPageInfo->IsWritable())
+ for (i = 0 ; i < aPageCount ; ++i)
{
- // someone else modified the page, or it became writable, so fail...
- r = KErrInUse;
+ SPageInfo* pi = aPageInfos[i];
+ // check if page is clean...
+ if(pi->CheckModified(&memory[0]) || pi->IsWritable())
+ {
+ // someone else modified the page, or it became writable, so mark as not cleaned
+ aPageInfos[i] = NULL;
+ }
+ else
+ {
+ // page is now clean!
+ ThePager.SetClean(*pi);
+ }
}
- else
- {
- // page is now clean!
- ThePager.SetClean(*aPageInfo);
- }
-
- return r;
}
@@ -680,22 +846,13 @@
void DDataPagedMemoryManager::GetSwapInfo(SVMSwapInfo& aInfoOut)
{
- NKern::ThreadEnterCS();
- RamAllocLock::Lock();
iSwapManager->GetSwapInfo(aInfoOut);
- RamAllocLock::Unlock();
- NKern::ThreadLeaveCS();
}
TInt DDataPagedMemoryManager::SetSwapThresholds(const SVMSwapThresholds& aThresholds)
{
- NKern::ThreadEnterCS();
- RamAllocLock::Lock();
- TInt r = iSwapManager->SetSwapThresholds(aThresholds);
- RamAllocLock::Unlock();
- NKern::ThreadLeaveCS();
- return r;
+ return iSwapManager->SetSwapThresholds(aThresholds);
}
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mdefrag.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mdefrag.cpp Tue May 11 17:28:22 2010 +0300
@@ -124,7 +124,8 @@
TInt M::MovePage(TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)
{
- TInt r;
+	// Returned when the page is a real RAM page but is not paged, managed or free.
+ TInt r = KErrNotSupported;
// get memory object corresponding to the page...
DMemoryObject* memory = 0;
@@ -136,30 +137,76 @@
{// The page is paged so let the pager handle it.
return ThePager.DiscardPage(pi, aBlockZoneId, aBlockRest);
}
- if (pi->Type()==SPageInfo::EManaged)
- memory = pi->Owner();
- }
- MmuLock::Unlock();
-
- // Note, whilst we hold the RamAllocLock the page can't change it's use
- // and we can safely assume that it still belongs to the memory object
- // at a fixed page index.
- // Also, as memory objects can't be destroyed whilst they still own pages
- // we can safely access this object without taking an explicit referernce,
- // i.e. we don't need to Open() the memory object.
- if (!pi)
- {// page info for aOld not found so aOld is not a RAM page...
- r = KErrArgument;
- }
- else if(!memory)
- {
- // page does not have a memory manager, so we can't move it...
- r = KErrNotSupported;
+ switch (pi->Type())
+ {
+ case SPageInfo::EManaged:
+ memory = pi->Owner();
+			// Note, whilst we hold the RamAllocLock the page can't change its use
+ // and we can safely assume that it still belongs to the memory object
+ // at a fixed page index.
+ // Also, as memory objects can't be destroyed whilst they still own pages
+ // we can safely access this object without taking an explicit reference,
+ // i.e. we don't need to Open() the memory object.
+ MmuLock::Unlock();
+ // move page...
+ r = memory->iManager->MovePage(memory, pi, aNew, aBlockZoneId, aBlockRest);
+ break;
+ case SPageInfo::EUnused:
+ r = KErrNotFound; // This page is free so nothing to do.
+ // Fall through..
+ default:
+ MmuLock::Unlock();
+ }
}
else
- {
- // move page...
- r = memory->iManager->MovePage(memory, pi, aNew, aBlockZoneId, aBlockRest);
+ {// page info for aOld not found so aOld is not a RAM page...
+ MmuLock::Unlock();
+ r = KErrArgument;
}
return r;
}
+
+
+TInt M::MoveAndAllocPage(TPhysAddr aAddr, TZonePageType aPageType)
+ {
+	// Returned when the page is a real RAM page but is not paged, managed or free.
+ TInt r = KErrNotSupported;
+
+ // get memory object corresponding to the page...
+ DMemoryObject* memory = 0;
+ MmuLock::Lock();
+ SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aAddr & ~KPageMask);
+ if(pi)
+ {
+ if (pi->PagedState() != SPageInfo::EUnpaged)
+ {// The page is paged so let the pager handle it.
+ return ThePager.DiscardAndAllocPage(pi, aPageType);
+ }
+ switch (pi->Type())
+ {
+ case SPageInfo::EManaged:
+ memory = pi->Owner();
+			// Note, whilst we hold the RamAllocLock the page can't change its use
+ // and we can safely assume that it still belongs to the memory object
+ // at a fixed page index.
+ // Also, as memory objects can't be destroyed whilst they still own pages
+			// we can safely access this object without taking an explicit reference,
+ // i.e. we don't need to Open() the memory object.
+ MmuLock::Unlock();
+ // move page...
+ r = memory->iManager->MoveAndAllocPage(memory, pi, aPageType);
+ break;
+ case SPageInfo::EUnused:
+ r = KErrNone; // This page is free so nothing to do.
+ // Fall through..
+ default:
+ MmuLock::Unlock();
+ }
+ }
+ else
+ {// page info for aAddr not found so aAddr is not a RAM page...
+ MmuLock::Unlock();
+ r = KErrArgument;
+ }
+ return r;
+ }
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mexport.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mexport.cpp Tue May 11 17:28:22 2010 +0300
@@ -839,11 +839,3 @@
}
-
-EXPORT_C void DPagingDevice::NotifyIdle()
- {
- }
-
-EXPORT_C void DPagingDevice::NotifyBusy()
- {
- }
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mmanager.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mmanager.cpp Tue May 11 17:28:22 2010 +0300
@@ -108,12 +108,10 @@
}
-TInt DMemoryManager::CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& /*aPageArrayEntry*/)
+void DMemoryManager::CleanPages(TUint aPageCount, SPageInfo** aPageInfos, TBool /*aBackground*/)
{
- if(aPageInfo->IsDirty()==false)
- return KErrNone;
- __NK_ASSERT_DEBUG(0);
- return KErrNotSupported;
+ for (TUint i = 0 ; i < aPageCount ; ++i)
+ __NK_ASSERT_DEBUG(!aPageInfos[i]->IsDirty());
}
@@ -136,6 +134,13 @@
return KErrNotSupported;
}
+
+TInt DMemoryManager::MoveAndAllocPage(DMemoryObject*, SPageInfo*, TZonePageType)
+ {
+ return KErrNotSupported;
+ }
+
+
TZonePageType DMemoryManager::PageType()
{// This should not be invoked on memory managers that do not use the methods
// AllocPages() and FreePages().
@@ -723,6 +728,7 @@
public:
// from DMemoryManager...
virtual TInt MovePage(DMemoryObject* aMemory, SPageInfo* aOldPageInfo, TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest);
+ virtual TInt MoveAndAllocPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TZonePageType aPageType);
virtual TInt HandleFault( DMemoryObject* aMemory, TUint aIndex, DMemoryMapping* aMapping,
TUint aMapInstanceCount, TUint aAccessPermissions);
virtual TZonePageType PageType();
@@ -889,6 +895,18 @@
}
+TInt DMovableMemoryManager::MoveAndAllocPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TZonePageType aPageType)
+ {
+ TPhysAddr newPage;
+ TInt r = MovePage(aMemory, aPageInfo, newPage, KRamZoneInvalidId, EFalse);
+ if (r == KErrNone)
+ {
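+		// The move has freed the original physical page; mark that frame as allocated as
+		// aPageType so the caller can take ownership of it.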
+ TheMmu.MarkPageAllocated(aPageInfo->PhysAddr(), aPageType);
+ }
+ return r;
+ }
+
+
TInt DMovableMemoryManager::HandleFault(DMemoryObject* aMemory, TUint aIndex, DMemoryMapping* aMapping,
TUint aMapInstanceCount, TUint aAccessPermissions)
{
@@ -1039,6 +1057,9 @@
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
__UNLOCK_GUARD_START(MmuLock);
+ // must always hold the PageCleaningLock if the page needs to be cleaned
+ __NK_ASSERT_DEBUG(!aPageInfo->IsDirty() || PageCleaningLock::IsHeld());
+
TUint index = aPageInfo->Index();
TInt r;
@@ -1096,10 +1117,11 @@
// page successfully unmapped...
aPageInfo->SetReadOnly(); // page not mapped, so must be read-only
- // if the page can be made clean...
- r = aMemory->iManager->CleanPage(aMemory,aPageInfo,p);
+ // attempt to clean the page if it is dirty...
+ if (aPageInfo->IsDirty())
+ aMemory->iManager->CleanPages(1, &aPageInfo, EFalse);
- if(r==KErrNone)
+ if(aPageInfo)
{
// page successfully stolen...
__NK_ASSERT_DEBUG((*p^page)<(TUint)KPageSize); // sanity check, page should still be allocated to us
@@ -1111,13 +1133,10 @@
__NK_ASSERT_ALWAYS((pagerInfo&(RPageArray::EFlagsMask|RPageArray::EStateMask)) == RPageArray::ENotPresent);
TheMmu.PageFreed(aPageInfo);
+ r = KErrNone;
}
else
- {
- // only legitimate reason for failing the clean is if the page state was changed
- // by a page fault or by pinning, this should return KErrInUse...
- __NK_ASSERT_DEBUG(r==KErrInUse);
- }
+ r = KErrInUse;
}
}
@@ -2030,14 +2049,6 @@
__NK_ASSERT_ALWAYS(0);
return KErrNotSupported;
}
-
-
-TInt DPagedMemoryManager::WritePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageWriteRequest* aRequest)
- {
- __NK_ASSERT_ALWAYS(0);
- return KErrNotSupported;
- }
-
TZonePageType DPagedMemoryManager::PageType()
	{// Paged manager's pages should be discardable and will actually be freed by
// the pager so this value won't be used.
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mmanager.h Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mmanager.h Tue May 11 17:28:22 2010 +0300
@@ -242,6 +242,7 @@
otherwise one of the system wide error codes.
@pre RamAllocLock held.
+ @pre If the page is dirty the PageCleaning lock must be held.
@pre MmuLock held.
*/
virtual TInt StealPage(DMemoryObject* aMemory, SPageInfo* aPageInfo);
@@ -265,31 +266,31 @@
virtual TInt RestrictPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TRestrictPagesType aRestriction);
/**
- Clean a page of RAM by saving any modifications to it out to backing store.
+	Clean multiple pages of RAM by saving any modifications to them out to backing store.
- This function must only be called when there are no writable MMU mappings of the page.
+ This function must only be called when there are no writable MMU mappings of the pages.
- The function must only return a success after determining no writable MMU mappings
- were created for the page, in which case it should also mark the page as clean using
- SPageInfo::SetClean.
-
- This is only intended for use by #StealPage.
+ The function takes an array of SPageInfo pointers indicating the pages to clean. On return, the
+ elements of this array are unchanged if the page was successfully cleaned, or set to NULL if
+ cleaning was abandoned (by the page being written to, for example).
- @param aMemory A memory object associated with this manager.
- @param aPageInfo The page information structure of the page to be cleaned.
- This must be owned by \a aMemory.
- @param aPageArrayEntry Reference to the page's page array entry in \a aMemory->iPages.
+ The pages passed must be sequential in their page colour (index & KPageColourMask).
+
+ Those pages that are successfully cleaned are marked as clean using SPageInfo::SetClean.
- @return KErrNone if successful,
- KErrInUse if the page state changed, e.g. became pinned or was subject to a page fault making it writable,
- KErrNotSupported if the manager doesn't support this function,
- otherwise one of the system wide error codes.
+ This is intended for use by #StealPage and #CleanSomePages.
+
+ @param aPageCount Number of pages to clean.
+ @param aPageInfos Pointer to an array of aPageCount page info pointers.
+ @param aBackground Whether the activity should be ignored when determining whether the
+ paging device is busy.
@pre MmuLock held
+ @pre PageCleaning lock held
@pre The memory page must not have any writeable MMU mappings.
@post MmuLock held (but may have been released by this function)
*/
- virtual TInt CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& aPageArrayEntry);
+ virtual void CleanPages(TUint aPageCount, SPageInfo** aPageInfos, TBool aBackground);
/**
Process a page fault in memory associated with this manager.
@@ -358,11 +359,37 @@
virtual void Unpin(DMemoryObject* aMemory, DMemoryMappingBase* aMapping, TPinArgs& aPinArgs) =0;
/**
- @todo
+ Attempt to move the page specified to a new physical location. The new physical
+ location for the page to be moved to is allocated by this method. However,
+ aBlockZoneId and aBlockRest can be used to control which RAM zone the new
+ location is in.
+
+ @param aMemory The memory object that owns the page.
+ @param aOldPageInfo The page info for the physical page to move.
+ @param aNewPage On success this will hold the physical address of the new
+ location for the page.
+ @param aBlockZoneId The ID of a RAM zone not to allocate the new page into.
+ @param aBlockRest When set to ETrue the search for a new page will stop if it
+ ever needs to look at aBlockZoneId.
+ @return KErrNone on success, KErrInUse if the page couldn't be moved,
+ or KErrNoMemory if it wasn't possible to allocate a new page.
*/
virtual TInt MovePage(DMemoryObject* aMemory, SPageInfo* aOldPageInfo, TPhysAddr& aNewPage, TUint aBlockZoneId, TBool aBlockRest);
/**
+ Move the page specified to a new physical location and mark the page as
+ allocated as type aPageType.
+
+ @param aMemory The memory object that owns the page.
+ @param aPageInfo The page info for the physical page to move.
+	@param aPageType The type of the page to allocate into the original physical
+ location of the page to move.
+ @return KErrNone on success, KErrInUse if the page couldn't be moved,
+ or KErrNoMemory if it wasn't possible to allocate a new page.
+ */
+ virtual TInt MoveAndAllocPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TZonePageType aPageType);
+
+ /**
Return the TZonePageType of the pages that the memory manager can allocate and free.
*/
virtual TZonePageType PageType();
@@ -567,25 +594,6 @@
virtual TInt ReadPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageReadRequest* aRequest) =0;
/**
- Save the data content of a specified region of a memory object by writing it to
- storage media. This is intended for use by an implementation of #CleanPage.
-
- The memory region must be the same as, or a subset of, the region used when obtaining
- the request object \a aRequest.
-
- @param aMemory A memory object associated with this manager.
- @param aIndex Page index for the start of the region.
- @param aCount Number of pages in the region.
- @param aPages Pointer to array of pages to read into. This must contain \a aCount
- number of physical page addresses which are each page aligned.
- @param aRequest A request object previously obtained with #AcquirePageWriteRequest.
-
- @return KErrNone if successful,
- otherwise one of the system wide error codes.
- */
- virtual TInt WritePages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageWriteRequest* aRequest);
-
- /**
Check if a region of a memory object has been allocated. E.g. that #Alloc
	has reserved backing store for the memory and this has not yet been freed
by #Free or #Destruct.
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mmu.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mmu.cpp Tue May 11 17:28:22 2010 +0300
@@ -622,7 +622,7 @@
__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam(?,%d,%d,?,%d)", aZoneIdCount, aBytes, aPhysAddr, aAlign));
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
- TInt r = iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aBytes, aPhysAddr, EPageFixed, aAlign);
+ TInt r = iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aBytes, aPhysAddr, aAlign);
if(r!=KErrNone)
iRamAllocFailed = ETrue;
else
@@ -871,6 +871,22 @@
}
+/**
+Mark a page as being allocated to a particular page type.
+
+NOTE - This page should not be used until PagesAllocated() has been invoked on it.
+
+@param aPhysAddr The physical address of the page to mark as allocated.
+@param aZonePageType The type of the page to mark as allocated.
+*/
+void Mmu::MarkPageAllocated(TPhysAddr aPhysAddr, TZonePageType aZonePageType)
+ {
+ __KTRACE_OPT(KMMU,Kern::Printf("Mmu::MarkPageAllocated(0x%x, %d)", aPhysAddr, aZonePageType));
+ __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
+ iRamPageAllocator->MarkPageAllocated(aPhysAddr, aZonePageType);
+ }
+
+
void Mmu::FreeRam(TPhysAddr* aPages, TUint aCount, TZonePageType aZonePageType)
{
__KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreeRam(?,%d)",aCount));
@@ -936,15 +952,7 @@
// Only the pager sets EAllocNoPagerReclaim and it shouldn't allocate contiguous ram.
__NK_ASSERT_DEBUG(!(aFlags&EAllocNoPagerReclaim));
#endif
- TInt r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, EPageFixed, aAlign+KPageShift);
- if(r==KErrNoMemory && aCount > KMaxFreeableContiguousPages)
- {
- // flush paging cache and retry...
- RamAllocLock::Unlock();
- ThePager.FlushAll();
- RamAllocLock::Lock();
- r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, EPageFixed, aAlign+KPageShift);
- }
+ TInt r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, aAlign+KPageShift);
if(r!=KErrNone)
iRamAllocFailed = ETrue;
else
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mmu.h Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mmu.h Tue May 11 17:28:22 2010 +0300
@@ -21,8 +21,6 @@
#ifndef __MMU_H__
#define __MMU_H__
-#define _USE_OLDEST_LISTS
-
#include "mm.h"
#include "mmboot.h"
#include <mmtypes.h>
@@ -146,7 +144,7 @@
enum TPagedState
{
/**
- Page is not being managed for demand paging purposes, is has been transiently
+ Page is not being managed for demand paging purposes, or has been transiently
removed from the demand paging live list.
*/
EUnpaged = 0x0,
@@ -172,7 +170,6 @@
// NOTE - This must be the same value as EStatePagedLocked as defined in mmubase.h
EPagedPinned = 0x4,
-#ifdef _USE_OLDEST_LISTS
/**
Page is in the live list as one of oldest pages that is clean.
*/
@@ -181,8 +178,7 @@
/**
Page is in the live list as one of oldest pages that is dirty.
*/
- EPagedOldestDirty = 0x6
-#endif
+ EPagedOldestDirty = 0x6
};
@@ -678,7 +674,7 @@
/**
Flag this page as 'dirty', indicating that its contents may no longer match those saved
- to a backing store. This sets the flag #EWritable.
+ to a backing store. This sets the flag #EDirty.
This is used in the management of demand paged memory.
@@ -687,12 +683,13 @@
FORCE_INLINE void SetDirty()
{
CheckAccess("SetDirty");
+ __NK_ASSERT_DEBUG(IsWritable());
iFlags |= EDirty;
}
/**
Flag this page as 'clean', indicating that its contents now match those saved
- to a backing store. This clears the flag #EWritable.
+ to a backing store. This clears the flag #EDirty.
This is used in the management of demand paged memory.
@@ -701,6 +698,7 @@
FORCE_INLINE void SetClean()
{
CheckAccess("SetClean");
+ __NK_ASSERT_DEBUG(!IsWritable());
iFlags &= ~EDirty;
}
@@ -1751,11 +1749,12 @@
#endif
}
-private:
+public:
/** The lock */
static NFastMutex iLock;
#ifdef _DEBUG
+private:
static TUint UnlockGuardNest;
static TUint UnlockGuardFail;
#endif
@@ -1963,6 +1962,7 @@
TInt AllocRam( TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags, TZonePageType aZonePageType,
TUint aBlockZoneId=KRamZoneInvalidId, TBool aBlockRest=EFalse);
+ void MarkPageAllocated(TPhysAddr aPhysAddr, TZonePageType aZonePageType);
void FreeRam(TPhysAddr* aPages, TUint aCount, TZonePageType aZonePageType);
TInt AllocContiguousRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags);
void FreeContiguousRam(TPhysAddr aPhysAddr, TUint aCount);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mpagecleaner.cpp Tue May 11 17:28:22 2010 +0300
@@ -0,0 +1,244 @@
+// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+//
+
+#include <kernel.h>
+#include "mpagecleaner.h"
+#include "mm.h"
+#include "mmu.h"
+#include "mpager.h"
+
+#ifdef __PAGING_PRE_CLEAN_DIRTY_PAGES
+
+inline void IgnorePrintf(...) { }
+
+#define PAGE_CLEANER_TRACE IgnorePrintf
+//#define PAGE_CLEANER_TRACE Kern::Printf
+
+_LIT(KThreadName, "PageCleaner");
+
+const TInt KThreadPriority = 25;
+
+/// The length of time the paging device is idle before we decide to use it for cleaning dirty
+/// pages, in milliseconds.
+const TInt KIdleDelayInMillis = 2;
+
+class DPageCleaner
+ {
+public:
+ DPageCleaner();
+ void Start();
+ void NotifyPagingDeviceIdle();
+ void NotifyPagingDeviceBusy();
+ void NotifyPagesToClean();
+
+private:
+ inline TBool IsRunning();
+ void UpdateBusyCount(TInt aChange);
+ void IdleTimerExpired(TUint aInitialNotificationCount);
+ void TryToClean();
+
+private:
+ static void TimerDfcFn(TAny*);
+ static void CleanerDfcFn(TAny*);
+
+private:
+ TInt iIdleDelayInTicks;
+ NTimer iDelayTimer;
+ TDfcQue iDfcQue;
+ TDfc iTimerDfc;
+ TDfc iCleanerDfc;
+ TBool iRunning;
+
+ // All state below is accessed with the MmuLock held.
+
+ /// Whether the paging device is currently idle.
+ TBool iPagingDeviceIdle;
+
+ /// Whether the paging device has been idle for longer than the wait period.
+ TBool iIdleForAWhile;
+
+ /// Whether the page cleaner is currently running.
+ TBool iCleaningInProgress;
+ };
+
+DPageCleaner ThePageCleaner;
+
+DPageCleaner::DPageCleaner() :
+ iTimerDfc(TimerDfcFn, NULL, 1),
+ iCleanerDfc(CleanerDfcFn, NULL, 1),
+ iRunning(EFalse),
+ iPagingDeviceIdle(ETrue),
+ iIdleForAWhile(ETrue),
+ iCleaningInProgress(EFalse)
+ {
+ }
+
+void DPageCleaner::Start()
+ {
+ TBool alreadyRunning = __e32_atomic_swp_ord32(&iRunning, ETrue);
+ if (alreadyRunning)
+ return;
+
+ iIdleDelayInTicks = NKern::TimerTicks(KIdleDelayInMillis);
+
+ TInt r = Kern::DfcQInit(&iDfcQue, KThreadPriority, &KThreadName);
+ __NK_ASSERT_ALWAYS(r == KErrNone);
+ iTimerDfc.SetDfcQ(&iDfcQue);
+ iCleanerDfc.SetDfcQ(&iDfcQue);
+
+ PAGE_CLEANER_TRACE("PageCleaner started");
+ }
+
+FORCE_INLINE TBool DPageCleaner::IsRunning()
+ {
+ return __e32_atomic_load_acq32(&iRunning);
+ }
+
+void DPageCleaner::NotifyPagingDeviceIdle()
+ {
+ __NK_ASSERT_DEBUG(MmuLock::IsHeld());
+ if (IsRunning())
+ {
+ iPagingDeviceIdle = ETrue;
+ if (!iDelayTimer.IsPending())
+ iDelayTimer.OneShot(iIdleDelayInTicks, iTimerDfc);
+ }
+ }
+
+void DPageCleaner::NotifyPagingDeviceBusy()
+ {
+ __NK_ASSERT_DEBUG(MmuLock::IsHeld());
+ if (IsRunning())
+ {
+ iDelayTimer.Cancel();
+ iPagingDeviceIdle = EFalse;
+ iIdleForAWhile = EFalse;
+ }
+ }
+
+void DPageCleaner::NotifyPagesToClean()
+ {
+ __NK_ASSERT_DEBUG(MmuLock::IsHeld());
+ if (IsRunning())
+ {
+ if (!iCleaningInProgress && iIdleForAWhile)
+ iCleanerDfc.Enque();
+ }
+ }
+
+void DPageCleaner::TimerDfcFn(TAny* aPtr)
+ {
+ ThePageCleaner.IdleTimerExpired((TUint)aPtr);
+ }
+
+void DPageCleaner::IdleTimerExpired(TUint aInitialNotificationCount)
+ {
+ MmuLock::Lock();
+ if (iPagingDeviceIdle)
+ {
+ iIdleForAWhile = ETrue;
+ if (!iCleaningInProgress && ThePager.HasPagesToClean())
+ iCleanerDfc.Enque();
+ }
+ MmuLock::Unlock();
+ }
+
+void DPageCleaner::CleanerDfcFn(TAny*)
+ {
+ ThePageCleaner.TryToClean();
+ }
+
+void DPageCleaner::TryToClean()
+ {
+ MmuLock::Lock();
+ TBool workToDo = iIdleForAWhile && ThePager.HasPagesToClean();
+ iCleaningInProgress = workToDo;
+ MmuLock::Unlock();
+
+ if (!workToDo)
+ {
+ PAGE_CLEANER_TRACE("PageCleaner - started but no work to do");
+ return;
+ }
+
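+	// Clean in batches, taking and releasing both locks around each batch so other threads can
+	// make progress, and stop as soon as the device becomes busy or there is nothing left to clean.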
+ for (;;)
+ {
+ PageCleaningLock::Lock();
+ MmuLock::Lock();
+ if (!iIdleForAWhile)
+ break;
+ TInt attempted = ThePager.CleanSomePages(ETrue);
+ if (attempted == 0)
+ break;
+ PAGE_CLEANER_TRACE("PageCleaner - attempted to clean %d pages", attempted);
+ MmuLock::Unlock();
+ PageCleaningLock::Unlock();
+ }
+
+ if (iIdleForAWhile)
+ PAGE_CLEANER_TRACE("PageCleaner - no more pages to clean");
+ else
+ PAGE_CLEANER_TRACE("PageCleaner - device now busy");
+
+ iCleaningInProgress = EFalse;
+ MmuLock::Unlock();
+ PageCleaningLock::Unlock();
+ }
+
+void PageCleaner::Start()
+ {
+ ThePageCleaner.Start();
+ }
+
+void PageCleaner::NotifyPagesToClean()
+ {
+ ThePageCleaner.NotifyPagesToClean();
+ }
+
+EXPORT_C void DPagingDevice::NotifyIdle()
+ {
+ ThePageCleaner.NotifyPagingDeviceIdle();
+ }
+
+EXPORT_C void DPagingDevice::NotifyBusy()
+ {
+ ThePageCleaner.NotifyPagingDeviceBusy();
+ }
+
+#else // __PAGING_PRE_CLEAN_DIRTY_PAGES not defined
+
+void PageCleaner::Start()
+ {
+ }
+
+void PageCleaner::NotifyPagesToClean()
+ {
+ }
+
+EXPORT_C void DPagingDevice::NotifyIdle()
+ {
+ }
+
+EXPORT_C void DPagingDevice::NotifyBusy()
+ {
+ }
+
+#endif
+
+EXPORT_C NFastMutex* DPagingDevice::NotificationLock()
+ {
+ // use the MmuLock
+ return &MmuLock::iLock;
+ }
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mpagecleaner.h Tue May 11 17:28:22 2010 +0300
@@ -0,0 +1,39 @@
+// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+//
+// Handles pre-cleaning of dirty pages.
+//
+// When the paging device is idle (as determined by it calling NotifyIdle/NotifyBusy), a thread
+// writes out (cleans) dirty pages in the oldest section of the live list.
+//
+
+/**
+ @file
+ @internalComponent
+*/
+
+#ifndef MPAGECLEANER_H
+#define MPAGECLEANER_H
+
+#include <e32def.h>
+#include <nkern.h>
+
+class PageCleaner
+ {
+public:
+ static void Start();
+ static void NotifyPagesToClean();
+ };
+
+#endif
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mpager.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mpager.cpp Tue May 11 17:28:22 2010 +0300
@@ -27,14 +27,14 @@
#include "mpagearray.h"
#include "mswap.h"
#include "mthrash.h"
+#include "mpagecleaner.h"
+
#include "cache_maintenance.inl"
const TUint16 KDefaultYoungOldRatio = 3;
const TUint16 KDefaultMinPages = 256;
-#ifdef _USE_OLDEST_LISTS
const TUint16 KDefaultOldOldestRatio = 3;
-#endif
const TUint KMinOldPages = 1;
@@ -43,18 +43,24 @@
*/
const TUint KAbsoluteMaxPageCount = (1u<<(32-KPageShift))-1u;
-
+/*
+Limit the maximum number of oldest pages to bound the time taken by SelectPagesToClean(), which is
+called with the MmuLock held.
+*/
+const TUint KMaxOldestPages = 32;
+
+static DMutex* ThePageCleaningLock = NULL;
DPager ThePager;
DPager::DPager()
: iMinimumPageCount(0), iMaximumPageCount(0), iYoungOldRatio(0),
- iYoungCount(0),iOldCount(0),
-#ifdef _USE_OLDEST_LISTS
- iOldestCleanCount(0),
-#endif
+ iYoungCount(0), iOldCount(0), iOldestCleanCount(0),
iNumberOfFreePages(0), iReservePageCount(0), iMinimumPageLimit(0)
+#ifdef __DEMAND_PAGING_BENCHMARKS__
+ , iBenchmarkLock(TSpinLock::EOrderGenericIrqHigh3)
+#endif
{
}
@@ -102,13 +108,8 @@
#ifdef __SMP__
// Adjust min page count so that all CPUs are guaranteed to make progress.
- // NOTE: Can't use NKern::NumberOfCpus here because we haven't finished booting yet and will
- // always have only one CPU running at this point...
-
- // TODO: Before we can enable this the base test configuration needs
- // updating to have a sufficient minimum page size...
- //
- // iMinYoungPages *= KMaxCpus;
+ TInt numberOfCpus = NKern::NumberOfCpus();
+ iMinYoungPages *= numberOfCpus;
#endif
// A minimum young/old ratio of 1 means that we need at least twice iMinYoungPages pages...
@@ -123,11 +124,9 @@
iYoungOldRatio = KDefaultYoungOldRatio;
if(config.iYoungOldRatio)
iYoungOldRatio = config.iYoungOldRatio;
-#ifdef _USE_OLDEST_LISTS
iOldOldestRatio = KDefaultOldOldestRatio;
if(config.iSpare[2])
iOldOldestRatio = config.iSpare[2];
-#endif
// Set the minimum page counts...
iMinimumPageLimit = iMinYoungPages * (1 + iYoungOldRatio) / iYoungOldRatio
@@ -161,7 +160,6 @@
iMaximumPageCount = KAbsoluteMaxPageCount;
iInitMaximumPageCount = iMaximumPageCount;
-
TRACEB(("DPager::InitCache() live list min=%d max=%d ratio=%d",iMinimumPageCount,iMaximumPageCount,iYoungOldRatio));
// Verify the page counts are valid.
@@ -179,11 +177,9 @@
TInt ratioLimit = (iMinimumPageCount-KMinOldPages)/KMinOldPages;
__NK_ASSERT_ALWAYS(iYoungOldRatio <= ratioLimit);
-#ifdef _USE_OLDEST_LISTS
// There should always be enough old pages to allow the oldest lists ratio.
TUint oldestCount = minOldAndOldest / (1 + iOldOldestRatio);
__NK_ASSERT_ALWAYS(oldestCount);
-#endif
iNumberOfFreePages = 0;
iNumberOfDirtyPages = 0;
@@ -193,13 +189,9 @@
// old list so don't allocate them again.
RamAllocLock::Lock();
iYoungCount = 0;
-#ifdef _USE_OLDEST_LISTS
iOldCount = 0;
iOldestDirtyCount = 0;
__NK_ASSERT_DEBUG(iOldestCleanCount == iReservePageCount);
-#else
- __NK_ASSERT_DEBUG(iOldCount == iReservePageCount);
-#endif
Mmu& m = TheMmu;
for(TUint i = iReservePageCount; i < iMinimumPageCount; i++)
{
@@ -216,11 +208,7 @@
RamAllocLock::Unlock();
__NK_ASSERT_DEBUG(CacheInitialised());
-#ifdef _USE_OLDEST_LISTS
TRACEB(("DPager::InitCache() end with young=%d old=%d oldClean=%d oldDirty=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iOldestCleanCount,iOldestDirtyCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
-#else
- TRACEB(("DPager::InitCache() end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
-#endif
}
@@ -250,17 +238,12 @@
return EFalse;
if (!CheckList(&iYoungList.iA, iYoungCount))
return EFalse;
-
-#ifdef _USE_OLDEST_LISTS
if (!CheckList(&iOldestCleanList.iA, iOldestCleanCount))
return EFalse;
if (!CheckList(&iOldestDirtyList.iA, iOldestDirtyCount))
return EFalse;
TRACEP(("DP: y=%d o=%d oc=%d od=%d f=%d", iYoungCount, iOldCount,
iOldestCleanCount, iOldestDirtyCount, iNumberOfFreePages));
-#else
- TRACEP(("DP: y=%d o=%d f=%d", iYoungCount, iOldCount, iNumberOfFreePages));
-#endif //#ifdef _USE_OLDEST_LISTS
TraceCounts();
#endif // #ifdef FMM_PAGER_CHECK_LISTS
return true;
@@ -268,16 +251,10 @@
void DPager::TraceCounts()
{
-#ifdef _USE_OLDEST_LISTS
TRACEP(("DP: y=%d o=%d oc=%d od=%d f=%d min=%d max=%d ml=%d res=%d",
iYoungCount, iOldCount, iOldestCleanCount, iOldestDirtyCount,
iNumberOfFreePages, iMinimumPageCount, iMaximumPageCount,
iMinimumPageLimit, iReservePageCount));
-#else
- TRACEP(("DP: y=%d o=%d f=%d min=%d max=%d ml=%d res=%d",
- iYoungCount, iOldCount, iNumberOfFreePages, iMinimumPageCount,
- iMaximumPageCount, iMinimumPageLimit, iReservePageCount));
-#endif //#ifdef _USE_OLDEST_LISTS
}
#endif //#ifdef _DEBUG
@@ -320,15 +297,9 @@
__NK_ASSERT_DEBUG(aPageInfo->PagedState()==SPageInfo::EUnpaged);
// add as oldest page...
-#ifdef _USE_OLDEST_LISTS
aPageInfo->SetPagedState(SPageInfo::EPagedOldestClean);
iOldestCleanList.Add(&aPageInfo->iLink);
++iOldestCleanCount;
-#else
- aPageInfo->SetPagedState(SPageInfo::EPagedOld);
- iOldList.Add(&aPageInfo->iLink);
- ++iOldCount;
-#endif
Event(EEventPageInFree,aPageInfo);
}
@@ -357,7 +328,6 @@
--iOldCount;
break;
-#ifdef _USE_OLDEST_LISTS
case SPageInfo::EPagedOldestClean:
__NK_ASSERT_DEBUG(iOldestCleanCount);
aPageInfo->iLink.Deque();
@@ -369,7 +339,6 @@
aPageInfo->iLink.Deque();
--iOldestDirtyCount;
break;
-#endif
case SPageInfo::EPagedPinned:
// this can occur if a pinned mapping is being unmapped when memory is decommitted.
@@ -392,7 +361,10 @@
// Update the dirty page count as required...
if (aPageInfo->IsDirty())
+ {
+ aPageInfo->SetReadOnly();
SetClean(*aPageInfo);
+ }
if (iNumberOfFreePages > 0)
{// The paging cache is not at the minimum size so safe to let the
@@ -403,15 +375,9 @@
}
// Need to hold onto this page as have reached the page cache limit.
// add as oldest page...
-#ifdef _USE_OLDEST_LISTS
aPageInfo->SetPagedState(SPageInfo::EPagedOldestClean);
iOldestCleanList.Add(&aPageInfo->iLink);
++iOldestCleanCount;
-#else
- aPageInfo->SetPagedState(SPageInfo::EPagedOld);
- iOldList.Add(&aPageInfo->iLink);
- ++iOldCount;
-#endif
return KErrNone;
}
@@ -438,7 +404,6 @@
--iOldCount;
break;
-#ifdef _USE_OLDEST_LISTS
case SPageInfo::EPagedOldestClean:
__NK_ASSERT_DEBUG(iOldestCleanCount);
aPageInfo->iLink.Deque();
@@ -450,7 +415,6 @@
aPageInfo->iLink.Deque();
--iOldestDirtyCount;
break;
-#endif
case SPageInfo::EPagedPinned:
__NK_ASSERT_DEBUG(0);
@@ -521,52 +485,253 @@
}
-SPageInfo* DPager::StealOldestPage()
+TInt DPager::TryStealOldestPage(SPageInfo*& aPageInfoOut)
{
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+ // find oldest page in list...
+ SDblQueLink* link;
+ if (iOldestCleanCount)
+ {
+ __NK_ASSERT_DEBUG(!iOldestCleanList.IsEmpty());
+ link = iOldestCleanList.Last();
+ }
+ else if (iOldestDirtyCount)
+ {
+ __NK_ASSERT_DEBUG(!iOldestDirtyList.IsEmpty());
+ link = iOldestDirtyList.Last();
+ }
+ else if (iOldCount)
+ {
+ __NK_ASSERT_DEBUG(!iOldList.IsEmpty());
+ link = iOldList.Last();
+ }
+ else
+ {
+ __NK_ASSERT_DEBUG(iYoungCount);
+ __NK_ASSERT_ALWAYS(!iYoungList.IsEmpty());
+ link = iYoungList.Last();
+ }
+ SPageInfo* pageInfo = SPageInfo::FromLink(link);
+
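+	// Return 1 (not a system error code) to tell the caller it must acquire the PageCleaningLock
+	// and retry, since stealing a dirty page involves cleaning it first.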
+ if (pageInfo->IsDirty() && !PageCleaningLock::IsHeld())
+ return 1;
+
+ // try to steal it from owning object...
+ TInt r = StealPage(pageInfo);
+ if (r == KErrNone)
+ {
+ BalanceAges();
+ aPageInfoOut = pageInfo;
+ }
+
+ return r;
+ }
+
+
+SPageInfo* DPager::StealOldestPage()
+ {
+ __NK_ASSERT_DEBUG(MmuLock::IsHeld());
+ TBool pageCleaningLockHeld = EFalse;
for(;;)
{
- // find oldest page in list...
- SDblQueLink* link;
-#ifdef _USE_OLDEST_LISTS
- if (iOldestCleanCount)
+ SPageInfo* pageInfo = NULL;
+ TInt r = TryStealOldestPage(pageInfo);
+
+ if (r == KErrNone)
{
- __NK_ASSERT_DEBUG(!iOldestCleanList.IsEmpty());
- link = iOldestCleanList.Last();
+ if (pageCleaningLockHeld)
+ {
+ MmuLock::Unlock();
+ PageCleaningLock::Unlock();
+ MmuLock::Lock();
+ }
+ return pageInfo;
+ }
+ else if (r == 1)
+ {
+ __NK_ASSERT_ALWAYS(!pageCleaningLockHeld);
+ MmuLock::Unlock();
+ PageCleaningLock::Lock();
+ MmuLock::Lock();
+ pageCleaningLockHeld = ETrue;
}
- else if (iOldestDirtyCount)
+ // else retry...
+ }
+ }
+
+#ifdef __CPU_CACHE_HAS_COLOUR
+
+template <class T, TInt maxObjects> class TSequentialColourSelector
+ {
+public:
+ static const TInt KMaxLength = maxObjects;
+ static const TInt KArrayLength = _ALIGN_UP(KMaxLength, KPageColourCount);
+
+ FORCE_INLINE TSequentialColourSelector()
+ {
+ memclr(this, sizeof(*this));
+ }
+
+ FORCE_INLINE TBool FoundLongestSequence()
+ {
+ return iLongestLength >= KMaxLength;
+ }
+
+ FORCE_INLINE void AddCandidate(T* aObject, TInt aColour)
+ {
+ // allocate objects to slots based on colour
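+		// iSeqLength[i] holds the length of the run of consecutively filled slots ending at slot i,
+		// which FindLongestRun() uses to pick the longest colour-sequential run of pages.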
+ for (TInt i = aColour ; i < KArrayLength ; i += KPageColourCount)
{
- __NK_ASSERT_DEBUG(!iOldestDirtyList.IsEmpty());
- link = iOldestDirtyList.Last();
+ if (!iSlot[i])
+ {
+ iSlot[i] = aObject;
+ iSeqLength[i] = i == 0 ? 1 : iSeqLength[i - 1] + 1;
+ TInt j = i + 1;
+ while(j < KArrayLength && iSeqLength[j])
+ iSeqLength[j++] += iSeqLength[i];
+ TInt currentLength = iSeqLength[j - 1];
+ if (currentLength > iLongestLength)
+ {
+ iLongestLength = currentLength;
+ iLongestStart = j - currentLength;
+ }
+ break;
+ }
}
- else if (iOldCount)
-#else
- if (iOldCount)
-#endif
+ }
+
+ FORCE_INLINE TInt FindLongestRun(T** aObjectsOut)
+ {
+ if (iLongestLength == 0)
+ return 0;
+
+ if (iLongestLength < KMaxLength && iSlot[0] && iSlot[KArrayLength - 1])
{
- __NK_ASSERT_DEBUG(!iOldList.IsEmpty());
- link = iOldList.Last();
- }
- else
+ // check possibility of wrapping
+
+ TInt i = 1;
+ while (iSlot[i]) ++i; // find first hole
+ TInt wrappedLength = iSeqLength[KArrayLength - 1] + iSeqLength[i - 1];
+ if (wrappedLength > iLongestLength)
+ {
+ iLongestLength = wrappedLength;
+ iLongestStart = KArrayLength - iSeqLength[KArrayLength - 1];
+ }
+ }
+
+ iLongestLength = Min(iLongestLength, KMaxLength);
+
+ __NK_ASSERT_DEBUG(iLongestStart >= 0 && iLongestStart < KArrayLength);
+ __NK_ASSERT_DEBUG(iLongestStart + iLongestLength < 2 * KArrayLength);
+
+ TInt len = Min(iLongestLength, KArrayLength - iLongestStart);
+ wordmove(aObjectsOut, &iSlot[iLongestStart], len * sizeof(T*));
+ wordmove(aObjectsOut + len, &iSlot[0], (iLongestLength - len) * sizeof(T*));
+
+ return iLongestLength;
+ }
+
+private:
+ T* iSlot[KArrayLength];
+ TInt8 iSeqLength[KArrayLength];
+ TInt iLongestStart;
+ TInt iLongestLength;
+ };
+
+TInt DPager::SelectPagesToClean(SPageInfo** aPageInfosOut)
+ {
+ // select up to KMaxPagesToClean oldest dirty pages with sequential page colours
+
+ __NK_ASSERT_DEBUG(MmuLock::IsHeld());
+
+ TSequentialColourSelector<SPageInfo, KMaxPagesToClean> selector;
+
+ SDblQueLink* link = iOldestDirtyList.Last();
+ while (link != &iOldestDirtyList.iA)
+ {
+ SPageInfo* pi = SPageInfo::FromLink(link);
+ if (!pi->IsWritable())
{
- __NK_ASSERT_DEBUG(iYoungCount);
- __NK_ASSERT_ALWAYS(!iYoungList.IsEmpty());
- link = iYoungList.Last();
+ // the page may be in the process of being restricted, stolen or decommitted, but don't
+ // check for this as it will occur infrequently and will be detected by CheckModified
+ // anyway
+ TInt colour = pi->Index() & KPageColourMask;
+ selector.AddCandidate(pi, colour);
+ if (selector.FoundLongestSequence())
+ break;
}
- SPageInfo* pageInfo = SPageInfo::FromLink(link);
-
- // steal it from owning object...
- TInt r = StealPage(pageInfo);
-
- BalanceAges();
-
- if(r==KErrNone)
- return pageInfo; // done
-
- // loop back and try again
+ link = link->iPrev;
+ }
+
+ return selector.FindLongestRun(aPageInfosOut);
+ }
+
+#else
+
+TInt DPager::SelectPagesToClean(SPageInfo** aPageInfosOut)
+ {
+ // no page colouring restrictions, so just take up to KMaxPagesToClean oldest dirty pages
+ __NK_ASSERT_DEBUG(MmuLock::IsHeld());
+ TInt pageCount = 0;
+ SDblQueLink* link = iOldestDirtyList.Last();
+ while (link != &iOldestDirtyList.iA && pageCount < KMaxPagesToClean)
+ {
+ SPageInfo* pi = SPageInfo::FromLink(link);
+ if (!pi->IsWritable())
+ {
+ // the page may be in the process of being restricted, stolen or decommitted, but don't
+ // check for this as it will occur infrequently and will be detected by CheckModified
+ // anyway
+ aPageInfosOut[pageCount++] = pi;
+ }
+ link = link->iPrev;
}
+ return pageCount;
+ }
+
+#endif
+
+
+TInt DPager::CleanSomePages(TBool aBackground)
+ {
+ __NK_ASSERT_DEBUG(MmuLock::IsHeld());
+ __NK_ASSERT_DEBUG(PageCleaningLock::IsHeld());
+ // ram alloc lock may or may not be held
+
+ SPageInfo* pageInfos[KMaxPagesToClean];
+ TInt pageCount = SelectPagesToClean(&pageInfos[0]);
+
+ if (pageCount == 0)
+ return 0;
+
+ TheDataPagedMemoryManager->CleanPages(pageCount, pageInfos, aBackground);
+
+ for (TInt i = 0 ; i < pageCount ; ++i)
+ {
+ SPageInfo* pi = pageInfos[i];
+ if (pi)
+ {
+ __NK_ASSERT_DEBUG(pi->PagedState() == SPageInfo::EPagedOldestDirty && iOldestDirtyCount);
+ __NK_ASSERT_DEBUG(!pi->IsDirty() && !pi->IsWritable());
+
+ pi->iLink.Deque();
+ iOldestCleanList.AddHead(&pi->iLink);
+ --iOldestDirtyCount;
+ ++iOldestCleanCount;
+ pi->SetPagedState(SPageInfo::EPagedOldestClean);
+ }
+ }
+
+ return pageCount;
+ }
+
+
+TBool DPager::HasPagesToClean()
+ {
+ __NK_ASSERT_DEBUG(MmuLock::IsHeld());
+ return iOldestDirtyCount > 0;
}
@@ -647,97 +812,158 @@
}
+TInt DPager::DiscardAndAllocPage(SPageInfo* aPageInfo, TZonePageType aPageType)
+ {
+ TInt r = DiscardPage(aPageInfo, KRamZoneInvalidId, EFalse);
+ if (r == KErrNone)
+ {
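+		// The discarded frame is immediately re-marked as allocated so the caller can reuse it
+		// as a page of type aPageType.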
+ TheMmu.MarkPageAllocated(aPageInfo->PhysAddr(), aPageType);
+ }
+ // Flash the ram alloc lock as we may have had to write a page out to swap.
+ RamAllocLock::Unlock();
+ RamAllocLock::Lock();
+ return r;
+ }
+
+
+static TBool DiscardCanStealPage(SPageInfo* aOldPageInfo, TBool aBlockRest)
+ {
+ // If the page is pinned or if the page is dirty and a general defrag is being performed then
+ // don't attempt to steal it
+ return aOldPageInfo->Type() == SPageInfo::EUnused ||
+ (aOldPageInfo->PagedState() != SPageInfo::EPagedPinned && (!aBlockRest || !aOldPageInfo->IsDirty()));
+ }
+
+
TInt DPager::DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TBool aBlockRest)
{
+ // todo: assert MmuLock not released
+
+ TRACE(("> DPager::DiscardPage %08x", aOldPageInfo));
+
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
- TInt r;
- // If the page is pinned or if the page is dirty and a general defrag is being
- // performed then don't attempt to steal it.
- if (aOldPageInfo->Type() != SPageInfo::EUnused &&
- (aOldPageInfo->PagedState() == SPageInfo::EPagedPinned ||
- (aBlockRest && aOldPageInfo->IsDirty())))
- {// The page is pinned or is dirty and this is a general defrag so move the page.
+ if (!DiscardCanStealPage(aOldPageInfo, aBlockRest))
+ {
+ // The page is pinned or is dirty and this is a general defrag so move the page.
DMemoryObject* memory = aOldPageInfo->Owner();
// Page must be managed if it is pinned or dirty.
__NK_ASSERT_DEBUG(aOldPageInfo->Type()==SPageInfo::EManaged);
__NK_ASSERT_DEBUG(memory);
MmuLock::Unlock();
TPhysAddr newAddr;
- return memory->iManager->MovePage(memory, aOldPageInfo, newAddr, aBlockZoneId, aBlockRest);
+ TRACE2(("DPager::DiscardPage delegating pinned/dirty page to manager"));
+ TInt r = memory->iManager->MovePage(memory, aOldPageInfo, newAddr, aBlockZoneId, aBlockRest);
+ TRACE(("< DPager::DiscardPage %d", r));
+ return r;
}
- if (!iNumberOfFreePages)
+ TInt r = KErrNone;
+ SPageInfo* newPageInfo = NULL;
+ TBool havePageCleaningLock = EFalse;
+
+ TBool needNewPage;
+ TBool needPageCleaningLock;
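+	// Loop until we have a spare page for the live list (if one is needed) and hold the page
+	// cleaning mutex (if the old page is dirty); each acquisition drops the MmuLock, so both
+	// conditions are re-checked every time around.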
+ while(needNewPage = (iNumberOfFreePages == 0 && newPageInfo == NULL),
+ needPageCleaningLock = (aOldPageInfo->IsDirty() && !havePageCleaningLock),
+ needNewPage || needPageCleaningLock)
{
- // Allocate a new page for the live list as it has reached its minimum size.
MmuLock::Unlock();
- SPageInfo* newPageInfo = GetPageFromSystem((Mmu::TRamAllocFlags)(EMemAttNormalCached|Mmu::EAllocNoWipe),
- aBlockZoneId, aBlockRest);
- if (!newPageInfo)
- return KErrNoMemory;
+
+ if (needNewPage)
+ {
+ // Allocate a new page for the live list as it has reached its minimum size.
+ TUint flags = EMemAttNormalCached | Mmu::EAllocNoWipe;
+ newPageInfo = GetPageFromSystem((Mmu::TRamAllocFlags)flags, aBlockZoneId, aBlockRest);
+ if (!newPageInfo)
+ {
+ TRACE(("< DPager::DiscardPage KErrNoMemory"));
+ r = KErrNoMemory;
+ MmuLock::Lock();
+ break;
+ }
+ }
+
+ if (needPageCleaningLock)
+ {
+ // Acquire the page cleaning mutex so StealPage can clean it
+ PageCleaningLock::Lock();
+ havePageCleaningLock = ETrue;
+ }
// Re-acquire the mmulock and re-check that the page is not pinned or dirty.
MmuLock::Lock();
- if (aOldPageInfo->Type() != SPageInfo::EUnused &&
- (aOldPageInfo->PagedState() == SPageInfo::EPagedPinned ||
- (aBlockRest && aOldPageInfo->IsDirty())))
- {// Page is now pinned or dirty so give up as it is inuse.
- ReturnPageToSystem(*newPageInfo);
- MmuLock::Unlock();
- return KErrInUse;
- }
-
- // Attempt to steal the page
- r = StealPage(aOldPageInfo);
- __NK_ASSERT_DEBUG(MmuLock::IsHeld());
-
- if (r == KErrCompletion)
- {// This was a page table that has been freed but added to the
- // live list as a free page. Remove from live list and continue.
- __NK_ASSERT_DEBUG(!aOldPageInfo->IsDirty());
- RemovePage(aOldPageInfo);
- r = KErrNone;
- }
-
- if (r == KErrNone)
- {// Add the new page to the live list as discarding the old page
- // will reduce the live list below the minimum.
- AddAsFreePage(newPageInfo);
- // We've successfully discarded the page so return it to the free pool.
- ReturnPageToSystem(*aOldPageInfo);
- BalanceAges();
- }
- else
+ if (!DiscardCanStealPage(aOldPageInfo, aBlockRest))
{
- // New page not required so just return it to the system. This is safe as
- // iNumberOfFreePages will have this page counted but as it is not on the live list
- // noone else can touch it.
- ReturnPageToSystem(*newPageInfo);
+ // Page is now pinned or dirty so give up as it is in use.
+ r = KErrInUse;
+ break;
}
}
- else
+
+ if (r == KErrNone)
{
// Attempt to steal the page
- r = StealPage(aOldPageInfo);
-
- __NK_ASSERT_DEBUG(MmuLock::IsHeld());
-
- if (r == KErrCompletion)
- {// This was a page table that has been freed but added to the
- // live list as a free page. Remove from live list.
- __NK_ASSERT_DEBUG(!aOldPageInfo->IsDirty());
- RemovePage(aOldPageInfo);
- r = KErrNone;
+ r = StealPage(aOldPageInfo); // temporarily releases MmuLock if page is dirty
+ }
+ __NK_ASSERT_DEBUG(MmuLock::IsHeld());
+
+ if (r == KErrCompletion)
+ {// This was a page table that has been freed but added to the
+ // live list as a free page. Remove from live list and continue.
+ __NK_ASSERT_DEBUG(!aOldPageInfo->IsDirty());
+ RemovePage(aOldPageInfo);
+ r = KErrNone;
+ }
+
+ if (r == KErrNone && iNumberOfFreePages == 0)
+ {
+ if (newPageInfo)
+ {
+ // Add a new page to the live list if we have one as discarding the old page will reduce
+ // the live list below the minimum.
+ AddAsFreePage(newPageInfo);
+ newPageInfo = NULL;
}
-
- if (r == KErrNone)
- {// We've successfully discarded the page so return it to the free pool.
- ReturnPageToSystem(*aOldPageInfo);
- BalanceAges();
+ else
+ {
+			// Otherwise the live list shrank while the page was being cleaned, so we have to give up
+ AddAsFreePage(aOldPageInfo);
+ BalanceAges(); // temporarily releases MmuLock
+ r = KErrInUse;
}
}
+
+ if (r == KErrNone)
+ {
+ // We've successfully discarded the page and ensured the live list is large enough, so
+ // return it to the free pool.
+ ReturnPageToSystem(*aOldPageInfo); // temporarily releases MmuLock
+ BalanceAges(); // temporarily releases MmuLock
+ }
+
+ if (newPageInfo)
+ {
+ // New page not required so just return it to the system. This is safe as
+	// iNumberOfFreePages will have this page counted but as it is not on the live list no one
+ // else can touch it.
+ if (iNumberOfFreePages == 0)
+ AddAsFreePage(newPageInfo);
+ else
+ ReturnPageToSystem(*newPageInfo); // temporarily releases MmuLock
+ }
+
+ if (havePageCleaningLock)
+ {
+ // Release the page cleaning mutex
+ MmuLock::Unlock();
+ PageCleaningLock::Unlock();
+ MmuLock::Lock();
+ }
+
MmuLock::Unlock();
+ TRACE(("< DPager::DiscardPage returns %d", r));
return r;
}
@@ -793,6 +1019,9 @@
__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+ // should be unpaged at this point, otherwise Mmu::FreeRam will just give it back to us
+ __NK_ASSERT_DEBUG(aPageInfo.PagedState() == SPageInfo::EUnpaged);
+
__NK_ASSERT_DEBUG(iNumberOfFreePages>0);
--iNumberOfFreePages;
@@ -810,28 +1039,22 @@
SPageInfo* DPager::PageInAllocPage(Mmu::TRamAllocFlags aAllocFlags)
{
+ TBool pageCleaningLockHeld = EFalse;
SPageInfo* pageInfo;
TPhysAddr pagePhys;
-
+ TInt r = KErrGeneral;
+
RamAllocLock::Lock();
MmuLock::Lock();
+find_a_page:
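+	// Try the cheapest sources first: a free page already on the live list, then the system pool,
+	// then cleaning and/or stealing the oldest page on the live list.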
// try getting a free page from our live list...
-#ifdef _USE_OLDEST_LISTS
if (iOldestCleanCount)
{
pageInfo = SPageInfo::FromLink(iOldestCleanList.Last());
if(pageInfo->Type()==SPageInfo::EUnused)
- goto get_oldest;
+ goto try_steal_oldest_page;
}
-#else
- if(iOldCount)
- {
- pageInfo = SPageInfo::FromLink(iOldList.Last());
- if(pageInfo->Type()==SPageInfo::EUnused)
- goto get_oldest;
- }
-#endif
// try getting a free page from the system pool...
if(!HaveMaximumPages())
@@ -843,14 +1066,62 @@
MmuLock::Lock();
}
+ // try stealing a clean page...
+ if (iOldestCleanCount)
+ goto try_steal_oldest_page;
+
+ // see if we can clean multiple dirty pages in one go...
+ if (KMaxPagesToClean > 1 && iOldestDirtyCount > 1)
+ {
+ // if we don't hold the page cleaning mutex then temporarily release ram alloc mutex and
+ // acquire page cleaning mutex; if we hold it already just proceed
+ if (!pageCleaningLockHeld)
+ {
+ MmuLock::Unlock();
+ RamAllocLock::Unlock();
+ PageCleaningLock::Lock();
+ MmuLock::Lock();
+ }
+
+ // there may be clean pages now if we've waited on the page cleaning mutex, if so don't
+ // bother cleaning but just restart
+ if (iOldestCleanCount == 0)
+ CleanSomePages(EFalse);
+
+ if (!pageCleaningLockHeld)
+ {
+ MmuLock::Unlock();
+ PageCleaningLock::Unlock();
+ RamAllocLock::Lock();
+ MmuLock::Lock();
+ }
+
+ if (iOldestCleanCount > 0)
+ goto find_a_page;
+ }
+
// as a last resort, steal a page from the live list...
-get_oldest:
-#ifdef _USE_OLDEST_LISTS
+
+try_steal_oldest_page:
__NK_ASSERT_ALWAYS(iOldestCleanCount|iOldestDirtyCount|iOldCount|iYoungCount);
-#else
- __NK_ASSERT_ALWAYS(iOldCount|iYoungCount);
-#endif
- pageInfo = StealOldestPage();
+ r = TryStealOldestPage(pageInfo);
+	// if this fails we restart the whole process
+ if (r < KErrNone)
+ goto find_a_page;
+
+	// if we need to clean, acquire the page cleaning mutex for the life of this function
+ if (r == 1)
+ {
+ __NK_ASSERT_ALWAYS(!pageCleaningLockHeld);
+ MmuLock::Unlock();
+ PageCleaningLock::Lock();
+ MmuLock::Lock();
+ pageCleaningLockHeld = ETrue;
+ goto find_a_page;
+ }
+
+ // otherwise we're done!
+ __NK_ASSERT_DEBUG(r == KErrNone);
MmuLock::Unlock();
// make page state same as a freshly allocated page...
@@ -858,7 +1129,10 @@
TheMmu.PagesAllocated(&pagePhys,1,aAllocFlags);
done:
+ if (pageCleaningLockHeld)
+ PageCleaningLock::Unlock();
RamAllocLock::Unlock();
+
return pageInfo;
}
@@ -915,10 +1189,8 @@
case SPageInfo::EPagedYoung:
case SPageInfo::EPagedOld:
-#ifdef _USE_OLDEST_LISTS
case SPageInfo::EPagedOldestDirty:
case SPageInfo::EPagedOldestClean:
-#endif
continue; // discard already been allowed
case SPageInfo::EPagedPinned:
@@ -977,10 +1249,8 @@
case SPageInfo::EPagedYoung:
case SPageInfo::EPagedOld:
-#ifdef _USE_OLDEST_LISTS
case SPageInfo::EPagedOldestClean:
case SPageInfo::EPagedOldestDirty:
-#endif
changeType = ETrue;
break; // remove from live list
@@ -1046,6 +1316,7 @@
TheCodePagedMemoryManager->Init3();
TInt r = Kern::AddHalEntry(EHalGroupVM, VMHalFunction, 0);
__NK_ASSERT_ALWAYS(r==KErrNone);
+ PageCleaningLock::Init();
}
@@ -1060,12 +1331,8 @@
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
TBool restrictPage = EFalse;
SPageInfo* pageInfo = NULL;
-#ifdef _USE_OLDEST_LISTS
TUint oldestCount = iOldestCleanCount + iOldestDirtyCount;
if((iOldCount + oldestCount) * iYoungOldRatio < iYoungCount)
-#else
- if (iOldCount * iYoungOldRatio < iYoungCount)
-#endif
{
// Need more old pages so make one young page into an old page...
__NK_ASSERT_DEBUG(!iYoungList.IsEmpty());
@@ -1084,9 +1351,9 @@
restrictPage = ETrue;
}
-#ifdef _USE_OLDEST_LISTS
// Check we have enough oldest pages.
- if (oldestCount * iOldOldestRatio < iOldCount)
+ if (oldestCount < KMaxOldestPages &&
+ oldestCount * iOldOldestRatio < iOldCount)
{
__NK_ASSERT_DEBUG(!iOldList.IsEmpty());
__NK_ASSERT_DEBUG(iOldCount);
@@ -1099,6 +1366,7 @@
oldestPageInfo->SetPagedState(SPageInfo::EPagedOldestDirty);
iOldestDirtyList.AddHead(link);
++iOldestDirtyCount;
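+			// A dirty page has just aged onto the oldest list; wake the page cleaner so it can be
+			// written out while the paging device is idle.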
+ PageCleaner::NotifyPagesToClean();
Event(EEventPageAgedDirty,oldestPageInfo);
}
else
@@ -1109,7 +1377,7 @@
Event(EEventPageAgedClean,oldestPageInfo);
}
}
-#endif
+
if (restrictPage)
{
// Make the recently aged old page inaccessible. This is done last as it
@@ -1144,10 +1412,8 @@
{
case SPageInfo::EPagedYoung:
case SPageInfo::EPagedOld:
-#ifdef _USE_OLDEST_LISTS
case SPageInfo::EPagedOldestClean:
case SPageInfo::EPagedOldestDirty:
-#endif
RemovePage(pi);
AddAsYoungestPage(pi);
BalanceAges();
@@ -1167,6 +1433,7 @@
}
}
+
TInt DPager::PteAndInfoFromLinAddr( TInt aOsAsid, TLinAddr aAddress, DMemoryMappingBase* aMapping,
TUint aMapInstanceCount, TPte*& aPte, SPageInfo*& aPageInfo)
{
@@ -1192,11 +1459,13 @@
return KErrNone;
}
+
TInt DPager::TryRejuvenate( TInt aOsAsid, TLinAddr aAddress, TUint aAccessPermissions, TLinAddr aPc,
DMemoryMappingBase* aMapping, TUint aMapInstanceCount, DThread* aThread,
TAny* aExceptionInfo)
{
__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+ START_PAGING_BENCHMARK;
SPageInfo* pi;
TPte* pPte;
@@ -1292,12 +1561,8 @@
Event(EEventPageRejuvenate,pi,aPc,aAddress,aAccessPermissions);
TBool balance = false;
-#ifdef _USE_OLDEST_LISTS
if( state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld ||
state==SPageInfo::EPagedOldestClean || state==SPageInfo::EPagedOldestDirty)
-#else
- if(state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld)
-#endif
{
RemovePage(pi);
AddAsYoungestPage(pi);
@@ -1318,6 +1583,7 @@
if(balance)
BalanceAges();
+ END_PAGING_BENCHMARK(EPagingBmRejuvenate);
return KErrNone;
}
@@ -1349,10 +1615,8 @@
{
case SPageInfo::EPagedYoung:
case SPageInfo::EPagedOld:
-#ifdef _USE_OLDEST_LISTS
case SPageInfo::EPagedOldestClean:
case SPageInfo::EPagedOldestDirty:
-#endif
RemovePage(pi);
// fall through...
case SPageInfo::EUnpaged:
@@ -1386,10 +1650,8 @@
{
case SPageInfo::EPagedYoung:
case SPageInfo::EPagedOld:
-#ifdef _USE_OLDEST_LISTS
case SPageInfo::EPagedOldestClean:
case SPageInfo::EPagedOldestDirty:
-#endif
RemovePage(aPageInfo);
AddAsYoungestPage(aPageInfo);
BalanceAges();
@@ -1446,7 +1708,6 @@
__NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
break;
-#ifdef _USE_OLDEST_LISTS
case SPageInfo::EPagedOldestClean:
__NK_ASSERT_DEBUG(iOldestCleanCount);
aPageInfo->iLink.Deque();
@@ -1460,7 +1721,6 @@
--iOldestDirtyCount;
__NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
break;
-#endif
case SPageInfo::EPagedPinned:
// nothing more to do...
@@ -1749,7 +2009,7 @@
MmuLock::Lock();
- __NK_ASSERT_ALWAYS(iYoungOldRatio!=0);
+ __NK_ASSERT_ALWAYS(iYoungOldRatio);
// Make sure aMinimumPageCount is not less than absolute minimum we can cope with...
iMinimumPageLimit = iMinYoungPages * (1 + iYoungOldRatio) / iYoungOldRatio
@@ -1830,10 +2090,12 @@
}
+// WARNING THIS METHOD MAY HOLD THE RAM ALLOC LOCK FOR EXCESSIVE PERIODS. DON'T USE THIS IN ANY PRODUCTION CODE.
void DPager::FlushAll()
{
NKern::ThreadEnterCS();
RamAllocLock::Lock();
+ PageCleaningLock::Lock();
TRACE(("DPager::FlushAll() live list young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
@@ -1856,12 +2118,8 @@
do
{
SPageInfo::TPagedState state = pi->PagedState();
-#ifdef _USE_OLDEST_LISTS
if (state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld ||
state==SPageInfo::EPagedOldestClean || state==SPageInfo::EPagedOldestDirty)
-#else
- if(state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld)
-#endif
{
if (pi->Type() != SPageInfo::EUnused)
{
@@ -1874,10 +2132,7 @@
++pi;
if(((TUint)pi&(0xf<<KPageInfoShift))==0)
{
- MmuLock::Unlock(); // every 16 page infos
- RamAllocLock::Unlock();
- RamAllocLock::Lock();
- MmuLock::Lock();
+ MmuLock::Flash(); // every 16 page infos
}
}
while(pi<piEnd);
@@ -1892,6 +2147,7 @@
TRACE(("DPager::FlushAll() end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
+ PageCleaningLock::Unlock();
RamAllocLock::Unlock();
NKern::ThreadLeaveCS();
}
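MmuLock::Flash() above replaces the explicit unlock/relock sequence of the old code. A minimal behavioural sketch of what the call site relies on (the real implementation may do additional bookkeeping):

    // Behaviourally, Flash() is a brief release-and-reacquire of the MmuLock so
    // that contending threads get a chance to run; equivalent in effect to:
    //     MmuLock::Unlock();
    //     MmuLock::Lock();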
@@ -2066,9 +2322,8 @@
TUint index = (TInt) a1;
if (index >= EMaxPagingBm)
return KErrNotFound;
- NKern::LockSystem();
- SPagingBenchmarkInfo info = ThePager.iBenchmarkInfo[index];
- NKern::UnlockSystem();
+ SPagingBenchmarkInfo info;
+ ThePager.ReadBenchmarkData((TPagingBenchmark)index, info);
kumemput32(a2,&info,sizeof(info));
}
return KErrNone;
@@ -2078,9 +2333,7 @@
TUint index = (TInt) a1;
if (index >= EMaxPagingBm)
return KErrNotFound;
- NKern::LockSystem();
ThePager.ResetBenchmarkData((TPagingBenchmark)index);
- NKern::UnlockSystem();
}
return KErrNone;
#endif
@@ -2096,28 +2349,39 @@
void DPager::ResetBenchmarkData(TPagingBenchmark aBm)
{
SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm];
+ __SPIN_LOCK_IRQ(iBenchmarkLock);
info.iCount = 0;
info.iTotalTime = 0;
info.iMaxTime = 0;
info.iMinTime = KMaxTInt;
+ __SPIN_UNLOCK_IRQ(iBenchmarkLock);
}
-void DPager::RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime)
+void DPager::RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime, TUint aCount)
{
SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm];
- ++info.iCount;
#if !defined(HIGH_RES_TIMER) || defined(HIGH_RES_TIMER_COUNTS_UP)
TInt64 elapsed = aEndTime - aStartTime;
#else
TInt64 elapsed = aStartTime - aEndTime;
#endif
+ __SPIN_LOCK_IRQ(iBenchmarkLock);
+ info.iCount += aCount;
info.iTotalTime += elapsed;
if (elapsed > info.iMaxTime)
info.iMaxTime = elapsed;
if (elapsed < info.iMinTime)
info.iMinTime = elapsed;
+ __SPIN_UNLOCK_IRQ(iBenchmarkLock);
}
+void DPager::ReadBenchmarkData(TPagingBenchmark aBm, SPagingBenchmarkInfo& aDataOut)
+ {
+ __SPIN_LOCK_IRQ(iBenchmarkLock);
+ aDataOut = iBenchmarkInfo[aBm];
+ __SPIN_UNLOCK_IRQ(iBenchmarkLock);
+ }
+
#endif //__DEMAND_PAGING_BENCHMARKS__
@@ -2129,62 +2393,86 @@
// DPagingRequest
//
-DPagingRequest::DPagingRequest(DPagingRequestPool::TGroup& aPoolGroup)
- : iPoolGroup(aPoolGroup), iUseRegionMemory(0), iUseRegionIndex(0), iUseRegionCount(0)
+DPagingRequest::DPagingRequest()
+ : iMutex(NULL), iUseRegionCount(0)
{
}
-FORCE_INLINE void DPagingRequest::SetUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
+void DPagingRequest::SetUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
{
__ASSERT_SYSTEM_LOCK;
- iUseRegionMemory = aMemory;
- iUseRegionIndex = aIndex;
+ __NK_ASSERT_DEBUG(iUseRegionCount == 0);
+ __NK_ASSERT_DEBUG(aCount > 0 && aCount <= EMaxPages);
+ for (TUint i = 0 ; i < aCount ; ++i)
+ {
+ iUseRegionMemory[i] = aMemory;
+ iUseRegionIndex[i] = aIndex + i;
+ }
+ iUseRegionCount = aCount;
+ }
+
+
+void DPagingRequest::SetUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount)
+ {
+ __ASSERT_SYSTEM_LOCK;
+ __NK_ASSERT_DEBUG(iUseRegionCount == 0);
+ __NK_ASSERT_DEBUG(aCount > 0 && aCount <= EMaxPages);
+ for (TUint i = 0 ; i < aCount ; ++i)
+ {
+ iUseRegionMemory[i] = aMemory[i];
+ iUseRegionIndex[i] = aIndex[i];
+ }
iUseRegionCount = aCount;
}
-TBool DPagingRequest::CheckUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
- {
- return aMemory==iUseRegionMemory
- && TUint(aIndex-iUseRegionIndex) < iUseRegionCount
- && TUint(iUseRegionCount-TUint(aIndex-iUseRegionIndex)) <= aCount;
- }
-
-
-void DPagingRequest::Release()
- {
- NKern::LockSystem();
- SetUse(0,0,0);
- Signal();
- }
-
-
-void DPagingRequest::Wait()
+void DPagingRequest::ResetUse()
{
__ASSERT_SYSTEM_LOCK;
- ++iUsageCount;
- TInt r = iMutex->Wait();
- __NK_ASSERT_ALWAYS(r == KErrNone);
+ __NK_ASSERT_DEBUG(iUseRegionCount > 0);
+ iUseRegionCount = 0;
}
-void DPagingRequest::Signal()
+TBool DPagingRequest::CheckUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
{
- __ASSERT_SYSTEM_LOCK;
- iPoolGroup.Signal(this);
+ if (iUseRegionCount != aCount)
+ return EFalse;
+ for (TUint i = 0 ; i < iUseRegionCount ; ++i)
+ {
+ if (iUseRegionMemory[i] != aMemory || iUseRegionIndex[i] != aIndex + i)
+ return EFalse;
+ }
+ return ETrue;
}
-FORCE_INLINE TBool DPagingRequest::IsCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
+TBool DPagingRequest::CheckUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount)
{
+ if (iUseRegionCount != aCount)
+ return EFalse;
+ for (TUint i = 0 ; i < iUseRegionCount ; ++i)
+ {
+ if (iUseRegionMemory[i] != aMemory[i] || iUseRegionIndex[i] != aIndex[i])
+ return EFalse;
+ }
+ return ETrue;
+ }
+
+
+ TBool DPagingRequest::IsCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
+ {
+ // note this could be optimised as most of the time we will be checking read/read collisions,
+ // both of which will be contiguous
__ASSERT_SYSTEM_LOCK;
- DMemoryObject* memory = iUseRegionMemory;
- TUint index = iUseRegionIndex;
- TUint count = iUseRegionCount;
- // note, this comparison would fail if either region includes page number KMaxTUint,
- // but it isn't possible to create a memory object which is > KMaxTUint pages...
- return (memory == aMemory) && ((index + count) > aIndex) && (index < (aIndex + aCount));
+ for (TUint i = 0 ; i < iUseRegionCount ; ++i)
+ {
+ if (iUseRegionMemory[i] == aMemory &&
+ TUint(iUseRegionIndex[i] - aIndex) < aCount)
+ return ETrue;
+ }
+ return EFalse;
}
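The unsigned subtraction in the test above is the usual single-comparison range check. A minimal sketch of the idiom, using a hypothetical helper name:

    // Equivalent to (aPageIndex >= aStart && aPageIndex < aStart + aCount),
    // provided the region does not wrap around the top of the unsigned range.
    static inline TBool PageInRegion(TUint aPageIndex, TUint aStart, TUint aCount)
        {
        return TUint(aPageIndex - aStart) < aCount;
        }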
@@ -2201,6 +2489,38 @@
iTempMapping.Unmap(aIMBRequired);
}
+//
+// DPoolPagingRequest
+//
+
+DPoolPagingRequest::DPoolPagingRequest(DPagingRequestPool::TGroup& aPoolGroup) :
+ iPoolGroup(aPoolGroup)
+ {
+ }
+
+
+void DPoolPagingRequest::Release()
+ {
+ NKern::LockSystem();
+ ResetUse();
+ Signal();
+ }
+
+
+void DPoolPagingRequest::Wait()
+ {
+ __ASSERT_SYSTEM_LOCK;
+ ++iUsageCount;
+ TInt r = iMutex->Wait();
+ __NK_ASSERT_ALWAYS(r == KErrNone);
+ }
+
+
+void DPoolPagingRequest::Signal()
+ {
+ __ASSERT_SYSTEM_LOCK;
+ iPoolGroup.Signal(this);
+ }
//
// DPageReadRequest
@@ -2208,6 +2528,13 @@
TInt DPageReadRequest::iAllocNext = 0;
+DPageReadRequest::DPageReadRequest(DPagingRequestPool::TGroup& aPoolGroup) :
+ DPoolPagingRequest(aPoolGroup)
+ {
+ // allocate space for mapping pages whilst they're being loaded...
+ iTempMapping.Alloc(EMaxPages);
+ }
+
TInt DPageReadRequest::Construct()
{
// allocate id and mutex...
@@ -2219,9 +2546,6 @@
if(r!=KErrNone)
return r;
- // allocate space for mapping pages whilst they're being loaded...
- iTempMapping.Alloc(EMaxPages);
-
// create memory buffer...
TUint bufferSize = EMaxPages+1;
DMemoryObject* bufferMemory;
@@ -2248,23 +2572,20 @@
// DPageWriteRequest
//
-TInt DPageWriteRequest::iAllocNext = 0;
-
-TInt DPageWriteRequest::Construct()
+
+DPageWriteRequest::DPageWriteRequest()
{
- // allocate id and mutex...
- TUint id = (TUint)__e32_atomic_add_ord32(&iAllocNext, 1);
- _LIT(KLitPagingRequest,"PageWriteRequest-");
- TBuf<sizeof("PageWriteRequest-")+10> mutexName(KLitPagingRequest);
- mutexName.AppendNum(id);
- TInt r = K::MutexCreate(iMutex, mutexName, NULL, EFalse, KMutexOrdPageOut);
- if(r!=KErrNone)
- return r;
-
+ iMutex = ThePageCleaningLock;
// allocate space for mapping pages whilst they're being loaded...
- iTempMapping.Alloc(EMaxPages);
-
- return r;
+ iTempMapping.Alloc(KMaxPagesToClean);
+ }
+
+
+void DPageWriteRequest::Release()
+ {
+ NKern::LockSystem();
+ ResetUse();
+ NKern::UnlockSystem();
}
@@ -2272,11 +2593,10 @@
// DPagingRequestPool
//
-DPagingRequestPool::DPagingRequestPool(TUint aNumPageReadRequest,TUint aNumPageWriteRequest)
- : iPageReadRequests(aNumPageReadRequest), iPageWriteRequests(aNumPageWriteRequest)
+DPagingRequestPool::DPagingRequestPool(TUint aNumPageReadRequest, TBool aWriteRequest)
+ : iPageReadRequests(aNumPageReadRequest)
{
TUint i;
-
for(i=0; i<aNumPageReadRequest; ++i)
{
DPageReadRequest* req = new DPageReadRequest(iPageReadRequests);
@@ -2287,14 +2607,10 @@
iPageReadRequests.iFreeList.Add(req);
}
- for(i=0; i<aNumPageWriteRequest; ++i)
+ if (aWriteRequest)
{
- DPageWriteRequest* req = new DPageWriteRequest(iPageWriteRequests);
- __NK_ASSERT_ALWAYS(req);
- TInt r = req->Construct();
- __NK_ASSERT_ALWAYS(r==KErrNone);
- iPageWriteRequests.iRequests[i] = req;
- iPageWriteRequests.iFreeList.Add(req);
+ iPageWriteRequest = new DPageWriteRequest();
+ __NK_ASSERT_ALWAYS(iPageWriteRequest);
}
}
@@ -2309,24 +2625,23 @@
{
NKern::LockSystem();
- DPagingRequest* req;
-
- // if we collide with page write operation...
- req = iPageWriteRequests.FindCollision(aMemory,aIndex,aCount);
- if(req)
+ DPoolPagingRequest* req;
+
+ // check for collision with existing write
+ if(iPageWriteRequest && iPageWriteRequest->IsCollisionContiguous(aMemory,aIndex,aCount))
{
- // wait until write completes...
- req->Wait();
- req->Signal();
+ NKern::UnlockSystem();
+ PageCleaningLock::Lock();
+ PageCleaningLock::Unlock();
return 0; // caller expected to retry if needed
}
// get a request object to use...
req = iPageReadRequests.GetRequest(aMemory,aIndex,aCount);
- // check no new requests collide with us...
- if(iPageWriteRequests.FindCollision(aMemory,aIndex,aCount)
- || iPageReadRequests.FindCollision(aMemory,aIndex,aCount))
+ // check no new read or write requests collide with us...
+ if ((iPageWriteRequest && iPageWriteRequest->IsCollisionContiguous(aMemory,aIndex,aCount)) ||
+ iPageReadRequests.FindCollisionContiguous(aMemory,aIndex,aCount))
{
// another operation is colliding with this region, give up and retry...
req->Signal();
@@ -2334,61 +2649,57 @@
}
// we have a request object which we can use...
- req->SetUse(aMemory,aIndex,aCount);
+ req->SetUseContiguous(aMemory,aIndex,aCount);
NKern::UnlockSystem();
return (DPageReadRequest*)req;
}
-DPageWriteRequest* DPagingRequestPool::AcquirePageWriteRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
+DPageWriteRequest* DPagingRequestPool::AcquirePageWriteRequest(DMemoryObject** aMemory, TUint* aIndex, TUint aCount)
{
+ __NK_ASSERT_DEBUG(iPageWriteRequest);
+ __NK_ASSERT_DEBUG(PageCleaningLock::IsHeld());
+
NKern::LockSystem();
- DPagingRequest* req;
-
- for(;;)
- {
- // get a request object to use...
- req = iPageWriteRequests.GetRequest(aMemory,aIndex,aCount);
-
- if(iPageWriteRequests.FindCollision(aMemory,aIndex,aCount))
- {
- // another write operation is colliding with this region, give up and retry...
- req->Signal();
- // Reacquire the system lock as Signal() above will release it.
- NKern::LockSystem();
- continue;
- }
-
- break;
- }
-
- // we have a request object which we can use...
- req->SetUse(aMemory,aIndex,aCount);
-
+ // Collision with existing read requests is not possible here. For a page to be read it must
+ // not be present, and for it to be written it must be present and dirty. There is no way for a
+ // page to go between these states without an intervening read on an uninitialised (freshly
+ // committed) page, which will wait on the first read request. In other words something like
+ // this:
+ //
+ // read (blocks), decommit, re-commit, read (waits on mutex), write (now no pending reads!)
+ //
+ // Note that a read request can be outstanding and appear to collide with this write, but only
+ // in the case when the thread making the read has blocked just after acquiring the request but
+ // before it checks whether the read is still necessary. This makes it difficult to assert
+ // that no collisions take place.
+
+ iPageWriteRequest->SetUseDiscontiguous(aMemory,aIndex,aCount);
NKern::UnlockSystem();
- return (DPageWriteRequest*)req;
+
+ return iPageWriteRequest;
}
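For illustration, a hedged sketch of how a caller such as the page cleaner might drive this function; the pool pointer, page count and surrounding write logic are assumptions, but the locking order matches the asserts above:

    DMemoryObject* memory[KMaxPagesToClean];   // filled in by page selection (hypothetical)
    TUint index[KMaxPagesToClean];
    TUint count = 1;                           // 1..KMaxPagesToClean in practice (placeholder value)
    PageCleaningLock::Lock();                  // must be held, as asserted above
    DPageWriteRequest* req = pool->AcquirePageWriteRequest(memory, index, count);
    // ... write the pages out via the paging device using req's temporary mapping ...
    req->Release();                            // clears the use region; no per-request mutex to signal
    PageCleaningLock::Unlock();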
DPagingRequestPool::TGroup::TGroup(TUint aNumRequests)
{
iNumRequests = aNumRequests;
- iRequests = new DPagingRequest*[aNumRequests];
+ iRequests = new DPoolPagingRequest*[aNumRequests];
__NK_ASSERT_ALWAYS(iRequests);
}
-DPagingRequest* DPagingRequestPool::TGroup::FindCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
+DPoolPagingRequest* DPagingRequestPool::TGroup::FindCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
{
__ASSERT_SYSTEM_LOCK;
- DPagingRequest** ptr = iRequests;
- DPagingRequest** ptrEnd = ptr+iNumRequests;
+ DPoolPagingRequest** ptr = iRequests;
+ DPoolPagingRequest** ptrEnd = ptr+iNumRequests;
while(ptr<ptrEnd)
{
- DPagingRequest* req = *ptr++;
- if(req->IsCollision(aMemory,aIndex,aCount))
+ DPoolPagingRequest* req = *ptr++;
+ if(req->IsCollisionContiguous(aMemory,aIndex,aCount))
return req;
}
return 0;
@@ -2397,16 +2708,16 @@
static TUint32 RandomSeed = 33333;
-DPagingRequest* DPagingRequestPool::TGroup::GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
+DPoolPagingRequest* DPagingRequestPool::TGroup::GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
{
__NK_ASSERT_DEBUG(iNumRequests > 0);
// try using an existing request which collides with this region...
- DPagingRequest* req = FindCollision(aMemory,aIndex,aCount);
+ DPoolPagingRequest* req = FindCollisionContiguous(aMemory,aIndex,aCount);
if(!req)
{
// use a free request...
- req = (DPagingRequest*)iFreeList.GetFirst();
+ req = (DPoolPagingRequest*)iFreeList.GetFirst();
if(req)
{
// free requests aren't being used...
@@ -2429,7 +2740,7 @@
}
-void DPagingRequestPool::TGroup::Signal(DPagingRequest* aRequest)
+void DPagingRequestPool::TGroup::Signal(DPoolPagingRequest* aRequest)
{
// if there are no threads waiting on the mutex then return it to the free pool...
__NK_ASSERT_DEBUG(aRequest->iUsageCount > 0);
@@ -2457,8 +2768,8 @@
TInt r = KErrNotSupported; // Will return this if unsupported device type is installed
// create the pools of page out and page in requests...
- const TInt writeReqs = (aDevice->iType & DPagingDevice::EData) ? KPagingRequestsPerDevice : 0;
- aDevice->iRequestPool = new DPagingRequestPool(KPagingRequestsPerDevice,writeReqs);
+ const TBool writeReq = (aDevice->iType & DPagingDevice::EData) != 0;
+ aDevice->iRequestPool = new DPagingRequestPool(KPagingRequestsPerDevice, writeReq);
if(!aDevice->iRequestPool)
{
r = KErrNoMemory;
@@ -2488,6 +2799,9 @@
if (K::MemModelAttributes & (EMemModelAttrRomPaging | EMemModelAttrCodePaging | EMemModelAttrDataPaging))
TheThrashMonitor.Start();
+
+ if (K::MemModelAttributes & EMemModelAttrDataPaging)
+ PageCleaner::Start();
exit:
TRACEB(("Kern::InstallPagingDevice returns %d",r));
@@ -2637,3 +2951,32 @@
}
+
+//
+// PageCleaningLock
+//
+
+_LIT(KLitPageCleaningLock,"PageCleaningLock");
+
+void PageCleaningLock::Init()
+ {
+ __NK_ASSERT_DEBUG(!ThePageCleaningLock);
+ TInt r = Kern::MutexCreate(ThePageCleaningLock, KLitPageCleaningLock, KMutexOrdPageOut);
+ __NK_ASSERT_ALWAYS(r == KErrNone);
+ }
+
+void PageCleaningLock::Lock()
+ {
+ Kern::MutexWait(*ThePageCleaningLock);
+ }
+
+
+void PageCleaningLock::Unlock()
+ {
+ Kern::MutexSignal(*ThePageCleaningLock);
+ }
+
+TBool PageCleaningLock::IsHeld()
+ {
+ return ThePageCleaningLock->iCleanup.iThread == &Kern::CurrentThread();
+ }
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mpager.h Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mpager.h Tue May 11 17:28:22 2010 +0300
@@ -21,6 +21,14 @@
#ifndef MPAGER_H
#define MPAGER_H
+#include "mmu.h"
+#include <kern_priv.h>
+
+/**
+Maximum number of pages to attempt to clean in one go.
+*/
+const TInt KMaxPagesToClean = 4;
+
struct SVMCacheInfo;
class DMemModelThread;
class DMemoryMappingBase;
@@ -61,9 +69,9 @@
if (!aPageInfo.IsDirty())
{// This is the first mapping to write to the page so increase the
// dirty page count.
- aPageInfo.SetWritable();
iNumberOfDirtyPages++;
}
+ aPageInfo.SetWritable();
}
FORCE_INLINE void SetClean(SPageInfo& aPageInfo)
@@ -246,6 +254,31 @@
void UnreservePages(TUint& aCount);
/**
+ Indicates whether there are any dirty pages available to be cleaned by #CleanSomePages.
+
+ This is called by the page cleaner to work out whether it has any work to do.
+
+ @return Whether there are any dirty pages in the oldest section of the live list.
+ */
+ TBool HasPagesToClean();
+
+ /**
+ Attempt to clean one or more dirty pages in one go.
+
+ Called by the page cleaner to clean pages and by PageInAllocPage when it needs to steal a page from
+ the live list, but the oldest clean list is empty.
+
+ May or may not succeed in actually cleaning any pages.
+
+ @param aBackground Whether the activity should be ignored when determining whether the paging
+ device is busy. This is used by the page cleaner.
+
+ @return The number of pages this method attempted to clean. If it returns zero, there were no
+ pages eligible to be cleaned.
+ */
+ TInt CleanSomePages(TBool aBackground);
+
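An illustrative loop showing how a cleaner thread could drive these two calls (the actual PageCleaner thread is implemented elsewhere in this change; locking and wake-up handling are elided):

    while (ThePager.HasPagesToClean())
        {
        if (ThePager.CleanSomePages(ETrue /*aBackground*/) == 0)
            break;   // nothing was eligible this time round
        }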
+ /**
Enumeration of instrumented paging events which only require the
SPageInfo object as an argument.
*/
@@ -306,6 +339,15 @@
*/
TInt DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TBool aBlockRest);
+ /**
+ Attempt to discard the specified page and then allocate a page of type aPageType
+ in its place.
+
+ @param aPageInfo The page info of the page to discard.
+ @param aPageType The new page type to allocate into aPageInfo's physical address.
+ */
+ TInt DiscardAndAllocPage(SPageInfo* aPageInfo, TZonePageType aPageType);
+
/**
Update any live list links to replace the old page with the new page.
@@ -357,10 +399,23 @@
void RemovePage(SPageInfo* aPageInfo);
/**
+ Try to remove the oldest page from the live page list and perform #StealPage.
+
+ @param aPageInfoOut Set to the SPageInfo pointer for the stolen page if any.
+
+ @return KErrNone on success, KErrInUse if stealing failed or 1 to indicate that the oldest page
+ was dirty and the PageCleaning mutex was not held.
+
+ @pre MmuLock held
+ @post MmuLock left unchanged.
+ */
+ TInt TryStealOldestPage(SPageInfo*& aPageInfoOut);
+
+ /**
Remove the oldest page from the live page list and perform #StealPage.
@pre MmuLock held
- @post MmuLock left unchanged.
+ @post MmuLock held (but may have been released by this function)
*/
SPageInfo* StealOldestPage();
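One plausible caller pattern implied by the return value of 1 (dirty oldest page while the PageCleaning mutex is not held); this is a sketch only, assuming the caller holds MmuLock as the precondition requires:

    SPageInfo* pageInfo = NULL;
    TInt r = TryStealOldestPage(pageInfo);
    if (r == 1)
        {
        // Oldest page was dirty and we didn't hold the PageCleaning mutex:
        // drop MmuLock, take the mutex, then retry under both.
        MmuLock::Unlock();
        PageCleaningLock::Lock();
        MmuLock::Lock();
        r = TryStealOldestPage(pageInfo);
        }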
@@ -371,6 +426,7 @@
if the page had been allocated by Mmu::AllocRam.
@pre RamAlloc mutex held
+ @pre If the page is dirty the PageCleaning lock must be held.
@pre MmuLock held
@post MmuLock held (but may have been released by this function)
*/
@@ -428,6 +484,22 @@
SPageInfo* PageInAllocPage(Mmu::TRamAllocFlags aAllocFlags);
/**
+ Called by CleanSomePages() to determine which pages should be cleaned.
+
+ This deals with the complexity of page colouring, which means that pages can only be mapped at
+ certain locations. When cleaning multiple pages at once we need to find a set of pages that we
+ can map in memory sequentially.
+
+ @pre MmuLock held
+
+ @param aPageInfosOut Pointer to an array of SPageInfo pointers, which must be at least
+ KMaxPagesToClean long. This will be filled in to indicate the pages to clean.
+
+ @return The number of pages to clean.
+ */
+ TInt SelectPagesToClean(SPageInfo** aPageInfosOut);
+
+ /**
If the number of young pages exceeds that specified by iYoungOldRatio then a
single page is made 'old'. Call this after adding a new 'young' page.
@@ -521,13 +593,11 @@
TUint iYoungCount; /**< Number of young pages */
SDblQue iOldList; /**< Head of 'old' page list. */
TUint iOldCount; /**< Number of old pages */
-#ifdef _USE_OLDEST_LISTS
SDblQue iOldestCleanList; /**< Head of 'oldestClean' page list. */
TUint iOldestCleanCount; /**< Number of 'oldestClean' pages */
SDblQue iOldestDirtyList; /**< Head of 'oldestDirty' page list. */
TUint iOldestDirtyCount; /**< Number of 'oldestDirty' pages */
TUint16 iOldOldestRatio; /**< Ratio of old pages to oldest to clean and dirty in the live page list*/
-#endif
TUint iNumberOfFreePages;
TUint iNumberOfDirtyPages; /**< The total number of dirty pages in the paging cache. Protected by MmuLock */
TUint iInitMinimumPageCount;/**< Initial value for iMinimumPageCount */
@@ -539,8 +609,10 @@
#ifdef __DEMAND_PAGING_BENCHMARKS__
public:
- void RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime);
+ void RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime, TUint aCount);
void ResetBenchmarkData(TPagingBenchmark aBm);
+ void ReadBenchmarkData(TPagingBenchmark aBm, SPagingBenchmarkInfo& aDataOut);
+ TSpinLock iBenchmarkLock;
SPagingBenchmarkInfo iBenchmarkInfo[EMaxPagingBm];
#endif //__DEMAND_PAGING_BENCHMARKS__
};
@@ -551,12 +623,14 @@
#ifdef __DEMAND_PAGING_BENCHMARKS__
#define START_PAGING_BENCHMARK TUint32 _bmStart = NKern::FastCounter()
-#define END_PAGING_BENCHMARK(bm) ThePager.RecordBenchmarkData(bm, _bmStart, NKern::FastCounter())
+#define END_PAGING_BENCHMARK(bm) ThePager.RecordBenchmarkData(bm, _bmStart, NKern::FastCounter(), 1)
+#define END_PAGING_BENCHMARK_N(bm, n) ThePager.RecordBenchmarkData(bm, _bmStart, NKern::FastCounter(), (n))
#else
#define START_PAGING_BENCHMARK
#define END_PAGING_BENCHMARK(bm)
+#define END_PAGING_BENCHMARK_N(bm, n)
#endif // __DEMAND_PAGING_BENCHMARKS__
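A usage sketch of the new _N variant; the enumerator name and the timed operation are placeholders, not values defined by this change:

    {
    START_PAGING_BENCHMARK;
    TInt pages = DoCleanPages();                    // hypothetical operation returning a page count
    END_PAGING_BENCHMARK_N(EPagingBmSomeOp, pages); // records one elapsed time covering 'pages' events
    }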
@@ -698,7 +772,7 @@
const TInt KPagingRequestsPerDevice = 2;
-class DPagingRequest;
+class DPoolPagingRequest;
class DPageReadRequest;
class DPageWriteRequest;
@@ -708,9 +782,9 @@
class DPagingRequestPool : public DBase
{
public:
- DPagingRequestPool(TUint aNumPageReadRequest,TUint aNumPageWriteRequest);
+ DPagingRequestPool(TUint aNumPageReadRequest, TBool aWriteRequest);
DPageReadRequest* AcquirePageReadRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
- DPageWriteRequest* AcquirePageWriteRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
+ DPageWriteRequest* AcquirePageWriteRequest(DMemoryObject** aMemory, TUint* aIndex, TUint aCount);
private:
~DPagingRequestPool();
private:
@@ -718,18 +792,18 @@
{
public:
TGroup(TUint aNumRequests);
- DPagingRequest* FindCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
- DPagingRequest* GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
- void Signal(DPagingRequest* aRequest);
+ DPoolPagingRequest* FindCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
+ DPoolPagingRequest* GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
+ void Signal(DPoolPagingRequest* aRequest);
public:
TUint iNumRequests;
- DPagingRequest** iRequests;
+ DPoolPagingRequest** iRequests;
SDblQue iFreeList;
};
TGroup iPageReadRequests;
- TGroup iPageWriteRequests;
+ DPageWriteRequest* iPageWriteRequest;
- friend class DPagingRequest;
+ friend class DPoolPagingRequest;
friend class DPageReadRequest;
friend class DPageWriteRequest;
};
@@ -741,45 +815,59 @@
class DPagingRequest : public SDblQueLink
{
public:
- DPagingRequest(DPagingRequestPool::TGroup& aPoolGroup);
- void Release();
- void Wait();
- void Signal();
- void SetUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
- TBool CheckUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
- TBool IsCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
- TLinAddr MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages);
- void UnmapPages(TBool aIMBRequired);
-public:
- TThreadMessage iMessage; /**< Used by the media driver to queue requests */
- DMutex* iMutex; /**< A mutex for synchronisation and priority inheritance. */
- TInt iUsageCount;/**< How many threads are using or waiting for this object. */
- TLinAddr iBuffer; /**< A buffer to read compressed data into. Size is EMaxPages+1 pages.*/
-protected:
- Mmu::TTempMapping iTempMapping;
-private:
- DPagingRequestPool::TGroup& iPoolGroup;
- // used to identify memory request is used for...
- DMemoryObject* iUseRegionMemory;
- TUint iUseRegionIndex;
- TUint iUseRegionCount;
- };
-
-
-/**
-Resources needed to service a page in request.
-*/
-class DPageReadRequest : public DPagingRequest
- {
-public:
- inline DPageReadRequest(DPagingRequestPool::TGroup& aPoolGroup)
- : DPagingRequest(aPoolGroup)
- {}
- TInt Construct();
enum
{
EMaxPages = 4
};
+ DPagingRequest();
+ TLinAddr MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages);
+ void UnmapPages(TBool aIMBRequired);
+ void SetUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
+ void SetUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount);
+ void ResetUse();
+ TBool CheckUseContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
+ TBool CheckUseDiscontiguous(DMemoryObject** aMemory, TUint* aIndex, TUint aCount);
+ TBool IsCollisionContiguous(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
+public:
+ DMutex* iMutex; /**< A mutex for synchronisation and priority inheritance. */
+protected:
+ Mmu::TTempMapping iTempMapping;
+private:
+ // used to identify memory request is used for...
+ TUint iUseRegionCount;
+ DMemoryObject* iUseRegionMemory[EMaxPages];
+ TUint iUseRegionIndex[EMaxPages];
+ };
+
+
+__ASSERT_COMPILE(DPagingRequest::EMaxPages >= KMaxPagesToClean);
+
+
+/**
+A paging request that is part of a pool of similar request objects.
+*/
+class DPoolPagingRequest : public DPagingRequest
+ {
+public:
+ DPoolPagingRequest(DPagingRequestPool::TGroup& aPoolGroup);
+ void Release();
+ void Wait();
+ void Signal();
+public:
+ TInt iUsageCount; /**< How many threads are using or waiting for this object. */
+private:
+ DPagingRequestPool::TGroup& iPoolGroup;
+ };
+
+
+/**
+Resources needed to service a page in request.
+*/
+class DPageReadRequest : public DPoolPagingRequest
+ {
+public:
+ DPageReadRequest(DPagingRequestPool::TGroup& aPoolGroup);
+ TInt Construct();
static TUint ReservedPagesRequired();
private:
~DPageReadRequest(); // can't delete
@@ -804,18 +892,45 @@
class DPageWriteRequest : public DPagingRequest
{
public:
- inline DPageWriteRequest(DPagingRequestPool::TGroup& aPoolGroup)
- : DPagingRequest(aPoolGroup)
- {}
- TInt Construct();
- enum
- {
- EMaxPages = 1
- };
+ DPageWriteRequest();
+ void Release();
private:
~DPageWriteRequest(); // can't delete
-private:
- static TInt iAllocNext;
+ };
+
+
+/**
+Class providing access to the mutex used to protect page cleaning operations;
+this is the mutex ThePageCleaningLock created by #Init.
+*/
+class PageCleaningLock
+ {
+public:
+ /**
+ Acquire the lock.
+ The lock may be acquired multiple times by a thread, and will remain locked
+ until #Unlock has been used enough times to balance this.
+ */
+ static void Lock();
+
+ /**
+ Release the lock.
+
+ @pre The current thread has previously acquired the lock.
+ */
+ static void Unlock();
+
+ /**
+ Return true if the current thread holds the lock.
+ This is used for debug checks.
+ */
+ static TBool IsHeld();
+
+ /**
+ Create the lock.
+ Called by DPager::Init3().
+ */
+ static void Init();
};
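Typical usage, mirroring how DPager::FlushAll uses the lock earlier in this change:

    PageCleaningLock::Lock();
    // ... work on dirty pages / the oldest dirty list ...
    __NK_ASSERT_DEBUG(PageCleaningLock::IsHeld());
    PageCleaningLock::Unlock();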
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mptalloc.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mptalloc.cpp Tue May 11 17:28:22 2010 +0300
@@ -54,10 +54,6 @@
uses memory from #ThePager which will reclaim paged memory if necessary.
Providing the live list always has #DPager::iMinYoungPages, this guarantees that
handling page faults can never fail by running out of memory.
-
-TODO: In really pathological situations page table allocation can fail due to
-being out of virtual address space to map the table, this needs to be prevented
-from happening when handling demand paging faults.
*/
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mrom.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mrom.cpp Tue May 11 17:28:22 2010 +0300
@@ -530,15 +530,18 @@
TInt DRomMemoryManager::ReadPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageReadRequest* aRequest)
{
- __NK_ASSERT_DEBUG(aRequest->CheckUse(aMemory,aIndex,aCount));
+ __NK_ASSERT_DEBUG(aRequest->CheckUseContiguous(aMemory,aIndex,aCount));
+ __ASSERT_CRITICAL;
TLinAddr linAddr = aRequest->MapPages(aIndex,aCount,aPages);
TInt r = KErrNone;
+ TThreadMessage message;
const TInt readUnitShift = iDevice->iReadUnitShift;
for(; aCount; ++aIndex, --aCount, linAddr+=KPageSize)
{
+
START_PAGING_BENCHMARK;
if(!iRomPageIndex)
{
@@ -548,7 +551,7 @@
// by readUnitShift.
const TInt dataOffset = aIndex << KPageShift;
START_PAGING_BENCHMARK;
- r = iDevice->Read( const_cast<TThreadMessage*>(&aRequest->iMessage),
+ r = iDevice->Read( &message,
linAddr, dataOffset >> readUnitShift,
KPageSize >> readUnitShift, DPagingDevice::EDriveRomPaging);
__NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocated memory, therefore can't fail with KErrNoMemory
@@ -571,13 +574,12 @@
__NK_ASSERT_ALWAYS(romPageInfo->iPagingAttributes & SRomPageInfo::EPageable);
// Read data for page...
- TThreadMessage* msg = const_cast<TThreadMessage*>(&aRequest->iMessage);
const TLinAddr buffer = aRequest->iBuffer;
const TUint readStart = dataOffset >> readUnitShift;
const TUint readSize = ((dataOffset + dataSize - 1) >> readUnitShift) - readStart + 1;
__NK_ASSERT_DEBUG((readSize << readUnitShift) <= (DPageReadRequest::EMaxPages << KPageShift));
START_PAGING_BENCHMARK;
- r = iDevice->Read(msg, buffer, readStart, readSize, DPagingDevice::EDriveRomPaging);
+ r = iDevice->Read(&message, buffer, readStart, readSize, DPagingDevice::EDriveRomPaging);
__NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocated memory, therefore can't fail with KErrNoMemory
END_PAGING_BENCHMARK(EPagingBmReadMedia);
if(r==KErrNone)
--- a/kernel/eka/memmodel/epoc/mmubase/mmubase.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/memmodel/epoc/mmubase/mmubase.cpp Tue May 11 17:28:22 2010 +0300
@@ -483,7 +483,7 @@
TInt MmuBase::AllocPhysicalRam(TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
{
__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam() size=%x align=%d",aSize,aAlign));
- TInt r=AllocContiguousRam(aSize, aPhysAddr, EPageFixed, aAlign);
+ TInt r=AllocContiguousRam(aSize, aPhysAddr, aAlign);
if (r!=KErrNone)
{
iAllocFailed=ETrue;
@@ -516,7 +516,7 @@
TInt MmuBase::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
{
__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam() size=0x%x align=%d", aSize, aAlign));
- TInt r = ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, EPageFixed, aAlign);
+ TInt r = ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, aAlign);
if (r!=KErrNone)
{
iAllocFailed=ETrue;
@@ -714,20 +714,19 @@
}
-TInt MmuBase::AllocContiguousRam(TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aPageType, TInt aAlign, TUint aBlockedZoneId, TBool aBlockRest)
+TInt MmuBase::AllocContiguousRam(TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
{
#ifdef _DEBUG
if(K::CheckForSimulatedAllocFail())
return KErrNoMemory;
#endif
- __NK_ASSERT_DEBUG(aPageType == EPageFixed);
TUint contigPages = (aSize + KPageSize - 1) >> KPageShift;
- TInt r = iRamPageAllocator->AllocContiguousRam(contigPages, aPhysAddr, aPageType, aAlign, aBlockedZoneId, aBlockRest);
+ TInt r = iRamPageAllocator->AllocContiguousRam(contigPages, aPhysAddr, aAlign);
if (r == KErrNoMemory && contigPages > KMaxFreeableContiguousPages)
{// Allocation failed but as this is a large allocation flush the RAM cache
// and reattempt the allocation as large allocation wouldn't discard pages.
iRamCache->FlushAll();
- r = iRamPageAllocator->AllocContiguousRam(contigPages, aPhysAddr, aPageType, aAlign, aBlockedZoneId, aBlockRest);
+ r = iRamPageAllocator->AllocContiguousRam(contigPages, aPhysAddr, aAlign);
}
return r;
}
@@ -739,16 +738,15 @@
@param aZoneIdCount The number of IDs listed in aZoneIdList
@param aSize The number of bytes to allocate
@param aPhysAddr Will receive the physical base address of the allocated RAM
-@param aPageType The type of the pages being allocated
@param aAlign The log base 2 alginment required
*/
-TInt MmuBase::ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aPageType, TInt aAlign)
+TInt MmuBase::ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
{
#ifdef _DEBUG
if(K::CheckForSimulatedAllocFail())
return KErrNoMemory;
#endif
- return iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, aPageType, aAlign);
+ return iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, aAlign);
}
SPageInfo* SPageInfo::SafeFromPhysAddr(TPhysAddr aAddress)
@@ -5225,6 +5223,12 @@
// Misc DPagingDevice methods
+EXPORT_C NFastMutex* DPagingDevice::NotificationLock()
+ {
+ // use the system lock
+ return &TheScheduler.iLock;
+ }
+
EXPORT_C void DPagingDevice::NotifyIdle()
{
// Not used on this memory model
--- a/kernel/eka/memmodel/epoc/mmubase/ramalloc.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/memmodel/epoc/mmubase/ramalloc.cpp Tue May 11 17:28:22 2010 +0300
@@ -354,13 +354,13 @@
aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));
Panic(EZonesCountErr);
}
- __ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent));
__KTRACE_OPT(KMMU2,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc));
__KTRACE_OPT(KMMU2,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown],
aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));
- if (iAllowBmaVerify)
+ if (!iContiguousReserved)
{
+ __ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent));
TBitMapAllocator& bmaType = *(aZone->iBma[(aType != EPageUnknown)? aType : EPageFixed]);
TUint allocPages;
if (aType == EPageFixed || aType == EPageUnknown)
@@ -495,13 +495,13 @@
aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));
Panic(EZonesCountErr);
}
- __ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent));
__KTRACE_OPT(KMMU2,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc));
__KTRACE_OPT(KMMU2,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown],
aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));
- if (iAllowBmaVerify)
+ if (!iContiguousReserved)
{
+ __ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent));
TBitMapAllocator& bmaType = *(aZone->iBma[(aType != EPageUnknown)? aType : EPageFixed]);
TUint allocPages;
if (aType == EPageFixed || aType == EPageUnknown)
@@ -968,7 +968,7 @@
// Temporarily fill preference list so SetPhysicalRamState can succeed
#ifdef _DEBUG
// Block bma verificaitons as bma and alloc counts aren't consistent yet.
- iAllowBmaVerify = EFalse;
+ iContiguousReserved = 1;
#endif
const SZone* const lastZone = iZones + iNumZones;
zone = iZones;
@@ -984,7 +984,7 @@
}
#ifdef _DEBUG
// Only now is it safe to enable bma verifications
- iAllowBmaVerify = ETrue;
+ iContiguousReserved = 0;
#endif
///////////////////////////////////////////////////////////////////////////
@@ -1135,6 +1135,7 @@
}
}
+
TInt DRamAllocator::MarkPageAllocated(TPhysAddr aAddr, TZonePageType aType)
{
__KTRACE_OPT(KMMU,Kern::Printf("DRamAllocator::MarkPageAllocated %08x",aAddr));
@@ -1160,7 +1161,12 @@
return KErrAlreadyExists; // page is already allocated
}
bmaAll.Alloc(n,1);
- bmaType.Alloc(n,1);
+ if (bmaType.NotAllocated(n,1))
+ bmaType.Alloc(n,1);
+#ifdef _DEBUG
+ else // Allow this page to already be reserved in bmaType as AllocContiguousRam() may have done this.
+ __NK_ASSERT_DEBUG(aType == EPageFixed);
+#endif
--iTotalFreeRamPages;
ZoneAllocPages(z, 1, aType);
__KTRACE_OPT(KMMU,Kern::Printf("Total free RAM pages now = %d",iTotalFreeRamPages));
@@ -1171,6 +1177,7 @@
return KErrNone;
}
+
TInt DRamAllocator::FreeRamPage(TPhysAddr aAddr, TZonePageType aType)
{
__KTRACE_OPT(KMMU,Kern::Printf("FreeRamPage %08x",aAddr));
@@ -1201,17 +1208,27 @@
__KTRACE_OPT(KMMU2,Kern::Printf("Zone index %d page index %04x",z-iZones,n));
TBitMapAllocator& bmaAll = *(z->iBma[KBmaAllPages]);
TBitMapAllocator& bmaType = *(z->iBma[aType]);
- bmaAll.Free(n);
+
bmaType.Free(n);
- ++iTotalFreeRamPages;
- ZoneFreePages(z, 1, aType);
-
+ if (iContiguousReserved && aType != EPageFixed && z->iBma[EPageFixed]->NotFree(n, 1))
+ {// This page has been reserved by AllocContiguous() so don't free it
+ // but allocate it as fixed.
+ ZoneFreePages(z, 1, aType);
+ ZoneAllocPages(z, 1, EPageFixed);
+ }
+ else
+ {
+ bmaAll.Free(n);
+ ++iTotalFreeRamPages;
+ ZoneFreePages(z, 1, aType);
+ }
#ifdef BTRACE_RAM_ALLOCATOR
BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocFreePage, aType, aAddr);
#endif
return KErrNone;
}
+
void DRamAllocator::FreeRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType)
{
__KTRACE_OPT(KMMU,Kern::Printf("FreeRamPages count=%08x",aNumPages));
@@ -1259,11 +1276,37 @@
pa += KPageSize;
}
__KTRACE_OPT(KMMU2,Kern::Printf("%d consecutive pages, zp_rem=%x, %d remaining pages",n,zp_rem,aNumPages));
- bmaAll.Free(ix,n);
TBitMapAllocator& bmaType = *(z->iBma[aType]);
bmaType.Free(ix,n);
- iTotalFreeRamPages += n;
- ZoneFreePages(z, n, aType);
+
+ if (iContiguousReserved && aType != EPageFixed)
+ {// See if a page has been reserved by AllocContiguous() in this range.
+ TUint pagesFreed = 0;
+ TUint allocStart = ix;
+ TUint freeOffset = ix;
+ TUint endOffset = ix + n - 1;
+ while (freeOffset <= endOffset)
+ {
+ TUint runLength = NextAllocatedRun(z, allocStart, endOffset, EPageFixed);
+ if (allocStart > freeOffset)
+ {
+ TUint freed = allocStart - freeOffset;
+ bmaAll.Free(freeOffset, freed);
+ pagesFreed += freed;
+ }
+ allocStart += runLength;
+ freeOffset = allocStart;
+ }
+ iTotalFreeRamPages += pagesFreed;
+ ZoneFreePages(z, n, aType);
+ ZoneAllocPages(z, n - pagesFreed, EPageFixed);
+ }
+ else
+ {
+ bmaAll.Free(ix,n);
+ iTotalFreeRamPages += n;
+ ZoneFreePages(z, n, aType);
+ }
#ifdef BTRACE_RAM_ALLOCATOR
BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocFreePages, aType, n, first_pa);
#endif
@@ -1273,6 +1316,7 @@
#endif
}
+
/**
Attempt to clear upto the required amount of discardable or movable pages
from the RAM zone.
@@ -1464,7 +1508,7 @@
{// Allocating as part of a general defragmentation and
// can't allocate without using a RAM zone less preferable than
// the current least prefeable RAM zone with movable and/or
- //discardable.
+ // discardable.
__NK_ASSERT_DEBUG(numMissing);
goto exit;
}
@@ -1679,29 +1723,173 @@
return r;
}
+
+#if !defined(__MEMMODEL_MULTIPLE__) && !defined(__MEMMODEL_MOVING__)
+void DRamAllocator::BlockContiguousRegion(TPhysAddr aAddrBase, TUint aNumPages)
+ {
+ // Shouldn't be asked to block zero pages, addrEndPage would be wrong if we did.
+ __NK_ASSERT_DEBUG(aNumPages);
+ TPhysAddr addr = aAddrBase;
+ TPhysAddr addrEndPage = aAddrBase + ((aNumPages - 1) << KPageShift);
+ TInt tmpOffset;
+ SZone* endZone = GetZoneAndOffset(addrEndPage, tmpOffset);
+ SZone* tmpZone;
+ do
+ {
+ tmpZone = GetZoneAndOffset(addr, tmpOffset);
+ __NK_ASSERT_DEBUG(tmpZone != NULL);
+ TUint runLength = (addrEndPage < tmpZone->iPhysEnd)?
+ ((addrEndPage - addr) >> KPageShift) + 1:
+ tmpZone->iPhysPages - tmpOffset;
+ TUint reserved = tmpZone->iBma[KBmaAllPages]->SelectiveAlloc(tmpOffset, runLength);
+ if (reserved)
+ {
+#ifdef _DEBUG
+ TUint runEnd = tmpOffset + runLength;
+ TUint free = 0;
+ for (TUint i = tmpOffset; i < runEnd; i++)
+ if (tmpZone->iBma[EPageMovable]->NotAllocated(i,1) && tmpZone->iBma[EPageDiscard]->NotAllocated(i,1))
+ free++;
+ __NK_ASSERT_DEBUG(free == reserved);
+#endif
+ ZoneAllocPages(tmpZone, reserved, EPageFixed);
+ iTotalFreeRamPages -= reserved;
+ }
+ tmpZone->iBma[EPageFixed]->Alloc(tmpOffset, runLength);
+ addr = tmpZone->iPhysEnd + 1;
+ }
+ while (tmpZone != endZone);
+ }
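The semantics assumed of TBitMapAllocator::SelectiveAlloc above, as a behavioural sketch inferred from this call site and the debug check (not the actual implementation): allocate only the currently free bits in the range and report how many were claimed.

    //  TUint TBitMapAllocator::SelectiveAlloc(TInt aStart, TInt aLength)
    //      {
    //      TUint claimed = 0;
    //      for (TInt i = aStart; i < aStart + aLength; ++i)
    //          if (NotAllocated(i, 1))
    //              { Alloc(i, 1); ++claimed; }
    //      return claimed;
    //      }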
+
+
+FORCE_INLINE void DRamAllocator::UnblockSetAllocRuns( TUint& aOffset1, TUint& aOffset2,
+ TUint aRunLength1, TUint aRunLength2,
+ TUint& aAllocLength, TUint& aAllocStart)
+ {
+ aAllocStart = aOffset1;
+ aAllocLength = aRunLength1;
+ aOffset1 += aAllocLength;
+ if (aOffset1 == aOffset2)
+ {
+ aAllocLength += aRunLength2;
+ aOffset2 += aRunLength2;
+ aOffset1 = aOffset2;
+ }
+ }
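A worked example of the run merging above: with a discardable run of 3 pages at offset 4 and a movable run of 2 pages at offset 7, the call consumes the discardable run, finds the two cursors now meet at 7, folds in the movable run, and leaves both cursors at 9:

    //  in : aOffset1 = 4, aRunLength1 = 3, aOffset2 = 7, aRunLength2 = 2
    //  out: aAllocStart = 4, aAllocLength = 5, aOffset1 = aOffset2 = 9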
+
+
+void DRamAllocator::UnblockContiguousRegion(TPhysAddr aAddrBase, TUint aNumPages)
+ {
+ // Shouldn't be asked to unblock zero pages, addrEndPage would be wrong if we did.
+ __NK_ASSERT_DEBUG(aNumPages);
+ TPhysAddr addr = aAddrBase;
+ TPhysAddr addrEndPage = aAddrBase + ((aNumPages - 1) << KPageShift);
+ TInt tmpOffset;
+ SZone* endZone = GetZoneAndOffset(addrEndPage, tmpOffset);
+ SZone* tmpZone;
+ do
+ {
+ tmpZone = GetZoneAndOffset(addr, tmpOffset);
+ __NK_ASSERT_DEBUG(tmpZone != NULL);
+ TUint runLength = (addrEndPage < tmpZone->iPhysEnd)?
+ ((addrEndPage - addr) >> KPageShift) + 1:
+ tmpZone->iPhysPages - tmpOffset;
+ TUint unreserved = 0;
+ TUint runEnd = tmpOffset + runLength - 1;
+ TUint freeOffset = tmpOffset;
+ TUint discardOffset = freeOffset;
+ TUint movableOffset = freeOffset;
+ __KTRACE_OPT(KMMU2, Kern::Printf("freeOff %d, runEnd %d", freeOffset, runEnd));
+ while (freeOffset <= runEnd)
+ {
+ TUint discardRun;
+ TUint movableRun;
+ discardRun = NextAllocatedRun(tmpZone, discardOffset, runEnd, EPageDiscard);
+ movableRun = NextAllocatedRun(tmpZone, movableOffset, runEnd, EPageMovable);
+ TUint allocLength;
+ TUint allocStart;
+ __KTRACE_OPT(KMMU2, Kern::Printf("disOff %d len %d movOff %d len %d", discardOffset, discardRun, movableOffset, movableRun));
+ if (discardOffset < movableOffset)
+ UnblockSetAllocRuns(discardOffset, movableOffset, discardRun, movableRun, allocLength, allocStart);
+ else
+ UnblockSetAllocRuns(movableOffset, discardOffset, movableRun, discardRun, allocLength, allocStart);
+
+ if (allocStart > freeOffset)
+ {
+ unreserved += allocStart - freeOffset;
+ tmpZone->iBma[KBmaAllPages]->Free(freeOffset, allocStart - freeOffset);
+ __NK_ASSERT_DEBUG( !tmpZone->iBma[EPageMovable]->NotFree(freeOffset, allocStart - freeOffset) &&
+ !tmpZone->iBma[EPageDiscard]->NotFree(freeOffset, allocStart - freeOffset));
+ }
+ __KTRACE_OPT(KMMU2, Kern::Printf("disOff %d len %d movOff %d len %d start %d len %d", discardOffset, discardRun, movableOffset, movableRun, allocStart, allocLength));
+ freeOffset = allocStart + allocLength;
+ __KTRACE_OPT(KMMU2, Kern::Printf("freeOff %d", freeOffset));
+ }
+ tmpZone->iBma[EPageFixed]->Free(tmpOffset, runLength);
+ ZoneFreePages(tmpZone, unreserved, EPageFixed);
+ iTotalFreeRamPages += unreserved;
+ addr = tmpZone->iPhysEnd + 1;
+ }
+ while (tmpZone != endZone);
+ }
+
+
+TBool DRamAllocator::ClearContiguousRegion(TPhysAddr aAddrBase, TPhysAddr aZoneBase, TUint aNumPages, TInt& aOffset)
+ {
+ TPhysAddr addr = aAddrBase;
+ TPhysAddr addrEnd = aAddrBase + (aNumPages << KPageShift);
+ TInt contigOffset = 0;
+ SZone* contigZone = GetZoneAndOffset(addr, contigOffset);
+ for (; addr != addrEnd; addr += KPageSize, contigOffset++)
+ {
+ if (contigZone->iPhysEnd < addr)
+ {
+ contigZone = GetZoneAndOffset(addr, contigOffset);
+ __NK_ASSERT_DEBUG(contigZone != NULL);
+ }
+
+ __NK_ASSERT_DEBUG(contigZone != NULL);
+ __NK_ASSERT_DEBUG(contigZone->iBma[EPageFixed]->NotFree(contigOffset, 1));
+ __NK_ASSERT_DEBUG(SPageInfo::SafeFromPhysAddr(addr) != NULL);
+
+ // WARNING - This may flash the ram alloc mutex.
+ TInt exRet = M::MoveAndAllocPage(addr, EPageFixed);
+ if (exRet != KErrNone)
+ {// This page couldn't be moved or discarded so
+ // restart the search the page after this one.
+ __KTRACE_OPT(KMMU2, Kern::Printf("ContigMov fail contigOffset 0x%x exRet %d", contigOffset, exRet));
+ aOffset = (addr < aZoneBase)? 0 : contigOffset + 1;
+ break;
+ }
+ }
+ return addr == addrEnd;
+ }
+
+
/**
Search through the zones for the requested contiguous RAM, first in preference
order then, if that fails, in address order.
+No support for non-fixed pages as this will discard and move pages if required.
+
@param aNumPages The number of contiguous pages to find
@param aPhysAddr Will contain the base address of any contiguous run if found
-@param aType The page type of the memory to be allocated
@param aAlign Alignment specified as the alignment shift
-@param aBlockedZoneId The ID of a zone that can't be allocated into, by default this has no effect
-@param aBlockRest Set to ETrue to stop allocation as soon as aBlockedZoneId is reached
in preference ordering. EFalse otherwise.
@return KErrNone on success, KErrNoMemory otherwise
*/
-TInt DRamAllocator::AllocContiguousRam(TUint aNumPages, TPhysAddr& aPhysAddr, TZonePageType aType, TInt aAlign, TUint aBlockedZoneId, TBool aBlockRest)
+TInt DRamAllocator::AllocContiguousRam(TUint aNumPages, TPhysAddr& aPhysAddr, TInt aAlign)
{
__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam size %08x align %d",aNumPages,aAlign));
M::RamAllocIsLocked();
- // No support for non-fixed pages as this will discard and move
- // pages if required.
- __NK_ASSERT_DEBUG(aType == EPageFixed);
+ if ((TUint)aNumPages > iTotalFreeRamPages + M::NumberOfFreeDpPages())
+ {// Not enough free space and not enough freeable pages.
+ return KErrNoMemory;
+ }
+
TInt alignWrtPage = Max(aAlign - KPageShift, 0);
TUint32 alignmask = (1u << alignWrtPage) - 1;
@@ -1716,7 +1904,124 @@
TInt offset = 0;
iZoneTmpAddrIndex = -1;
iZoneTmpPrefLink = iZonePrefList.First();
- while (NextAllocZone(zone, searchState, aType, aBlockedZoneId, aBlockRest))
+ while (NextAllocZone(zone, searchState, EPageFixed, KRamZoneInvalidId, EFalse))
+ {
+ // Be sure to start from scratch if zone not contiguous with previous zone
+ if (prevZone && (zone->iPhysBase == 0 || (zone->iPhysBase - 1) != prevZone->iPhysEnd))
+ {
+ carryAll = 0;
+ carryImmov = 0;
+ }
+ prevZone = zone;
+ TBitMapAllocator& bmaAll = *(zone->iBma[KBmaAllPages]);
+ base = TInt(zone->iPhysBase >> KPageShift);
+ TInt runLength;
+ __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: base=%08x carryAll=%08x offset=%08x", base, carryAll, offset));
+ offset = bmaAll.AllocAligned(aNumPages, alignWrtPage, base, EFalse, carryAll, runLength);
+ __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset));
+
+ if (offset >= 0)
+ {
+ // Have found enough contiguous pages so return address of physical page
+ // at the start of the region
+ aPhysAddr = TPhysAddr((base + offset - carryAll + alignmask) & ~alignmask) << KPageShift;
+ MarkPagesAllocated(aPhysAddr, aNumPages, EPageFixed);
+
+ __KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr));
+#ifdef BTRACE_RAM_ALLOCATOR
+ BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, EPageFixed, aNumPages, aPhysAddr);
+#endif
+ return KErrNone;
+ }
+ // No run found when looking in just the free pages so see if this
+ // RAM zone could be used if pages where moved or discarded.
+ TBitMapAllocator& bmaImmov = *(zone->iBma[EPageFixed]);
+ offset = 0; // Clear so searches whole of fixed BMA on the first pass.
+ do
+ {
+ __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: base=%08x carryImmov=%08x offset=%08x", base, carryImmov, offset));
+ offset = bmaImmov.AllocAligned(aNumPages, alignWrtPage, base, EFalse, carryImmov, runLength, offset);
+ __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset));
+ if (offset >= 0)
+ {// Have found a run in immovable page bma so attempt to clear
+ // it for the allocation.
+ TPhysAddr addrBase = TPhysAddr((base + offset - carryImmov + alignmask) & ~alignmask) << KPageShift;
+ __KTRACE_OPT(KMMU2, Kern::Printf(">AllocContig fix run 0x%08x - 0x%08x 0x%x", addrBase, addrBase + (aNumPages << KPageShift), TheCurrentThread));
+
+ // Block the contiguous region from being allocated.
+ iContiguousReserved++;
+ BlockContiguousRegion(addrBase, aNumPages);
+ if (ClearContiguousRegion(addrBase, zone->iPhysBase, aNumPages, offset))
+ {// Cleared all the required pages.
+ // Return address of physical page at the start of the region.
+ iContiguousReserved--;
+ aPhysAddr = addrBase;
+ __KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr));
+#ifdef BTRACE_RAM_ALLOCATOR
+ BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, EPageFixed, aNumPages, aPhysAddr);
+#endif
+ __KTRACE_OPT(KMMU2, Kern::Printf("<AllocContig suc run 0x%08x - 0x%08x 0x%x", addrBase, addrBase + (aNumPages << KPageShift), TheCurrentThread));
+ return KErrNone;
+ }
+ else
+ {
+ // Unblock the contiguous region.
+ UnblockContiguousRegion(addrBase, aNumPages);
+ iContiguousReserved--;
+ __KTRACE_OPT(KMMU2, Kern::Printf("ContigMov fail offset 0x%x carryImmov %x",
+ offset, carryImmov));
+ // Can't rely on RAM zone preference ordering being
+ // the same so clear carrys and restart search from
+ // within the current RAM zone or skip onto the next
+ // one if at the end of this one.
+ carryImmov = 0;
+ carryAll = 0;
+ __KTRACE_OPT(KMMU2, Kern::Printf("<AllocContigfail run 0x%08x - 0x%08x 0x%x", addrBase, addrBase + (aNumPages << KPageShift), TheCurrentThread));
+ }
+ }
+ }
+ // Keep searching the immovable page bma of the current RAM zone until we
+ // have gone past the end of the RAM zone or no run can be found.
+ while (offset >= 0 && (TUint)offset < zone->iPhysPages);
+ }
+ return KErrNoMemory;
+ }
+
+#else
+
+/**
+Search through the zones for the requested contiguous RAM, first in preference
+order then, if that fails, in address order.
+
+No support for non-fixed pages as this will discard and move pages if required.
+
+@param aNumPages The number of contiguous pages to find
+@param aPhysAddr Will contain the base address of any contiguous run if found
+@param aAlign Alignment specified as the alignment shift
+
+@return KErrNone on success, KErrNoMemory otherwise
+*/
+TInt DRamAllocator::AllocContiguousRam(TUint aNumPages, TPhysAddr& aPhysAddr, TInt aAlign)
+ {
+ __KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam size %08x align %d",aNumPages,aAlign));
+
+ M::RamAllocIsLocked();
+
+ TInt alignWrtPage = Max(aAlign - KPageShift, 0);
+ TUint32 alignmask = (1u << alignWrtPage) - 1;
+
+ // Attempt to find enough pages searching in preference order first then
+ // in address order
+ TZoneSearchState searchState = EZoneSearchPref;
+ SZone* zone;
+ SZone* prevZone = NULL;
+ TInt carryAll = 0; // Carry for all pages bma, clear to start new run.
+ TInt carryImmov = 0; // Carry for immovable pages bma, clear to start new run.
+ TInt base = 0;
+ TInt offset = 0;
+ iZoneTmpAddrIndex = -1;
+ iZoneTmpPrefLink = iZonePrefList.First();
+ while (NextAllocZone(zone, searchState, EPageFixed, KRamZoneInvalidId, EFalse))
{
// Be sure to start from scratch if zone not contiguous with previous zone
if (prevZone && (zone->iPhysBase == 0 || (zone->iPhysBase - 1) != prevZone->iPhysEnd))
@@ -1736,11 +2041,11 @@
{// Have found enough contiguous pages so return address of physical page
// at the start of the region
aPhysAddr = TPhysAddr((base + offset - carryAll + alignmask) & ~alignmask) << KPageShift;
- MarkPagesAllocated(aPhysAddr, aNumPages, aType);
+ MarkPagesAllocated(aPhysAddr, aNumPages, EPageFixed);
__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr));
#ifdef BTRACE_RAM_ALLOCATOR
- BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, aType, aNumPages, aPhysAddr);
+ BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, EPageFixed, aNumPages, aPhysAddr);
#endif
return KErrNone;
}
@@ -1788,12 +2093,11 @@
contigZone = GetZoneAndOffset(addr, contigOffset);
__NK_ASSERT_DEBUG(contigZone != NULL);
}
-#ifdef _DEBUG // This page shouldn't be allocated as fixed, only movable or discardable.
+ // This page shouldn't be allocated as fixed, only movable or discardable.
__NK_ASSERT_DEBUG(contigZone != NULL);
__NK_ASSERT_DEBUG(contigZone->iBma[EPageFixed]->NotAllocated(contigOffset, 1));
- SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(addr);
- __NK_ASSERT_DEBUG(pageInfo != NULL);
-#endif
+ __NK_ASSERT_DEBUG(SPageInfo::SafeFromPhysAddr(addr) != NULL);
+
TPhysAddr newAddr;
TInt moveRet = M::MovePage(addr, newAddr, contigZone->iId, EFalse);
if (moveRet != KErrNone && moveRet != KErrNotFound)
@@ -1827,11 +2131,11 @@
{// Cleared all the required pages so allocate them.
// Return address of physical page at the start of the region.
aPhysAddr = addrBase;
- MarkPagesAllocated(aPhysAddr, aNumPages, aType);
+ MarkPagesAllocated(aPhysAddr, aNumPages, EPageFixed);
__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr));
#ifdef BTRACE_RAM_ALLOCATOR
- BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, aType, aNumPages, aPhysAddr);
+ BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, EPageFixed, aNumPages, aPhysAddr);
#endif
return KErrNone;
}
@@ -1844,6 +2148,7 @@
}
return KErrNoMemory;
}
+#endif // !defined(__MEMMODEL_MULTIPLE__) && !defined(__MEMMODEL_MOVING__)
/**
@@ -1858,19 +2163,17 @@
@param aZoneIdCount The number of the IDs listed by aZoneIdList.
@param aSize The number of contiguous bytes to find
@param aPhysAddr Will contain the base address of the contiguous run if found
-@param aType The page type of the memory to be allocated
@param aAlign Alignment specified as the alignment shift
@return KErrNone on success, KErrNoMemory if allocation couldn't succeed or
the RAM zone has the KRamZoneFlagNoAlloc flag set. KErrArgument if a zone of
aZoneIdList exists or if aSize is larger than the size of the zone.
*/
-TInt DRamAllocator::ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aType, TInt aAlign)
+TInt DRamAllocator::ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
{
__KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocContiguousRam zones 0x%x size 0x%08x align %d",aZoneIdCount, aSize, aAlign));
M::RamAllocIsLocked();
- __NK_ASSERT_DEBUG(aType == EPageFixed);
TUint numPages = (aSize + KPageSize - 1) >> KPageShift;
@@ -1930,11 +2233,11 @@
// Have found enough contiguous pages so mark the pages allocated and
// return address of physical page at the start of the region.
aPhysAddr = TPhysAddr((base + offset - carry + alignmask) & ~alignmask) << KPageShift;
- MarkPagesAllocated(aPhysAddr, numPages, aType);
+ MarkPagesAllocated(aPhysAddr, numPages, EPageFixed);
__KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocContiguousRam returns %08x",aPhysAddr));
#ifdef BTRACE_RAM_ALLOCATOR
- BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocZoneContiguousRam, aType, numPages, aPhysAddr);
+ BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocZoneContiguousRam, EPageFixed, numPages, aPhysAddr);
#endif
return KErrNone;
}
@@ -2106,34 +2409,34 @@
// Makes things simpler for bma selection.
__NK_ASSERT_DEBUG(aType != EPageUnknown);
- if (aOffset >= aZone->iPhysPages)
+ TUint zoneEndOffset = aZone->iPhysPages - 1;
+ if (aOffset > zoneEndOffset)
{// Starting point is outside the zone
return KErrArgument;
}
- TUint offset = aOffset;
- TUint endOffset = aZone->iPhysPages;
- TUint endOffsetAligned = endOffset & KWordAlignMask;
+ TUint wordIndex = aOffset >> 5;
+ TUint endWordIndex = zoneEndOffset >> 5;
// Select the BMA to search,
TUint bmaIndex = (aType == EPageTypes)? KBmaAllPages : aType;
- TUint32* map = &(aZone->iBma[bmaIndex]->iMap[offset >> 5]);
- TUint32 bits = *map++;
+ TUint32* map = &(aZone->iBma[bmaIndex]->iMap[wordIndex]);
+ TUint32* mapEnd = &(aZone->iBma[bmaIndex]->iMap[endWordIndex]);
+ TUint32 bits = *map;
// Set bits for pages before 'offset' (i.e. ones we want to ignore)...
- bits |= ~(KMaxTUint32 >> (offset & ~KWordAlignMask));
+ bits |= ~(KMaxTUint32 >> (aOffset & ~KWordAlignMask));
// Find the first bit map word from aOffset in aZone with allocated pages
- while (bits == KMaxTUint32 && offset < endOffsetAligned)
+ while (bits == KMaxTUint32 && map < mapEnd)
{
- bits = *map++;
- offset = (offset + 32) & KWordAlignMask;
+ bits = *++map;
}
- if (offset >= endOffsetAligned && endOffset != endOffsetAligned)
+ if (map == mapEnd)
{// Have reached the last bit mask word so set the bits that are
// outside of the zone so that they are ignored.
- bits |= KMaxTUint32 >> (endOffset - endOffsetAligned);
+ bits |= (KMaxTUint32 >> (zoneEndOffset & ~KWordAlignMask)) >> 1;
}
if (bits == KMaxTUint32)
@@ -2143,25 +2446,104 @@
// Now we have bits with allocated pages in it so determine the exact
// offset of the next allocated page
- TUint32 mask = 0x80000000 >> (offset & ~KWordAlignMask);
- while (bits & mask)
- {
- mask >>= 1;
- offset++;
- }
-
- if (offset >= endOffset)
- {// Reached the end of the zone without finding an allocated page after aOffset
- return KErrNotFound;
- }
-
- // Should definitely have found an allocated page within aZone's pages
- __NK_ASSERT_DEBUG(mask != 0 && !(bits & mask) && offset < aZone->iPhysPages);
-
- aOffset = offset;
+ TInt msOne = __e32_find_ms1_32(~bits);
+ __NK_ASSERT_DEBUG(msOne >= 0); // Must have at least one allocated page in the word.
+ TUint msOneOffset = 31 - msOne;
+ aOffset = ((map - aZone->iBma[bmaIndex]->iMap) << 5) + msOneOffset;
return KErrNone;
}
+
+/**
+Get the next run of pages in this zone that are allocated after aOffset.
+
+@param aZone The zone to find the next allocated page in.
+@param aOffset On entry this is the offset from which the next allocated
+ page in the zone should be found, on return it will be the offset
+ of the next allocated page.
+@param aEndOffset The last offset within this RAM zone to check for allocated runs.
+@param aType The type of the pages to check for.
+@return The length of any run found, or 0 if there are no allocated pages of the
+specified type between aOffset and aEndOffset.
+*/
+TInt DRamAllocator::NextAllocatedRun(SZone* aZone, TUint& aOffset, TUint aEndOffset, TZonePageType aType) const
+ {
+ const TUint KWordAlignMask = KMaxTUint32 << 5;
+
+ M::RamAllocIsLocked();
+
+ __NK_ASSERT_DEBUG(aZone - iZones < (TInt)iNumZones);
+ // Makes things simpler for bma selection.
+ __NK_ASSERT_DEBUG(aType != EPageUnknown);
+
+ if (aOffset > aEndOffset)
+ {// UnblockContiguous() has already searched the whole range for this page type.
+ return 0;
+ }
+
+ TUint wordIndex = aOffset >> 5;
+ TUint endWordIndex = aEndOffset >> 5;
+
+ // Select the BMA to search,
+ TUint bmaIndex = (aType == EPageTypes)? KBmaAllPages : aType;
+ TUint32* map = &(aZone->iBma[bmaIndex]->iMap[wordIndex]);
+ TUint32* mapEnd = &(aZone->iBma[bmaIndex]->iMap[endWordIndex]);
+ TUint32 bits = *map;
+
+ // Set bits for pages before 'offset' (i.e. ones we want to ignore)...
+ bits |= ~(KMaxTUint32 >> (aOffset & ~KWordAlignMask));
+
+ // Find the first bit map word from aOffset in aZone with allocated pages
+ while (bits == KMaxTUint32 && map < mapEnd)
+ {
+ bits = *++map;
+ }
+
+ if (map == mapEnd)
+ {// Have reached the last bit mask word so set the bits that are
+ // outside of the range so that they are ignored.
+ bits |= (KMaxTUint32 >> (aEndOffset & ~KWordAlignMask)) >> 1;
+ }
+
+ if (bits == KMaxTUint32)
+ {// No allocated pages found in the range.
+ aOffset = aEndOffset + 1;
+ return 0;
+ }
+
+ // Now we have bits with allocated pages in it so determine the exact
+ // offset of the next allocated page
+ TInt msOne = __e32_find_ms1_32(~bits);
+ __NK_ASSERT_DEBUG(msOne >= 0); // Must have at least one allocated page in the word.
+ TUint msOneOffset = 31 - msOne;
+ aOffset = ((map - aZone->iBma[bmaIndex]->iMap) << 5) + msOneOffset;
+ TUint32* runWord = map;
+
+ if (map < mapEnd && __e32_bit_count_32(~bits) == msOne + 1)
+ {// The whole of the region in this word is allocated.
+ // Find the next word which isn't completely allocated within the range.
+ do
+ {
+ bits = *++map;
+ }
+ while (!bits && map < mapEnd);
+ }
+
+ // Clear any bits before the run so can get next free from __e32_find_msl_32().
+ if (runWord == map)
+ bits &= KMaxTUint32 >> (aOffset & ~KWordAlignMask);
+ TInt msFree = __e32_find_ms1_32(bits);
+ __NK_ASSERT_DEBUG(msFree >= 0 || map == mapEnd);
+ TUint msFreeOffset = (msFree >= 0)? 31 - msFree : 32;
+ TUint endIndex = map - aZone->iBma[bmaIndex]->iMap;
+ TUint runEnd = (endIndex << 5) + msFreeOffset;
+ if (runEnd > aEndOffset + 1) // Ensure we don't go past the range.
+ runEnd = aEndOffset + 1;
+ __NK_ASSERT_DEBUG(runEnd > aOffset);
+
+ return runEnd - aOffset;
+ }
+
+
/**
See if any of the least preferable RAM zones can be emptied. If they can then
initialise the allocator for a general defragmentation operation.
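The reworked NextAllocatedPage()/NextAllocatedRun() above scan the zone bitmap one word at a time: bits for pages before aOffset (and, in the last word, pages past the end of the range) are forced to 1 so they read as "free", and the first allocated page is then the most significant set bit of the inverted word. The standalone sketch below models that scan in plain C++; it is illustrative only, assumes the TBitMapAllocator layout implied by the code above (bit 31 of word 0 is page 0, a set bit means free), and uses a naive loop in place of the __e32_find_ms1_32() intrinsic.

#include <cstdint>
#include <vector>

static int FindMs1(uint32_t aWord)              // index of most significant set bit, -1 if none
	{
	for (int i = 31; i >= 0; --i)
		if (aWord & (1u << i))
			return i;
	return -1;
	}

// Returns true and sets aOffset to the first allocated page at or after aOffset
// (and not beyond aEndOffset); returns false if every page in the range is free.
bool NextAllocatedPage(const std::vector<uint32_t>& aMap, unsigned& aOffset, unsigned aEndOffset)
	{
	unsigned wordIndex = aOffset >> 5;
	const unsigned endWordIndex = aEndOffset >> 5;
	uint32_t bits = aMap[wordIndex];
	bits |= ~(0xFFFFFFFFu >> (aOffset & 31));               // ignore pages before aOffset
	while (bits == 0xFFFFFFFFu && wordIndex < endWordIndex)
		bits = aMap[++wordIndex];                           // skip fully free words
	if (wordIndex == endWordIndex)
		bits |= (0xFFFFFFFFu >> (aEndOffset & 31)) >> 1;    // ignore pages after aEndOffset
	if (bits == 0xFFFFFFFFu)
		return false;                                       // nothing allocated in range
	aOffset = (wordIndex << 5) + (31 - FindMs1(~bits));     // first 0 bit == first allocated page
	return true;
	}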
--- a/kernel/eka/memmodel/epoc/moving/mchunk.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/memmodel/epoc/moving/mchunk.cpp Tue May 11 17:28:22 2010 +0300
@@ -467,7 +467,8 @@
{
// Allocate a block of contiguous RAM from the free pool
TInt numPages=(endOffset-offset)>>m.iPageShift;
- r=m.AllocContiguousRam(numPages<<m.iPageShift, nextPage, GetPageType(), 0);
+ __NK_ASSERT_DEBUG(EPageFixed == GetPageType());
+ r=m.AllocContiguousRam(numPages<<m.iPageShift, nextPage, 0);
if (r!=KErrNone)
return r;
if(clearRam)
--- a/kernel/eka/memmodel/epoc/multiple/arm/xmmu.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/memmodel/epoc/multiple/arm/xmmu.cpp Tue May 11 17:28:22 2010 +0300
@@ -1615,7 +1615,7 @@
if (aNumPages>1)
{
TInt align=aSeparateGlobal ? KPageDirectoryShift : KPageDirectoryShift-1;
- r=AllocContiguousRam(aNumPages<<KPageShift, aPhysAddr, EPageFixed, align);
+ r=AllocContiguousRam(aNumPages<<KPageShift, aPhysAddr, align);
}
else
r=AllocRamPages(&aPhysAddr,1, EPageFixed);
--- a/kernel/eka/memmodel/epoc/multiple/mchunk.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/memmodel/epoc/multiple/mchunk.cpp Tue May 11 17:28:22 2010 +0300
@@ -417,7 +417,8 @@
{
// Allocate a block of contiguous RAM from the free pool
TInt numPages=(endOffset-offset)>>m.iPageShift;
- r=m.AllocContiguousRam(numPages<<m.iPageShift, nextPage, GetPageType(), 0);
+ __NK_ASSERT_DEBUG(EPageFixed == GetPageType());
+ r=m.AllocContiguousRam(numPages<<m.iPageShift, nextPage, 0);
if (r!=KErrNone)
return r;
if(clearRam)
--- a/kernel/eka/memmodel/memmodel.mmp Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/memmodel/memmodel.mmp Tue May 11 17:28:22 2010 +0300
@@ -102,6 +102,7 @@
source mpager.cpp mrom.cpp mdatapaging.cpp mcodepaging.cpp
source mexport.cpp mthrash.cpp
source mdefrag.cpp mlargemappings.cpp
+source mpagecleaner.cpp
sourcepath ../memmodel/epoc/mmubase
source kblockmap.cpp ramalloc.cpp defragbase.cpp
--- a/kernel/eka/release.txt Tue Apr 27 18:02:57 2010 +0300
+++ b/kernel/eka/release.txt Tue May 11 17:28:22 2010 +0300
@@ -1,3 +1,61 @@
+Version 2.00.2117
+=================
+(Made by vfebvre 28/04/2010)
+
+1. necliffo
+ 1. ou1cimx1#358999 eMMC with DDR-mode support is not used in 4-bit mode
+
+2. martai
+ 1. ou1cimx1#354860 WDP:Potential system impact of flushing the entire page cache is unacceptable
+
+3. y153liu
+ 1. ou1cimx1#355901 Music playback is not correct after resuming the playback from the USB headset AD-83 after long pause
+
+
+Version 2.00.2116
+=================
+(Made by vfebvre 28/04/2010)
+
+1. stmansfi
+ 1. ou1cimx1#361073 [System Build]: Advisory notes related to cat x in TB92SF_1116 and MSF00398
+
+2 shubmurt
+ 1. DEF144601 E32TEST T_DATAPAGING occasionally reports KERN-EXEC 17 on SMPDATAPAGE configs
+
+
+Version 2.00.2115
+=================
+(Made by vfebvre 26/04/2010)
+
+1. jimmzhou
+ 1. ou1cimx1#351997 [kernel92]Can't recieve USB data after Host call SetConfiguraion
+
+
+Version 2.00.2114
+=================
+(Made by vfebvre 21/04/2010)
+
+1. davegord
+ 1. ou1cimx1#353731 DownloadServerMgr.exe causes phone crash
+
+
+Version 2.00.2113
+=================
+(Made by vfebvre 16/04/2010)
+
+1. jimmzhou
+ 1. ou1cimx1#342085 [kernel92]Logitech USB desktop microphone is not recogonized.
+
+
+Version 2.00.2112
+=================
+(Made by vfebvre 12/04/2010)
+
+1. jcoppear
+ 1. PDEF145321: WDP: WDP only writes one page to swap at a time
+ 2. DEF143737: T_SHADOW failing test that ROM is section mapped (t_shadow.cpp:360)
+
+
Version 2.00.2111
=================
(Made by vfebvre 09/04/2010)
--- a/kerneltest/e32test/buffer/t_tbma.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kerneltest/e32test/buffer/t_tbma.cpp Tue May 11 17:28:22 2010 +0300
@@ -682,6 +682,62 @@
delete pA;
}
+
+void TestSelectiveAlloc(TInt aSize)
+ {
+ test.Printf(_L("TestSelectiveAlloc %d\n"),aSize);
+ TBitMapAllocator* pA=TBitMapAllocator::New(aSize, ETrue);
+ test(pA!=NULL);
+ test(pA->Avail()==aSize);
+ // Allocate whole free bma
+ test_Equal(aSize, pA->SelectiveAlloc(0, aSize));
+ test_Equal(0,pA->Avail());
+ // Allocate whole full bma
+ test_Equal(0, pA->SelectiveAlloc(0, aSize));
+ test_Equal(0,pA->Avail());
+ TInt i;
+ TInt j;
+ TInt l;
+ for (i=2; i<8; ++i)
+ {
+ for (l=1; l<=aSize; ++l)
+ {
+ new (pA) TBitMapAllocator(aSize, ETrue);
+ for (j=0; j<aSize; j+=i)
+ pA->Alloc(j,1);
+ TInt orig=pA->Avail();
+ test_Equal(aSize-(aSize+i-1)/i, orig);
+ TUint newAllocs = pA->SelectiveAlloc(0,l);
+ TInt allocated = orig - pA->Avail();
+ test_Equal(allocated, newAllocs);
+ test_Equal(l - (l+i-1)/i, allocated);
+ Check(*pA);
+ }
+ }
+ for (i=0; i<=Min(32,aSize-1); ++i)
+ {
+ for (l=1; l<=aSize-i; ++l)
+ {
+ for (j=1; j<=aSize; ++j)
+ {
+ new (pA) TBitMapAllocator(aSize, ETrue);
+ pA->Alloc(i,l);
+ test_Equal(aSize-l, pA->Avail());
+ TUint newAllocs = pA->SelectiveAlloc(0,j);
+ TUint allocated = j - Max(0,Min(i+l,j)-i);
+ test_Equal(allocated, newAllocs);
+ test_Equal(pA->Avail(), aSize-l-allocated);
+ test(!pA->NotAllocated(0,j));
+ if (j>=i && j<i+l)
+ test(!pA->NotAllocated(0,j+1));
+ Check(*pA);
+ }
+ }
+ }
+ delete pA;
+ }
+
+
TBitMapAllocator* DoSetupBMA(TInt aSize, VA_LIST aList)
{
TBitMapAllocator* pA=TBitMapAllocator::New(aSize, EFalse);
@@ -1246,6 +1302,11 @@
TestSelectiveFree(128);
TestSelectiveFree(149);
+ TestSelectiveAlloc(3);
+ TestSelectiveAlloc(31);
+ TestSelectiveAlloc(128);
+ TestSelectiveAlloc(149);
+
TestAllocConsecutive();
TestChain();
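The new TestSelectiveAlloc() cases above rely on a simple contract: SelectiveAlloc(aStart, aLength) claims every bit in the range that is currently free, leaves already-allocated bits untouched, and returns how many bits it actually claimed, so the return value always equals the drop in Avail(). A minimal model of that contract (not the kernel implementation) is sketched below for reference.

#include <vector>

// Toy stand-in for TBitMapAllocator, modelling only what TestSelectiveAlloc() checks.
class TinyBitmap
	{
public:
	explicit TinyBitmap(int aSize) : iFree(aSize, true) {}
	int Avail() const
		{
		int n = 0;
		for (bool f : iFree)
			n += f;
		return n;
		}
	void Alloc(int aStart, int aLength)                 // precondition: range currently free
		{
		for (int i = aStart; i < aStart + aLength; ++i)
			iFree[i] = false;
		}
	unsigned SelectiveAlloc(int aStart, int aLength)    // claims only the free bits in the range
		{
		unsigned claimed = 0;
		for (int i = aStart; i < aStart + aLength; ++i)
			if (iFree[i])
				{
				iFree[i] = false;
				++claimed;
				}
		return claimed;
		}
private:
	std::vector<bool> iFree;
	};

With this model the invariants asserted above hold by construction: allocating the whole of a free map returns aSize, repeating the call returns 0, and the return value always matches the fall in Avail().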
--- a/kerneltest/e32test/buffer/t_tbma.h Tue Apr 27 18:02:57 2010 +0300
+++ b/kerneltest/e32test/buffer/t_tbma.h Tue May 11 17:28:22 2010 +0300
@@ -39,6 +39,7 @@
IMPORT_C void Free(TInt aPos);
IMPORT_C void Alloc(TInt aStart, TInt aLength);
IMPORT_C void Free(TInt aStart, TInt aLength);
+ IMPORT_C TUint SelectiveAlloc(TInt aStart, TInt aLength);
IMPORT_C void SelectiveFree(TInt aStart, TInt aLength);
IMPORT_C TBool NotFree(TInt aStart, TInt aLength) const;
IMPORT_C TBool NotAllocated(TInt aStart, TInt aLength) const;
--- a/kerneltest/e32test/defrag/t_ramdefrag.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kerneltest/e32test/defrag/t_ramdefrag.cpp Tue May 11 17:28:22 2010 +0300
@@ -33,6 +33,7 @@
#include <e32math.h>
#include <hal.h>
#include "testdefs.h"
+#include "..\mmu\mmudetect.h"
#include <dptest.h>
@@ -128,6 +129,7 @@
LOCAL_D TInt* gCandList1; // Array of zones that have the same preference and the same
LOCAL_D TInt* gCandList2; // amount of free pages
const TInt KInvalidCandIndex = -1;
+LOCAL_D TUint gMemModel;
//
// GetDrive
@@ -489,6 +491,8 @@
currentCacheSize >> gPageShift));
}
+ // Get the memory model of the kernel that this test is running on.
+ gMemModel = MemModelType();
return KErrNone;
}
@@ -1536,7 +1540,7 @@
}
if (totalMorePrefInUse > requiredMovDis)
- {// There enough allocatable pages in the RAM zones below the currently
+ {// There are enough allocatable pages in the RAM zones below the currently
// least preferable RAM in use.
test.Printf(_L("Memory is spread out totalMorePref 0x%x required 0x%x\n"), totalMorePrefInUse, requiredMovDis);
if (verifySpread)
@@ -9384,6 +9388,14 @@
test.Next(_L("Test5: Filling the FS Cache and allocating more than 16 contiguous fixed pages"));
TestStart();
+
+ if (gMemModel >= EMemModelTypeFlexible)
+ {// The flexible memory model won't flush the whole paging cache for
+ // contiguous allocations >16 pages so skip the next test.
+ test.Printf(_L("This memory model won't flush the cache - Skipping...\n"));
+ goto SkipTest5;
+ }
+
// TestEnd() will have reduced any cache pages to minimum so just get current
// count of discardable pages.
GetAllPageInfo();
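The skip above requires the test to know which memory model it is running on. This changeset uses two routes to find out, both visible in the files it touches: MemModelType() from mmudetect.h (as added to this test) and a kernel HAL query (as used later in t_ramall.cpp). The fragment below is the HAL form, reproduced for reference as a usage sketch rather than new API.

	// User-side query of the memory model type, using the same constants as the tests above.
	TUint memModel = UserSvr::HalFunction(EHalGroupKernel, EKernelHalMemModelInfo, NULL, NULL)
						& EMemModelTypeMask;
	if (memModel >= EMemModelTypeFlexible)
		{
		// Flexible memory model: contiguous allocations of more than 16 pages no longer
		// flush the whole paging cache, so the cache-flush assertions would be invalid.
		}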
--- a/kerneltest/e32test/demandpaging/t_datapaging.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kerneltest/e32test/demandpaging/t_datapaging.cpp Tue May 11 17:28:22 2010 +0300
@@ -44,9 +44,10 @@
#include "../mmu/d_memorytest.h"
#include "../mmu/paging_info.h"
-RTest test(_L("T_DATAPAGING"));
+_LIT(KChunkName, "t_datapaging chunk");
-_LIT(KChunkName, "t_datapaging chunk");
+RTest test(_L("T_DATAPAGING"));
+SVMSwapInfo InitialSwapInfo;
class TRandom
{
@@ -235,7 +236,7 @@
CLOSE_AND_WAIT(thread);
}
CLOSE_AND_WAIT(gChunk);
- User::After(1000000);
+ UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, 0, 0);
__KHEAP_MARKEND;
}
@@ -330,8 +331,8 @@
if (aActual != aExpected)
{
StopSoakTest(aMsgQueue);
- RDebug::Printf(" thread %d failure reading page %d at iteration %d address %08x: expected %08x but got %08x",
- aThread, aPage, aIteration, aPtr, aExpected, aActual);
+ RDebug::Printf(" thread %d failure reading page %d at iteration %d address %08x line %d: expected %08x but got %08x",
+ aThread, aPage, aIteration, aPtr, aLine, aExpected, aActual);
return EFalse;
}
return ETrue;
@@ -341,7 +342,6 @@
{
SSoakTestArgs* args = (SSoakTestArgs*)aArg;
-
RMsgQueue<TInt> msgQueue;
TInt r = msgQueue.OpenGlobal(KMsgQueueName, EOwnerThread);
if (r != KErrNone)
@@ -595,41 +595,35 @@
test(swapInfo.iSwapFree <= swapInfo.iSwapSize);
test.Printf(_L(" Swap size == 0x%x bytes\n"), swapInfo.iSwapSize);
test.Printf(_L(" Swap free == 0x%x bytes\n"), swapInfo.iSwapFree);
- if (!gDataPagingSupported)
- {
- test_Equal(0, swapInfo.iSwapSize);
- }
- else
- {
- test(swapInfo.iSwapSize != 0);
+ test(swapInfo.iSwapSize != 0);
+ InitialSwapInfo = swapInfo;
- CommitPage(chunk, 0);
- SVMSwapInfo swapInfo2;
- test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo2, 0));
- test_Equal(swapInfo.iSwapSize, swapInfo2.iSwapSize);
- test_Equal(swapInfo.iSwapFree - gPageSize, swapInfo2.iSwapFree);
+ CommitPage(chunk, 0);
+ SVMSwapInfo swapInfo2;
+ test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo2, 0));
+ test_Equal(swapInfo.iSwapSize, swapInfo2.iSwapSize);
+ test_Equal(swapInfo.iSwapFree - gPageSize, swapInfo2.iSwapFree);
- DecommitPage(chunk, 0);
- test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo2, 0));
- test_Equal(swapInfo.iSwapSize, swapInfo2.iSwapSize);
- test_Equal(swapInfo.iSwapFree, swapInfo2.iSwapFree);
+ DecommitPage(chunk, 0);
+ test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo2, 0));
+ test_Equal(swapInfo.iSwapSize, swapInfo2.iSwapSize);
+ test_Equal(swapInfo.iSwapFree, swapInfo2.iSwapFree);
- // Test that closing the chunk releases the swap page.
- CommitPage(chunk, 0);
- test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo2, 0));
- test_Equal(swapInfo.iSwapSize, swapInfo2.iSwapSize);
- test_Equal(swapInfo.iSwapFree - gPageSize, swapInfo2.iSwapFree);
+ // Test that closing the chunk releases the swap page.
+ CommitPage(chunk, 0);
+ test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo2, 0));
+ test_Equal(swapInfo.iSwapSize, swapInfo2.iSwapSize);
+ test_Equal(swapInfo.iSwapFree - gPageSize, swapInfo2.iSwapFree);
- chunk.Close();
- test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo2, 0));
- test_Equal(swapInfo.iSwapSize, swapInfo2.iSwapSize);
- test_Equal(swapInfo.iSwapFree, swapInfo2.iSwapFree);
+ chunk.Close();
+ test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo2, 0));
+ test_Equal(swapInfo.iSwapSize, swapInfo2.iSwapSize);
+ test_Equal(swapInfo.iSwapFree, swapInfo2.iSwapFree);
- // Chunk must be created for rest of testing.
- test_KErrNone(chunk.Create(createInfo));
- if (gDataPagingSupported)
- test(chunk.IsPaged());
- }
+ // Chunk must be created for rest of testing.
+ test_KErrNone(chunk.Create(createInfo));
+ if (gDataPagingSupported)
+ test(chunk.IsPaged());
// EVMHalSetSwapThresholds,
test.Next(_L("Test EVMHalSetSwapThresholds"));
@@ -690,6 +684,16 @@
CLOSE_AND_WAIT(chunk);
}
+void TestSwapInfoUnchanged()
+ {
+ SVMSwapInfo swapInfo;
+ test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &swapInfo, 0));
+ test.Printf(_L(" Swap size == 0x%x bytes\n"), swapInfo.iSwapSize);
+ test.Printf(_L(" Swap free == 0x%x bytes\n"), swapInfo.iSwapFree);
+ test_Equal(InitialSwapInfo.iSwapSize, swapInfo.iSwapSize);
+ test_Equal(InitialSwapInfo.iSwapFree, swapInfo.iSwapFree);
+ }
+
void TestSwapHalNotSupported()
{
test_Equal(KErrNotSupported, UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, 0, 0));
@@ -779,6 +783,9 @@
test_KErrNone(timeoutStatus.Int());
CLOSE_AND_WAIT(gChunk);
+
+ UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, 0, 0);
+
__KHEAP_MARKEND;
}
@@ -1241,15 +1248,18 @@
for (TUint pin = 0 ; pin <= 1 ; ++pin)
{
test.Printf(_L("processes=%d threads=%d pages=%d maxcachesize=%d pin=%d\r\n"),processes, threads, pages, gMaxCacheSize,pin);
- SoakTest(processes, threads, pages, pin, 3);
+ SoakTest(processes, threads, pages, pin, 5);
}
}
}
}
- //Reset the cache size to normal
- test.Next(_L("Soak test: Reset cache size to normal"));
- test_KErrNone(DPTest::SetCacheSize(cacheOriginalMin, cacheOriginalMax));
+ //Reset the cache size to normal
+ test.Next(_L("Soak test: Reset cache size to normal"));
+ test_KErrNone(DPTest::SetCacheSize(cacheOriginalMin, cacheOriginalMax));
+
+ test.Next(_L("Check we haven't leaked any swap in the course of the test"));
+ TestSwapInfoUnchanged();
}
test.End();
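Two small but useful patterns are introduced above. The supervisor-barrier HAL call replaces a fixed User::After() wait, so kernel-side cleanup of the threads and chunk just closed has completed before __KHEAP_MARKEND checks for leaks. And the swap statistics captured in InitialSwapInfo are re-checked at the end of the run to catch leaked swap pages. Gathered in one place, the leak check looks roughly like this (a sketch built only from calls that appear in the test above):

	SVMSwapInfo baseline;
	test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &baseline, 0));

	// ... soak tests commit, write and decommit paged memory ...

	UserSvr::HalFunction(EHalGroupKernel, EKernelHalSupervisorBarrier, 0, 0); // let kernel cleanup finish
	SVMSwapInfo now;
	test_KErrNone(UserSvr::HalFunction(EHalGroupVM, EVMHalGetSwapInfo, &now, 0));
	test_Equal(baseline.iSwapSize, now.iSwapSize);
	test_Equal(baseline.iSwapFree, now.iSwapFree);   // any difference here is leaked swap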
--- a/kerneltest/e32test/demandpaging/t_thrash.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kerneltest/e32test/demandpaging/t_thrash.cpp Tue May 11 17:28:22 2010 +0300
@@ -27,6 +27,7 @@
#include <e32msgqueue.h>
#include <e32atomics.h>
#include <e32math.h>
+#include <hal.h>
#include "t_dpcmn.h"
#include "../mmu/mmudetect.h"
@@ -39,11 +40,12 @@
_LIT(KChunkName, "t_thrash chunk");
-class TRandom
+class TPRNG
{
public:
- TRandom();
- TUint32 Next();
+ TPRNG();
+ TUint32 IntRand();
+ TReal FloatRand();
private:
enum
@@ -54,17 +56,178 @@
TUint32 iV;
};
-TRandom::TRandom()
+TPRNG::TPRNG()
{
iV = (TUint32)this + RThread().Id() + User::FastCounter() + 23;
}
-TUint32 TRandom::Next()
+TUint32 TPRNG::IntRand()
{
iV = KA * iV + KB;
return iV;
}
+TReal TPRNG::FloatRand()
+ {
+ return (TReal)IntRand() / KMaxTUint32;
+ }
+
+class TRandom
+ {
+public:
+ virtual ~TRandom() { }
+ virtual TUint32 Next() = 0;
+ };
+
+ class TUniformRandom : public TRandom
+ {
+public:
+ void SetParams(TUint aMax) { iMax = aMax; }
+ virtual TUint32 Next();
+
+private:
+ TPRNG iRand;
+ TUint iMax;
+ };
+
+TUint32 TUniformRandom::Next()
+ {
+ return iRand.IntRand() % iMax;
+ }
+
+class TNormalRandom : public TRandom
+ {
+public:
+ void SetParams(TInt aMax, TInt aSd);
+ virtual TUint32 Next();
+
+private:
+ TUint32 GetNext();
+
+private:
+ TPRNG iRand;
+ TInt iMax;
+ TInt iExpectation;
+ TInt iSd;
+ TUint32 iCached;
+ };
+
+void TNormalRandom::SetParams(TInt aMax, TInt aSd)
+ {
+ iMax = aMax;
+ iExpectation = aMax / 2;
+ iSd = aSd;
+ iCached = KMaxTUint32;
+ }
+
+TUint32 TNormalRandom::Next()
+ {
+ TUint32 r;
+ do
+ {
+ r = GetNext();
+ }
+ while (r > (TUint)iMax);
+ return r;
+ }
+
+TUint32 TNormalRandom::GetNext()
+ {
+ if (iCached != KMaxTUint32)
+ {
+ TUint32 r = iCached;
+ iCached = KMaxTUint32;
+ return r;
+ }
+
+ // box-muller transform
+ // from http://www.taygeta.com/random/gaussian.html
+
+ TReal x1, x2, w, ln_w, y1, y2;
+ do
+ {
+ x1 = 2.0 * iRand.FloatRand() - 1.0;
+ x2 = 2.0 * iRand.FloatRand() - 1.0;
+ w = x1 * x1 + x2 * x2;
+ }
+ while ( w >= 1.0 );
+
+ TInt r = Math::Ln(ln_w, w);
+ __ASSERT_ALWAYS(r == KErrNone, User::Invariant());
+ w = (-2.0 * ln_w ) / w;
+ TReal w2;
+ r = Math::Sqrt(w2, w);
+ __ASSERT_ALWAYS(r == KErrNone, User::Invariant());
+ y1 = x1 * w2;
+ y2 = x2 * w2;
+
+ y1 = y1 * iSd + iExpectation;
+ y2 = y2 * iSd + iExpectation;
+
+ iCached = (TUint32)y2;
+
+ return (TUint32)y1;
+ }
+
+static TBool BenchmarksSupported = EFalse;
+static TReal BenchmarkMultiplier;
+
+static TInt InitBenchmarks()
+ {
+ BenchmarksSupported = UserSvr::HalFunction(EHalGroupVM, EVMHalResetPagingBenchmark, (TAny*)EPagingBmReadRomPage, NULL) == KErrNone;
+ if (!BenchmarksSupported)
+ return KErrNone;
+
+ TInt freq = 0;
+ TInt r = HAL::Get(HAL::EFastCounterFrequency, freq);
+ if (r != KErrNone)
+ return r;
+ BenchmarkMultiplier = 1000000.0 / freq;
+ return KErrNone;
+ }
+
+static void ResetBenchmarks()
+ {
+ if (!BenchmarksSupported)
+ return;
+ for (TInt i = 0 ; i < EMaxPagingBm ; ++i)
+ {
+ TInt r = UserSvr::HalFunction(EHalGroupVM, EVMHalResetPagingBenchmark, (TAny*)i, NULL);
+ if (r != KErrNone)
+ test.Printf(_L("Error resetting benchmark %d\n"), i);
+ test_KErrNone(r);
+ }
+ }
+
+static TInt GetBenchmark(TPagingBenchmark aBenchmark, TInt& aCountOut, TInt& aTotalTimeInMicrosOut)
+ {
+
+ SPagingBenchmarkInfo info;
+ TInt r = UserSvr::HalFunction(EHalGroupVM, EVMHalGetPagingBenchmark, (TAny*)aBenchmark, &info);
+ if (r!=KErrNone)
+ return r;
+
+ aCountOut = info.iCount;
+ aTotalTimeInMicrosOut = (TInt)(info.iTotalTime * BenchmarkMultiplier);
+ return KErrNone;
+ }
+
+static TInt GetAllBenchmarks(TInt aTestLengthInSeconds, TInt aCountOut[EMaxPagingBm], TInt aTimeOut[EMaxPagingBm])
+ {
+ for (TInt i = 0 ; i < EMaxPagingBm ; ++i)
+ {
+ TInt count = 0;
+ TInt timeInMicros = 0;
+ TInt r = GetBenchmark((TPagingBenchmark)i, count, timeInMicros);
+ if (r != KErrNone)
+ return r;
+
+ aCountOut[i] = count / aTestLengthInSeconds;
+ aTimeOut[i] = timeInMicros / aTestLengthInSeconds;
+ }
+ return KErrNone;
+ }
+
void CreatePagedChunk(TInt aSizeInPages)
{
test_Equal(0,gChunk.Handle());
@@ -84,11 +247,70 @@
return (TUint32*)(gChunk.Base() + (gPageSize * aPage));
}
+TInt EnsureSystemIdleThread(TAny*)
+ {
+ RThread::Rendezvous(KErrNone);
+ for (;;)
+ {
+ // Spin
+ }
+ }
+
+void EnsureSystemIdle()
+ {
+ const TInt KMaxWait = 60 * 1000000;
+ const TInt KSampleTime = 1 * 1000000;
+ const TInt KWaitTime = 5 * 1000000;
+
+ test.Printf(_L("Waiting for system to become idle\n"));
+ TInt totalTime = 0;
+ TBool idle;
+ do
+ {
+ RThread thread;
+ test_KErrNone(thread.Create(_L("EnsureSystemIdleThread"), EnsureSystemIdleThread, 1024, NULL, NULL));
+ thread.SetPriority(EPriorityLess);
+ thread.Resume();
+
+ TRequestStatus status;
+ thread.Rendezvous(status);
+ User::WaitForRequest(status);
+ test_KErrNone(status.Int());
+
+ User::After(KSampleTime);
+ thread.Suspend();
+
+ TTimeIntervalMicroSeconds time;
+ test_KErrNone(thread.GetCpuTime(time));
+ TReal error = (100.0 * Abs(time.Int64() - KSampleTime)) / KSampleTime;
+ test.Printf(_L(" time == %ld, error == %f%%\n"), time.Int64(), error);
+
+ idle = error < 2.0;
+
+ thread.Kill(KErrNone);
+ thread.Logon(status);
+ User::WaitForRequest(status);
+ test_KErrNone(status.Int());
+ CLOSE_AND_WAIT(thread);
+
+ if (!idle)
+ User::After(KWaitTime); // Allow system to finish whatever it's doing
+
+ totalTime += KSampleTime + KWaitTime;
+ test(totalTime < KMaxWait);
+ }
+ while(!idle);
+ }
+
enum TWorkload
{
EWorkloadSequential,
- EWorkloadRandom,
- EWorkloadShuffle
+ EWorkloadUniformRandom,
+ EWorkloadNormalRandom1,
+ EWorkloadNormalRandom2,
+ EWorkloadShuffle,
+
+ EMaxWorkloads
};
struct SThrashTestArgs
@@ -105,55 +327,86 @@
{
SThrashTestArgs* args = (SThrashTestArgs*)aArg;
- TRandom random;
+ TPRNG random;
+ TUniformRandom uniformRand;
+ TNormalRandom normalRand;
+
TInt startPage = args->iThreadGroup * args->iGroupSize;
TInt* ptr = (TInt*)(args->iBasePtr + startPage * gPageSize);
+
+
switch (args->iWorkload)
{
case EWorkloadSequential:
while (gRunThrashTest)
{
- TInt size = (args->iPageCount * gPageSize) / sizeof(TInt);
- for (TInt i = 0 ; i < size && gRunThrashTest ; ++i)
+ for (TUint i = 0 ;
+ gRunThrashTest && i < (args->iPageCount * gPageSize) / sizeof(TInt) ;
+ ++i)
{
- ptr[i] = random.Next();
+ ptr[i] = 1;
__e32_atomic_add_ord64(&args->iAccesses, 1);
}
}
break;
- case EWorkloadRandom:
+ case EWorkloadUniformRandom:
+ case EWorkloadNormalRandom1:
+ case EWorkloadNormalRandom2:
{
TInt acc = 0;
+ TInt oldSize = -1;
+ TUint32 writeMask = 0;
+ switch (args->iWorkload)
+ {
+ case EWorkloadUniformRandom:
+ case EWorkloadNormalRandom1:
+ writeMask = 0x80000000; break;
+ case EWorkloadNormalRandom2:
+ writeMask = 0xc0000000; break;
+ default: test(EFalse); break;
+ }
while (gRunThrashTest)
{
- TInt size = (args->iPageCount * gPageSize) / sizeof(TInt);
- for (TInt i = 0 ; i < size && gRunThrashTest ; ++i)
+ TInt size = args->iPageCount;
+ if (size != oldSize)
{
- TUint32 rand = random.Next();
- TInt action = rand >> 31;
- TInt r = rand % size;
- if (action == 0)
- acc += ptr[r];
- else
- ptr[r] = acc;
- __e32_atomic_add_ord64(&args->iAccesses, 1);
+ switch (args->iWorkload)
+ {
+ case EWorkloadUniformRandom:
+ uniformRand.SetParams(size); break;
+ case EWorkloadNormalRandom1:
+ case EWorkloadNormalRandom2:
+ normalRand.SetParams(size, size / 8); break;
+ default: test(EFalse); break;
+ }
+ oldSize = size;
}
+
+ TInt page = args->iWorkload == EWorkloadUniformRandom ?
+ uniformRand.Next() : normalRand.Next();
+ TInt index = page * (gPageSize / sizeof(TInt));
+ TBool write = (random.IntRand() & writeMask) == 0;
+ if (write)
+ ptr[index] = acc;
+ else
+ acc += ptr[index];
+ __e32_atomic_add_ord64(&args->iAccesses, 1);
}
}
break;
case EWorkloadShuffle:
{
- TInt i;
+ TInt i = 0;
while (gRunThrashTest)
{
TInt size = (args->iPageCount * gPageSize) / sizeof(TInt);
- for (i = 0 ; gRunThrashTest && i < (size - 1) ; ++i)
- {
- Mem::Swap(&ptr[i], &ptr[i + random.Next() % (size - i - 1) + 1], sizeof(TInt));
- __e32_atomic_add_ord64(&args->iAccesses, 2);
- }
+ Mem::Swap(&ptr[i], &ptr[i + random.IntRand() % (size - i - 1) + 1], sizeof(TInt));
+ __e32_atomic_add_ord64(&args->iAccesses, 2);
+ ++i;
+ if (i >= size - 1)
+ i = 0;
}
}
break;
@@ -172,17 +425,32 @@
SThrashTestArgs iArgs;
};
-void ThrashTest(TInt aThreads, // number of threads to run
+void ThrashTest(const TDesC& aTestName, // name and description
+ TInt aThreads, // number of threads to run
TBool aSharedData, // whether all threads share the same data
TWorkload aWorkload,
TInt aBeginPages, // number of pages to start with for last/all threads
TInt aEndPages, // number of pages to end with for last/all threads
TInt aOtherPages) // num of pages for other threads, or zero to use same value for all
{
- RDebug::Printf("\nPages Accesses ThL");
+ const TInt KTestLengthInSeconds = 2;
+
+ test.Next(_L("Thrash test"));
+
+ DPTest::FlushCache();
+ EnsureSystemIdle();
- DPTest::FlushCache();
- User::After(1000000);
+ TInt i;
+ test.Printf(_L("Table: %S\n"), &aTestName);
+ test.Printf(_L("totalPages, totalAccesses, thrashLevel"));
+ if (BenchmarksSupported)
+ test.Printf(_L(", rejuveCount, rejuveTime, codePageInCount, codePageInTime, initCount, initTime, readCount, readTime, writePages, writeCount, writeTime"));
+ if (aThreads > 1)
+ {
+ for (TInt i = 0 ; i < aThreads ; ++i)
+ test.Printf(_L(", Thread%dPages, Thread%dAccesses"), i, i);
+ }
+ test.Printf(_L("\n"));
TInt pagesNeeded;
TInt maxPages = Max(aBeginPages, aEndPages);
@@ -208,11 +476,10 @@
test_NotNull(threads);
gRunThrashTest = ETrue;
- TInt pageCount = aBeginPages;
const TInt maxSteps = 30;
TInt step = aEndPages >= aBeginPages ? Max((aEndPages - aBeginPages) / maxSteps, 1) : Min((aEndPages - aBeginPages) / maxSteps, -1);
+ TInt pageCount = aBeginPages - 5 * step; // first run ignored
- TInt i;
for (i = 0 ; i < aThreads ; ++i)
{
SThrashThreadData& thread = threads[i];
@@ -242,8 +509,10 @@
for (i = 0 ; i < aThreads ; ++i)
__e32_atomic_store_ord64(&threads[i].iArgs.iAccesses, 0);
+ ResetBenchmarks();
- User::After(2000000);
+ User::After(KTestLengthInSeconds * 1000 * 1000);
+
TInt thrashLevel = UserSvr::HalFunction(EHalGroupVM, EVMHalGetThrashLevel, 0, 0);
test(thrashLevel >= 0 && thrashLevel <= 255);
@@ -257,20 +526,50 @@
else
totalPages += threads[i].iArgs.iPageCount;
}
+ TInt accessesPerSecond = (TInt)(totalAccesses / KTestLengthInSeconds);
- test.Printf(_L("%5d %12ld %3d"), totalPages, totalAccesses, thrashLevel);
- for (i = 0 ; i < aThreads ; ++i)
+ TBool warmingUp = (step > 0) ? pageCount < aBeginPages : pageCount > aBeginPages;
+ if (!warmingUp)
{
- test.Printf(_L(" %5d %12ld"),
- threads[i].iArgs.iPageCount,
- __e32_atomic_load_acq64(&threads[i].iArgs.iAccesses));
- test_Equal(KRequestPending, threads[i].iStatus.Int());
+ test.Printf(_L("%10d, %13d, %11.2f"), totalPages, accessesPerSecond, (TReal)thrashLevel / 255);
+
+ if (BenchmarksSupported)
+ {
+ TInt benchmarkCount[EMaxPagingBm];
+ TInt benchmarkTime[EMaxPagingBm];
+ test_KErrNone(GetAllBenchmarks(KTestLengthInSeconds, benchmarkCount, benchmarkTime));
+
+ TInt otherPageInCount = benchmarkCount[EPagingBmReadRomPage] + benchmarkCount[EPagingBmReadCodePage];
+ TInt otherPageInTime = benchmarkTime[EPagingBmReadRomPage] + benchmarkTime[EPagingBmReadCodePage];
+
+ TInt initCount = benchmarkCount[EPagingBmReadDataPage] - benchmarkCount[EPagingBmReadDataMedia];
+ TInt initTime = benchmarkTime[EPagingBmReadDataPage] - benchmarkTime[EPagingBmReadDataMedia];
+
+ test.Printf(_L(", %11d, %10d, %15d, %14d, %9d, %8d, %9d, %8d, %10d, %10d, %9d"),
+ benchmarkCount[EPagingBmRejuvenate], benchmarkTime[EPagingBmRejuvenate],
+ otherPageInCount, otherPageInTime,
+ initCount, initTime,
+ benchmarkCount[EPagingBmReadDataMedia], benchmarkTime[EPagingBmReadDataMedia],
+ benchmarkCount[EPagingBmWriteDataPage],
+ benchmarkCount[EPagingBmWriteDataMedia], benchmarkTime[EPagingBmWriteDataMedia]);
+ }
+
+ if (aThreads > 1)
+ {
+ for (i = 0 ; i < aThreads ; ++i)
+ {
+ test.Printf(_L(", %12d, %15ld"),
+ threads[i].iArgs.iPageCount,
+ __e32_atomic_load_acq64(&threads[i].iArgs.iAccesses));
+ test_Equal(KRequestPending, threads[i].iStatus.Int());
+ }
+ }
+ test.Printf(_L("\n"));
}
- test.Printf(_L("\n"));
+ pageCount += step;
if (aEndPages >= aBeginPages ? pageCount >= aEndPages : pageCount < aEndPages)
break;
- pageCount += step;
}
gRunThrashTest = EFalse;
@@ -285,7 +584,7 @@
}
gChunk.Close();
- RDebug::Printf("\n");
+ test.Printf(_L("\n"));
}
void TestThrashing()
@@ -298,47 +597,96 @@
TInt maxPages4 = (5 * gMaxCacheSize) / 16;
// Single thread increasing in size
- test.Next(_L("Thrash test: single thread, sequential workload"));
- ThrashTest(1, ETrue, EWorkloadSequential, minPages, maxPages, 0);
+ ThrashTest(_L("single thread, sequential workload"),
+ 1, ETrue, EWorkloadSequential, minPages, maxPages, 0);
- test.Next(_L("Thrash test: single thread, random workload"));
- ThrashTest(1, ETrue, EWorkloadRandom, minPages, maxPages, 0);
+ ThrashTest(_L("single thread, random workload"),
+ 1, ETrue, EWorkloadUniformRandom, minPages, maxPages, 0);
- test.Next(_L("Thrash test: single thread, shuffle workload"));
- ThrashTest(1, ETrue, EWorkloadShuffle, minPages, maxPages, 0);
+ ThrashTest(_L("single thread, shuffle workload"),
+ 1, ETrue, EWorkloadShuffle, minPages, maxPages, 0);
// Multiple threads with shared data, one thread increasing in size
- test.Next(_L("Thrash test: two threads with shared data, one thread increasing, random workload"));
- ThrashTest(2, ETrue, EWorkloadRandom, minPages, maxPages, minPages);
+ ThrashTest(_L("two threads with shared data, one thread increasing, random workload"),
+ 2, ETrue, EWorkloadUniformRandom, minPages, maxPages, minPages);
- test.Next(_L("Thrash test: four threads with shared data, one thread increasing, random workload"));
- ThrashTest(4, ETrue, EWorkloadRandom, minPages, maxPages, minPages);
+ ThrashTest(_L("four threads with shared data, one thread increasing, random workload"),
+ 4, ETrue, EWorkloadUniformRandom, minPages, maxPages, minPages);
// Multiple threads with shared data, all threads increasing in size
- test.Next(_L("Thrash test: two threads with shared data, all threads increasing, random workload"));
- ThrashTest(2, ETrue, EWorkloadRandom, minPages, maxPages, 0);
+ ThrashTest(_L("two threads with shared data, all threads increasing, random workload"),
+ 2, ETrue, EWorkloadUniformRandom, minPages, maxPages, 0);
- test.Next(_L("Thrash test: four threads with shared data, all threads increasing, random workload"));
- ThrashTest(4, ETrue, EWorkloadRandom, minPages, maxPages, 0);
+ ThrashTest(_L("four threads with shared data, all threads increasing, random workload"),
+ 4, ETrue, EWorkloadUniformRandom, minPages, maxPages, 0);
// Multiple threads with independent data, one thread increasing in size
- test.Next(_L("Thrash test: two threads with independent data, one thread increasing, random workload"));
- ThrashTest(2, EFalse, EWorkloadRandom, minPages2, maxPages2, gMaxCacheSize / 2);
+ ThrashTest(_L("two threads with independent data, one thread increasing, random workload"),
+ 2, EFalse, EWorkloadUniformRandom, minPages2, maxPages2, gMaxCacheSize / 2);
- test.Next(_L("Thrash test: four threads with independent data, one thread increasing, random workload"));
- ThrashTest(4, EFalse, EWorkloadRandom, minPages4, maxPages4, gMaxCacheSize / 4);
+ ThrashTest(_L("four threads with independent data, one thread increasing, random workload"),
+ 4, EFalse, EWorkloadUniformRandom, minPages4, maxPages4, gMaxCacheSize / 4);
// Multiple threads with independent data, all threads increasing in size
- test.Next(_L("Thrash test: two threads with independent data, all threads increasing, random workload"));
- ThrashTest(2, EFalse, EWorkloadRandom, minPages2, maxPages2, 0);
+ ThrashTest(_L("two threads with independent data, all threads increasing, random workload"),
+ 2, EFalse, EWorkloadUniformRandom, minPages2, maxPages2, 0);
- test.Next(_L("Thrash test: four threads with independent data, all threads increasing, random workload"));
- ThrashTest(4, EFalse, EWorkloadRandom, minPages4, maxPages4, 0);
+ ThrashTest(_L("four threads with independent data, all threads increasing, random workload"),
+ 4, EFalse, EWorkloadUniformRandom, minPages4, maxPages4, 0);
// Attempt to create thrash state where there is sufficient cache
- test.Next(_L("Thrash test: two threads with independent data, one threads decreasing, random workload"));
TInt halfCacheSize = gMaxCacheSize / 2;
- ThrashTest(2, EFalse, EWorkloadRandom, halfCacheSize + 10, halfCacheSize - 30, halfCacheSize);
+ ThrashTest(_L("two threads with independent data, one threads decreasing, random workload"),
+ 2, EFalse, EWorkloadUniformRandom, halfCacheSize + 10, halfCacheSize - 30, halfCacheSize);
+ }
+
+void TestDistribution(TRandom& aRandom, TInt aSamples)
+ {
+ TUint32* data = new TUint32[aSamples];
+ test_NotNull(data);
+
+ TInt i;
+ TReal mean = 0.0;
+ for (i = 0 ; i < aSamples ; ++i)
+ {
+ data[i] = aRandom.Next();
+ mean += (TReal)data[i] / aSamples;
+ }
+
+ TReal sum2 = 0.0;
+ for (i = 0 ; i < aSamples ; ++i)
+ {
+ TReal d = (TReal)data[i] - mean;
+ sum2 += d * d;
+ }
+ TReal variance = sum2 / (aSamples - 1);
+
+ test.Printf(_L(" mean == %f\n"), mean);
+ test.Printf(_L(" variance == %f\n"), variance);
+
+ delete [] data;
+ }
+
+void BenchmarkReplacement()
+ {
+ test.Next(_L("Test uniform distribution"));
+ TUniformRandom rand1;
+ rand1.SetParams(100);
+ TestDistribution(rand1, 10000);
+
+ test.Next(_L("Test normal distribution"));
+ TNormalRandom rand2;
+ rand2.SetParams(100, 25);
+ TestDistribution(rand2, 10000);
+
+ ThrashTest(_L("Thrash test: single thread, normal random workload 1"),
+ 1, ETrue, EWorkloadNormalRandom1, (2 * gMaxCacheSize) / 3, 2 * gMaxCacheSize, 0);
+
+ ThrashTest(_L("Thrash test: single thread, normal random workload 2"),
+ 1, ETrue, EWorkloadNormalRandom2, (2 * gMaxCacheSize) / 3, 2 * gMaxCacheSize, 0);
+
+ ThrashTest(_L("Thrash test: single thread, uniform random workload"),
+ 1, ETrue, EWorkloadUniformRandom, (2 * gMinCacheSize) / 3, (3 * gMaxCacheSize) / 2, 0);
}
void TestThrashHal()
@@ -369,27 +717,44 @@
test_Equal(KRequestPending, status.Int());
// stress system and check thrash level and notification
- ThrashTest(1, ETrue, EWorkloadRandom, gMaxCacheSize * 2, gMaxCacheSize * 2 + 5, 0);
+ ThrashTest(_L("stress system"),
+ 1, ETrue, EWorkloadUniformRandom, gMaxCacheSize * 2, gMaxCacheSize * 2 + 5, 0);
r = UserSvr::HalFunction(EHalGroupVM, EVMHalGetThrashLevel, 0, 0);
test(r >= 0 && r <= 255);
test.Printf(_L("Thrash level == %d\n"), r);
test(r > 200); // should indicate thrashing
- test_Equal(EChangesThrashLevel, status.Int());
- User::WaitForAnyRequest();
- // wait for system to calm down and check notification again
- test_KErrNone(notifier.Logon(status));
- User::WaitForAnyRequest();
- test_Equal(EChangesThreadDeath, status.Int());
-
- test_KErrNone(notifier.Logon(status));
+ TBool gotThrashNotification = EFalse;
+
+ // wait for EChangesThrashLevel notification
+ while(status.Int() != KRequestPending)
+ {
+ gotThrashNotification = (status.Int() & EChangesThrashLevel) != 0;
+ User::WaitForAnyRequest();
+ test_KErrNone(notifier.Logon(status));
+ User::After(1);
+ }
+ test(gotThrashNotification);
+
User::After(2000000);
r = UserSvr::HalFunction(EHalGroupVM, EVMHalGetThrashLevel, 0, 0);
test(r >= 0 && r <= 255);
test.Printf(_L("Thrash level == %d\n"), r);
test(r <= 10); // should indicate lightly loaded system
- test_Equal(EChangesThrashLevel, status.Int());
+
+ // wait for EChangesThrashLevel notification
+ gotThrashNotification = EFalse;
+ while(status.Int() != KRequestPending)
+ {
+ gotThrashNotification = (status.Int() & EChangesThrashLevel) != 0;
+ User::WaitForAnyRequest();
+ test_KErrNone(notifier.Logon(status));
+ User::After(1);
+ }
+ test(gotThrashNotification);
+ test_KErrNone(notifier.LogonCancel());
User::WaitForAnyRequest();
+ notifier.Close();
}
void TestThrashHalNotSupported()
@@ -398,45 +763,105 @@
test_Equal(KErrNotSupported, UserSvr::HalFunction(EHalGroupVM, EVMHalSetThrashThresholds, 0, 0));
}
+_LIT(KUsageMessage, "usage: t_thrash [ test ] [ thrashing ] [ benchmarks ]\n");
+
+enum TTestAction
+ {
+ EActionTest = 1 << 0,
+ EActionThrashing = 1 << 1,
+ EActionBenchmarks = 1 << 2
+ };
+
+void BadUsage()
+ {
+ test.Printf(KUsageMessage);
+ test(EFalse);
+ }
+
+TInt ParseCommandLine()
+ {
+ const TInt KMaxLineLength = 64;
+
+ if (User::CommandLineLength() > KMaxLineLength)
+ BadUsage();
+ TBuf<KMaxLineLength> buffer;
+ User::CommandLine(buffer);
+
+ if (buffer == KNullDesC)
+ return EActionTest;
+
+ TLex lex(buffer);
+ TInt result = 0;
+ while (!lex.Eos())
+ {
+ TPtrC word = lex.NextToken();
+ if (word == _L("test"))
+ result |= EActionTest;
+ else if (word == _L("thrashing"))
+ result |= EActionThrashing;
+ else if (word == _L("benchmarks"))
+ result |= EActionBenchmarks;
+ else
+ {
+ test.Printf(_L("bad token '%S'\n"), &word);
+ BadUsage();
+ }
+ }
+
+ return result;
+ }
+
TInt E32Main()
{
test.Title();
test.Start(_L("Test thrashing monitor"));
+
+ test_KErrNone(InitBenchmarks());
+ TInt actions = ParseCommandLine();
+
test_KErrNone(GetGlobalPolicies());
TUint cacheOriginalMin = 0;
TUint cacheOriginalMax = 0;
- TUint cacheCurrentSize = 0;
if (gDataPagingSupported)
{
- test.Next(_L("Thrash test: change maximum cache size to minimal"));
- //store original values
+ test.Next(_L("Thrash test: change cache size to maximum 2Mb"));
+ TUint cacheCurrentSize = 0;
DPTest::CacheSize(cacheOriginalMin, cacheOriginalMax, cacheCurrentSize);
- gMaxCacheSize = 256;
- gMinCacheSize = 64;
+ gMinCacheSize = 512;
+ gMaxCacheSize = 520;
test_KErrNone(DPTest::SetCacheSize(gMinCacheSize * gPageSize, gMaxCacheSize * gPageSize));
}
+
+ if (actions & EActionTest)
+ {
+ TBool flexibleMemoryModel = (MemModelAttributes() & EMemModelTypeMask) == EMemModelTypeFlexible;
+ if (flexibleMemoryModel)
+ TestThrashHal();
+ else
+ TestThrashHalNotSupported();
+ }
- TBool flexibleMemoryModel = (MemModelAttributes() & EMemModelTypeMask) == EMemModelTypeFlexible;
- if (flexibleMemoryModel)
- TestThrashHal();
- else
- TestThrashHalNotSupported();
-
- if (gDataPagingSupported && User::CommandLineLength() > 0)
- {
+ if (actions & EActionThrashing)
+ {
test.Next(_L("Extended thrashing tests"));
TestThrashing();
}
+
+ if (actions & EActionBenchmarks)
+ {
+ test.Next(_L("Benchmarking page replacement"));
+ BenchmarkReplacement();
+ }
+
if (gDataPagingSupported)
{
- //Reset the cache size to normal
test.Next(_L("Thrash test: Reset cache size to normal"));
test_KErrNone(DPTest::SetCacheSize(cacheOriginalMin, cacheOriginalMax));
}
-
+
test.End();
return 0;
}
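TNormalRandom above generates normally distributed page indices with the polar Box-Muller transform, using Math::Ln() and Math::Sqrt() and caching the second sample each pass produces. A host-side sketch of the same transform, written against <cmath> so it can be compiled and checked independently (for example when tuning the size/8 standard deviation used by the workloads), might look like this; it is illustrative only and uses std::rand() purely for brevity.

#include <cmath>
#include <cstdlib>

static double UnitRand()                        // uniform in [0,1)
	{
	return (double)std::rand() / ((double)RAND_MAX + 1.0);
	}

// One normally distributed sample with the given mean and standard deviation,
// via the polar Box-Muller transform (x2 * w would yield a second, independent sample).
double NormalSample(double aMean, double aSd)
	{
	double x1, x2, w;
	do
		{
		x1 = 2.0 * UnitRand() - 1.0;
		x2 = 2.0 * UnitRand() - 1.0;
		w = x1 * x1 + x2 * x2;
		}
	while (w >= 1.0 || w == 0.0);
	w = std::sqrt((-2.0 * std::log(w)) / w);
	return x1 * w * aSd + aMean;
	}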
--- a/kerneltest/e32test/group/t_ramall.mmp Tue Apr 27 18:02:57 2010 +0300
+++ b/kerneltest/e32test/group/t_ramall.mmp Tue May 11 17:28:22 2010 +0300
@@ -19,7 +19,7 @@
targettype exe
sourcepath ../mmu
source t_ramall.cpp
-library euser.lib
+library euser.lib dptest.lib
OS_LAYER_SYSTEMINCLUDE_SYMBIAN
--- a/kerneltest/e32test/mmu/paging_info.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kerneltest/e32test/mmu/paging_info.cpp Tue May 11 17:28:22 2010 +0300
@@ -36,6 +36,7 @@
(const TUint8*)"Del notify data page",
(const TUint8*)"Read media data page",
(const TUint8*)"Write media data page",
+ (const TUint8*)"Rejuvenate page",
};
__ASSERT_COMPILE(sizeof(BenchmarkNames)/sizeof(TUint8*) == EMaxPagingBm);
--- a/kerneltest/e32test/mmu/t_ramall.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kerneltest/e32test/mmu/t_ramall.cpp Tue May 11 17:28:22 2010 +0300
@@ -20,6 +20,7 @@
#include <e32test.h>
#include <e32uid.h>
#include <e32hal.h>
+#include <dptest.h>
#include "d_shadow.h"
#include "mmudetect.h"
#include "freeram.h"
@@ -32,6 +33,18 @@
TInt PageShift;
RShadow Shadow;
TInt InitFreeRam;
+RChunk Chunk;
+TUint ChunkCommitEnd;
+RThread TouchThread;
+TRequestStatus TouchStatus;
+TBool TouchDataStop;
+RThread FragThread;
+TRequestStatus FragStatus;
+TBool FragThreadStop;
+TBool ManualTest;
+TBool CacheSizeAdjustable;
+TUint OrigMinCacheSize;
+TUint OrigMaxCacheSize;
TInt AllocPhysicalRam(TUint32& aAddr, TInt aSize, TInt aAlign)
{
@@ -118,6 +131,333 @@
}
+
+struct SPhysAllocData
+ {
+ TUint iSize;
+ TUint iAlign;
+ TBool iCheckMaxAllocs;
+ TBool iCheckFreeRam;
+ };
+
+
+TInt FillPhysicalRam(TAny* aArgs)
+ {
+ SPhysAllocData& allocData = *((SPhysAllocData*)aArgs);
+ TUint maxAllocs = FreeRam() / allocData.iSize;
+ TUint32* physAddrs = new TUint32[maxAllocs + 1];
+ if (!physAddrs)
+ return KErrNoMemory;
+ TUint32* pa = physAddrs;
+ TUint32 alignMask = (1 << allocData.iAlign) - 1;
+ TUint initialFreeRam = FreeRam();
+ TInt r = KErrNone;
+ TUint allocations = 0;
+ for (; allocations <= maxAllocs + 1; allocations++)
+ {
+ TUint freeRam = FreeRam();
+ r = AllocPhysicalRam(*pa, allocData.iSize, allocData.iAlign);
+ if (r != KErrNone)
+ break;
+ if (*pa++ & alignMask)
+ {
+ r = KErrGeneral;
+ RDebug::Printf("Error alignment phys addr 0x%08x", *(pa - 1));
+ break;
+ }
+ if (allocData.iCheckFreeRam && freeRam - allocData.iSize != (TUint)FreeRam())
+ {
+ r = KErrGeneral;
+ RDebug::Printf("Error in free ram 0x%08x orig 0x%08x", FreeRam(), freeRam);
+ }
+ if (allocData.iCheckMaxAllocs && allocations > maxAllocs && r == KErrNone)
+ {
+ r = KErrOverflow;
+ RDebug::Printf("Error able to allocate too many pages");
+ break;
+ }
+ }
+
+ TUint32* physEnd = pa;
+ TBool failFrees = EFalse;
+ for (pa = physAddrs; pa < physEnd; pa++)
+ {
+ if (FreePhysicalRam(*pa, allocData.iSize) != KErrNone)
+ failFrees = ETrue;
+ }
+ if (failFrees)
+ r = KErrNotFound;
+ if (allocData.iCheckFreeRam && initialFreeRam != (TUint)FreeRam())
+ {
+ r = KErrGeneral;
+ RDebug::Printf("Error in free ram 0x%08x initial 0x%08x", FreeRam(), initialFreeRam);
+ }
+ delete[] physAddrs;
+ if (r != KErrNone && r != KErrNoMemory)
+ return r;
+ TUint possibleAllocs = initialFreeRam / allocData.iSize;
+ if (allocData.iCheckMaxAllocs && possibleAllocs != allocations)
+ {
+ RDebug::Printf("Error in number of allocations possibleAllocs %d allocations %d", possibleAllocs, allocations);
+ return KErrGeneral;
+ }
+ return allocations;
+ }
+
+
+void TestMultipleContiguousAllocations(TUint aNumThreads, TUint aSize, TUint aAlign)
+ {
+ test.Printf(_L("TestMultiContig threads %d size 0x%x, align %d\n"), aNumThreads, aSize, aAlign);
+ SPhysAllocData allocData;
+ allocData.iSize = aSize;
+ allocData.iAlign = aAlign;
+ allocData.iCheckMaxAllocs = EFalse;
+ allocData.iCheckFreeRam = EFalse;
+ // Start several threads all contiguous allocating memory.
+ RThread* threads = new RThread[aNumThreads];
+ TRequestStatus* status = new TRequestStatus[aNumThreads];
+ TUint i = 0;
+ for (; i < aNumThreads; i++)
+ {
+ TInt r = threads[i].Create(KNullDesC, FillPhysicalRam, KDefaultStackSize, PageSize, PageSize, (TAny*)&allocData);
+ test_KErrNone(r);
+ threads[i].Logon(status[i]);
+ }
+ for (i = 0; i < aNumThreads; i++)
+ {
+ threads[i].Resume();
+ }
+ for (i = 0; i < aNumThreads; i++)
+ {
+ User::WaitForRequest(status[i]);
+ test_Equal(EExitKill, threads[i].ExitType());
+ TInt exitReason = threads[i].ExitReason();
+ test_Value(exitReason, exitReason >= 0 || exitReason == KErrNoMemory);
+ threads[i].Close();
+ }
+ delete[] status;
+ delete[] threads;
+ }
+
+struct STouchData
+ {
+ TUint iSize;
+ TUint iFrequency;
+ }TouchData;
+
+
+TInt TouchMemory(TAny*)
+ {
+ while (!TouchDataStop)
+ {
+ TUint8* p = Chunk.Base();
+ TUint8* pEnd = p + ChunkCommitEnd;
+ TUint8* fragPEnd = p + TouchData.iFrequency;
+ for (TUint8* fragP = p + TouchData.iSize; fragPEnd < pEnd;)
+ {
+ TUint8* data = fragP;
+ for (; data < fragPEnd; data += PageSize)
+ {
+ *data = (TUint8)(data - fragP);
+ }
+ for (data = fragP; data < fragPEnd; data += PageSize)
+ {
+ if (*data != (TUint8)(data - fragP))
+ {
+ RDebug::Printf("Error unexpected data 0x%x read from 0x%08x", *data, data);
+ return KErrGeneral;
+ }
+ }
+ fragP = fragPEnd + TouchData.iSize;
+ fragPEnd += TouchData.iFrequency;
+ }
+ }
+ return KErrNone;
+ }
+
+struct SFragData
+ {
+ TUint iSize;
+ TUint iFrequency;
+ TUint iDiscard;
+ TBool iFragThread;
+ }FragData;
+
+void FragmentMemoryFunc()
+ {
+ ChunkCommitEnd = 0;
+ TInt r;
+ while(KErrNone == (r = Chunk.Commit(ChunkCommitEnd,PageSize)) && !FragThreadStop)
+ {
+ ChunkCommitEnd += PageSize;
+ }
+ if (FragThreadStop)
+ return;
+ test_Equal(KErrNoMemory, r);
+ TUint freeBlocks = 0;
+ for ( TUint offset = 0;
+ (offset + FragData.iSize) < ChunkCommitEnd;
+ offset += FragData.iFrequency, freeBlocks++)
+ {
+ test_KErrNone(Chunk.Decommit(offset, FragData.iSize));
+ }
+ if (!FragData.iFragThread)
+ test_Equal(FreeRam(), freeBlocks * FragData.iSize);
+
+ if (FragData.iDiscard && CacheSizeAdjustable && !FragThreadStop)
+ {
+ TUint minCacheSize = FreeRam();
+ TUint maxCacheSize = minCacheSize;
+ TUint currentCacheSize;
+ test_KErrNone(DPTest::CacheSize(OrigMinCacheSize, OrigMaxCacheSize, currentCacheSize));
+ test_KErrNone(DPTest::SetCacheSize(minCacheSize, maxCacheSize));
+ test_KErrNone(DPTest::SetCacheSize(OrigMinCacheSize, maxCacheSize));
+ }
+ }
+
+
+void UnfragmentMemoryFunc()
+ {
+ if (FragData.iDiscard && CacheSizeAdjustable)
+ test_KErrNone(DPTest::SetCacheSize(OrigMinCacheSize, OrigMaxCacheSize));
+ Chunk.Decommit(0, Chunk.MaxSize());
+ }
+
+
+TInt FragmentMemoryThreadFunc(TAny*)
+ {
+ while (!FragThreadStop)
+ {
+ FragmentMemoryFunc();
+ UnfragmentMemoryFunc();
+ }
+ return KErrNone;
+ }
+
+
+void FragmentMemory(TUint aSize, TUint aFrequency, TBool aDiscard, TBool aTouchMemory, TBool aFragThread)
+ {
+ test_Value(aTouchMemory, !aTouchMemory || !aFragThread);
+ test_Value(aSize, aSize < aFrequency);
+ FragData.iSize = aSize;
+ FragData.iFrequency = aFrequency;
+ FragData.iDiscard = aDiscard;
+ FragData.iFragThread = aFragThread;
+
+ TChunkCreateInfo chunkInfo;
+ chunkInfo.SetDisconnected(0, 0, FreeRam());
+ chunkInfo.SetPaging(TChunkCreateInfo::EUnpaged);
+ test_KErrNone(Chunk.Create(chunkInfo));
+
+ if (aFragThread)
+ {
+ TInt r = FragThread.Create(KNullDesC, FragmentMemoryThreadFunc, KDefaultStackSize, PageSize, PageSize, NULL);
+ test_KErrNone(r);
+ FragThread.Logon(FragStatus);
+ FragThreadStop = EFalse;
+ FragThread.Resume();
+ }
+ else
+ {
+ FragmentMemoryFunc();
+ }
+ if (aTouchMemory && !ManualTest)
+ {
+ TouchData.iSize = aSize;
+ TouchData.iFrequency = aFrequency;
+ TInt r = TouchThread.Create(KNullDesC, TouchMemory, KDefaultStackSize, PageSize, PageSize, NULL);
+ test_KErrNone(r);
+ TouchThread.Logon(TouchStatus);
+ TouchDataStop = EFalse;
+ TouchThread.Resume();
+ }
+ }
+
+
+void UnfragmentMemory(TBool aDiscard, TBool aTouchMemory, TBool aFragThread)
+ {
+ test_Value(aTouchMemory, !aTouchMemory || !aFragThread);
+ if (aTouchMemory && !ManualTest)
+ {
+ TouchDataStop = ETrue;
+ User::WaitForRequest(TouchStatus);
+ test_Equal(EExitKill, TouchThread.ExitType());
+ test_KErrNone(TouchThread.ExitReason());
+ CLOSE_AND_WAIT(TouchThread);
+ }
+ if (aFragThread)
+ {
+ FragThreadStop = ETrue;
+ User::WaitForRequest(FragStatus);
+ test_Equal(EExitKill, FragThread.ExitType());
+ test_KErrNone(FragThread.ExitReason());
+ CLOSE_AND_WAIT(FragThread);
+ }
+ else
+ UnfragmentMemoryFunc();
+ CLOSE_AND_WAIT(Chunk);
+ }
+
+
+void TestFillPhysicalRam(TUint aFragSize, TUint aFragFreq, TUint aAllocSize, TUint aAllocAlign, TBool aDiscard, TBool aTouchMemory)
+ {
+ test.Printf(_L("TestFillPhysicalRam aFragSize 0x%x aFragFreq 0x%x aAllocSize 0x%x aAllocAlign %d dis %d touch %d\n"),
+ aFragSize, aFragFreq, aAllocSize, aAllocAlign, aDiscard, aTouchMemory);
+ FragmentMemory(aFragSize, aFragFreq, aDiscard, aTouchMemory, EFalse);
+ SPhysAllocData allocData;
+	// Only check that all free RAM could be allocated in manual tests, as fixed pages may be fragmented.
+ allocData.iCheckMaxAllocs = (ManualTest && !aTouchMemory && !aAllocAlign)? ETrue : EFalse;
+ allocData.iCheckFreeRam = ETrue;
+ allocData.iSize = aAllocSize;
+ allocData.iAlign = aAllocAlign;
+ FillPhysicalRam(&allocData);
+ UnfragmentMemory(aDiscard, aTouchMemory, EFalse);
+ }
+
+
+void TestFragmentedAllocation()
+ {
+ // Test every other page free.
+ TestFillPhysicalRam(PageSize, PageSize * 2, PageSize, 0, EFalse, EFalse);
+ if (ManualTest)
+ {
+ TestFillPhysicalRam(PageSize, PageSize * 2, PageSize * 2, 0, EFalse, EFalse);
+ TestFillPhysicalRam(PageSize, PageSize * 2, PageSize, 0, EFalse, ETrue);
+ }
+ TestFillPhysicalRam(PageSize, PageSize * 2, PageSize * 2, 0, EFalse, ETrue);
+ // Test every 2 pages free.
+ TestFillPhysicalRam(PageSize * 2, PageSize * 4, PageSize * 8, 0, EFalse, EFalse);
+ if (ManualTest)
+ TestFillPhysicalRam(PageSize * 2, PageSize * 4, PageSize * 8, 0, EFalse, ETrue);
+ // Test 10 pages free then 20 pages allocated, allocate 256 pages (1MB in most cases).
+ if (ManualTest)
+ TestFillPhysicalRam(PageSize * 10, PageSize * 30, PageSize * 256, 0, EFalse, EFalse);
+ TestFillPhysicalRam(PageSize * 10, PageSize * 30, PageSize * 256, 0, EFalse, ETrue);
+
+ if (CacheSizeAdjustable)
+		{// It is possible to adjust the cache size so test physically contiguous
+ // allocations discard and move pages when required.
+ test.Next(_L("TestFragmentedAllocations with discardable data no true free memory"));
+ // Test every other page free.
+ TestFillPhysicalRam(PageSize, PageSize * 2, PageSize, 0, ETrue, EFalse);
+ if (ManualTest)
+ {
+ TestFillPhysicalRam(PageSize, PageSize * 2, PageSize, 0, ETrue, ETrue);
+ TestFillPhysicalRam(PageSize, PageSize * 2, PageSize * 2, 0, ETrue, EFalse);
+ }
+ TestFillPhysicalRam(PageSize, PageSize * 2, PageSize * 2, 0, ETrue, ETrue);
+ // Test every 2 pages free.
+ TestFillPhysicalRam(PageSize * 2, PageSize * 4, PageSize * 8, 0, ETrue, EFalse);
+ if (ManualTest)
+ TestFillPhysicalRam(PageSize * 2, PageSize * 4, PageSize * 8, 0, ETrue, ETrue);
+ // Test 10 pages free then 20 pages allocated, allocate 256 pages (1MB in most cases).
+ if (ManualTest)
+ TestFillPhysicalRam(PageSize * 10, PageSize * 30, PageSize * 256, 0, ETrue, EFalse);
+ TestFillPhysicalRam(PageSize * 10, PageSize * 30, PageSize * 256, 0, ETrue, ETrue);
+ }
+ }
+
+
GLDEF_C TInt E32Main()
//
// Test RAM allocation
@@ -135,6 +475,22 @@
PageShift=-1;
for (; psz; psz>>=1, ++PageShift);
+ TUint currentCacheSize;
+ CacheSizeAdjustable = DPTest::CacheSize(OrigMinCacheSize, OrigMaxCacheSize, currentCacheSize) == KErrNone;
+
+ TUint memodel = UserSvr::HalFunction(EHalGroupKernel, EKernelHalMemModelInfo, NULL, NULL) & EMemModelTypeMask;
+
+ TInt cmdLineLen = User::CommandLineLength();
+ if(cmdLineLen)
+ {
+ _LIT(KManual, "manual");
+ RBuf cmdLine;
+ test_KErrNone(cmdLine.Create(cmdLineLen));
+ User::CommandLine(cmdLine);
+ cmdLine.LowerCase();
+ ManualTest = cmdLine.Find(KManual) != KErrNotFound;
+ }
+
InitFreeRam=FreeRam();
test.Printf(_L("Free RAM=%08x, Page size=%x, Page shift=%d\n"),InitFreeRam,PageSize,PageShift);
@@ -148,8 +504,54 @@
test.Next(_L("TestClaimPhys"));
TestClaimPhys();
+ if (memodel >= EMemModelTypeFlexible)
+ {
+ test.Next(_L("TestFragmentedAllocation"));
+ TestFragmentedAllocation();
+
+ test.Next(_L("TestMultipleContiguousAllocations"));
+ TestMultipleContiguousAllocations(20, PageSize * 16, 0);
+ TestMultipleContiguousAllocations(20, PageSize * 16, PageShift + 1);
+ TestMultipleContiguousAllocations(20, PageSize * 128, PageShift + 2);
+
+ FragmentMemory(PageSize, PageSize * 2, EFalse, EFalse, EFalse);
+ TestMultipleContiguousAllocations(20, PageSize * 128, PageShift + 2);
+ UnfragmentMemory(EFalse, EFalse, EFalse);
+
+ test.Next(_L("TestMultipleContiguousAllocations while accessing memory"));
+ FragmentMemory(PageSize, PageSize * 2, EFalse, ETrue, EFalse);
+ TestMultipleContiguousAllocations(20, PageSize * 128, PageShift + 2);
+ UnfragmentMemory(EFalse, ETrue, EFalse);
+ FragmentMemory(PageSize, PageSize * 2, ETrue, ETrue, EFalse);
+ TestMultipleContiguousAllocations(50, PageSize * 256, PageShift + 5);
+ UnfragmentMemory(ETrue, ETrue, EFalse);
+ FragmentMemory(PageSize * 16, PageSize * 32, ETrue, ETrue, EFalse);
+ TestMultipleContiguousAllocations(10, PageSize * 512, PageShift + 8);
+ UnfragmentMemory(ETrue, ETrue, EFalse);
+ FragmentMemory(PageSize * 32, PageSize * 64, ETrue, ETrue, EFalse);
+ TestMultipleContiguousAllocations(10, PageSize * 1024, PageShift + 10);
+ UnfragmentMemory(ETrue, ETrue, EFalse);
+
+ test.Next(_L("TestMultipleContiguousAllocations with repeated movable and discardable allocations"));
+ FragmentMemory(PageSize, PageSize * 2, EFalse, EFalse, ETrue);
+ TestMultipleContiguousAllocations(20, PageSize * 2, PageShift);
+ UnfragmentMemory(EFalse, EFalse, ETrue);
+ FragmentMemory(PageSize, PageSize * 2, EFalse, EFalse, ETrue);
+ TestMultipleContiguousAllocations(20, PageSize * 128, PageShift + 2);
+ UnfragmentMemory(EFalse, EFalse, ETrue);
+ FragmentMemory(PageSize, PageSize * 2, ETrue, EFalse, ETrue);
+ TestMultipleContiguousAllocations(50, PageSize * 256, PageShift + 5);
+ UnfragmentMemory(ETrue, EFalse, ETrue);
+ FragmentMemory(PageSize * 16, PageSize * 32, ETrue, EFalse, ETrue);
+ TestMultipleContiguousAllocations(20, PageSize * 512, PageShift + 8);
+ UnfragmentMemory(ETrue, EFalse, ETrue);
+ FragmentMemory(PageSize * 32, PageSize * 64, ETrue, EFalse, ETrue);
+ TestMultipleContiguousAllocations(20, PageSize * 1024, PageShift + 10);
+ UnfragmentMemory(ETrue, EFalse, ETrue);
+ }
+
Shadow.Close();
+ test.Printf(_L("Free RAM=%08x at end of test\n"),FreeRam());
test.End();
return(KErrNone);
}
-
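FragmentMemory() above builds a worst case for contiguous allocation: it commits the whole chunk, then decommits a hole of iSize bytes every iFrequency bytes, so plenty of RAM is free but no free run is longer than iSize unless the allocator moves or discards pages. The standalone sketch below only prints what a given pattern leaves behind, which can help when picking the size/frequency pairs used in TestFragmentedAllocation(); it is illustrative arithmetic, not part of the test.

#include <cstdio>

// For a committed region of aCommitted bytes, decommitting aSize bytes every
// aFrequency bytes (aSize < aFrequency) leaves this much free, in holes of aSize.
void PrintFragmentationPlan(unsigned aCommitted, unsigned aSize, unsigned aFrequency)
	{
	unsigned holes = 0;
	for (unsigned offset = 0; offset + aSize < aCommitted; offset += aFrequency)
		++holes;                                // this block would be decommitted
	std::printf("%u holes of 0x%x bytes: 0x%x bytes free, largest free run 0x%x\n",
			holes, aSize, holes * aSize, aSize);
	}

// e.g. PrintFragmentationPlan(64 * 1024 * 1024, 4096, 8192) models the
// "every other page free" pattern with 4KB pages on a 64MB chunk.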
--- a/kerneltest/e32test/mmu/t_shadow.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kerneltest/e32test/mmu/t_shadow.cpp Tue May 11 17:28:22 2010 +0300
@@ -338,6 +338,9 @@
const TUint KChunkShift = 20;
const TUint KChunkSize = 1 << KChunkShift;
+const TUint KRomSizeAlign = 16; // This should match CFG_RomSizeAlign defined in bootcpu.inc
+const TUint KRomSizeAlignMask = (1 << KRomSizeAlign) - 1;
+
void TestRomIsSectionMapped()
{
test.Start(_L("Test ROM is section mapped"));
@@ -350,13 +353,24 @@
test_KErrNone(Shadow.GetPdInfo(KGlobalPageDirectory, pdSize, pdBase, offset));
test.Printf(_L("pd base == %08x, pd size == %08x, pd offset == %08x\n"), pdBase, pdSize, offset);
- for (TLinAddr addr = RomUnpagedStart ; addr <= RomUnpagedEnd ; addr += KChunkSize)
+ TUint romSize = RomUnpagedEnd - RomUnpagedStart;
+ test.Printf(_L("rom size == %x\n"), romSize);
+ if (RomPagedStart == RomPagedEnd)
{
+ // If rom is not paged then we must round the ROM size up to a mutiple of 64KB (or whatever
+		// If the ROM is not paged then we must round the ROM size up to a multiple of 64KB (or whatever
+		// the CFG_RomSizeAlign setting is), because that's how the bootstrap maps it
+ test.Printf(_L("rom size rounded up to %x\n"), romSize);
+ }
+
+ for (TUint pos = 0 ; pos < romSize ; pos += KChunkSize)
+ {
+ TLinAddr addr = RomUnpagedStart + pos;
TUint i = (addr >> KChunkShift) - offset;
TUint pde = Shadow.Read(pdBase + i*4);
test.Printf(_L(" %08x: PDE %08x\n"), addr, pde);
- TUint expectedPdeType = (RomUnpagedEnd - addr) >= KChunkSize ? 2 : 1;
+ TUint expectedPdeType = (romSize - pos) >= KChunkSize ? 2 : 1;
test_Equal(expectedPdeType, pde & 3);
}
#else
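The fix above accounts for the bootstrap mapping unpaged ROM in CFG_RomSizeAlign-sized units (64KB when KRomSizeAlign is 16), so the expected PDE type must be derived from the rounded-up size rather than from RomUnpagedEnd directly. A worked example of the rounding expression, with illustrative values:

const TUint KRomSizeAlign = 16;                        // 2^16 == 64KB granularity
const TUint KRomSizeAlignMask = (1 << KRomSizeAlign) - 1;

TUint RoundRomSize(TUint aRomSize)
	{
	return (aRomSize + KRomSizeAlignMask) & ~KRomSizeAlignMask;   // round up to 64KB
	}
// RoundRomSize(0x24C000) == 0x250000: a ROM ending 0xC000 bytes into a 64KB block
// is still treated as occupying the whole block when checking its PDEs.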
--- a/kerneltest/e32utils/group/base_e32utils.mrp Tue Apr 27 18:02:57 2010 +0300
+++ b/kerneltest/e32utils/group/base_e32utils.mrp Tue May 11 17:28:22 2010 +0300
@@ -17,6 +17,7 @@
source \sf\os\kernelhwsrv\kerneltest\e32utils\trace
source \sf\os\kernelhwsrv\kerneltest\e32utils\usbmsapp
source \sf\os\kernelhwsrv\kerneltest\e32utils\sdpartition
+source \sf\os\kernelhwsrv\kerneltest\e32utils\nandboot\coreldr\bootstrap_smrif.h
#MattD: Reltools 2.67 don't understand 'cwtools' are the CW equivalent of 'tools' and try to do '-what cwtools udeb' instead of '-what cwtools deb'.
#binary \sf\os\kernelhwsrv\kerneltest\e32utils\group cwtools
--- a/kerneltest/e32utils/group/bld.inf Tue Apr 27 18:02:57 2010 +0300
+++ b/kerneltest/e32utils/group/bld.inf Tue May 11 17:28:22 2010 +0300
@@ -1,4 +1,4 @@
-// Copyright (c) 1999-2009 Nokia Corporation and/or its subsidiary(-ies).
+// Copyright (c) 1999-2010 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
@@ -30,20 +30,19 @@
PRJ_EXPORTS
-
-
+../nandboot/coreldr/bootstrap_smrif.h SYMBIAN_OS_LAYER_PLATFORM_EXPORT_PATH(bootstrap_smrif.h)
../profiler/profiler.h SYMBIAN_OS_LAYER_PLATFORM_EXPORT_PATH(profiler.h)
-../analyse/profiler.rtf /epoc32/engdoc/profiler/profiler.rtf
+../analyse/profiler.rtf /epoc32/engdoc/profiler/profiler.rtf
-../d_exc/printstk.pl /epoc32/rom/tools/printstk.pl
-../d_exc/printsym.pl /epoc32/rom/tools/printsym.pl
+../d_exc/printstk.pl /epoc32/rom/tools/printstk.pl
+../d_exc/printsym.pl /epoc32/rom/tools/printsym.pl
-../setcap/setcap.iby /epoc32/rom/include/setcap.iby
+../setcap/setcap.iby /epoc32/rom/include/setcap.iby
../demandpaging/dptest.h SYMBIAN_OS_LAYER_PLATFORM_EXPORT_PATH(dptest.h)
-../demandpaging/dptestcons.oby /epoc32/rom/include/dptestcons.oby
+../demandpaging/dptestcons.oby /epoc32/rom/include/dptestcons.oby
PRJ_TESTEXPORTS
../trace/btracevw.pl /epoc32/tools/btracevw.pl
--- a/kerneltest/f32test/server/t_fsys.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/kerneltest/f32test/server/t_fsys.cpp Tue May 11 17:28:22 2010 +0300
@@ -933,38 +933,57 @@
//-- 2. open this file
nRes = file.Open(TheFs, KFile, EFileRead);
test_KErrNone(nRes);
+
+ const TInt drvNumber = CurrentDrive();
//-- 2.1 try to dismount the FS, it must fail because of the opened object.
TBuf<40> fsName;
- nRes = TheFs.FileSystemName(fsName, CurrentDrive());
+ nRes = TheFs.FileSystemName(fsName, drvNumber);
test_KErrNone(nRes);
- nRes = TheFs.DismountFileSystem(fsName, CurrentDrive());
- test(nRes == KErrInUse);
-
+ nRes = TheFs.DismountFileSystem(fsName, drvNumber);
+ test_Value(nRes, nRes == KErrInUse);
+ // Flag from locmedia.h to simulate ejecting and re-inserting the media.
+ const TUint KMediaRemountForceMediaChange = 0x00000001;
+ TRequestStatus changeStatus;
+ TheFs.NotifyChange(ENotifyAll, changeStatus);
+ TDriveInfo driveInfo;
+
//-- 3. forcedly remount the drive
- nRes = TheFs.RemountDrive(CurrentDrive());
+ nRes = TheFs.RemountDrive(drvNumber, NULL, KMediaRemountForceMediaChange);
+
if(nRes == KErrNotSupported)
- {//-- this feature is not supported and the test is inconsistent.
+ {//-- this feature is not supported and the test is inconsistent.
test.Printf(_L("RemountDrive() is not supported, the test is inconsistent!"));
//-- remounting must work at least on MMC drives
- const TBool isFAT = Is_Fat(TheFs, CurrentDrive());
+ const TBool isFAT = Is_Fat(TheFs, drvNumber);
- TDriveInfo driveInfo;
- nRes = TheFs.Drive(driveInfo, CurrentDrive());
+ nRes = TheFs.Drive(driveInfo, drvNumber);
test_KErrNone(nRes);
- test(!isFAT || (!(driveInfo.iDriveAtt & KDriveAttRemovable)));
-
- }
+ test_Value(driveInfo.iDriveAtt, !isFAT || (!(driveInfo.iDriveAtt & KDriveAttRemovable)));
+ }
else
- {
- test_KErrNone(nRes);
- }
-
- User::After(500*K1mSec);
+ {
+ test_Value(nRes, nRes == KErrNotReady || nRes == KErrNone);
+
+ //-- 3.1 wait for media change to complete
+ do
+ {
+ // Waiting for media change...
+ User::WaitForRequest(changeStatus);
+ nRes = TheFs.Drive(driveInfo, drvNumber);
+ TheFs.NotifyChange(ENotifyAll, changeStatus);
+ }
+ while (nRes == KErrNotReady);
+
+ test_KErrNone(nRes);
+ User::After(1000*K1mSec); // Wait 1 sec (needed by certain platforms)
+ }
+
+ TheFs.NotifyChangeCancel(changeStatus);
//-- 4. read this file. The FS will be remounted and the read must be OK.
TBuf8<40> buf;
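For reference, the force-remount-and-wait sequence introduced above condenses to a helper along these lines; this is a sketch only (RFs calls as used in the hunk, with error handling and the final settle delay trimmed), not part of the change itself:

    #include <f32file.h>

    // Simulate a media change on aDrive and block until the drive reports ready again.
    TInt ForceRemountAndWait(RFs& aFs, TInt aDrive)
        {
        const TUint KMediaRemountForceMediaChange = 0x00000001; // locmedia.h flag, as in the hunk above
        TRequestStatus changeStatus;
        aFs.NotifyChange(ENotifyAll, changeStatus);

        TInt r = aFs.RemountDrive(aDrive, NULL, KMediaRemountForceMediaChange);
        if (r != KErrNone && r != KErrNotReady)
            {
            aFs.NotifyChangeCancel(changeStatus);
            return r;                                   // e.g. KErrNotSupported on some platforms
            }

        TDriveInfo driveInfo;
        do
            {
            User::WaitForRequest(changeStatus);         // wait for the media change notification
            r = aFs.Drive(driveInfo, aDrive);           // poll until the drive is ready again
            aFs.NotifyChange(ENotifyAll, changeStatus); // re-arm in case another change is pending
            }
        while (r == KErrNotReady);

        aFs.NotifyChangeCancel(changeStatus);
        return r;
        }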
--- a/userlibandfileserver/fileserver/etshell/ts_com.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/userlibandfileserver/fileserver/etshell/ts_com.cpp Tue May 11 17:28:22 2010 +0300
@@ -1,4 +1,4 @@
-// Copyright (c) 1996-2009 Nokia Corporation and/or its subsidiary(-ies).
+// Copyright (c) 1996-2010 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
@@ -978,7 +978,11 @@
default: aPrintBuf.Append(_L("??? Unknown Type")); break;
};
-
+ if (aDrvInfo.iConnectionBusType)
+ {
+ aPrintBuf.Append(_L(" USB"));
+ }
+
aPrintBuf.Append(_L("\n"));
}
--- a/userlibandfileserver/fileserver/group/release.txt Tue Apr 27 18:02:57 2010 +0300
+++ b/userlibandfileserver/fileserver/group/release.txt Tue May 11 17:28:22 2010 +0300
@@ -1,3 +1,27 @@
+Version 2.00.2054
+=================
+(Made by vfebvre 23/04/2010)
+
+1. michcox
+ 1. ou1cimx1#357507 TB92 Maps 3.04: File mode EFileWrite (defined in f32file.h) corrupts memory of other processes under certain conditions on 5.0 devices
+
+
+Version 2.00.2053
+=================
+(Made by vfebvre 22/04/2010)
+
+1. niccox
+ 1. ou1cimx1#356808 Disconnecting memory card reader/writer from phone causes phone crash
+
+
+Version 2.00.2052
+=================
+(Made by vfebvre 14/04/2010)
+
+1. famustaf
+ 1. PDEF145305 F32TEST T_FSYS test failure investigation
+
+
Version 2.00.2051
=================
(Made by vfebvre 09/04/2010)
--- a/userlibandfileserver/fileserver/inc/f32ver.h Tue Apr 27 18:02:57 2010 +0300
+++ b/userlibandfileserver/fileserver/inc/f32ver.h Tue May 11 17:28:22 2010 +0300
@@ -58,6 +58,6 @@
@see TVersion
*/
-const TInt KF32BuildVersionNumber=2051;
+const TInt KF32BuildVersionNumber=2054;
//
#endif
--- a/userlibandfileserver/fileserver/sfile/sf_file_cache.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/userlibandfileserver/fileserver/sfile/sf_file_cache.cpp Tue May 11 17:28:22 2010 +0300
@@ -1503,6 +1503,11 @@
// Need to reset currentOperation.iReadWriteArgs.iTotalLength here to make sure
// TFsFileWrite::PostInitialise() doesn't think there's no data left to process
aMsgRequest.ReStart();
+
+ // Need to preserve the current state, otherwise if we are over the RAM threshold
+ // the request can end up in a livelock, repeatedly trying to flush.
+ currentOperation->iState = EStWriteThrough;
+
if (r == CFsRequest::EReqActionBusy || r != CFsRequest::EReqActionComplete)
return r;
}
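The livelock mentioned in the new comment is the usual retry-without-changing-state trap: the restarted write re-enters the cached path, finds the cache still over the RAM threshold, flushes and restarts again, indefinitely. A purely illustrative, self-contained sketch of that shape (hypothetical names, not the file server's internals):

    #include <cstdio>

    enum TWriteMode { ECached, EWriteThrough };

    int main()
        {
        TWriteMode mode = ECached;
        const bool overThreshold = true; // assume the flush cannot bring the cache under the limit
        int attempts = 0;

        while (attempts < 10)            // guard so this demo terminates either way
            {
            ++attempts;
            if (mode == ECached && overThreshold)
                {
                // flush dirty data, then restart the request...
                mode = EWriteThrough;    // the fix: without this the loop never exits
                continue;
                }
            break;                       // write proceeds, bypassing the cache
            }
        std::printf("completed after %d attempt(s)\n", attempts);
        return 0;
        }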
--- a/userlibandfileserver/fileserver/shostmassstorage/msproxy/debug.h Tue Apr 27 18:02:57 2010 +0300
+++ b/userlibandfileserver/fileserver/shostmassstorage/msproxy/debug.h Tue May 11 17:28:22 2010 +0300
@@ -22,7 +22,7 @@
#ifndef PXY_DEBUG_H
#define PXY_DEBUG_H
-//#define _HOST_DEBUG_PRINT_
+// #define _HOST_DEBUG_PRINT_
// #define _PROXY_DEBUG_PRINT_
// #define _PROXY_FN_TRACE_
--- a/userlibandfileserver/fileserver/shostmassstorage/msproxy/hostusbmsproxy.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/userlibandfileserver/fileserver/shostmassstorage/msproxy/hostusbmsproxy.cpp Tue May 11 17:28:22 2010 +0300
@@ -596,12 +596,18 @@
// do nothing
}
}
- else if (KErrNotReady)
+ else if (KErrNotReady == r)
{
__HOSTPRINT(_L("<<< HOST Caps Media Not Present"));
c.iType = EMediaNotPresent;
r = KErrNone;
}
+ else if (KErrGeneral == r)
+ {
+ RDebug::Print(_L("<<< HOST Caps Unable to communicate with media"));
+ c.iType = EMediaUnknown;
+ }
+
else
{
__HOSTPRINT(_L("<<< HOST Caps Unknown Error"));
--- a/userlibandfileserver/fileserver/shostmassstorage/server/controller/cusbhostmsdevice.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/userlibandfileserver/fileserver/shostmassstorage/server/controller/cusbhostmsdevice.cpp Tue May 11 17:28:22 2010 +0300
@@ -166,14 +166,14 @@
// interface suspension to the transport layer
for (TInt i = 0; i < iLuList.Count(); i++)
{
- CUsbHostMsLogicalUnit& lu = iLuList.GetLuL(i);
+ CUsbHostMsLogicalUnit& lu = iLuList.GetLu(i);
if (!lu.IsReadyToSuspend() && lu.IsConnected())
return;
}
for (TInt i = 0; i < iLuList.Count(); i++)
{
- CUsbHostMsLogicalUnit& lu = iLuList.GetLuL(i);
+ CUsbHostMsLogicalUnit& lu = iLuList.GetLu(i);
SetLunL(lu);
lu.SuspendL();
}
@@ -299,7 +299,7 @@
TInt err;
for (TInt i = 0; i < iLuList.Count(); i++)
{
- CUsbHostMsLogicalUnit& lu = iLuList.GetLuL(i);
+ CUsbHostMsLogicalUnit& lu = iLuList.GetLu(i);
SetLunL(lu);
TRAP(err, lu.DoLunReadyCheckL());
}
@@ -315,7 +315,7 @@
// interface suspension to the transport layer
for (TInt i = 0; i < iLuList.Count(); i++)
{
- CUsbHostMsLogicalUnit& lu = iLuList.GetLuL(i);
+ CUsbHostMsLogicalUnit& lu = iLuList.GetLu(i);
// Has any of the logical units had its state changed?
if ( (lu.IsReadyToSuspend() && !lu.IsConnected()) ||
(!lu.IsReadyToSuspend() && lu.IsConnected()) )
@@ -327,7 +327,7 @@
for (TInt i = 0; i < iLuList.Count(); i++)
{
- CUsbHostMsLogicalUnit& lu = iLuList.GetLuL(i);
+ CUsbHostMsLogicalUnit& lu = iLuList.GetLu(i);
SetLunL(lu);
lu.SuspendL();
}
@@ -341,7 +341,7 @@
__MSFNLOG
for (TInt i = 0; i < iLuList.Count(); i++)
{
- CUsbHostMsLogicalUnit& lu = iLuList.GetLuL(i);
+ CUsbHostMsLogicalUnit& lu = iLuList.GetLu(i);
SetLunL(lu);
lu.ResumeL();
}
--- a/userlibandfileserver/fileserver/shostmassstorage/server/controller/include/tlogicalunitlist.h Tue Apr 27 18:02:57 2010 +0300
+++ b/userlibandfileserver/fileserver/shostmassstorage/server/controller/include/tlogicalunitlist.h Tue May 11 17:28:22 2010 +0300
@@ -1,4 +1,4 @@
-// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
+// Copyright (c) 2008-2010 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
@@ -29,9 +29,10 @@
void RemoveLuL(TLun aLun);
void RemoveAllLuL();
CUsbHostMsLogicalUnit& GetLuL(TLun aLun) const;
- TInt FindLu(TLun aLun) const;
+ CUsbHostMsLogicalUnit& GetLu(TInt aIndex) const;
TInt Count() const;
private:
+ TInt FindLu(TLun aLun) const;
RPointerArray<CUsbHostMsLogicalUnit> iLu;
};
--- a/userlibandfileserver/fileserver/shostmassstorage/server/controller/tlogicalunitlist.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/userlibandfileserver/fileserver/shostmassstorage/server/controller/tlogicalunitlist.cpp Tue May 11 17:28:22 2010 +0300
@@ -1,4 +1,4 @@
-// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
+// Copyright (c) 2008-2010 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
@@ -124,3 +124,10 @@
User::LeaveIfError(index);
return *iLu[index];
}
+
+
+CUsbHostMsLogicalUnit& TLogicalUnitList::GetLu(TInt aIndex) const
+ {
+ __MSFNSLOG
+ return *iLu[aIndex];
+ }
--- a/userlibandfileserver/fileserver/shostmassstorage/server/protocol/cscsiprotocol.cpp Tue Apr 27 18:02:57 2010 +0300
+++ b/userlibandfileserver/fileserver/shostmassstorage/server/protocol/cscsiprotocol.cpp Tue May 11 17:28:22 2010 +0300
@@ -264,7 +264,8 @@
__MSFNLOG
if (!IsConnected())
{
- DoScsiReadyCheckEventL();
+ if (!DoScsiReadyCheckEventL())
+ return;
}
if (!iSbcInterface)
@@ -800,7 +801,7 @@
}
-void CScsiProtocol::DoScsiReadyCheckEventL()
+TBool CScsiProtocol::DoScsiReadyCheckEventL()
{
__MSFNLOG
TInt err = KErrNone;
@@ -833,6 +834,7 @@
iMediaChangeNotifier.DoNotifyL();
}
}
+ return (err == KErrNone) ? ETrue : EFalse;
}
--- a/userlibandfileserver/fileserver/shostmassstorage/server/protocol/include/cscsiprotocol.h Tue Apr 27 18:02:57 2010 +0300
+++ b/userlibandfileserver/fileserver/shostmassstorage/server/protocol/include/cscsiprotocol.h Tue May 11 17:28:22 2010 +0300
@@ -85,7 +85,7 @@
// unit testing
void CreateSbcInterfaceL(TUint32 aBlockLen, TUint32 aLastLba);
- void DoScsiReadyCheckEventL();
+ TBool DoScsiReadyCheckEventL();
void NotifyChange(const RMessage2& aMessage);
void ForceCompleteNotifyChangeL();
--- a/userlibandfileserver/fileserver/shostmassstorage/server/protocol/include/mprotocol.h Tue Apr 27 18:02:57 2010 +0300
+++ b/userlibandfileserver/fileserver/shostmassstorage/server/protocol/include/mprotocol.h Tue May 11 17:28:22 2010 +0300
@@ -1,4 +1,4 @@
-// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
+// Copyright (c) 2008-2010 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
@@ -66,7 +66,7 @@
/** unit testing */
virtual void CreateSbcInterfaceL(TUint32 aBlockLen, TUint32 aLastLba) = 0;
- virtual void DoScsiReadyCheckEventL() = 0;
+ virtual TBool DoScsiReadyCheckEventL() = 0;
/**
Media change notification