diff -r 73ea206103e6 -r 43365a9b78a3 userlibandfileserver/fileserver/sfat32/sl_dir_cache.cpp --- a/userlibandfileserver/fileserver/sfat32/sl_dir_cache.cpp Wed Jun 23 19:44:53 2010 +0300 +++ b/userlibandfileserver/fileserver/sfat32/sl_dir_cache.cpp Tue Jul 06 15:50:07 2010 +0300 @@ -113,6 +113,7 @@ iMinSizeInPages(aMinPageNum), iMaxSizeInPages(aMaxPageNum), iDrive(aDrive), + iLastVisitedPage(NULL), iLockedQ(_FOFF(TDynamicDirCachePage, iLink)), iUnlockedQ(_FOFF(TDynamicDirCachePage, iLink)), iLockedQCount(0), @@ -158,19 +159,19 @@ // allocate as many permanently locked pages as there are threads - plus one // otherwise DoMakePageMRU() won't work properly with only one thread - //-- At present moment the size of TDrive thread pool is 1 (1 drive thread in a pool) - const TUint KThreadCount = 1; - iPermanentlyAllocatedPageCount = KThreadCount + 1; + //-- At present moment the size of TDrive thread pool is 1 (1 drive thread in a pool) + const TUint KThreadCount = 1; + iPermanentlyAllocatedPageCount = KThreadCount; if (iPermanentlyAllocatedPageCount > iMinSizeInPages) iMinSizeInPages = iPermanentlyAllocatedPageCount; for (TUint n=0; niType == TDynamicDirCachePage::EUnknown); - - if (iLockedQ.IsEmpty()) - { - // if locked queue is empty, add it onto the locked queue directly - AddFirstOntoQueue(aPage, TDynamicDirCachePage::ELocked); - } - else - { - // otherwise, we squeeze for the last position on locked queue - while (iLockedQCount + 1 >= iMinSizeInPages) - { - TDynamicDirCachePage* last = iLockedQ.Last(); - DeQueue(last); - UnlockPage(last); - AddFirstOntoQueue(last, TDynamicDirCachePage::EUnlocked); - } - - // iLockedQCount + 1 < iMinSizeInPages - iLockedQ.AddLast(*aPage); - aPage->SetPageType(TDynamicDirCachePage::ELocked); - iLockedQCount++; - } - } - -/** Read data from a single page. If the page is not found or not valid anymore, read media onto iActive page first. The data will be _Appended_ the the descriptor aDes. 
The caller is responsible for maintaining this descriptor. @@ -248,13 +213,19 @@ aDes.Append(pPage->PtrInPage(aPos), aLength); - // if page is from unlocked queue, insert it onto the last page of the locked - // queue. this is to avoid excessive locking and unlocking operations that is - // highly likely to happen for following reads. - if (pPage->PageType() == TDynamicDirCachePage::EUnlocked) - { - DeQueue(pPage); - MakePageLastLocked(pPage); + // if page is from unlocked queue, try to keep it locked until we move to a + // different page from the unlocked queue + // this is to avoid excessive locking and unlocking operations that are + // highly likely to happen when DoFindL() linearly scans through the directory + if (pPage->PageType() == TDynamicDirCachePage::EUnlocked + && iLastVisitedPage != pPage) + { + // Note: iLastVisitedPage may have been moved from unlocked queue to locked queue + if(iLastVisitedPage && iLastVisitedPage->PageType() == TDynamicDirCachePage::EUnlocked) + { + User::LeaveIfError(UnlockPage(iLastVisitedPage)); + } + iLastVisitedPage = pPage; } } else // page locking failed @@ -624,23 +595,32 @@ case EDumpCache: { RFs fs; - fs.Connect(); + r = fs.Connect(); + if(r != KErrNone) + break; + const TUint32 debugRegister = DebugRegister(); fs.SetDebugRegister(debugRegister|KFSYS); Dump(); fs.SetDebugRegister(debugRegister); fs.Close(); + r = KErrNone; break; } case ECacheInfo: { RFs fs; - fs.Connect(); + r = fs.Connect(); + if(r != KErrNone) + break; + const TUint32 debugRegister = DebugRegister(); fs.SetDebugRegister(debugRegister|KFSYS); - Info(); + TDirCacheInfo* aInfo = static_cast<TDirCacheInfo*>(aParam2); + Info(aInfo); fs.SetDebugRegister(debugRegister); fs.Close(); + r = KErrNone; break; } @@ -683,14 +663,14 @@ { // __PRINT1(_L("MakePageMRU (%lx)"), aPos); // __PRINT4(_L("Current Cache State: iLockedQCount=%d, iUnlockedQCount=%d, iLookupTbl=%d, iMaxSizeInPages=%d"), iLockedQCount, iUnlockedQCount, iLookupTable.Count(), iMaxSizeInPages); - // check there are 
at least two locked pages - ASSERT(iLockedQCount > 1); + // check there is at least one locked page + ASSERT(iLockedQCount > 0); // check the MRU page first, if it is already the MRU page, we can return immediately TInt64 pageStartMedPos = CalcPageStartPos(aPos); if (!iLockedQ.IsEmpty()) { - if (iLockedQ.First()->StartPos() == pageStartMedPos) + if (iLockedQCount > 1 && iLockedQ.First()->StartPos() == pageStartMedPos) { return; } @@ -703,7 +683,7 @@ // lock page before make it MRU if (pPage->PageType() == TDynamicDirCachePage::EUnlocked) { - ASSERT(!pPage->IsLocked()); + ASSERT(!pPage->IsLocked() || (pPage->IsLocked() && pPage == iLastVisitedPage)); if (LockPage(pPage) == NULL) { DeQueue(pPage); @@ -735,8 +715,9 @@ ASSERT(pPage && pPage->IsValid() && pPage->IsLocked()); - // if we used the active page (last on the queue), try to grow the cache. - TBool growCache = pPage == iLockedQ.Last(); + + TBool makeNewPageMRU = pPage == iLockedQ.Last(); + switch (pPage->PageType()) { @@ -764,16 +745,27 @@ ASSERT(0); } - if (CacheIsFull() || !growCache) - return; + if (!makeNewPageMRU) + return; + + // when cache is full and a new MRU page is about to be added, we will need to evict the LRU page + // accordingly + if (CacheIsFull()) + { + TUint32& queueCnt = iMaxSizeInPages - iMinSizeInPages > 0 ? iUnlockedQCount : iLockedQCount; + queueCnt++; + CheckThresholds(); + queueCnt--; + } - // attempt to grow the cache by appending a clean, new page at the end of the LRU list. - // This can fail when out of memory; the LRU mechanism then makes sure the oldest page will be re-used. - TDynamicDirCachePage* nPage = AllocateAndLockNewPage(0); - if (!nPage) - return; + // attempt to grow the cache by appending a clean, new page at the end of the locked page queue. + // This can fail when out of memory; the LRU mechanism then makes sure the oldest page will be re-used. 
+ TDynamicDirCachePage* nPage = AllocateAndLockNewPage(); + if (!nPage) + return; - // about to add a page to end of locked queue, so lie about iLockedQCount + // about to add the new active page, force the locked queue to evict the existing last page to make room + // for the new active page iLockedQCount++; CheckThresholds(); iLockedQCount--; @@ -781,7 +773,6 @@ iLockedQ.AddLast(*nPage); nPage->SetPageType(TDynamicDirCachePage::ELocked); ++iLockedQCount; - LookupTblAdd(nPage); } /** @@ -857,7 +848,7 @@ */ void CDynamicDirCache::CheckThresholds() { - while (iLockedQCount + 1 > iMinSizeInPages) + while (iLockedQCount > iMinSizeInPages) { TDynamicDirCachePage* movePage = iLockedQ.Last(); UnlockPage(movePage); @@ -897,65 +888,67 @@ Try to create a new page and lock the page content when it is created. This function should only be called when creating iActive page or making a page MRU (which might result in page evictions). @return the pointer of the newly created page, or NULL if allocation failed. -@param aStartMedPos the starting media address of the page to be created. -@pre aStartMedPos should not already be existing in the cache. 
*/ -TDynamicDirCachePage* CDynamicDirCache::AllocateAndLockNewPage(TInt64 aStartMedPos) - { - __PRINT1(_L("CDynamicDirCache::AllocateAndLockNewPage(aStartMedPos=%lx)"), aStartMedPos); +TDynamicDirCachePage* CDynamicDirCache::AllocateAndLockNewPage(/*TInt64 aStartMedPos*/) + { + __PRINT(_L("CDynamicDirCache::AllocateAndLockNewPage()")); - TUint8* startRamAddr = iCacheMemoryClient->AllocateAndLockSegments(PageSizeInSegs()); + TUint8* startRamAddr = iCacheMemoryClient->AllocateAndLockSegments(PageSizeInSegs()); + + if (!startRamAddr) + return NULL; - if (!startRamAddr) - return NULL; - - TDynamicDirCachePage* pPage = TDynamicDirCachePage::CreateCachePage(this, aStartMedPos, startRamAddr); + TDynamicDirCachePage* pPage = TDynamicDirCachePage::CreateCachePage(this, 0, startRamAddr); - // Failure would mean the cache chunk was able to grow but we've run out of heap. - // This seems extremely unlikely, but decommit the now-unmanageable cache segment just in case. - if (!pPage) - { - iCacheMemoryClient->DecommitSegments(startRamAddr, PageSizeInSegs()); - return NULL; - } + // Failure would mean the cache chunk was able to grow but we've run out of heap. + // This seems extremely unlikely, but decommit the now-unmanageable cache segment just in case. + if (!pPage) + { + iCacheMemoryClient->DecommitSegments(startRamAddr, PageSizeInSegs()); + return NULL; + } pPage->SetLocked(ETrue); pPage->SetValid(EFalse); return pPage; } -#ifdef _DEBUG +#if defined(_DEBUG) || defined(_DEBUG_RELEASE) /** Dump cache information, only enabled in debug mode. 
@see CDynamicDirCache::Control() */ -void CDynamicDirCache::Info() const - { - __PRINT(_L("======== CDynamicDirCache::Info =========")); - const TUint32 SegmentSizeInBytesLog2 = CCacheMemoryManagerFactory::CacheMemoryManager()->SegmentSizeInBytesLog2(); - // page size - __PRINT1(_L("=== Pages size: [%d Bytes]"), iPageSizeInBytes); - __PRINT1(_L("=== Segment size: [%d Bytes]"), 1 << SegmentSizeInBytesLog2); +void CDynamicDirCache::Info(TDirCacheInfo* aInfo) const + { + __PRINT(_L("======== CDynamicDirCache::Info =========")); + const TUint32 SegmentSizeInBytesLog2 = CCacheMemoryManagerFactory::CacheMemoryManager()->SegmentSizeInBytesLog2(); + const TUint32 pageMemSizeLog2 = iPageSizeLog2 > SegmentSizeInBytesLog2 ? iPageSizeLog2 : SegmentSizeInBytesLog2; + // page size + __PRINT1(_L("=== Segment size: [%d Bytes]"), 1 << SegmentSizeInBytesLog2); + __PRINT1(_L("=== Page data size: [%d Bytes]"), iPageSizeInBytes); + __PRINT1(_L("=== Page mem size: [%d Bytes]"), 1 << pageMemSizeLog2); - // data size: - __PRINT1(_L("=== Min data size: [%d Bytes]"), iMinSizeInPages << iPageSizeLog2); - __PRINT1(_L("=== Max data size: [%d Bytes]"), iMaxSizeInPages << iPageSizeLog2); + // cache size in pages + __PRINT1(_L("=== Min cache size in pages: [%d]"), iMinSizeInPages); + __PRINT1(_L("=== Max cache size in pages: [%d]"), iMaxSizeInPages); - // memory size: - const TUint32 pageMemSizeLog2 = iPageSizeLog2 > SegmentSizeInBytesLog2 ? 
iPageSizeLog2 : SegmentSizeInBytesLog2; - __PRINT1(_L("=== Min memory size: [%d Bytes]"), iMinSizeInPages << pageMemSizeLog2); - __PRINT1(_L("=== Max memory size: [%d Bytes]"), iMaxSizeInPages << pageMemSizeLog2); - - // reserved pages - __PRINT1(_L("=== Number of pages reserved: [%d]"), iMinSizeInPages); - __PRINT1(_L("=== Reserved memory: [%d Bytes]"), (iMinSizeInPages * PageSizeInSegs()) << SegmentSizeInBytesLog2); - // locked page num - __PRINT1(_L("=== Number of pages locked: [%d]"), iLockedQCount); - __PRINT1(_L("=== Locked memory: [%d Bytes]"), (iLockedQCount * PageSizeInSegs()) << SegmentSizeInBytesLog2); - // unlocked page num - __PRINT1(_L("=== Number of pages unlocked: [%d]"), iUnlockedQCount); - __PRINT1(_L("=== Unlocked memory: [%d Bytes]"), (iUnlockedQCount * PageSizeInSegs()) << SegmentSizeInBytesLog2); - } + // locked page num + __PRINT1(_L("=== Number of pages locked: [%d]"), iLockedQCount); + // unlocked page num + __PRINT1(_L("=== Number of pages unlocked: [%d]"), iUnlockedQCount); + __PRINT(_L("=========================================\n")); + + ASSERT(aInfo); + aInfo->iMemorySegmentSize = 1 << SegmentSizeInBytesLog2; + aInfo->iPageSizeInMemory = PageSizeInSegs() << SegmentSizeInBytesLog2; + aInfo->iPageSizeInData = iPageSizeInBytes; + aInfo->iMinCacheSizeInPages = iMinSizeInPages; + aInfo->iMaxCacheSizeInPages = iMaxSizeInPages; + aInfo->iMinCacheSizeInMemory = iMinSizeInPages * aInfo->iPageSizeInMemory; + aInfo->iMaxCacheSizeInMemory = iMaxSizeInPages * aInfo->iPageSizeInMemory; + aInfo->iLockedPageNumber = iLockedQCount; + aInfo->iUnlockedPageNumber = iUnlockedQCount; + } /** Dump cache content, only enabled in debug mode. @@ -1005,7 +998,7 @@ } __PRINT(_L("===========================================\n")); } -#endif //_DEBUG +#endif //#if defined(_DEBUG) || defined(_DEBUG_RELEASE) /** Lock an unlocked page, or do nothing if the page is already locked. 
@@ -1037,14 +1030,17 @@ { ASSERT(aPage != NULL); __PRINT1(_L("CDynamicDirCache::UnlockPage(%lx)"), aPage->StartPos()); - TInt r = iCacheMemoryClient->UnlockSegments(aPage->StartPtr(), PageSizeInSegs()); - if (r == KErrNone) - { - aPage->SetLocked(EFalse); + if (aPage) + { + TInt r = iCacheMemoryClient->UnlockSegments(aPage->StartPtr(), PageSizeInSegs()); + if (r == KErrNone) + { + aPage->SetLocked(EFalse); + } + return r; } - return r; - } - + return KErrArgument; + } /** Decommit a locked or unlocked page. @return TInt KErrNone if decommition was successful, otherwise system-wide error code.