Revision: 201021
author Dremov Kirill (Nokia-D-MSW/Tampere) <kirill.dremov@nokia.com>
Fri, 11 Jun 2010 14:43:47 +0300
changeset 7 a5fbfefd615f
parent 3 ae3f1779f6da
child 8 6547bf8ca13a
child 9 d575fd691cf9
Revision: 201021 Kit: 2010123
cenrep/2001f6fb.cre
cenrep/2001f6fb.txt
conf/s60/cpixhscommon.confml
conf/s60/cpixhscommon_2001f6fb.crml
group/bld.inf
harvester/harvesterserver/group/harvesterserver.mmp
harvester/harvesterserver/inc/cblacklistdb.h
harvester/harvesterserver/inc/cblacklistmgr.h
harvester/harvesterserver/inc/cindexingmanager.h
harvester/harvesterserver/src/cblacklistdb.cpp
harvester/harvesterserver/src/cblacklistmgr.cpp
harvester/harvesterserver/src/cindexingmanager.cpp
harvester/harvesterserver/traces/CBlacklistDbTraces.h
harvester/harvesterserver/traces/CBlacklistMgrTraces.h
harvester/harvesterserver/traces/CIndexingManagerTraces.h
harvester/harvesterserver/traces/OstTraceDefinitions.h
harvester/harvesterserver/traces/ccontentinfoTraces.h
harvester/harvesterserver/traces/ccontentinfodbTraces.h
harvester/harvesterserver/traces/contentinfomgrTraces.h
harvester/harvesterserver/traces/fixed_id.definitions
qcpix/tsrc/orbitsearch/searchhelper.cpp
rom/cpix_mw.iby
searchengine/cpix/cpix/inc/private/document.h
searchengine/cpix/cpix/inc/public/cpixdoc.h
searchengine/cpix/cpix/src/analyzer.cpp
searchengine/cpix/cpix/src/analyzerexp.cpp
searchengine/cpix/cpix/src/document.cpp
searchengine/cpix/tsrc/cpixsample/sis/cpixsample_s60_3_x_v_1_0_0.pkg
searchengine/cpix/tsrc/cpixsample/src/cpixsample.cpp
searchengine/cpix/tsrc/cpixunittest/data/cpixunittestcorpus/pdf/start_enter.pdf
searchengine/cpix/tsrc/cpixunittest/group/bld.inf
searchengine/cpix/tsrc/cpixunittest/group/cpixunittest.mmp
searchengine/cpix/tsrc/cpixunittest/inc/std_log_result.h
searchengine/cpix/tsrc/cpixunittest/src/analysis.cpp
searchengine/cpix/tsrc/cpixunittest/src/analysiswhitebox.cpp
searchengine/cpix/tsrc/cpixunittest/src/cpixunittest.cpp
searchengine/cpix/tsrc/cpixunittest/src/documenttest.cpp
searchengine/cpix/tsrc/cpixunittest/src/misc.cpp
searchengine/cpix/tsrc/cpixunittest/src/pdftests.cpp
searchengine/cpix/tsrc/cpixunittest/src/std_log_result.cpp
searchengine/oss/cl/clucene/group/clucene.mmp
searchengine/oss/cl/clucene/src/clucene.h
searchengine/oss/cl/clucene/src/clucene/highlighter/Encoder.cpp
searchengine/oss/cl/clucene/src/clucene/highlighter/Encoder.h
searchengine/oss/cl/clucene/src/clucene/highlighter/Formatter.cpp
searchengine/oss/cl/clucene/src/clucene/highlighter/Formatter.h
searchengine/oss/cl/clucene/src/clucene/highlighter/Fragmenter.cpp
searchengine/oss/cl/clucene/src/clucene/highlighter/Fragmenter.h
searchengine/oss/cl/clucene/src/clucene/highlighter/HighlightScorer.h
searchengine/oss/cl/clucene/src/clucene/highlighter/Highlighter.cpp
searchengine/oss/cl/clucene/src/clucene/highlighter/Highlighter.h
searchengine/oss/cl/clucene/src/clucene/highlighter/Makefile.am
searchengine/oss/cl/clucene/src/clucene/highlighter/QueryScorer.cpp
searchengine/oss/cl/clucene/src/clucene/highlighter/QueryScorer.h
searchengine/oss/cl/clucene/src/clucene/highlighter/QueryTermExtractor.cpp
searchengine/oss/cl/clucene/src/clucene/highlighter/QueryTermExtractor.h
searchengine/oss/cl/clucene/src/clucene/highlighter/Scorer.h
searchengine/oss/cl/clucene/src/clucene/highlighter/SimpleFragmenter.cpp
searchengine/oss/cl/clucene/src/clucene/highlighter/SimpleFragmenter.h
searchengine/oss/cl/clucene/src/clucene/highlighter/SimpleHTMLEncoder.cpp
searchengine/oss/cl/clucene/src/clucene/highlighter/SimpleHTMLEncoder.h
searchengine/oss/cl/clucene/src/clucene/highlighter/SimpleHTMLFormatter.cpp
searchengine/oss/cl/clucene/src/clucene/highlighter/SimpleHTMLFormatter.h
searchengine/oss/cl/clucene/src/clucene/highlighter/TextFragment.cpp
searchengine/oss/cl/clucene/src/clucene/highlighter/TextFragment.h
searchengine/oss/cl/clucene/src/clucene/highlighter/TokenGroup.cpp
searchengine/oss/cl/clucene/src/clucene/highlighter/TokenGroup.h
searchengine/oss/cl/clucene/src/clucene/highlighter/TokenSources.cpp
searchengine/oss/cl/clucene/src/clucene/highlighter/TokenSources.h
searchengine/oss/cl/clucene/src/clucene/highlighter/WeightedTerm.cpp
searchengine/oss/cl/clucene/src/clucene/highlighter/WeightedTerm.h
searchengine/oss/cl/clucene/src/clucene/util/equators.h
searcher/searchclient/traces/OstTraceDefinitions.h
searcher/searchserver/traces/OstTraceDefinitions.h
searchsrv_plat/cpix_framework_api/inc/cdocumentfield.h
sis/centrep.pkg
sis/cpixsearch_stub.pkg
sis/makeme.bat
sis/makeme.pl
sis/makesis.mk
tsrc/cpixmwtester/conf/cpixmwtester.cfg
tsrc/cpixmwtester/inc/cpixmwtester.h
tsrc/cpixmwtester/src/cpixmwtesterblocks.cpp
watchdog/traces/OstTraceDefinitions.h
Binary file cenrep/2001f6fb.cre has changed
Binary file cenrep/2001f6fb.txt has changed
Binary file conf/s60/cpixhscommon.confml has changed
Binary file conf/s60/cpixhscommon_2001f6fb.crml has changed
--- a/group/bld.inf	Thu May 27 13:59:44 2010 +0300
+++ b/group/bld.inf	Fri Jun 11 14:43:47 2010 +0300
@@ -33,6 +33,8 @@
 PRJ_EXPORTS
 ../rom/CPix_mw.iby	CORE_MW_LAYER_IBY_EXPORT_PATH(CPix_mw.iby)
 ../sis/cpixsearch_stub.sis        /epoc32/data/z/system/install/cpixsearch_stub.sis
+../cenrep/2001f6fb.cre                         /epoc32/winscw/c/private/10202be9/2001f6fb.cre
+../cenrep/2001f6fb.cre                         /epoc32/data/z/private/10202be9/2001f6fb.cre
 
 PRJ_TESTMMPFILES
 
--- a/harvester/harvesterserver/group/harvesterserver.mmp	Thu May 27 13:59:44 2010 +0300
+++ b/harvester/harvesterserver/group/harvesterserver.mmp	Fri Jun 11 14:43:47 2010 +0300
@@ -50,6 +50,7 @@
 LIBRARY			  CPixHarvesterPluginInterface.lib
 LIBRARY           edbms.lib
 LIBRARY           sqldb.lib
+LIBRARY           centralrepository.lib
 
 // Logging
 LIBRARY           flogger.lib 
--- a/harvester/harvesterserver/inc/cblacklistdb.h	Thu May 27 13:59:44 2010 +0300
+++ b/harvester/harvesterserver/inc/cblacklistdb.h	Fri Jun 11 14:43:47 2010 +0300
@@ -96,7 +96,30 @@
     * Leaves in case of errors.
     */
 	TBool FindL(TInt32 aPluginUid);
+	
+	/*
+     * @description Adds the given uid of a plugin to the unloadlist table.
+     * @param aPluginUid Uid of the plugin
+     * @return symbian error code
+     * Leaves in case of errors.
+     */  
+	TInt AddtoUnloadListL( TInt32 aPluginUid );
     
+	/*
+     * @description Removes the given uid of a plugin from the unloadlist table.
+     * @param aPluginUid Uid of the plugin
+     * Leaves in case of errors.
+     */  
+    void RemoveFromUnloadListL( TInt32 aPluginUid );
+    
+    /*
+     * @description Finds whether the given uid of a plugin exists in the unloadlist table.
+     * @param aPluginUid Uid of the plugin
+     * @return ETrue if exists else returns EFalse
+     * Leaves in case of errors.
+     */  
+    TBool FindFromUnloadListL( TInt32 aPluginUid );
+        
 private :
     /*
     * @description Creates the Blacklist database.
@@ -110,6 +133,13 @@
     * Leaves in case of errors.
     */
     CDbColSet* CreateColumnSetLC();
+    
+    /*
+    * @description Creates Column set for unload table.
+    * @return CDbColSet database column set
+    * Leaves in case of errors.
+    */
+    CDbColSet* CreateUnloadColumnSetLC();
 private:    
     /*
      * A handle to a file server session.Owned
--- a/harvester/harvesterserver/inc/cblacklistmgr.h	Thu May 27 13:59:44 2010 +0300
+++ b/harvester/harvesterserver/inc/cblacklistmgr.h	Fri Jun 11 14:43:47 2010 +0300
@@ -87,6 +87,29 @@
      */ 
     TBool FindL(TUid aPluginUid , TInt aVersion);
     
+    /*
+    * @description Adds the given uid of a plugin to the unload list of Blacklist DB.    
+    * @param aPluginUid Uid of the plugin
+    * @return symbian error code
+    * Leaves in case of errors.
+    */    
+   TInt  AddtoUnloadListL( TUid aPluginUid );
+   
+   /*
+   * @description removes the given uid of a plugin from the unload list of Blacklist DB.    
+   * @param aPluginUid Uid of the plugin
+   * Leaves in case of errors.
+   */    
+  void  RemoveFromUnloadListL( TUid aPluginUid );
+  
+  /*
+   * @description Checks whether the plugin uid exists in the database unload list or not.
+   * @param aPluginUid Uid of the plugin
+   * @return ETrue if uid exists else returns EFalse
+   * Leaves in case of errors.
+   */ 
+  TBool FindfromUnloadListL(TUid aPluginUid );
+    
 private:    
     /*
      * Interface to Blacklist database.Owned
--- a/harvester/harvesterserver/inc/cindexingmanager.h	Thu May 27 13:59:44 2010 +0300
+++ b/harvester/harvesterserver/inc/cindexingmanager.h	Fri Jun 11 14:43:47 2010 +0300
@@ -100,6 +100,14 @@
      * saving the state of the Plugins
      */
 	void SaveL();
+	/**
+     * Update content info Db with the plugin details
+     */
+	void UpdateContentInfoDbL( const TDesC& aPluginName);
+	/**
+     * Update the unload list in a separate table in blacklist database
+     */
+	void UpdateUnloadListL();
 	
 private:
 	CIndexingManager();
--- a/harvester/harvesterserver/src/cblacklistdb.cpp	Thu May 27 13:59:44 2010 +0300
+++ b/harvester/harvesterserver/src/cblacklistdb.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -33,7 +33,7 @@
 //column name for version numbers of plugins in blacklist database
 _LIT( Kversion , "version" );
 // The max length for creating sql query for KBlistSqlFormatSeek format
-const TInt KBlistSqlStringMaxLength(40);
+const TInt KBlistSqlStringMaxLength(50);
 //SQL query to fetch the records with given uid
 _LIT(KBlistSqlFormatSeek , "SELECT * FROM table WHERE uid=%d");
 //SQL query to delete the records with given uid
@@ -41,6 +41,14 @@
 //SQL query to fetch all the records in database
 _LIT(KBlistSqlFormatAll , "SELECT * FROM table");
 _LIT(KDriveC, "c:");
+//Unload plugins Table name in blacklist database
+_LIT( KBLUnloadTableName , "unloadtable" );
+//SQL query to fetch all the records in unload table
+_LIT(KUnloadlistSqlFormatAll , "SELECT * FROM unloadtable");
+//SQL query to delete the records with given uid in unload table
+_LIT(KunloadlistSqlDelete, "DELETE FROM unloadtable WHERE uid=%d");
+//SQL query to fetch the records with given uid from unload table
+_LIT(KUnloadlistSqlFormatSeek , "SELECT * FROM unloadtable WHERE uid=%d");
 // -----------------------------------------------------------------------------
 // CBlacklistDb::NewL()
 // -----------------------------------------------------------------------------
@@ -372,8 +380,11 @@
     User::LeaveIfError( iDatabase.Create( iFs , datafile ) );
     CDbColSet* columns = CreateColumnSetLC();//creates the columns and push to cleanupstack
     User::LeaveIfError( iDatabase.CreateTable( KBlacklistTableName , *columns ) );
+    //Add table to store the unload plugins
+    CDbColSet* unloadcolumns = CreateUnloadColumnSetLC(); //creates the columns and push to cleanupstack
+    User::LeaveIfError( iDatabase.CreateTable( KBLUnloadTableName , *unloadcolumns ) );
     //clean up of variables (columns and dataFile)
-    CleanupStack::PopAndDestroy( columns );
+    CleanupStack::PopAndDestroy( 2 );
     
     CPIXLOGSTRING("CBlacklistDb::CreateDBL(): Exit");
     OstTraceFunctionExit0( CBLACKLISTDB_CREATEDBL_EXIT );
@@ -406,3 +417,127 @@
     OstTraceFunctionExit0( CBLACKLISTDB_CREATECOLUMNSETLC_EXIT );
     return columns; // columns stays on CleanupStack
     }
+
+// -----------------------------------------------------------------------------
+// CBlacklistDb::CreateUnloadColumnSetLC
+// -----------------------------------------------------------------------------
+//
+CDbColSet* CBlacklistDb::CreateUnloadColumnSetLC()
+    {
+    OstTraceFunctionEntry0( CBLACKLISTDB_CREATEUNLOADCOLUMNSETLC_ENTRY );
+    CDbColSet* columns = CDbColSet::NewLC();
+        
+    //Add uid column
+    TDbCol col( Kuid , EDbColInt32 );
+    col.iAttributes = TDbCol::ENotNull ;
+    columns->AddL( col );        
+    
+    OstTraceFunctionExit0( CBLACKLISTDB_CREATEUNLOADCOLUMNSETLC_EXIT );
+    return columns; // columns stays on CleanupStack
+    }
+
+// -----------------------------------------------------------------------------
+// CBlacklistDb::AddtoUnloadListL
+// -----------------------------------------------------------------------------
+//
+TInt CBlacklistDb::AddtoUnloadListL( TInt32 aPluginUid )
+    {
+    OstTraceFunctionEntry0( CBLACKLISTDB_ADDTOUNLOADLISTL_ENTRY );
+    if ( !iOpened )
+            return KErrNotReady;
+        
+    TInt err; 
+    //Prepare the view
+    RDbView dbView;
+    CleanupClosePushL( dbView );
+
+    err = dbView.Prepare( iDatabase , TDbQuery( KUnloadlistSqlFormatAll ) ) ;
+        
+    if ( err == KErrNone )
+       {
+        TRAP( err , dbView.InsertL() );
+        CDbColSet* colSet = dbView.ColSetL();
+        TDbColNo uidcolno = colSet->ColNo( Kuid );        
+        dbView.SetColL( uidcolno , aPluginUid );
+        dbView.PutL();
+        //If addition failed, rollback
+        if(err != KErrNone)
+            {
+            iDatabase.Rollback();
+            }            
+       }
+    CleanupStack::PopAndDestroy( &dbView ); // dbView/    
+    User::LeaveIfError( iDatabase.Compact() );    
+    
+    OstTraceFunctionExit0( CBLACKLISTDB_ADDTOUNLOADLISTL_EXIT );
+    return err;
+    }
+// -----------------------------------------------------------------------------
+// CBlacklistDb::RemoveFromUnloadListL
+// -----------------------------------------------------------------------------
+//
+void CBlacklistDb::RemoveFromUnloadListL( TInt32 aPluginUid )
+    {
+    OstTraceFunctionEntry0( CBLACKLISTDB_REMOVEFROMUNLOADLISTL_ENTRY );
+    if ( !iOpened )
+           return ;
+        
+    //Remove the item record to database
+    // Create the sql statement.  KBlistSqlDelete
+    TBuf<KBlistSqlStringMaxLength> sql;
+    sql.Format( KunloadlistSqlDelete , aPluginUid );
+    
+    //delete the row.
+    TInt rowCount( iDatabase.Execute(sql) );
+    if(rowCount > 0)
+        {       
+        OstTrace0( TRACE_NORMAL, CBLACKLISTDB_REMOVEFROMUNLOADLISTL, "CBlacklistDb::RemoveFromUnloadListL :: removed UID succesfully" );
+        CPIXLOGSTRING("CBlacklistDb::RemoveFromUnloadListL(): Removed UID succesfully");
+        }
+    else
+        {        
+        OstTrace0( TRACE_NORMAL, DUP1_CBLACKLISTDB_REMOVEFROMUNLOADLISTL, "CBlacklistDb::RemoveFromUnloadListL:: UID not found" );
+        CPIXLOGSTRING("CBlacklistDb::RemoveFromUnloadListL(): UID not found");
+        }
+    CPIXLOGSTRING("CBlacklistDb::RemoveFromUnloadListL(): Exit");
+    
+    OstTraceFunctionExit0( CBLACKLISTDB_REMOVEFROMUNLOADLISTL_EXIT );
+    return ;
+    }
+
+// -----------------------------------------------------------------------------
+// CBlacklistDb::FindFromUnloadListL
+// -----------------------------------------------------------------------------
+//
+TBool CBlacklistDb::FindFromUnloadListL( TInt32 aPluginUid )
+    {
+    OstTraceFunctionEntry0( CBLACKLISTDB_FINDFROMUNLOADLISTL_ENTRY );
+    CPIXLOGSTRING2("CBlacklistDb::FindFromUnloadListL(): Uid = %x " , aPluginUid );
+        
+    if ( !iOpened )
+            return EFalse;
+    
+    //Check if the item is available in database
+    //Prepare the sql
+    TBuf<KBlistSqlStringMaxLength> sql;
+    sql.Format( KUnloadlistSqlFormatSeek , aPluginUid );
+    TBool found = EFalse;
+    //Prepare the view
+    RDbView dbView;
+    CleanupClosePushL( dbView );
+
+    User::LeaveIfError( dbView.Prepare( iDatabase , TDbQuery(sql) , RDbView::EReadOnly ) );
+    User::LeaveIfError( dbView.EvaluateAll() );
+
+    TInt isAtRow( dbView.FirstL() );
+    
+    if ( isAtRow )
+       {
+        OstTrace0( TRACE_NORMAL, CBLACKLISTDB_FINDFROMUNLOADLISTL, "CBlacklistDb::FindFromUnloadListL::UID found" );
+        CPIXLOGSTRING("CBlacklistDb::FindFromUnloadListL(): UID found");
+        found = ETrue;                 
+       }    
+    CleanupStack::PopAndDestroy( &dbView ); // dbView/
+    OstTraceFunctionExit0( CBLACKLISTDB_FINDFROMUNLOADLISTL_EXIT );
+    return found;
+    }
--- a/harvester/harvesterserver/src/cblacklistmgr.cpp	Thu May 27 13:59:44 2010 +0300
+++ b/harvester/harvesterserver/src/cblacklistmgr.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -129,7 +129,7 @@
     }
 
 // -----------------------------------------------------------------------------
-// CBlacklistMgr::iSAvailableL()
+// CBlacklistMgr::FindL()
 // -----------------------------------------------------------------------------
 //
 TBool CBlacklistMgr::FindL( TUid aPluginUid , TInt aVersion )
@@ -151,3 +151,54 @@
         }
     return found;
     }
+
+// -----------------------------------------------------------------------------
+// CBlacklistMgr::AddtoUnloadListL()
+// -----------------------------------------------------------------------------
+//
+TInt CBlacklistMgr::AddtoUnloadListL( TUid aPluginUid )
+    {
+    OstTraceFunctionEntry0( CBLACKLISTMGR_ADDTOUNLOADLISTL_ENTRY );
+    OstTrace1( TRACE_NORMAL, CBLACKLISTMGR_ADDTOUNLOADLISTL, "CBlacklistMgr::AddtoUnloadListL;Uid=%x", aPluginUid.iUid );
+    CPIXLOGSTRING2("CBlacklistMgr::AddtoUnloadListL(): Uid = %x " , aPluginUid.iUid );
+    //Check if the record with given plugin uid is already available in database or not
+    //If available just ignore the addition
+    //If there is no record found in database with given uid, add new record with given uid    
+    TInt err = KErrNone;
+    
+    if( !(iBlacklistDb->FindFromUnloadListL( aPluginUid.iUid )) )    
+        {
+        err = iBlacklistDb->AddtoUnloadListL( aPluginUid.iUid );
+        }
+      
+    CPIXLOGSTRING("CBlacklistMgr::AddtoUnloadListL(): Exit");    
+    OstTraceFunctionExit0( CBLACKLISTMGR_ADDTOUNLOADLISTL_EXIT );
+    return err;
+    }
+
+// -----------------------------------------------------------------------------
+// CBlacklistMgr::RemoveFromUnloadListL()
+// -----------------------------------------------------------------------------
+//
+void CBlacklistMgr::RemoveFromUnloadListL( TUid aPluginUid )
+    {
+    OstTraceFunctionEntry0( CBLACKLISTMGR_REMOVEFROMUNLOADLISTL_ENTRY );
+    OstTrace1( TRACE_NORMAL, CBLACKLISTMGR_REMOVEFROMUNLOADLISTL, "CBlacklistMgr::RemoveFromUnloadListL;Uid=%x", aPluginUid.iUid );
+    CPIXLOGSTRING2("CBlacklistMgr::RemoveFromUnloadListL(): Uid = %x " , aPluginUid.iUid );
+    //Remove the item record to database
+    iBlacklistDb->RemoveFromUnloadListL( aPluginUid.iUid );
+    
+    CPIXLOGSTRING("CBlacklistMgr::RemoveFromUnloadListL(): Exit");  
+    OstTraceFunctionExit0( CBLACKLISTMGR_REMOVEFROMUNLOADLISTL_EXIT );
+    }
+
+// -----------------------------------------------------------------------------
+// CBlacklistMgr::FindfromUnloadListL()
+// -----------------------------------------------------------------------------
+//
+TBool CBlacklistMgr::FindfromUnloadListL(TUid aPluginUid )
+    {
+    CPIXLOGSTRING2("CBlacklistMgr::FindfromUnloadListL(): Uid = %x " , aPluginUid.iUid );
+    OstTrace1( TRACE_NORMAL, CBLACKLISTMGR_FINDFROMUNLOADLISTL, "CBlacklistMgr::FindfromUnloadListL;Uid=%x", aPluginUid.iUid );
+    return ( iBlacklistDb->FindFromUnloadListL( aPluginUid.iUid ) );    
+    }
--- a/harvester/harvesterserver/src/cindexingmanager.cpp	Thu May 27 13:59:44 2010 +0300
+++ b/harvester/harvesterserver/src/cindexingmanager.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -17,6 +17,7 @@
 
 #include <f32file.h>
 #include <s32file.h>
+#include <centralrepository.h>
 #include "CIndexingManager.h"
 #include "HarvesterServerLogger.h"
 #include "CBlacklistMgr.h"
@@ -48,6 +49,11 @@
 //constants for enable and disable status
 const TInt KEnable = 1;
 const TInt KDisable = 0;
+
+const TUid KCPIXHSrepoUidMenu = {0x2001f6fb};
+
+//Length of uid string in cenrep
+const TInt KuidStringLength = 8;
 // -----------------------------------------------------------------------------
 // CHarvesterServer::NewL()
 // -----------------------------------------------------------------------------
@@ -134,6 +140,8 @@
 	iBlacklistMgr = CBlacklistMgr::NewL();
 	//Instantiate Contentinfo manager
 	iContentInfoMgr = CContentInfoMgr::NewL();
+	
+	UpdateUnloadListL();
 
 	// Load plugins
 	LoadPluginsL();
@@ -322,28 +330,28 @@
         TInt version = infoArray[i]->Version();
         //FFLOGSTRING2( "CFastFindHarvesterPluginControl:: PLUGINS UID %x", uid );
         plugin = NULL;
-        TBool pluginblacklisted = EFalse;
-        
-        pluginblacklisted = iBlacklistMgr->FindL( uid , version );
         
-        TBool iscontentfound = iContentInfoMgr->FindL( infoArray[i]->DisplayName() );
+        UpdateContentInfoDbL( infoArray[i]->DisplayName() );
+        TBool loadplugin = ETrue;
+        //status of plugin in blacklist table
+        TBool pluginblacklisted = iBlacklistMgr->FindL( uid, version);
+        //status of plugin in unload table
+        TBool loadstatus =  iBlacklistMgr->FindfromUnloadListL( uid );  
+        //Check the Uid in both the tables of the blacklist db 
+        if ( loadstatus || pluginblacklisted )
+            loadplugin = EFalse;
         
-        if( !iscontentfound )
+        if ( loadstatus )
             {
-            //Add the content details to database
-            CContentInfo* contentinfo = CContentInfo::NewL();
-            contentinfo->SetNameL( infoArray[i]->DisplayName() );
-            contentinfo->SetBlacklistStatus( KEnable );
-            contentinfo->SetIndexStatus( KEnable );
-            iContentInfoMgr->AddL( contentinfo );
-            delete contentinfo;
+            //Found in unload list.Update the indexing and blacklist status in contentinfo DB
+            iContentInfoMgr->UpdatePluginIndexStatusL( infoArray[i]->DisplayName() , KDisable );
+            iContentInfoMgr->UpdateBlacklistStatusL( infoArray[i]->DisplayName() , KDisable );
             }
-        else
-            {
+        if ( pluginblacklisted )
+            //Update the blacklist status in content info db
             iContentInfoMgr->UpdateBlacklistStatusL( infoArray[i]->DisplayName() , KEnable );
-            }
         
-        if ( !pluginblacklisted )
+        if ( loadplugin )
             {
             // Plugin is not black listed. Add it to database and try to load the plugin
             iBlacklistMgr->AddL( uid , version );
@@ -614,3 +622,65 @@
 	CleanupStack::PopAndDestroy(2, &file);
 }
 
+// -----------------------------------------------------------------------------
+// CIndexingManager::UpdateContentInfoDbL()
+// -----------------------------------------------------------------------------
+//
+void CIndexingManager::UpdateContentInfoDbL( const TDesC& aPluginName)
+{
+    OstTraceFunctionEntry0( CINDEXINGMANAGER_UPDATECONTENTINFODBL_ENTRY );
+    TBool iscontentfound = iContentInfoMgr->FindL( aPluginName );
+            
+    if( !iscontentfound )
+        {
+        //Add the content details to database
+        CContentInfo* contentinfo = CContentInfo::NewL();
+        contentinfo->SetNameL( aPluginName );
+        contentinfo->SetBlacklistStatus( KEnable );
+        contentinfo->SetIndexStatus( KEnable );
+        iContentInfoMgr->AddL( contentinfo );
+        delete contentinfo;
+        }
+    else
+        {
+        iContentInfoMgr->UpdateBlacklistStatusL( aPluginName , KEnable );
+        }
+    OstTraceFunctionExit0( CINDEXINGMANAGER_UPDATECONTENTINFODBL_EXIT );
+}
+
+// -----------------------------------------------------------------------------
+// CIndexingManager::UpdateUnloadList()
+// -----------------------------------------------------------------------------
+//
+void CIndexingManager::UpdateUnloadListL()
+    {
+    OstTraceFunctionEntry0( CINDEXINGMANAGER_UPDATEUNLOADLISTL_ENTRY );
+    CPIXLOGSTRING("CIndexingManager::UpdateUnloadList : Start");
+    //Read the list of Uid's from the cenrep and update blacklist database
+    //Open the unload list common repository
+    CRepository* unloadrepo = NULL;
+    TRAPD( cerror , unloadrepo = CRepository::NewL( KCPIXHSrepoUidMenu ));
+    if ( cerror != KErrNone)
+        return;
+    RArray<TUint32> uidlist;    
+    //Read all the key list
+    TInt error = unloadrepo->FindL( 0, 0, uidlist);
+    if ( error == KErrNone )
+        {
+        TBuf<KuidStringLength> temp;
+        //get the Uid of each and every plugin and add it to blacklist database
+        TInt count = uidlist.Count();
+        for (int i = 0; i < count; i++ )
+            {
+            TUid uid;
+            TInt64 value;
+            unloadrepo->Get( uidlist[i], temp );
+            TLex uidvalue(temp);
+            TInt xerr = uidvalue.Val( value,EHex );
+            uid.iUid = value;
+            (void)iBlacklistMgr->AddtoUnloadListL( uid );            
+            }
+        }
+    CPIXLOGSTRING("CIndexingManager::UpdateUnloadList : End");
+    OstTraceFunctionExit0( CINDEXINGMANAGER_UPDATEUNLOADLISTL_EXIT );
+    }
--- a/harvester/harvesterserver/traces/CBlacklistDbTraces.h	Thu May 27 13:59:44 2010 +0300
+++ b/harvester/harvesterserver/traces/CBlacklistDbTraces.h	Fri Jun 11 14:43:47 2010 +0300
@@ -1,4 +1,4 @@
-// Created by TraceCompiler 2.1.2
+// Created by TraceCompiler 2.2.2
 // DO NOT EDIT, CHANGES WILL BE LOST
 
 #ifndef __CBLACKLISTDBTRACES_H__
@@ -20,6 +20,14 @@
 #define CBLACKLISTDB_CREATEDBL_EXIT 0x8a000c
 #define CBLACKLISTDB_CREATECOLUMNSETLC_ENTRY 0x8a000d
 #define CBLACKLISTDB_CREATECOLUMNSETLC_EXIT 0x8a000e
+#define CBLACKLISTDB_CREATEUNLOADCOLUMNSETLC_ENTRY 0x8a003b
+#define CBLACKLISTDB_CREATEUNLOADCOLUMNSETLC_EXIT 0x8a003c
+#define CBLACKLISTDB_ADDTOUNLOADLISTL_ENTRY 0x8a003d
+#define CBLACKLISTDB_ADDTOUNLOADLISTL_EXIT 0x8a003e
+#define CBLACKLISTDB_REMOVEFROMUNLOADLISTL_ENTRY 0x8a003f
+#define CBLACKLISTDB_REMOVEFROMUNLOADLISTL_EXIT 0x8a0040
+#define CBLACKLISTDB_FINDFROMUNLOADLISTL_ENTRY 0x8a0041
+#define CBLACKLISTDB_FINDFROMUNLOADLISTL_EXIT 0x8a0042
 #define CBLACKLISTDB_CONSTRUCTL 0x860001
 #define CBLACKLISTDB_ADDL 0x860002
 #define CBLACKLISTDB_REMOVE 0x860003
@@ -30,6 +38,9 @@
 #define CBLACKLISTDB_UPDATEL 0x860008
 #define CBLACKLISTDB_FINDL 0x860009
 #define DUP1_CBLACKLISTDB_FINDL 0x86000a
+#define CBLACKLISTDB_REMOVEFROMUNLOADLISTL 0x860027
+#define DUP1_CBLACKLISTDB_REMOVEFROMUNLOADLISTL 0x860028
+#define CBLACKLISTDB_FINDFROMUNLOADLISTL 0x860029
 
 
 inline TBool OstTraceGen2( TUint32 aTraceID, TUint aParam1, TInt aParam2 )
--- a/harvester/harvesterserver/traces/CBlacklistMgrTraces.h	Thu May 27 13:59:44 2010 +0300
+++ b/harvester/harvesterserver/traces/CBlacklistMgrTraces.h	Fri Jun 11 14:43:47 2010 +0300
@@ -1,4 +1,4 @@
-// Created by TraceCompiler 2.1.2
+// Created by TraceCompiler 2.2.2
 // DO NOT EDIT, CHANGES WILL BE LOST
 
 #ifndef __CBLACKLISTMGRTRACES_H__
@@ -14,11 +14,18 @@
 #define CBLACKLISTMGR_ADDL_EXIT 0x8a0014
 #define CBLACKLISTMGR_REMOVE_ENTRY 0x8a0015
 #define CBLACKLISTMGR_REMOVE_EXIT 0x8a0016
+#define CBLACKLISTMGR_ADDTOUNLOADLISTL_ENTRY 0x8a0043
+#define CBLACKLISTMGR_ADDTOUNLOADLISTL_EXIT 0x8a0044
+#define CBLACKLISTMGR_REMOVEFROMUNLOADLISTL_ENTRY 0x8a0045
+#define CBLACKLISTMGR_REMOVEFROMUNLOADLISTL_EXIT 0x8a0046
 #define CBLACKLISTMGR_ADDL 0x86000b
 #define CBLACKLISTMGR_REMOVE 0x86000c
 #define CBLACKLISTMGR_FINDL 0x86000d
 #define DUP1_CBLACKLISTMGR_FINDL 0x86000e
 #define DUP2_CBLACKLISTMGR_FINDL 0x86000f
+#define CBLACKLISTMGR_ADDTOUNLOADLISTL 0x86002a
+#define CBLACKLISTMGR_REMOVEFROMUNLOADLISTL 0x86002b
+#define CBLACKLISTMGR_FINDFROMUNLOADLISTL 0x86002c
 
 
 inline TBool OstTraceGen2( TUint32 aTraceID, TUint aParam1, TInt aParam2 )
--- a/harvester/harvesterserver/traces/CIndexingManagerTraces.h	Thu May 27 13:59:44 2010 +0300
+++ b/harvester/harvesterserver/traces/CIndexingManagerTraces.h	Fri Jun 11 14:43:47 2010 +0300
@@ -1,4 +1,4 @@
-// Created by TraceCompiler 2.1.2
+// Created by TraceCompiler 2.2.2
 // DO NOT EDIT, CHANGES WILL BE LOST
 
 #ifndef __CINDEXINGMANAGERTRACES_H__
@@ -6,6 +6,10 @@
 
 #define KOstTraceComponentID 0x2001f6fb
 
+#define CINDEXINGMANAGER_UPDATECONTENTINFODBL_ENTRY 0x8a0047
+#define CINDEXINGMANAGER_UPDATECONTENTINFODBL_EXIT 0x8a0048
+#define CINDEXINGMANAGER_UPDATEUNLOADLISTL_ENTRY 0x8a0049
+#define CINDEXINGMANAGER_UPDATEUNLOADLISTL_EXIT 0x8a004a
 #define CINDEXINGMANAGER_RUNL 0x860010
 #define DUP1_CINDEXINGMANAGER_RUNL 0x860011
 #define DUP2_CINDEXINGMANAGER_RUNL 0x860012
--- a/harvester/harvesterserver/traces/OstTraceDefinitions.h	Thu May 27 13:59:44 2010 +0300
+++ b/harvester/harvesterserver/traces/OstTraceDefinitions.h	Fri Jun 11 14:43:47 2010 +0300
@@ -1,19 +1,3 @@
-/*
-* Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
-* All rights reserved.
-* This component and the accompanying materials are made available
-* under the terms of "Eclipse Public License v1.0"
-* which accompanies this distribution, and is available
-* at the URL "http://www.eclipse.org/legal/epl-v10.html".
-*
-* Initial Contributors:
-* Nokia Corporation - initial contribution.
-*
-* Contributors:
-*
-* Description: 
-*
-*/
 #ifndef __OSTTRACEDEFINITIONS_H__
 #define __OSTTRACEDEFINITIONS_H__
 // OST_TRACE_COMPILER_IN_USE flag has been added by Trace Compiler
--- a/harvester/harvesterserver/traces/ccontentinfoTraces.h	Thu May 27 13:59:44 2010 +0300
+++ b/harvester/harvesterserver/traces/ccontentinfoTraces.h	Fri Jun 11 14:43:47 2010 +0300
@@ -1,4 +1,4 @@
-// Created by TraceCompiler 2.1.2
+// Created by TraceCompiler 2.2.2
 // DO NOT EDIT, CHANGES WILL BE LOST
 
 #ifndef __CCONTENTINFOTRACES_H__
--- a/harvester/harvesterserver/traces/ccontentinfodbTraces.h	Thu May 27 13:59:44 2010 +0300
+++ b/harvester/harvesterserver/traces/ccontentinfodbTraces.h	Fri Jun 11 14:43:47 2010 +0300
@@ -1,4 +1,4 @@
-// Created by TraceCompiler 2.1.2
+// Created by TraceCompiler 2.2.2
 // DO NOT EDIT, CHANGES WILL BE LOST
 
 #ifndef __CCONTENTINFODBTRACES_H__
--- a/harvester/harvesterserver/traces/contentinfomgrTraces.h	Thu May 27 13:59:44 2010 +0300
+++ b/harvester/harvesterserver/traces/contentinfomgrTraces.h	Fri Jun 11 14:43:47 2010 +0300
@@ -1,4 +1,4 @@
-// Created by TraceCompiler 2.1.2
+// Created by TraceCompiler 2.2.2
 // DO NOT EDIT, CHANGES WILL BE LOST
 
 #ifndef __CONTENTINFOMGRTRACES_H__
--- a/harvester/harvesterserver/traces/fixed_id.definitions	Thu May 27 13:59:44 2010 +0300
+++ b/harvester/harvesterserver/traces/fixed_id.definitions	Fri Jun 11 14:43:47 2010 +0300
@@ -3,24 +3,36 @@
 [GROUP]TRACE_NORMAL=0x86
 [TRACE]TRACE_FLOW[0x8A]_CBLACKLISTDB_ADDL_ENTRY=0x5
 [TRACE]TRACE_FLOW[0x8A]_CBLACKLISTDB_ADDL_EXIT=0x6
+[TRACE]TRACE_FLOW[0x8A]_CBLACKLISTDB_ADDTOUNLOADLISTL_ENTRY=0x3d
+[TRACE]TRACE_FLOW[0x8A]_CBLACKLISTDB_ADDTOUNLOADLISTL_EXIT=0x3e
 [TRACE]TRACE_FLOW[0x8A]_CBLACKLISTDB_CONSTRUCTL_ENTRY=0x3
 [TRACE]TRACE_FLOW[0x8A]_CBLACKLISTDB_CONSTRUCTL_EXIT=0x4
 [TRACE]TRACE_FLOW[0x8A]_CBLACKLISTDB_CREATECOLUMNSETLC_ENTRY=0xd
 [TRACE]TRACE_FLOW[0x8A]_CBLACKLISTDB_CREATECOLUMNSETLC_EXIT=0xe
 [TRACE]TRACE_FLOW[0x8A]_CBLACKLISTDB_CREATEDBL_ENTRY=0xb
 [TRACE]TRACE_FLOW[0x8A]_CBLACKLISTDB_CREATEDBL_EXIT=0xc
+[TRACE]TRACE_FLOW[0x8A]_CBLACKLISTDB_CREATEUNLOADCOLUMNSETLC_ENTRY=0x3b
+[TRACE]TRACE_FLOW[0x8A]_CBLACKLISTDB_CREATEUNLOADCOLUMNSETLC_EXIT=0x3c
+[TRACE]TRACE_FLOW[0x8A]_CBLACKLISTDB_FINDFROMUNLOADLISTL_ENTRY=0x41
+[TRACE]TRACE_FLOW[0x8A]_CBLACKLISTDB_FINDFROMUNLOADLISTL_EXIT=0x42
 [TRACE]TRACE_FLOW[0x8A]_CBLACKLISTDB_NEWL_ENTRY=0x1
 [TRACE]TRACE_FLOW[0x8A]_CBLACKLISTDB_NEWL_EXIT=0x2
+[TRACE]TRACE_FLOW[0x8A]_CBLACKLISTDB_REMOVEFROMUNLOADLISTL_ENTRY=0x3f
+[TRACE]TRACE_FLOW[0x8A]_CBLACKLISTDB_REMOVEFROMUNLOADLISTL_EXIT=0x40
 [TRACE]TRACE_FLOW[0x8A]_CBLACKLISTDB_REMOVE_ENTRY=0x7
 [TRACE]TRACE_FLOW[0x8A]_CBLACKLISTDB_REMOVE_EXIT=0x8
 [TRACE]TRACE_FLOW[0x8A]_CBLACKLISTDB_UPDATEL_ENTRY=0x9
 [TRACE]TRACE_FLOW[0x8A]_CBLACKLISTDB_UPDATEL_EXIT=0xa
 [TRACE]TRACE_FLOW[0x8A]_CBLACKLISTMGR_ADDL_ENTRY=0x13
 [TRACE]TRACE_FLOW[0x8A]_CBLACKLISTMGR_ADDL_EXIT=0x14
+[TRACE]TRACE_FLOW[0x8A]_CBLACKLISTMGR_ADDTOUNLOADLISTL_ENTRY=0x43
+[TRACE]TRACE_FLOW[0x8A]_CBLACKLISTMGR_ADDTOUNLOADLISTL_EXIT=0x44
 [TRACE]TRACE_FLOW[0x8A]_CBLACKLISTMGR_CONSTRUCTL_ENTRY=0x11
 [TRACE]TRACE_FLOW[0x8A]_CBLACKLISTMGR_CONSTRUCTL_EXIT=0x12
 [TRACE]TRACE_FLOW[0x8A]_CBLACKLISTMGR_NEWL_ENTRY=0xf
 [TRACE]TRACE_FLOW[0x8A]_CBLACKLISTMGR_NEWL_EXIT=0x10
+[TRACE]TRACE_FLOW[0x8A]_CBLACKLISTMGR_REMOVEFROMUNLOADLISTL_ENTRY=0x45
+[TRACE]TRACE_FLOW[0x8A]_CBLACKLISTMGR_REMOVEFROMUNLOADLISTL_EXIT=0x46
 [TRACE]TRACE_FLOW[0x8A]_CBLACKLISTMGR_REMOVE_ENTRY=0x15
 [TRACE]TRACE_FLOW[0x8A]_CBLACKLISTMGR_REMOVE_EXIT=0x16
 [TRACE]TRACE_FLOW[0x8A]_CCONTENTINFODB_ADDL_ENTRY=0x25
@@ -59,15 +71,24 @@
 [TRACE]TRACE_FLOW[0x8A]_CCONTENTINFO_NEWL_EXIT=0x18
 [TRACE]TRACE_FLOW[0x8A]_CCONTENTINFO_SETNAMEL_ENTRY=0x1d
 [TRACE]TRACE_FLOW[0x8A]_CCONTENTINFO_SETNAMEL_EXIT=0x1e
+[TRACE]TRACE_FLOW[0x8A]_CINDEXINGMANAGER_UPDATECONTENTINFODBL_ENTRY=0x47
+[TRACE]TRACE_FLOW[0x8A]_CINDEXINGMANAGER_UPDATECONTENTINFODBL_EXIT=0x48
+[TRACE]TRACE_FLOW[0x8A]_CINDEXINGMANAGER_UPDATEUNLOADLISTL_ENTRY=0x49
+[TRACE]TRACE_FLOW[0x8A]_CINDEXINGMANAGER_UPDATEUNLOADLISTL_EXIT=0x4a
 [TRACE]TRACE_NORMAL[0x86]_CBLACKLISTDB_ADDL=0x2
 [TRACE]TRACE_NORMAL[0x86]_CBLACKLISTDB_CONSTRUCTL=0x1
+[TRACE]TRACE_NORMAL[0x86]_CBLACKLISTDB_FINDFROMUNLOADLISTL=0x29
 [TRACE]TRACE_NORMAL[0x86]_CBLACKLISTDB_FINDL=0x9
 [TRACE]TRACE_NORMAL[0x86]_CBLACKLISTDB_FINDWITHVERSIONL=0x6
 [TRACE]TRACE_NORMAL[0x86]_CBLACKLISTDB_REMOVE=0x3
+[TRACE]TRACE_NORMAL[0x86]_CBLACKLISTDB_REMOVEFROMUNLOADLISTL=0x27
 [TRACE]TRACE_NORMAL[0x86]_CBLACKLISTDB_UPDATEL=0x8
 [TRACE]TRACE_NORMAL[0x86]_CBLACKLISTMGR_ADDL=0xb
+[TRACE]TRACE_NORMAL[0x86]_CBLACKLISTMGR_ADDTOUNLOADLISTL=0x2a
+[TRACE]TRACE_NORMAL[0x86]_CBLACKLISTMGR_FINDFROMUNLOADLISTL=0x2c
 [TRACE]TRACE_NORMAL[0x86]_CBLACKLISTMGR_FINDL=0xd
 [TRACE]TRACE_NORMAL[0x86]_CBLACKLISTMGR_REMOVE=0xc
+[TRACE]TRACE_NORMAL[0x86]_CBLACKLISTMGR_REMOVEFROMUNLOADLISTL=0x2b
 [TRACE]TRACE_NORMAL[0x86]_CINDEXINGMANAGER_ADDHARVESTINGQUEUE=0x1d
 [TRACE]TRACE_NORMAL[0x86]_CINDEXINGMANAGER_HARVESTINGCOMPLETED=0x24
 [TRACE]TRACE_NORMAL[0x86]_CINDEXINGMANAGER_LOADPLUGINSL=0x1a
@@ -76,6 +97,7 @@
 [TRACE]TRACE_NORMAL[0x86]_DUP1_CBLACKLISTDB_FINDL=0xa
 [TRACE]TRACE_NORMAL[0x86]_DUP1_CBLACKLISTDB_FINDWITHVERSIONL=0x7
 [TRACE]TRACE_NORMAL[0x86]_DUP1_CBLACKLISTDB_REMOVE=0x4
+[TRACE]TRACE_NORMAL[0x86]_DUP1_CBLACKLISTDB_REMOVEFROMUNLOADLISTL=0x28
 [TRACE]TRACE_NORMAL[0x86]_DUP1_CBLACKLISTMGR_FINDL=0xe
 [TRACE]TRACE_NORMAL[0x86]_DUP1_CINDEXINGMANAGER_ADDHARVESTINGQUEUE=0x1e
 [TRACE]TRACE_NORMAL[0x86]_DUP1_CINDEXINGMANAGER_HARVESTINGCOMPLETED=0x25
--- a/qcpix/tsrc/orbitsearch/searchhelper.cpp	Thu May 27 13:59:44 2010 +0300
+++ b/qcpix/tsrc/orbitsearch/searchhelper.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -37,17 +37,16 @@
     resultsBox->setPlainText("Search button clicked!");
     
     int hits = 0;
+    
     QString resultString("");
     resultsBox->setPlainText( resultString );
     searchTime.restart();
     QString searchString;
-    
-#if PREFIX_SEARCH
     searchString = "$prefix(\""; 
     searchString += searchBox->text();
     searchString += "\")";
 
-#elif STAR_SEARCH
+#if STAR_SEARCH
     searchString += searchBox->text();
     searchString += "*";
 #elif NO_STAR_SEARCH
--- a/rom/cpix_mw.iby	Thu May 27 13:59:44 2010 +0300
+++ b/rom/cpix_mw.iby	Fri Jun 11 14:43:47 2010 +0300
@@ -1,22 +1,25 @@
 /*
-* Copyright (c) 2002 Nokia Corporation and/or its subsidiary(-ies).
-* All rights reserved.
-* This component and the accompanying materials are made available
-* under the terms of "Eclipse Public License v1.0"
-* which accompanies this distribution, and is available
-* at the URL "http://www.eclipse.org/legal/epl-v10.html".
+* ============================================================================
+*  Name     : cpix.iby
+*  Part of  : S605.0, PF52.50, ivalo
+*
+*  Description: Includes additional application IBYs for PF52.50/ivalo
+*       
+*  Version:
 *
-* Initial Contributors:
-* Nokia Corporation - initial contribution.
-*
-* Contributors:
+*  Copyright (C) 2002 Nokia Corporation.
+*  This material, including documentation and any related 
+*  computer programs, is protected by copyright controlled by 
+*  Nokia Corporation. All rights are reserved. Copying, 
+*  including reproducing, storing,  adapting or translating, any 
+*  or all of this material requires the prior written consent of 
+*  Nokia Corporation. This material also contains confidential 
+*  information which may not be disclosed to others without the 
+*  prior written consent of Nokia Corporation.
 *
-* Description:  Includes additional application IBYs for PF52.50/ivalo
-*       
-*
+* ============================================================================
 */
 
-
 #ifndef __CPIXENGINE_IBY__
 #define __CPIXENGINE_IBY__
 
@@ -53,6 +56,8 @@
 
 data=ZSYSTEM\install\cpixsearch_stub.sis    System\Install\cpixsearch_stub.sis
 
+data=DATAZ_\private\10202be9\2001f6fb.cre       private\10202be9\2001f6fb.cre
+
 #endif //FF_SEARCH_SW
 
 #endif // __CPIXENGINE_IBY__
--- a/searchengine/cpix/cpix/inc/private/document.h	Thu May 27 13:59:44 2010 +0300
+++ b/searchengine/cpix/cpix/inc/private/document.h	Fri Jun 11 14:43:47 2010 +0300
@@ -74,7 +74,9 @@
 
         int isIndexed() const;
 
-        bool isAggregated()  const; 
+        bool isAggregated()  const;
+        
+        bool isFreeText()  const; 
 		
         float_t boost() const;
 
@@ -93,6 +95,8 @@
         lucene::document::Field* field_;
 		
         bool aggregate_;
+        
+        bool freeText_;
 	
     };
 	
--- a/searchengine/cpix/cpix/inc/public/cpixdoc.h	Thu May 27 13:59:44 2010 +0300
+++ b/searchengine/cpix/cpix/inc/public/cpixdoc.h	Fri Jun 11 14:43:47 2010 +0300
@@ -73,6 +73,10 @@
             // index the value of the field without an analyzer and
             // disable storing of norms
             cpix_INDEX_UNTOKENIZED = 64,
+            
+            // index the value of the field without the stop word analyzer and
+            // store it in _aggregate
+            cpix_FREE_TEXT = 128
         };
 
     typedef enum cpix_Index_ cpix_Index;
@@ -89,6 +93,7 @@
             // Expose the value to be searchable throught the
             // aggregate field
             cpix_AGGREGATE_YES = 1<<31
+
     	};
     
     typedef enum cpix_Aggregate_ cpix_Aggregate;   
--- a/searchengine/cpix/cpix/src/analyzer.cpp	Thu May 27 13:59:44 2010 +0300
+++ b/searchengine/cpix/cpix/src/analyzer.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -160,6 +160,14 @@
                 {
                     _CLTHROWA(CL_ERR_IO, AGGR_STREAMREADER_ERR);
                 }
+            if(field->isFreeText())
+                {
+                    using namespace lucene::analysis;
+                    stream_ = _CLNEW standard::StandardTokenizer(reader_);
+                    stream_ = _CLNEW standard::StandardFilter(stream_,true);
+                    stream_ = _CLNEW LowerCaseFilter(stream_,true);
+                }
+            else
             stream_ = analyzer_.tokenStream( field->name(), reader_ ); 
         }
     }
--- a/searchengine/cpix/cpix/src/analyzerexp.cpp	Thu May 27 13:59:44 2010 +0300
+++ b/searchengine/cpix/cpix/src/analyzerexp.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -16,6 +16,7 @@
 */
 
 
+
 #include "analyzerexp.h"
 
 #include "indevicecfg.h" 
--- a/searchengine/cpix/cpix/src/document.cpp	Thu May 27 13:59:44 2010 +0300
+++ b/searchengine/cpix/cpix/src/document.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -35,6 +35,7 @@
                  const wchar_t * value,
                  int             configs) 
         :	own_(true), field_(0) {
+        freeText_  = false;
         resolveConfig(configs); 
         field_ = _CLNEW lucene::document::Field(name, value, configs); 		 
     }
@@ -44,6 +45,7 @@
                  lucene::util::Reader* stream,
                  int             configs) 
 	:  own_(true), field_(0) {
+        freeText_  = false;
         resolveConfig(configs); 
         field_ = _CLNEW lucene::document::Field(name, stream, configs); 		 
     }
@@ -63,6 +65,11 @@
             // Aggregate indexed fields by default. 
             aggregate_ = !(configs & cpix_INDEX_NO);
         }
+        
+        if(configs & cpix_FREE_TEXT){
+            freeText_ = true;
+            configs &= (~cpix_FREE_TEXT);
+        }
     }
 				    
 
@@ -73,6 +80,7 @@
       field_( field ), 
 	  aggregate_( aggregate ) 
 	   {
+        freeText_  = false;
     }
 	
     Field::~Field() {
@@ -105,7 +113,11 @@
     int Field::isIndexed() const {
         return field_->isIndexed(); 
     }
-
+    
+    bool Field::isFreeText() const {
+        return freeText_;
+    }
+    
     bool Field::isAggregated() const {
         return aggregate_;
     }
--- a/searchengine/cpix/tsrc/cpixsample/sis/cpixsample_s60_3_x_v_1_0_0.pkg	Thu May 27 13:59:44 2010 +0300
+++ b/searchengine/cpix/tsrc/cpixsample/sis/cpixsample_s60_3_x_v_1_0_0.pkg	Fri Jun 11 14:43:47 2010 +0300
@@ -1,18 +1,3 @@
-;
-; Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
-; All rights reserved.
-; This component and the accompanying materials are made available
-; under the terms of "Eclipse Public License v1.0"
-; which accompanies this distribution, and is available
-; at the URL "http://www.eclipse.org/legal/epl-v10.html".
-;
-; Initial Contributors:
-; Nokia Corporation - initial contribution.
-;
-; Contributors:
-;
-; Description: 
-;
 ; Installation file for cpixsample application
 ;
 ; This is an auto-generated PKG file by Carbide.
--- a/searchengine/cpix/tsrc/cpixsample/src/cpixsample.cpp	Thu May 27 13:59:44 2010 +0300
+++ b/searchengine/cpix/tsrc/cpixsample/src/cpixsample.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -1,19 +1,3 @@
-/*
-* Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
-* All rights reserved.
-* This component and the accompanying materials are made available
-* under the terms of "Eclipse Public License v1.0"
-* which accompanies this distribution, and is available
-* at the URL "http://www.eclipse.org/legal/epl-v10.html".
-*
-* Initial Contributors:
-* Nokia Corporation - initial contribution.
-*
-* Contributors:
-*
-* Description: 
-*
-*/
 
 // INCLUDE FILES
 #include <stdio.h>
@@ -41,14 +25,15 @@
 
 
 #define FIELD_ALPHA L"Alpha"
+#define FIELD_ALPHA1 L"Alpha1"
 
 
 #define DOC1CONTENT L"mary had little lamb issue its anirban fleece was black as coal"
-#define DOC2CONTENT L"sri rama jeyam shankar.rajendran@yahoo.co.in www.google.com U.S.A. file.txt"
+#define DOC2CONTENT L"sri rama jeyam and it  3gpp_70.jpg 170(kb).jpg is shankar.rajendran@yahoo.co.in then www.google.com U.S.A. file.txt"
 
 
 // The term that will be present in multiple documents.
-#define SEARCH_TERM L"$prefix(\"a\")"
+#define SEARCH_TERM L"$prefix(\"had\")"
 
 
 int testInit(cpix_Analyzer **analyzer_, cpix_IdxDb **idxDb_)
@@ -103,11 +88,15 @@
     {
     cpix_Document *doc;
     cpix_Field field;
+    cpix_Field field1;
+    cpix_Field field2;
+    cpix_Field field3;
     cpix_Result result;
     
     doc = cpix_Document_create(&result,docUid,NULL,      // app class
             NULL,      // excerpt
             NULL);     // mime type
+    
     if (cpix_Failed(&result))
         {
         printf("Failed to create a document\n");
@@ -117,7 +106,24 @@
     cpix_Field_initialize(&field,
             FIELD_ALPHA,
             data, 
-            cpix_STORE_YES |cpix_INDEX_TOKENIZED);
+            cpix_STORE_YES |cpix_INDEX_UNTOKENIZED | cpix_AGGREGATE_YES | cpix_FREE_TEXT );
+    
+    cpix_Field_initialize(&field1,
+                FIELD_ALPHA1,
+                data, 
+                cpix_STORE_YES |cpix_INDEX_UNTOKENIZED | cpix_AGGREGATE_YES);
+    
+    
+    cpix_Field_initialize(&field2,
+                L"ALPHA2",
+                L"This is shankar and I am working for nokia", 
+                cpix_STORE_YES |cpix_INDEX_TOKENIZED | cpix_AGGREGATE_YES | cpix_FREE_TEXT );
+    
+    
+    cpix_Field_initialize(&field3,
+                L"ALPHA3",
+                L"This is shankar and I am working for nokia", 
+                cpix_STORE_NO |cpix_INDEX_TOKENIZED | cpix_AGGREGATE_YES);
 
     if (cpix_Failed(&field))
         {
@@ -126,6 +132,9 @@
         return 0;
         }
     cpix_Document_add(doc,&field);
+    cpix_Document_add(doc,&field1);
+    cpix_Document_add(doc,&field2);
+    cpix_Document_add(doc,&field3);
     cpix_IdxDb_add(*idxDb_,doc,*analyzer_);
 
     cpix_Document_destroy(doc);
Binary file searchengine/cpix/tsrc/cpixunittest/data/cpixunittestcorpus/pdf/start_enter.pdf has changed
--- a/searchengine/cpix/tsrc/cpixunittest/group/bld.inf	Thu May 27 13:59:44 2010 +0300
+++ b/searchengine/cpix/tsrc/cpixunittest/group/bld.inf	Fri Jun 11 14:43:47 2010 +0300
@@ -360,5 +360,11 @@
 "../data/cpixunittestcorpus/query/query8.txt"                            "/epoc32/winscw/c/Data/cpixunittestcorpus/query/query8.txt"
 "../data/cpixunittestcorpus/query/query9.txt"                            "/epoc32/winscw/c/Data/cpixunittestcorpus/query/query9.txt"
 
+"../data/cpixunittestcorpus/pdf/ctutor.pdf"                             "/epoc32/winscw/c/Data/cpixunittestcorpus/pdf/ctutor.pdf"
+"../data/cpixunittestcorpus/pdf/Empty.pdf"                              "/epoc32/winscw/c/Data/cpixunittestcorpus/pdf/Empty.pdf"
+"../data/cpixunittestcorpus/pdf/geology.pdf"                            "/epoc32/winscw/c/Data/cpixunittestcorpus/pdf/geology.pdf"
+"../data/cpixunittestcorpus/pdf/samplepdf.pdf"                          "/epoc32/winscw/c/Data/cpixunittestcorpus/pdf/samplepdf.pdf"
+"../data/cpixunittestcorpus/pdf/windjack.pdf"                           "/epoc32/winscw/c/Data/cpixunittestcorpus/pdf/windjack.PDF"
+
 PRJ_TESTMMPFILES
 cpixunittest.mmp
--- a/searchengine/cpix/tsrc/cpixunittest/group/cpixunittest.mmp	Thu May 27 13:59:44 2010 +0300
+++ b/searchengine/cpix/tsrc/cpixunittest/group/cpixunittest.mmp	Fri Jun 11 14:43:47 2010 +0300
@@ -71,7 +71,7 @@
 SOURCE          config.cpp
 SOURCE          clq/uxqry.cpp
 SOURCE          clq/clqry.cpp 
-SOURCE		pdftests.cpp querytest.cpp std_log_result.cpp
+SOURCE		pdftests.cpp querytest.cpp std_log_result.cpp misc.cpp
 
 USERINCLUDE     ../../../../../searchsrv_plat/cpix_utility_api/inc
 USERINCLUDE     ../inc
--- a/searchengine/cpix/tsrc/cpixunittest/inc/std_log_result.h	Thu May 27 13:59:44 2010 +0300
+++ b/searchengine/cpix/tsrc/cpixunittest/inc/std_log_result.h	Fri Jun 11 14:43:47 2010 +0300
@@ -1,20 +1,15 @@
-/*
-* Copyright (c) 2008 Nokia Corporation and/or its subsidiary(-ies).
-* All rights reserved.
-* This component and the accompanying materials are made available
-* under the terms of "Eclipse Public License v1.0"
-* which accompanies this distribution, and is available
-* at the URL "http://www.eclipse.org/legal/epl-v10.html".
-*
-* Initial Contributors:
-* Nokia Corporation - initial contribution.
-*
-* Contributors:
-*
-* Description: 
-*
-*/
-
+ /*
+ *  Copyright © 2008 Nokia Corporation.
+ *  This material, including documentation and any related 
+ *  computer programs, is protected by copyright controlled by 
+ *  Nokia Corporation. All rights are reserved. Copying, 
+ *  including reproducing, storing, adapting or translating, any 
+ *  or all of this material requires the prior written consent of 
+ *  Nokia Corporation. This material also contains confidential 
+ *  information which may not be disclosed to others without the 
+ *  prior written consent of Nokia Corporation.
+ * ============================================================================
+ */
 
 
 #ifndef _STD_LOG_FILE_H__
--- a/searchengine/cpix/tsrc/cpixunittest/src/analysis.cpp	Thu May 27 13:59:44 2010 +0300
+++ b/searchengine/cpix/tsrc/cpixunittest/src/analysis.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -31,6 +31,7 @@
 #include "cpixdoc.h"
 
 #include "std_log_result.h"
+
 const char * AnalysisTestDocsToIndex[5] = {
     FILE_TEST_CORPUS_PATH "\\en\\1.txt",
     FILE_TEST_CORPUS_PATH "\\en\\2.txt",
@@ -83,14 +84,14 @@
 
 	// bad syntaxes
 	TestAnalyzerParsing(testMgr, L"letter><lowercase" ,0); 
-	TestAnalyzerParsing(testMgr, L"38j_d fad23 4?q ca'wRA", 0 ); 
+	TestAnalyzerParsing(testMgr, L"38j_d fad23 4?q ca'wRA", 0 );
+	TestAnalyzerParsing(testMgr, L"38.45_d fd23<ca'wRA", 0 ); 
 	// parsing failures
 	TestAnalyzerParsing(testMgr, L"letter>>lowercase", 0 ); 
 	TestAnalyzerParsing(testMgr, L">letter>>lowercase lowercase", 0 ); 
 	TestAnalyzerParsing(testMgr, L"letter lowercase", 0 );
 	testResultXml(xml_file);
 }
-
 void TestSwitchParsing(Itk::TestMgr * testMgr) 
 {
     char *xml_file = (char*)__FUNCTION__;
@@ -105,14 +106,13 @@
 	TestAnalyzerParsing(testMgr, L"switch{ case '_qnr': whitespace; default: standard; }>lowercase");
 	TestAnalyzerParsing(testMgr, L"switch{ default: 	standard; }");
 	TestAnalyzerParsing(testMgr, L"switch{ case '_qnr': switch{ case '_docuid': keyword; default: whitespace; }; default: standard; }");
+	TestAnalyzerParsing(testMgr, L"switch{ case '_mimetype': standard; default: whitespace; }; default: standard; }");
 	testResultXml(xml_file);
 }
 
 void TestAnalyzerUsage(Itk::TestMgr * testMgr, const wchar_t* definition) 
 {
 	printf("Indexing and searching with %S\n", definition); 
-	char *xml_file = (char*)__FUNCTION__;
-	    assert_failed = 0;
 	cpix_Result
         result;
 
@@ -205,7 +205,7 @@
 	}
 	cpix_QueryParser_destroy(queryParser);
 	cpix_Analyzer_destroy( analyzer ); 
-	testResultXml(xml_file);
+	
 }
 
 void TestAnalyzersUsage(Itk::TestMgr * testMgr) 
--- a/searchengine/cpix/tsrc/cpixunittest/src/analysiswhitebox.cpp	Thu May 27 13:59:44 2010 +0300
+++ b/searchengine/cpix/tsrc/cpixunittest/src/analysiswhitebox.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -18,6 +18,7 @@
 #include <wchar.h>
 #include <stddef.h>
 
+
 #include <iostream>
 
 #include "cpixidxdb.h"
--- a/searchengine/cpix/tsrc/cpixunittest/src/cpixunittest.cpp	Thu May 27 13:59:44 2010 +0300
+++ b/searchengine/cpix/tsrc/cpixunittest/src/cpixunittest.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -68,6 +68,8 @@
 
 Itk::TesterBase * CreateASyncTests();
 
+Itk::TesterBase * CreateMiscTests();
+
 // Avoid media Testcases if MDS not present. 
 #ifdef MDS_PRESENT
 // Spi 
@@ -98,7 +100,7 @@
     SuiteTester
         * all = new SuiteTester("all");
     
-//
+
     all->add(CreateSuiteTesterRandom()); 
     all->add(CreateDestructiveTests());
     all->add(CreatePrematureQryTests());
@@ -116,12 +118,12 @@
     all->add(CreateUtf8PathTests());
     all->add(CreateAnalysisTests());		
 		 
-
+    all->add(CreatePdfSearchTests());
     all->add(CreateDocumentTests());		
     all->add(CreateFlushTests());			
     all->add(CreateHeartbeatTests());	    
     all->add(CreateTermSearchTests()); 		
-    all->add(CreatePdfSearchTests());
+    
     
     // TODO enable later Itk::TesterBase * CreateClQryHierarchy();
 
@@ -134,6 +136,7 @@
     // all->add(CreateSpiHierarchy());  // Commented for JPG/EXIF files checks
 #endif
 	all->add(CreateQueryTests()); 
+	all->add(CreateMiscTests());
 
     // add more top level test suites here
     // ...
@@ -173,6 +176,39 @@
                         printf("Failed to initialize Cpix\n");
                         return -1;
                     }
+//                /* Added for decision coverage Test case */
+                const char *cpix_Dir = cpix_InitParams_getCpixDir(initParams);
+                
+                const char *logFilebase = cpix_InitParams_getLogFileBase(initParams);
+                
+                size_t logSizeLimit = cpix_InitParams_getLogSizeLimit(initParams);
+                
+                size_t logSizeRecurrency = cpix_InitParams_getLogSizeCheckRecurrency(initParams);
+                
+                size_t maxIdleSec = cpix_InitParams_getMaxIdleSec(initParams);
+                
+                //size_t maxInsertBufferSize = cpix_InitParams_getMaxInsertBufSize(initParams);
+                //cpix_InitParams_setMaxInsertBufferSize(initParams, maxInsertBufferSize);
+                
+                size_t insertBufMaxDocs = cpix_InitParams_getInsertBufMaxDocs(initParams);
+                cpix_InitParams_setInsertBufMaxDocs(initParams, insertBufMaxDocs);
+                
+                size_t idxJobQueueSize = cpix_InitParams_getIdxJobQueueSize(initParams);
+                cpix_InitParams_setIdxJobQueueSize(initParams, idxJobQueueSize);
+                
+                size_t qryJobQueueSize = cpix_InitParams_getQryJobQueueSize(initParams);
+                cpix_InitParams_setQryJobQueueSize(initParams, qryJobQueueSize);
+                
+                int idxThreadPriorityDelta = cpix_InitParams_getIdxThreadPriorityDelta(initParams);
+                cpix_InitParams_setIdxThreadPriorityDelta(initParams, idxThreadPriorityDelta);
+                
+                int qryThreadPriorityDelta = cpix_InitParams_getQryThreadPriorityDelta(initParams);
+                size_t clHitsPageSize = cpix_InitParams_getClHitsPageSize(initParams);
+                
+                cpix_InitParams_setClHitsPageSize(initParams, clHitsPageSize);
+                
+                
+                
                 cpix_InitParams_setMaxIdleSec(initParams,
                                               MaxIdleSec);
                 if (cpix_Failed(initParams))
--- a/searchengine/cpix/tsrc/cpixunittest/src/documenttest.cpp	Thu May 27 13:59:44 2010 +0300
+++ b/searchengine/cpix/tsrc/cpixunittest/src/documenttest.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -36,6 +36,8 @@
 #include "setupsentry.h"
 
 #include "std_log_result.h"
+#include "..\..\..\cpix\src\qrytypes\prefixqrytype.cpp"
+#include "..\..\..\cpix\src\qrytypes\termsqrytype.cpp"
 
 #define TEST_DOCUMENT_QBASEAPPCLASS "@0:root test document"
 #define TEST_DOCUMENT_INDEXDB_PATH "c:\\Data\\indexing\\indexdb\\root\\test\\document"
@@ -293,6 +295,61 @@
     {
     }
 
+    
+
+    void TestPrefixQryType(Itk::TestMgr *testMgr )
+        {
+        char *xml_file = (char *)__FUNCTION__;
+        assert_failed = 0;
+        tearDown();
+        setup();
+        cpix_Result  result;
+        addDocument(testMgr,
+                    LDOCUID1,
+                    DOC1CONTENT);
+        addDocument(testMgr,
+                    LDOCUID2,
+                    DOC2CONTENT);
+        
+        cpix_IdxDb_flush(idxDb_);
+        ITK_EXPECT(testMgr,
+                   cpix_Succeeded(idxDb_),
+                   "Flushing index has failed");
+        if(!cpix_Succeeded(idxDb_))
+            {
+            assert_failed = 1;
+            }
+        Cpix::PrefixQryType *qryType = new Cpix::PrefixQryType;
+        std::list<std::wstring> list(3,L"");
+        std::list<std::wstring> list1;
+        qryType->setUp(queryParser_, list, SEARCH_TERM);
+        qryType->setUp(queryParser_, list1, SEARCH_TERM);
+        cpix_IdxSearcher *
+        searcher = cpix_IdxSearcher_openDb(&result,
+                TEST_DOCUMENT_QBASEAPPCLASS);
+        if (searcher == NULL)
+            {
+                ITK_PANIC("Could not create searcher");
+            }
+        cpix_Hits *Hits1 = qryType->search(searcher);
+        cpix_Hits *Hits2 = qryType->search(idxDb_);
+        testResultXml(xml_file);
+        }
+    
+    void TestTermsQryType(Itk::TestMgr * )
+        {
+        char *xml_file = (char *)__FUNCTION__;
+        assert_failed = 0;
+        Cpix::TermsQryType qrytype;
+        tearDown();
+        setup();
+        std::list<std::wstring> list(3, L"term");
+        std::list<std::wstring> list1;
+        qrytype.setUp(queryParser_, list, SEARCH_TERM);
+        qrytype.setUp(queryParser_, list1, SEARCH_TERM);
+        testResultXml(xml_file);
+        }
+    
     void testNoBoostingFields(Itk::TestMgr * testMgr)
     {
         // Don't boost Field Alpha in doc1
@@ -652,6 +709,18 @@
             documentContext,
                        &DocumentContext::testBoostQuery);
 #undef TEST
-        
+    // Both test throws the exception so need not to cover.
+//#define TEST "perfixqrytype"
+//    contextTester->add(TEST,
+//            documentContext,
+//                &DocumentContext::TestPrefixQryType);
+//#undef TEST    
+//        
+//#define TEST "termqrytype"
+//    contextTester->add(TEST,
+//            documentContext,
+//                &DocumentContext::TestTermsQryType);
+//#undef TEST 
+    
     return contextTester;
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/cpix/tsrc/cpixunittest/src/misc.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,159 @@
+/*
+* Copyright (c) 2010 Nokia Corporation and/or its subsidiary(-ies).
+* All rights reserved.
+* This component and the accompanying materials are made available
+* under the terms of "Eclipse Public License v1.0"
+* which accompanies this distribution, and is available
+* at the URL "http://www.eclipse.org/legal/epl-v10.html".
+*
+* Initial Contributors:
+* Nokia Corporation - initial contribution.
+*
+* Contributors:
+*
+* Description: 
+*
+*/
+
+
+#include <wchar.h>
+#include <stddef.h>
+#include <iostream>
+#include "itk.h"
+
+#include "config.h"
+#include "testutils.h"
+
+#include "std_log_result.h"
+
+#include "cpixexc.h"
+#include "cpixhits.h"
+#include "cpixidxdb.h"
+
+
+#include "..\..\..\cpix\src\cpixerror.cpp"
+#include "..\..\..\cpix\src\cpixanalyzer.cpp"
+#include "..\..\..\cpix\src\fileparser\fileparser.cpp"
+#include "..\..\..\cpix\src\fileparser\pdffileparser.cpp"
+#include "..\..\..\cpix\src\qrytypes\cluceneqrytype.cpp"
+
+const cpix_LangCode cpix_LANG_JA = { "ja" };
+void TestAnalyzer(Itk::TestMgr *)
+    {
+    const cpix_LangCode **C_Lang = cpix_supportedLanguages();
+    const wchar_t* toWchar = cpix_ToWideLangCode(cpix_LANG_EN);
+    cpix_Result result;
+    cpix_Analyzer * c_analyzer = cpix_CreateSnowballAnalyzer(&result,cpix_LANG_JA);
+    }
+void TestSetError(Itk::TestMgr *)
+    {
+    struct ErrorInfo serrorinfo;
+    serrorinfo.setInfo(ET_CPIX_EXC);
+    serrorinfo.setInfo(ET_CPIX_EXC, "");
+    cpix_Error *err1 = CreateError(ET_OS_EXC, L"");
+    cpix_Error *err2 = CreateError(ET_CPIX_EXC, "");
+    cpix_Error_report(NULL,L"",0);
+    }
+
+void TestCpixExc(Itk::TestMgr * )
+    {
+    char *xml_file = (char*)__FUNCTION__;
+    assert_failed = 0;
+    CpixExc *exc1 = new CpixExc(/*(const wchar_t *)NULL*/L"", "misc.cpp", __LINE__);
+    CpixExc *exc2 = new CpixExc("", "misc.cpp", __LINE__);
+    CpixExc exc3 = *exc1;
+    exc3 = *exc2;
+    exc2->file();
+    exc2->line();
+    exc2->wWhat();
+    free(exc1);
+    free(exc2);
+    testResultXml(xml_file);
+    }
+
+void TestHitDocumentList(Itk::TestMgr * )
+    {
+    Cpix::HitDocumentList *hitdoclist = new Cpix::HitDocumentList;
+    hitdoclist->remove(0);
+    free(hitdoclist);
+    }
+
+void TestTermCreateDestroy(Itk::TestMgr * )
+    {
+    cpix_Result result;
+    cpix_Term *term = cpix_Term_create(&result, CONTENTS_FIELD, L"Hello");
+    cpix_Term_destroy(term);
+    }
+
+void TestPdfFileParser(Itk::TestMgr * )
+    {
+    bool isParse1 = Cpix::isFileAllowedToParse(L"");
+    bool isParse2 = Cpix::isFileAllowedToParse(L"c:\\data\\cpixunittestcorpus\\pdf\\ctutor");
+//     getPDFExcerpt is unnamed namespace so cant call ...
+//    std::wstring line;
+//    int result = getPDFExcerpt(L"c:\\data\\cpixunittestcorpus\\pdf\\ctutor\\empty.pdf", &line);
+    
+    char *buffer = "Hello this is test for find string";
+
+    Cpix::FindStringInBuffer(buffer, "find", strlen(buffer));
+    
+    Cpix::FindStringInBuffer(buffer, "no", strlen(buffer));
+    
+    Cpix::seen2("find", "If its for find");
+    
+    Cpix::ExtractNumber("hello calculate 123   ", strlen("hello calculate 123   "));
+    
+    //Cpix::convertPDFToText("c:\\data\\cpixunittestcorpus\\pdf\\ctutor.pdf");
+    
+    
+    }
+void TestLuceneQryType(Itk::TestMgr * )
+    {
+    cpix_Result result;
+    
+    cpix_Analyzer* analyzer = cpix_CreateSimpleAnalyzer(&result);
+    if ( cpix_Failed( &result) ) 
+        {
+            ITK_PANIC("Analyzer could not be created");
+            assert_failed = 1;
+        }
+    cpix_QueryParser
+        * queryParser = cpix_QueryParser_create(&result,
+                                                LCPIX_DEFAULT_FIELD,
+                                                analyzer );
+    if (queryParser == NULL)
+        {
+            cpix_Analyzer_destroy( analyzer );
+            ITK_PANIC("Could not create query parser");
+        }
+    Cpix::LuceneQryType *QryType = new Cpix::LuceneQryType();
+    std::list<std::wstring> list (2, L"no argumnet");
+    //QryType->setUp(queryParser,list,L"Find this");
+    
+    }
+
+
+Itk::TesterBase * CreateMiscTests()
+    {
+    
+    using namespace Itk;
+
+    SuiteTester
+        * misc = new SuiteTester("misc");
+
+    misc->add("testanalyzer",&TestAnalyzer,"testanalyzer");
+    
+    misc->add("SetError",&TestSetError,"SetError");
+    
+    misc->add("CpixExc", &TestCpixExc, "CpixExc");
+
+    misc->add("doclist", &TestHitDocumentList, "doclist");
+    
+    misc->add("termCD", &TestTermCreateDestroy, "termCD");
+
+    misc->add("pdfparser", &TestPdfFileParser, "pdfparser");
+    
+    misc->add("lucenetype", &TestLuceneQryType, "lucenetype");
+    
+    return misc;
+    }
--- a/searchengine/cpix/tsrc/cpixunittest/src/pdftests.cpp	Thu May 27 13:59:44 2010 +0300
+++ b/searchengine/cpix/tsrc/cpixunittest/src/pdftests.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -38,13 +38,14 @@
 #include "suggestion.h"
 #include "std_log_result.h"
 
-const char * PdfDocsToIndex[7] = {
+const char * PdfDocsToIndex[8] = {
     "c:\\data\\cpixunittestcorpus\\pdf\\ctutor.pdf",
-	"c:\\data\\cpixunittestcorpus\\stem\\pdf\\geology.pdf",
-	"c:\\data\\cpixunittestcorpus\\stem\\pdf\\samplepdf.pdf",
-	"c:\\data\\cpixunittestcorpus\\stem\\pdf\\windjack.pdf",
-	"c:\\data\\cpixunittestcorpus\\stem\\pdf\\DCTDecode.pdf",
-	"c:\\data\\cpixunittestcorpus\\stem\\pdf\\Empty.pdf",
+	"c:\\data\\cpixunittestcorpus\\pdf\\geology.pdf",
+	"c:\\data\\cpixunittestcorpus\\pdf\\samplepdf.pdf",
+	"c:\\data\\cpixunittestcorpus\\pdf\\windjack.pdf",
+	"c:\\data\\cpixunittestcorpus\\pdf\\DCTDecode.pdf",
+	"c:\\data\\cpixunittestcorpus\\pdf\\Empty.pdf",
+	"c:\\data\\cpixunittestcorpus\\pdf\\start_enter.pdf",
     NULL
 };
 
--- a/searchengine/cpix/tsrc/cpixunittest/src/std_log_result.cpp	Thu May 27 13:59:44 2010 +0300
+++ b/searchengine/cpix/tsrc/cpixunittest/src/std_log_result.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -1,19 +1,3 @@
-/*
-* Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
-* All rights reserved.
-* This component and the accompanying materials are made available
-* under the terms of "Eclipse Public License v1.0"
-* which accompanies this distribution, and is available
-* at the URL "http://www.eclipse.org/legal/epl-v10.html".
-*
-* Initial Contributors:
-* Nokia Corporation - initial contribution.
-*
-* Contributors:
-*
-* Description: 
-*
-*/
 #include "std_log_result.h"
 #ifdef __SYMBIAN32__
 #define LOG_FILE "c:\\logs\\std_test_log.txt"
@@ -124,7 +108,8 @@
     // create the xml file name
     FILE *fp_result;
     sprintf(xmlfilename, "%s%s.%s", LOG_DIR, filename, LOG_FILE_EXT);
-    strftime(time_buf,50,"%c",tm1);
+    //strftime(time_buf,50,"%c",tm1);
+    sprintf(time_buf,"%s","");
 
     if(assert_failed )
         strcpy(result,"FAILED");
--- a/searchengine/oss/cl/clucene/group/clucene.mmp	Thu May 27 13:59:44 2010 +0300
+++ b/searchengine/oss/cl/clucene/group/clucene.mmp	Fri Jun 11 14:43:47 2010 +0300
@@ -165,6 +165,22 @@
 SOURCE StringIntern.cpp
 SOURCE ThreadLocal.cpp
 
+// highlighter
+SOURCEPATH	  ../src/CLucene/highlighter
+SOURCE Encoder.cpp 
+SOURCE Formatter.cpp
+SOURCE Fragmenter.cpp
+SOURCE Highlighter.cpp
+SOURCE QueryScorer.cpp
+SOURCE QueryTermExtractor.cpp
+SOURCE SimpleFragmenter.cpp
+SOURCE SimpleHTMLEncoder.cpp
+SOURCE SimpleHTMLFormatter.cpp
+SOURCE TextFragment.cpp
+SOURCE TokenSources.cpp
+SOURCE WeightedTerm.cpp
+SOURCE TokenGroup.cpp
+
 #endif // !MONOLITHIC
 
 //CAPABILITY 	  NONE
--- a/searchengine/oss/cl/clucene/src/clucene.h	Thu May 27 13:59:44 2010 +0300
+++ b/searchengine/oss/cl/clucene/src/clucene.h	Fri Jun 11 14:43:47 2010 +0300
@@ -35,5 +35,6 @@
 #include "CLucene/analysis/standard/StandardAnalyzer.h"
 #include "CLucene/analysis/Analyzers.h"
 #include "CLucene/util/Reader.h"
+#include "CLucene/highlighter/Highlighter.h"
 
 #endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/Encoder.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,3 @@
+#include "CLucene/StdHeader.h"
+#include "Encoder.h"
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/Encoder.h	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,61 @@
+/**
+ * Copyright 2002-2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _lucene_search_highlight_encoder_
+#define _lucene_search_highlight_encoder_
+
+#if defined(_LUCENE_PRAGMA_ONCE)
+# pragma once
+#endif
+
+#include "CLucene/util/StringBuffer.h"
+
+CL_NS_DEF2(search,highlight)
+
+/**
+ * Encodes original text. The Encoder works with the Formatter to generate the output.
+ *
+ */
+class Encoder:LUCENE_BASE
+{
+public:
+	/** Virtual destructor */
+	virtual ~Encoder(){
+	}
+
+	/**
+	 * @param originalText The section of text being output
+	 */
+	virtual TCHAR* encodeText(TCHAR* originalText) = 0;
+};
+
+/**
+ * Simple {@link Encoder} implementation that does not modify the output
+ * @author Nicko Cadell
+ *
+ */
+class DefaultEncoder: public Encoder
+{
+public:
+	TCHAR* encodeText(TCHAR* originalText)
+	{
+		return STRDUP_TtoT(originalText);
+	}
+};
+
+
+CL_NS_END2
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/Formatter.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,2 @@
+#include "CLucene/StdHeader.h"
+#include "Formatter.h"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/Formatter.h	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,54 @@
+/**
+ * Copyright 2002-2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _lucene_search_highlight_formatter_
+#define _lucene_search_highlight_formatter_
+
+#if defined(_LUCENE_PRAGMA_ONCE)
+# pragma once
+#endif
+
+#include "CLucene/highlighter/TokenGroup.h"
+
+CL_NS_DEF2(search,highlight)
+
+/**
+ * Processes terms found in the original text, typically by applying some form 
+ * of mark-up to highlight terms in HTML search results pages.
+ *
+ */
+class Formatter:LUCENE_BASE
+{
+public:
+
+	/** Virtual destructor */
+	virtual ~Formatter(){
+	}
+
+  /**
+	 * @param originalTermText The section of text being considered for markup
+	 * @param tokenGroup contains one or several overlapping Tokens along with
+	 * their scores and positions.
+	 */
+  virtual TCHAR* highlightTerm(const TCHAR* originalTermText, const TokenGroup* tokenGroup) = 0;
+};
+
+CL_NS_END2
+
+#endif
+
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/Fragmenter.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,3 @@
+#include "CLucene/StdHeader.h"
+#include "Fragmenter.h"
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/Fragmenter.h	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,55 @@
+/**
+ * Copyright 2002-2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _lucene_search_highlight_fragmenter_
+#define _lucene_search_highlight_fragmenter_
+
+#if defined(_LUCENE_PRAGMA_ONCE)
+# pragma once
+#endif
+
+#include "CLucene/analysis/AnalysisHeader.h"
+
+CL_NS_DEF2(search,highlight)
+
+/**
+ * Implements the policy for breaking text into multiple fragments for consideration
+ * by the {@link Highlighter} class. A sophisticated implementation may do this on the basis
+ * of detecting end of sentences in the text. 
+ */
+class Fragmenter:LUCENE_BASE
+{
+public:
+	/** Virtual destructor */
+	virtual ~Fragmenter(){
+	}
+
+	/**
+	 * Initializes the Fragmenter
+	 * @param originalText
+	 */
+	virtual void start(const TCHAR* originalText) = 0;
+
+	/**
+	 * Test to see if this token from the stream should be held in a new TextFragment
+	 * @param nextToken
+	 */
+	virtual bool isNewFragment(const CL_NS(analysis)::Token * nextToken) = 0;
+};
+
+CL_NS_END2
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/HighlightScorer.h	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,65 @@
+/**
+ * Copyright 2002-2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _lucene_search_highlight_highlighterscorer_
+#define _lucene_search_highlight_highlighterscorer_
+
+#if defined(_LUCENE_PRAGMA_ONCE)
+# pragma once
+#endif
+
+#include "CLucene/highlighter/TextFragment.h"
+#include "CLucene/analysis/AnalysisHeader.h"
+
+CL_NS_DEF2(search,highlight)
+
+/**
+ * Adds to the score for a fragment based on its tokens
+ */
+class HighlightScorer:LUCENE_BASE
+{
+public:
+	virtual ~HighlightScorer(){
+	}
+
+	/**
+	 * called when a new fragment is started for consideration
+	 * @param newFragment
+	 */
+	virtual void startFragment(TextFragment* newFragment) = 0;
+
+	/**
+	 * Called for each token in the current fragment
+	 * @param token The token to be scored
+	 * @return a score which is passed to the Highlighter class to influence the mark-up of the text
+	 * (this return value is NOT used to score the fragment)
+	 */
+	virtual float_t getTokenScore(CL_NS(analysis)::Token* token) = 0;
+	
+
+	/**
+	 * Called when the highlighter has no more tokens for the current fragment - the scorer returns
+	 * the weighting it has derived for the most recent fragment, typically based on the tokens
+	 * passed to getTokenScore(). 
+	 *
+	 */	
+	virtual float_t getFragmentScore() = 0;
+};
+
+CL_NS_END2
+#endif
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/Highlighter.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,439 @@
+#include "CLucene/StdHeader.h"
+#include "Highlighter.h"
+
+CL_NS_DEF2(search,highlight)
+CL_NS_USE(analysis)
+CL_NS_USE(util)
+
+	class FragmentQueue : public CL_NS(util)::PriorityQueue<TextFragment*, CL_NS(util)::Deletor::Object<TextFragment> >
+	{
+	public:
+		FragmentQueue(int32_t size)
+		{
+			initialize(size, true);
+		}
+
+	protected:
+		bool lessThan(TextFragment * fragA, TextFragment * fragB)
+		{
+			if (fragA->getScore() == fragB->getScore())
+				return fragA->getFragNum() > fragB->getFragNum();
+			else
+				return fragA->getScore() < fragB->getScore();
+		}
+	};
+
+
+	Highlighter::Highlighter(HighlightScorer * fragmentScorer):
+        delete_formatter(true),
+        delete_encoder(true),
+		delete_textFragmenter(true),
+		delete_fragmentScorer(false)		
+	{
+		maxDocBytesToAnalyze = DEFAULT_MAX_DOC_BYTES_TO_ANALYZE;
+		
+		_textFragmenter = _CLNEW SimpleFragmenter();
+		_fragmentScorer = fragmentScorer;
+		_formatter = _CLNEW SimpleHTMLFormatter();
+		_encoder = _CLNEW DefaultEncoder();
+	}
+
+	Highlighter::Highlighter(Formatter * formatter, HighlightScorer * fragmentScorer):
+        delete_formatter(false),
+        delete_encoder(true),
+		delete_textFragmenter(true),
+		delete_fragmentScorer(false)	
+	{
+		maxDocBytesToAnalyze = DEFAULT_MAX_DOC_BYTES_TO_ANALYZE;
+		
+		_textFragmenter = _CLNEW SimpleFragmenter();
+		_fragmentScorer = fragmentScorer;
+		_formatter = formatter;
+		_encoder = _CLNEW DefaultEncoder();
+	}
+
+	Highlighter::Highlighter(Formatter * formatter, Encoder* encoder, HighlightScorer * fragmentScorer):
+        delete_formatter(false),
+        delete_encoder(true),
+        delete_textFragmenter(true),
+        delete_fragmentScorer(false)    
+	{
+		maxDocBytesToAnalyze = DEFAULT_MAX_DOC_BYTES_TO_ANALYZE;
+		_textFragmenter = _CLNEW SimpleFragmenter();
+		_fragmentScorer = fragmentScorer;
+		_formatter = formatter;
+		_encoder = encoder;
+	}
+
+	Highlighter::~Highlighter()
+	{
+		if ( delete_textFragmenter )
+			_CLDELETE ( _textFragmenter );
+
+		if ( delete_fragmentScorer )
+			_CLDELETE(_fragmentScorer);
+
+		if( delete_formatter )
+			_CLDELETE(_formatter);
+
+		if ( delete_encoder )
+			_CLDELETE(_encoder);
+	}
+
+	TCHAR* Highlighter::getBestFragment(TokenStream * tokenStream, const TCHAR* text)
+	{
+		TCHAR** results = getBestFragments(tokenStream,text, 1);
+		TCHAR* result = 0;
+
+		if (results[0] != NULL )
+			result = stringDuplicate(results[0]);
+
+		_CLDELETE_CARRAY_ALL(results);
+
+		return result;
+	}
+
+	/**
+  	* Highlights chosen terms in a text, extracting the most relevant section.
+  	* This is a convenience method that calls
+  	* {@link #getBestFragment(TokenStream, const TCHAR*)}
+  	*
+  	* @param analyzer   the analyzer that will be used to split <code>text</code>
+  	* into chunks
+  	* @param text text to highlight terms in
+  	* @param fieldName Name of field used to influence analyzer's tokenization policy
+  	*
+  	* @return highlighted text fragment or NULL if no terms found
+  	*/
+  	TCHAR* Highlighter::getBestFragment(Analyzer* analyzer, const TCHAR* fieldName, const TCHAR* text)
+  	{
+  	    TokenStream* tokenStream = analyzer->tokenStream(fieldName, _CLNEW StringReader(text));
+  	    return getBestFragment(tokenStream, text);
+  	}
+
+	TCHAR** Highlighter::getBestFragments(
+		TokenStream * tokenStream,	
+		const TCHAR* text,
+		int32_t maxNumFragments)
+	{
+		maxNumFragments = max((int32_t)1, maxNumFragments); //sanity check
+		
+		StringBuffer buffer;
+		TextFragment** frags = getBestTextFragments(&buffer,tokenStream,text, true,maxNumFragments);
+
+		//Get text
+		CL_NS(util)::StringArray fragTexts;
+		for (uint32_t i=0; frags[i]!=NULL; i++)
+		{
+			TextFragment* f = frags[i];
+			if ((f != NULL) && (f->getScore() > 0))
+			{
+				 fragTexts.push_back(f->toString(&buffer));
+			}
+			_CLDELETE(f);
+		}
+
+		_CLDELETE_ARRAY(frags);
+
+		TCHAR** ret = _CL_NEWARRAY(TCHAR*,fragTexts.size()+1);
+		fragTexts.toArray(ret);
+		return ret;
+	}
+
+	TCHAR* Highlighter::getBestFragments(
+		TokenStream * tokenStream,	
+		const TCHAR* text,
+		int32_t maxNumFragments,
+		const TCHAR* separator)
+	{
+		TCHAR** sections = getBestFragments(tokenStream,text, maxNumFragments);
+		StringBuffer result;
+
+		for (int32_t i = 0; sections[i]!=NULL; i++)
+		{
+			if (i > 0)
+			{
+				result.append(separator);
+			}
+			result.append(sections[i]);
+		}
+
+		_CLDELETE_CARRAY_ALL(sections);
+		return result.toString();
+	}
+
+	TextFragment** Highlighter::getBestTextFragments(
+		StringBuffer* writeTo,
+		TokenStream * tokenStream,	
+		const TCHAR* text,
+		bool mergeContiguousFragments,
+		int32_t maxNumFragments)
+	{
+		CLArrayList<TextFragment*> docFrags(false);
+		TextFragment* currentFrag = _CLNEW TextFragment(writeTo->length(), docFrags.size());
+		_fragmentScorer->startFragment(currentFrag);
+		docFrags.push_back(currentFrag);
+
+		FragmentQueue fragQueue(maxNumFragments);
+
+		try
+		{
+			int32_t startOffset;
+			int32_t endOffset;
+			int32_t lastEndOffset = 0;
+			_textFragmenter->start(text);
+			TCHAR substringBuffer[LUCENE_MAX_WORD_LEN];
+
+			TokenGroup* tokenGroup=_CLNEW TokenGroup();
+
+			TCHAR buffer[LUCENE_MAX_FIELD_LEN+1];
+			Token token;
+			while ( tokenStream->next(&token) )
+			{
+				if((tokenGroup->getNumTokens()>0)&&(tokenGroup->isDistinct(&token))){
+					//the current token is distinct from previous tokens -
+					// markup the cached token group info
+					 startOffset = tokenGroup->getStartOffset();
+					 endOffset = tokenGroup->getEndOffset();
+
+					 _tcsncpy(substringBuffer,text+startOffset,endOffset-startOffset);
+					 substringBuffer[endOffset-startOffset]=_T('\0');
+
+					 TCHAR* encoded = _encoder->encodeText(substringBuffer);
+					 const TCHAR* markedUpText=_formatter->highlightTerm(encoded, tokenGroup);
+					 _CLDELETE_CARRAY(encoded);
+
+					 //store any whitespace etc from between this and last group
+					 if (startOffset > lastEndOffset){
+						 int len = startOffset-lastEndOffset;
+						 if ( len > LUCENE_MAX_FIELD_LEN )
+							 len = LUCENE_MAX_FIELD_LEN;
+						 _tcsncpy(buffer,text+lastEndOffset,len);
+						 buffer[len]=_T('\0');
+
+						 TCHAR* encoded = _encoder->encodeText(buffer);
+						 writeTo->append(encoded);
+						 _CLDELETE_CARRAY(encoded);
+					 }
+					 writeTo->append(markedUpText);
+					 lastEndOffset=endOffset;
+					 tokenGroup->clear();
+					 _CLDELETE_CARRAY(markedUpText);
+
+					//check if current token marks the start of a new fragment
+					if (_textFragmenter->isNewFragment(&token))
+					{
+						currentFrag->setScore(_fragmentScorer->getFragmentScore());
+						//record stats for a new fragment
+						currentFrag->setTextEndPos( writeTo->length() );
+						currentFrag =_CLNEW TextFragment(writeTo->length(), docFrags.size());
+						_fragmentScorer->startFragment(currentFrag);
+						docFrags.push_back(currentFrag);
+					}
+				}
+
+				// does query contain current token?
+				float_t score=_fragmentScorer->getTokenScore(&token);			
+				//TCHAR* highlightedTerm = _formatter->highlightTerm(&substringBuffer, token->termText(), score, startOffset);
+				//newText->append(highlightedTerm);
+				//_CLDELETE_CARRAY(highlightedTerm);
+				//_CLDELETE(token);
+
+				tokenGroup->addToken(&token,_fragmentScorer->getTokenScore(&token));
+
+				if(lastEndOffset>maxDocBytesToAnalyze)
+				{
+					break;
+				}
+			}
+			currentFrag->setScore(_fragmentScorer->getFragmentScore());
+
+			if(tokenGroup->getNumTokens()>0)
+  	        {
+  	            //flush the accumulated text (same code as in above loop)
+  	            startOffset = tokenGroup->getStartOffset();
+  	            endOffset = tokenGroup->getEndOffset();
+
+				_tcsncpy(substringBuffer,text+startOffset,endOffset-startOffset);
+				substringBuffer[endOffset-startOffset]=_T('\0');
+
+				TCHAR* encoded = _encoder->encodeText(substringBuffer);
+  	            const TCHAR* markedUpText=_formatter->highlightTerm(encoded, tokenGroup);
+				_CLDELETE_CARRAY(encoded);
+
+  	            //store any whitespace etc from between this and last group
+				if (startOffset > lastEndOffset){
+					int len = startOffset-lastEndOffset;
+					if ( len > LUCENE_MAX_FIELD_LEN )
+						len = LUCENE_MAX_FIELD_LEN;
+					_tcsncpy(buffer,text+lastEndOffset,len);
+					buffer[len]=_T('\0');
+
+					TCHAR* encoded = _encoder->encodeText(buffer);
+  					writeTo->append(encoded);
+					_CLDELETE_CARRAY(encoded);
+				}
+  	            writeTo->append(markedUpText);
+  	            lastEndOffset=endOffset;
+
+				_CLDELETE_CARRAY(markedUpText);
+  	        }
+
+			// append text after end of last token
+			//if (lastEndOffset < (int32_t)_tcslen(text))
+			//newText->append(text+lastEndOffset);
+
+			currentFrag->setTextEndPos(writeTo->length());
+
+			//sort the most relevant sections of the text
+			while (docFrags.size() > 0) {
+			//for (TextFragmentList::iterator i = docFrags.begin(); i != docFrags.end(); i++)
+			//{
+				currentFrag = (TextFragment*) docFrags[0];
+				docFrags.remove(0);
+
+				//If you are running with a version of Lucene before 11th Sept 03
+				// you do not have PriorityQueue.insert() - so uncomment the code below					
+
+				/*if (currentFrag->getScore() >= minScore)
+				{
+					fragQueue.put(currentFrag);
+					if (fragQueue.size() > maxNumFragments)
+					{ // if hit queue overfull
+						_CLLDELETE(fragQueue.pop()); // remove lowest in hit queue
+						minScore = ((TextFragment *) fragQueue.top())->getScore(); // reset minScore
+					}
+
+
+				} else {
+					_CLDELETE(currentFrag);
+				}*/
+
+				//The above code caused a problem as a result of Christoph Goller's 11th Sept 03
+				//fix to PriorityQueue. The correct method to use here is the new "insert" method
+				// USE ABOVE CODE IF THIS DOES NOT COMPILE!
+				if ( !fragQueue.insert(currentFrag) )
+					_CLDELETE(currentFrag);
+
+				//todo: check this
+			}
+
+			//return the most relevant fragments
+			int32_t fragsLen = fragQueue.size();
+			TextFragment** frags = _CL_NEWARRAY(TextFragment*,fragsLen+1);
+			for ( int32_t i=0;i<fragsLen;i++ )
+				frags[i] = fragQueue.pop();
+			frags[fragsLen]=NULL;
+
+			//merge any contiguous fragments to improve readability
+  	        if(mergeContiguousFragments)
+  	        {
+  	            _mergeContiguousFragments(frags,fragsLen);
+  	            CLArrayList<TextFragment*> fragTexts;
+  	            for (int32_t i = 0; i < fragsLen; i++)
+  	            {
+					TextFragment* tf = frags[i];
+  	                if ((tf != NULL) && (tf->getScore() > 0))
+  						fragTexts.push_back(tf);
+  	                else
+						_CLDELETE(tf);
+  	            }
+				_CLDELETE_ARRAY(frags);
+				frags = _CL_NEWARRAY(TextFragment*,fragTexts.size()+1);
+				fragTexts.toArray(frags);
+  	        }
+
+			_CLDELETE(tokenGroup);
+			//_CLDELETE(newText);
+			return frags;
+
+		}
+		_CLFINALLY(
+			if (tokenStream)
+			{
+				try
+				{
+					tokenStream->close();
+				}
+				catch (...)
+				{
+				}
+			}
+		)
+	}
+
+
+	void Highlighter::_mergeContiguousFragments(TextFragment** frag, int32_t fragsLen)
+	{
+		bool mergingStillBeingDone;
+		if ( frag[0] != NULL )
+			do
+			{
+				mergingStillBeingDone = false; //initialise loop control flag
+				//for each fragment, scan other frags looking for contiguous blocks
+				for (int32_t i=0; i<fragsLen; i++)
+				{
+					if (frag[i] == NULL)
+					{
+						continue;
+					}
+					//merge any contiguous blocks 
+					for (int32_t x=0; x<fragsLen; x++)
+					{
+					   if ( x==i )
+					      continue; //bug 1072183. don't try and merge with self
+
+						if (frag[x] == NULL)
+							continue;
+						if (frag[i] == NULL)
+							break;
+
+						TextFragment * frag1 = NULL;
+						TextFragment * frag2 = NULL;
+						int32_t frag1Num = 0;
+						int32_t frag2Num = 0;
+						int32_t bestScoringFragNum;
+						int32_t worstScoringFragNum;
+						//if blocks are contiguous....
+						if (frag[i]->follows(frag[x]))
+						{
+							frag1 = frag[x];
+							frag1Num = x;
+							frag2 = frag[i];
+							frag2Num = i;
+						}
+						else if (frag[x]->follows(frag[i]))
+						{
+							frag1 = frag[i];
+							frag1Num = i;
+							frag2 = frag[x];
+							frag2Num = x;
+						}
+						//merging required..
+						if (frag1 != NULL)
+						{
+							if (frag1->getScore() > frag2->getScore())
+							{
+								bestScoringFragNum = frag1Num;
+								worstScoringFragNum = frag2Num;
+							}
+							else
+							{
+								bestScoringFragNum = frag2Num;
+								worstScoringFragNum = frag1Num;
+							}
+							frag1->merge(frag2);
+							frag[worstScoringFragNum]= NULL;
+							mergingStillBeingDone = true;
+							frag[bestScoringFragNum]=frag1;
+							_CLDELETE(frag2);
+						}
+					}
+				}
+			}
+			while (mergingStillBeingDone);
+	}
+
+
+
+CL_NS_END2
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/Highlighter.h	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,265 @@
+/**
+ * Copyright 2002-2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _lucene_search_highlight_highlighter_
+#define _lucene_search_highlight_highlighter_
+
+#if defined(_LUCENE_PRAGMA_ONCE)
+# pragma once
+#endif
+
+#include "CLucene/util/StringBuffer.h"
+#include "CLucene/util/PriorityQueue.h"
+#include "CLucene/util/VoidList.h"
+#include "CLucene/highlighter/Formatter.h"
+#include "CLucene/highlighter/Encoder.h"
+#include "CLucene/highlighter/SimpleHTMLFormatter.h"
+#include "CLucene/highlighter/Fragmenter.h"
+#include "CLucene/highlighter/HighlightScorer.h"
+#include "CLucene/highlighter/SimpleFragmenter.h"
+#include "CLucene/highlighter/TextFragment.h"
+
+CL_NS_DEF2(search,highlight)
+
+/**
+* Class used to markup highlighted terms found in the best sections of a
+* text, using configurable {@link Fragmenter}, {@link HighlightScorer},
+* {@link Formatter}, {@link Encoder} and tokenizers.
+*
+*/
+class Highlighter :LUCENE_BASE
+{
+private:
+	int32_t maxDocBytesToAnalyze;
+
+	Formatter * _formatter;
+	bool delete_formatter;
+	
+	Encoder* _encoder;
+	bool delete_encoder;
+
+	Fragmenter * _textFragmenter;
+	bool delete_textFragmenter;
+
+	HighlightScorer * _fragmentScorer;
+	bool delete_fragmentScorer;
+
+	/** Improves readability of a score-sorted list of TextFragments by merging any fragments 
+	 * that were contiguous in the original text into one larger fragment with the correct order.
+	 * This will leave a "null" in the array entry for the lesser scored fragment. 
+	 * 
+	 * @param frag An array of document fragments in descending score
+	 */
+	void _mergeContiguousFragments(TextFragment** frag, int32_t fragsLen);
+	
+public:
+	LUCENE_STATIC_CONSTANT(int32_t, DEFAULT_MAX_DOC_BYTES_TO_ANALYZE=50*1024);
+
+	/**
+	 * Constructs a Highlighter with the provided scorer. NOTE(review): all ctors set
+	 * delete_fragmentScorer=false, so the destructor does NOT free the scorer.
+	 */
+	Highlighter(HighlightScorer * fragmentScorer);
+
+	Highlighter(Formatter * formatter, HighlightScorer * fragmentScorer);
+
+	Highlighter(Formatter * formatter, Encoder* encoder, HighlightScorer * fragmentScorer);
+
+
+	/**
+	 * Destructor. Frees only the members whose delete_* ownership flag is true (see ctors).
+	 */
+	~Highlighter();
+
+	/**
+	 * Highlights chosen terms in a text, extracting the most relevant section.
+	 * The document text is analysed in chunks to record hit statistics
+	 * across the document. After accumulating stats, the fragment with the highest score
+	 * is returned
+	 *
+	 * @param tokenStream   a stream of tokens identified in the text parameter, including offset information. 
+	 * This is typically produced by an analyzer re-parsing a document's 
+	 * text. Some work may be done on retrieving TokenStreams more efficiently
+	 * by adding support for storing original text position data in the Lucene
+	 * index but this support is not currently available (as of Lucene 1.4 rc2).  
+	 * @param text text to highlight terms in
+	 *
+	 * @return highlighted text fragment or null if no terms found
+	 */
+	TCHAR* getBestFragment(CL_NS(analysis)::TokenStream * tokenStream, const TCHAR* text);
+
+	/**
+	 * Highlights chosen terms in a text, extracting the most relevant section.
+	 * This is a convenience method that calls
+	 * {@link #getBestFragment(TokenStream, const TCHAR*)}
+	 *
+	 * @param analyzer   the analyzer that will be used to split <code>text</code>
+	 * into chunks  
+	 * @param text text to highlight terms in
+	 * @param fieldName Name of field used to influence analyzer's tokenization policy 
+	 *
+	 * @return highlighted text fragment or null if no terms found
+	 */
+	TCHAR* getBestFragment(CL_NS(analysis)::Analyzer* analyzer, const TCHAR* fieldName, const TCHAR* text);
+
+	/**
+	 * Highlights chosen terms in a text, extracting the most relevant sections.
+	 * This is a convenience method that calls
+	 * {@link #getBestFragments(TokenStream, const TCHAR*, int)}
+	 *
+	 * @param analyzer   the analyzer that will be used to split <code>text</code>
+	 * into chunks  
+	 * @param text        	text to highlight terms in
+	 * @param maxNumFragments  the maximum number of fragments.
+	 *
+	 * @return highlighted text fragments (between 0 and maxNumFragments number of fragments)
+	 */
+	TCHAR** getBestFragments(
+		CL_NS(analysis)::Analyzer* analyzer,	
+		const TCHAR* text,
+		int32_t maxNumFragments);
+
+	/**
+	 * Highlights chosen terms in a text, extracting the most relevant sections.
+	 * The document text is analysed in chunks to record hit statistics
+	 * across the document. After accumulating stats, the fragments with the highest scores
+	 * are returned as an array of strings in order of score (contiguous fragments are merged into 
+	 * one in their original order to improve readability)
+	 *
+	 * @param text        	text to highlight terms in
+	 * @param maxNumFragments  the maximum number of fragments.
+	 *
+	 * @return highlighted text fragments (between 0 and maxNumFragments number of fragments)
+	 */
+	 TCHAR** getBestFragments(
+		CL_NS(analysis)::TokenStream * tokenStream,	
+		const TCHAR* text,
+		int32_t maxNumFragments);
+
+	/**
+    * Low level api to get the most relevant (formatted) sections of the document.
+  	* This method has been made public to allow visibility of score information held in TextFragment objects.
+  	* Thanks to Jason Calabrese for help in redefining the interface.
+    * @param tokenStream
+    * @param text
+    * @param maxNumFragments
+    * @param mergeContiguousFragments
+    */
+	TextFragment** getBestTextFragments(
+		CL_NS(util)::StringBuffer* writeTo,
+		CL_NS(analysis)::TokenStream * tokenStream,	
+		const TCHAR* text,
+		bool mergeContiguousFragments,
+		int32_t maxNumFragments);
+
+	/**
+	 * Highlights terms in the  text , extracting the most relevant sections
+	 * and concatenating the chosen fragments with a separator (typically "...").
+	 * The document text is analysed in chunks to record hit statistics
+	 * across the document. After accumulating stats, the fragments with the highest scores
+	 * are returned in order as "separator" delimited strings.
+	 *
+	 * @param text        text to highlight terms in
+	 * @param maxNumFragments  the maximum number of fragments.
+	 * @param separator  the separator used to intersperse the document fragments (typically "...")
+	 *
+	 * @return highlighted text
+	 */
+	TCHAR* getBestFragments(
+		CL_NS(analysis)::TokenStream * tokenStream,	
+		const TCHAR* text,
+		int32_t maxNumFragments,
+		const TCHAR* separator);
+
+	/**
+	 * @return the maximum number of bytes to be tokenized per doc 
+	 */
+	int32_t getMaxDocBytesToAnalyze()
+	{
+		return maxDocBytesToAnalyze;
+	}
+
+	/**
+	 * @param byteCount the maximum number of bytes to be tokenized per doc
+	 * (This can improve performance with large documents)
+	 */
+	void setMaxDocBytesToAnalyze(int32_t byteCount)
+	{
+		maxDocBytesToAnalyze = byteCount;
+	}
+
+	/**
+	 */
+	Fragmenter * getTextFragmenter()
+	{
+		return _textFragmenter;
+	}
+
+	/**
+	 * @param fragmenter
+	 */
+	void setTextFragmenter(Fragmenter * fragmenter)
+	{
+		if ( delete_textFragmenter ){
+			_CLDELETE(_textFragmenter);
+			delete_textFragmenter = false;
+		}
+		_textFragmenter = fragmenter;
+	}
+
+	/**
+	 * @return Object used to score each text fragment 
+	 */
+	HighlightScorer * getFragmentScorer()
+	{
+		return _fragmentScorer;
+	}
+
+
+	/**
+	 * @param scorer the HighlightScorer used to score each text fragment
+	 */
+	void setFragmentScorer(HighlightScorer * scorer)
+	{
+		if ( delete_fragmentScorer ){
+			delete_fragmentScorer = false;
+			_CLDELETE(scorer);
+		}
+		_fragmentScorer = scorer;
+	}
+
+	
+    Encoder* getEncoder()
+    {
+        return _encoder;
+    }
+    void setEncoder(Encoder* encoder)
+    {
+		if ( delete_encoder ){
+			_CLDELETE(encoder);
+			delete_encoder = false;
+		}
+        this->_encoder = encoder;
+    }
+
+
+};
+
+
+CL_NS_END2
+
+#endif
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/Makefile.am	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,19 @@
+## Makefile.am -- Process this file with automake to produce Makefile.in
+highlighterdir = $(lsrcdir)/highlighter
+highlighterhdir = $(includedir)/CLucene/highlighter
+ 
+libclucene_contrib_la_SOURCES  += $(highlighterdir)/Encoder.cpp
+libclucene_contrib_la_SOURCES  += $(highlighterdir)/Formatter.cpp
+libclucene_contrib_la_SOURCES  += $(highlighterdir)/Fragmenter.cpp
+libclucene_contrib_la_SOURCES  += $(highlighterdir)/Highlighter.cpp
+libclucene_contrib_la_SOURCES  += $(highlighterdir)/QueryScorer.cpp
+libclucene_contrib_la_SOURCES  += $(highlighterdir)/QueryTermExtractor.cpp
+libclucene_contrib_la_SOURCES  += $(highlighterdir)/SimpleFragmenter.cpp
+libclucene_contrib_la_SOURCES  += $(highlighterdir)/SimpleHTMLEncoder.cpp
+libclucene_contrib_la_SOURCES  += $(highlighterdir)/SimpleHTMLFormatter.cpp
+libclucene_contrib_la_SOURCES  += $(highlighterdir)/TextFragment.cpp
+libclucene_contrib_la_SOURCES  += $(highlighterdir)/TokenGroup.cpp
+libclucene_contrib_la_SOURCES  += $(highlighterdir)/TokenSources.cpp
+libclucene_contrib_la_SOURCES  += $(highlighterdir)/WeightedTerm.cpp
+
+highlighterh_HEADERS = $(highlighterdir)/*.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/QueryScorer.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,100 @@
+#include "CLucene/StdHeader.h"
+#include "QueryScorer.h"
+
+CL_NS_DEF2(search,highlight)
+CL_NS_USE(index)
+CL_NS_USE(analysis)
+
+	QueryScorer::QueryScorer(const Query * query):
+        _uniqueTermsInFragment(true),
+		_termsToFind(false,true)
+		
+	 {
+		 WeightedTerm** _weighted_terms = QueryTermExtractor::getTerms(query);
+		 initialize(_weighted_terms);
+		 _CLDELETE_ARRAY(_weighted_terms);
+	 }
+	 QueryScorer::~QueryScorer()
+	 {
+	 }
+
+/*	 QueryScorer(Query* query, CL_NS(index)::IndexReader* reader, const TCHAR* fieldName)
+	 {
+		 WeightedTerm** _weighted_terms = QueryTermExtractor.getIdfWeightedTerms(query, reader, fieldName);
+		 initialize(_weighted_terms);
+	 }*/
+
+
+	QueryScorer::QueryScorer(WeightedTerm** weightedTerms)
+	{
+		 initialize(weightedTerms);
+	}
+	
+	void QueryScorer::initialize(WeightedTerm** weightedTerms)
+	{
+		_currentTextFragment = NULL;
+		_totalScore = 0;
+		_maxTermWeight = 0;
+
+		// Copy external weighted terms
+		 int i=0;
+		 while ( weightedTerms[i] != NULL ){
+			const WeightedTerm* existingTerm=_termsToFind.get(weightedTerms[i]->getTerm());
+			if( (existingTerm==NULL) ||(existingTerm->getWeight()<weightedTerms[i]->getWeight()) )
+  	        {
+  				//if a term is defined more than once, always use the highest scoring weight
+				WeightedTerm* term = weightedTerms[i];
+				_termsToFind.put(term->getTerm(), term);
+
+				_maxTermWeight=max(_maxTermWeight,weightedTerms[i]->getWeight());
+  	        }else
+				_CLDELETE(weightedTerms[i]);
+
+			i++;
+		 }
+	}
+
+	void QueryScorer::startFragment(TextFragment * newFragment)
+	{
+		_uniqueTermsInFragment.clear();
+		_currentTextFragment=newFragment;
+		_totalScore=0;
+		
+	}
+	
+	float_t QueryScorer::getTokenScore(Token * token)
+	{
+		const TCHAR* termText=token->termText();
+		
+		const WeightedTerm* queryTerm = _termsToFind.get(termText);
+		if(queryTerm==NULL)
+		{
+			//not a query term - return
+			return 0;
+		}
+		//found a query term - is it unique in this doc?
+		if(_uniqueTermsInFragment.find(termText)==_uniqueTermsInFragment.end())
+		{
+			_totalScore+=queryTerm->getWeight();
+			TCHAR* owned_term = stringDuplicate(termText);
+			_uniqueTermsInFragment.insert(owned_term);
+		}
+		return queryTerm->getWeight();
+	}
+	
+	/**
+  	*
+  	* @return The highest weighted term (useful for passing to GradientFormatter to set
+  	* top end of coloring scale).
+  	*/
+	float_t QueryScorer::getMaxTermWeight()
+	{
+  		return _maxTermWeight;
+	}
+
+
+	float_t QueryScorer::getFragmentScore(){
+		return _totalScore;
+	}
+
+CL_NS_END2
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/QueryScorer.h	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,114 @@
+/**
+ * Copyright 2002-2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _lucene_search_highlighter_queryscorer_
+#define _lucene_search_highlighter_queryscorer_
+
+#if defined(_LUCENE_PRAGMA_ONCE)
+# pragma once
+#endif
+
+#include <math.h> // float_t
+#include "CLucene/analysis/AnalysisHeader.h"
+#include "CLucene/search/SearchHeader.h"
+#include "CLucene/index/IndexReader.h"
+#include "CLucene/highlighter/WeightedTerm.h"
+#include "CLucene/highlighter/HighlightScorer.h"
+#include "CLucene/highlighter/QueryTermExtractor.h"
+#include "CLucene/highlighter/TextFragment.h"
+
+CL_NS_DEF2(search,highlight)
+
/**
 * {@link Scorer} implementation which scores text fragments by the number of unique query terms found.
 * This class uses the {@link QueryTermExtractor} class to process determine the query terms and 
 * their boosts to be used. 
 */
//TODO: provide option to boost score of fragments near beginning of document 
// based on fragment.getFragNum()
class QueryScorer : public HighlightScorer
{
private:
	// Fragment currently being scored (set by startFragment()).
	TextFragment * _currentTextFragment;
	// Distinct query terms already seen in the current fragment; the tcArray
	// Deletor indicates the set owns its duplicated strings.
	CL_NS(util)::CLHashSet<const TCHAR*,
		CL_NS(util)::Compare::TChar,
		CL_NS(util)::Deletor::tcArray> _uniqueTermsInFragment;
	// Running sum of weights of distinct query terms in the current fragment.
	double _totalScore;
	// Highest single-term weight seen at construction time.
	double _maxTermWeight;
	// Query terms to look for, keyed by term text; the Deletor template
	// arguments indicate the map owns both keys and values.
	CL_NS(util)::LHashMap<const TCHAR*, const WeightedTerm *,
		CL_NS(util)::Compare::TChar,
		CL_NS(util)::Equals::TChar,
		CL_NS(util)::Deletor::tcArray,
		CL_NS(util)::Deletor::Object<const WeightedTerm> > _termsToFind;

public:
	/**
	* 
	* @param query a Lucene query (ideally rewritten using query.rewrite 
	* before being passed to this class and the searcher)
	*/
	QueryScorer(const Query * query);

	/**
	* 
	* @param query a Lucene query (ideally rewritten using query.rewrite 
	* before being passed to this class and the searcher)
	* @param reader used to compute IDF which can be used to a) score selected fragments better 
	* b) use graded highlights eg set font color intensity
	* @param fieldName the field on which Inverse Document Frequency (IDF) calculations are based
	*/
	QueryScorer(const Query* query, CL_NS(index)::IndexReader* reader, const TCHAR* fieldName);

	// Takes ownership of the NULL-terminated array's WeightedTerm objects.
	QueryScorer(WeightedTerm** weightedTerms);

	~QueryScorer();

	/* (non-Javadoc)
	 * @see org.apache.lucene.search.highlight.FragmentScorer#startFragment(org.apache.lucene.search.highlight.TextFragment)
	 */
	void startFragment(TextFragment* newFragment);
	
	/* (non-Javadoc)
	 * @see org.apache.lucene.search.highlight.FragmentScorer#scoreToken(org.apache.lucene.analysis.Token)
	 */
	float_t getTokenScore(CL_NS(analysis)::Token * token);
	
	/* (non-Javadoc)
	 * @see org.apache.lucene.search.highlight.FragmentScorer#endFragment(org.apache.lucene.search.highlight.TextFragment)
	 */
	float_t getFragmentScore();

	/* (non-Javadoc)
	 * @see org.apache.lucene.search.highlight.FragmentScorer#allFragmentsProcessed()
	 */
	void allFragmentsProcessed();

	/**
	 * 
	 * @return The highest weighted term (useful for passing to GradientFormatter to set
	 * top end of coloring scale.  
		*/
	float_t getMaxTermWeight();

private:
	// Shared constructor body; takes ownership of the weighted terms.
	void initialize(WeightedTerm** weightedTerms);

};
+
+CL_NS_END2
+
+#endif
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/QueryTermExtractor.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,136 @@
+#include "CLucene/StdHeader.h"
+#include "QueryTermExtractor.h"
+
+CL_NS_DEF2(search,highlight)
+CL_NS_USE(index)
+
+	WeightedTerm** QueryTermExtractor::getTerms(const Query *query) 
+	{
+		WeightedTerm** ret = getTerms(query,false);
+		return ret;
+	}
+
+	WeightedTerm** QueryTermExtractor::getTerms(const Query * query, bool prohibited) 
+	{
+		WeightedTermList terms(false);
+		getTerms(query,&terms,prohibited);
+
+		// Return extracted terms
+		WeightedTerm** ret = _CL_NEWARRAY(WeightedTerm*,terms.size()+1);
+		terms.toArray(ret);
+
+		return ret;
+	}
+
+	void QueryTermExtractor::getTerms(const Query * query, WeightedTermList * terms,bool prohibited) 
+	{
+		if (query->instanceOf( BooleanQuery::getClassName() ))
+			getTermsFromBooleanQuery((BooleanQuery *) query, terms, prohibited);
+		else if (query->instanceOf( PhraseQuery::getClassName() ))
+			getTermsFromPhraseQuery((PhraseQuery *) query, terms);
+		else if (query->instanceOf( TermQuery::getClassName() ))
+			getTermsFromTermQuery((TermQuery *) query, terms);
+		//else if(query->instanceOf(_T("SpanNearQuery"))
+		//	getTermsFromSpanNearQuery((SpanNearQuery*) query, terms);
+	}
+
+	/**
+  	* Extracts all terms texts of a given Query into an array of WeightedTerms
+  	*
+  	* @param query      Query to extract term texts from
+  	* @param reader used to compute IDF which can be used to a) score selected fragments better
+  	* b) use graded highlights eg chaning intensity of font color
+  	* @param fieldName the field on which Inverse Document Frequency (IDF) calculations are based
+  	* @return an array of the terms used in a query, plus their weights.
+  	*/
+  	WeightedTerm** QueryTermExtractor::getIdfWeightedTerms(const Query* query, IndexReader* reader, const TCHAR* fieldName)
+  	{
+  	    WeightedTermList terms(true);
+		getTerms(query,&terms,false);
+
+  	    int32_t totalNumDocs=reader->numDocs();
+		
+		WeightedTermList::iterator itr = terms.begin();
+  	    while ( itr != terms.end() )
+  		{
+  			try
+  			{
+				Term* term = _CLNEW Term(fieldName,(*itr)->getTerm());
+  				int32_t docFreq=reader->docFreq(term);
+				_CLDECDELETE(term);
+
+  				//IDF algorithm taken from DefaultSimilarity class
+  				float_t idf=(float_t)(log(totalNumDocs/(float_t)(docFreq+1)) + 1.0);
+  				(*itr)->setWeight((*itr)->getWeight() * idf);
+  			}catch (LuceneError& e){
+  				if ( e.number()!=CL_ERR_IO )
+					throw e;
+  			}
+
+			itr++;
+  		}
+  	   
+		// Return extracted terms
+		WeightedTerm** ret = _CL_NEWARRAY(WeightedTerm*,terms.size()+1);
+		terms.toArray(ret);
+
+		return ret;
+  	}
+
+	void QueryTermExtractor::getTermsFromBooleanQuery(const BooleanQuery * query, WeightedTermList * terms, bool prohibited)
+	{
+		// TODO: change Query to get the queryclauses and their number in one function call
+		BooleanClause** queryClauses = query->getClauses();
+		uint32_t numClauses = query->getClauseCount();
+
+		for (uint32_t i = 0; i < numClauses; i++)
+		{
+			if (prohibited || !queryClauses[i]->prohibited){
+				Query* qry = queryClauses[i]->query;
+				getTerms(qry, terms, prohibited);
+			}
+		}
+
+		_CLDELETE_ARRAY(queryClauses);
+	}
+
+	void QueryTermExtractor::getTermsFromPhraseQuery(const PhraseQuery * query, WeightedTermList * terms)
+	{
+		Term** queryTerms = query->getTerms();
+		int32_t i = 0;
+		while ( queryTerms[i] != NULL ){
+			WeightedTerm * pWT = _CLNEW WeightedTerm(query->getBoost(),queryTerms[i]->text());
+			if (terms->find(pWT)==terms->end()) // possible memory leak if key already present
+				terms->insert(pWT);
+			else
+				_CLDELETE(pWT);
+
+			i++;
+		}
+		_CLDELETE_ARRAY(queryTerms);
+	}
+
+	void QueryTermExtractor::getTermsFromTermQuery(const TermQuery * query, WeightedTermList * terms)
+	{
+		Term * term = query->getTerm();
+		WeightedTerm * pWT = _CLNEW WeightedTerm(query->getBoost(),term->text());
+		_CLDECDELETE(term);
+		if (terms->find(pWT)==terms->end()) // possible memory leak if key already present
+			terms->insert(pWT);
+		else
+			_CLDELETE(pWT);
+	}
+
+	//todo: implement this when span queries are implemented
+	/*void getTermsFromSpanNearQuery(SpanNearQuery* query, WeightedTermList* terms){
+  	    Collection queryTerms = query.getTerms();
+
+  	    for(Iterator iterator = queryTerms.iterator(); iterator.hasNext();){
+  	        // break it out for debugging.
+  	        Term term = (Term) iterator.next();
+  	        const TCHAR* text = term.text();
+  	        terms.add(_CLNEW WeightedTerm(query.getBoost(), text));
+  	    }
+  	}*/
+
+CL_NS_END2
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/QueryTermExtractor.h	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,86 @@
+/**
+ * Copyright 2002-2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _lucene_search_highlight_querytermextractor_
+#define _lucene_search_highlight_querytermextractor_
+
+#if defined(_LUCENE_PRAGMA_ONCE)
+# pragma once
+#endif
+
+#include "CLucene/util/VoidList.h"
+#include "CLucene/search/SearchHeader.h"
+#include "CLucene/index/IndexReader.h"
+#include "CLucene/search/BooleanQuery.h"
+#include "CLucene/search/PhraseQuery.h"
+#include "CLucene/search/TermQuery.h"
+#include "CLucene/highlighter/WeightedTerm.h"
+
+CL_NS_DEF2(search,highlight)
+
/**
 * Utility class used to extract the terms used in a query, plus any weights.
 * This class will not find terms for MultiTermQuery, RangeQuery and PrefixQuery classes
 * so the caller must pass a rewritten query (see Query.rewrite) to obtain a list of 
 * expanded terms. 
 * 
 */
class QueryTermExtractor
{
	// Static-only utility: the private constructor prevents instantiation.
	QueryTermExtractor(){
	}
public:

	/**
	 * Extracts all terms texts of a given Query into an array of WeightedTerms
	 *
	 * @param query      Query to extract term texts from
	 * @return an array of the terms used in a query, plus their weights.
	 */
	static WeightedTerm** getTerms(const Query *query);

	/**
	 * Extracts all terms texts of a given Query into an array of WeightedTerms
	 *
	 * @param query      Query to extract term texts from
	 * @param reader used to compute IDF which can be used to a) score selected fragments better 
	 * b) use graded highlights eg chaning intensity of font color
	 * @param fieldName the field on which Inverse Document Frequency (IDF) calculations are based
	 * @return an array of the terms used in a query, plus their weights.
	 */
	 static WeightedTerm** getIdfWeightedTerms(const Query* query, CL_NS(index)::IndexReader* reader, const TCHAR* fieldName);

	/**
	 * Extracts all terms texts of a given Query into an array of WeightedTerms
	 *
	 * @param query      Query to extract term texts from
	 * @param prohibited <code>true</code> to extract "prohibited" terms, too
     * @return an array of the terms used in a query, plus their weights.Memory owned by the caller
     */
	static WeightedTerm** getTerms(const Query * query, bool prohibited);

	// Internal helpers: accumulate terms into the given list.
	static void getTerms(const Query * query, WeightedTermList* terms,bool prohibited);
	static void getTermsFromBooleanQuery(const BooleanQuery * query, WeightedTermList* terms, bool prohibited);
	static void getTermsFromPhraseQuery(const PhraseQuery * query, WeightedTermList* terms);
	static void getTermsFromTermQuery(const TermQuery * query, WeightedTermList* terms);
//	static void getTermsFromSpanNearQuery(SpanNearQuery* query, WeightedTermList* terms);
};
+
+CL_NS_END2
+
+
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/Scorer.h	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,62 @@
+#ifndef _lucene_search_highlight_scorer_h_
+#define _lucene_search_highlight_scorer_h_
+
+/**
+ * Copyright 2002-2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <CLucene\StdHeader.h>
+#include <CLucene\Analysis\AnalysisHeader.h>
+
+using namespace lucene::analysis;
+
+/**
+ * Adds to the score for a fragment based on its tokens
+ */
+namespace lucene { namespace search { namespace highlight {
+
+	class TextFragment;
+
+class Scorer
+{
+public:
+	virtual ~Scorer() = 0;
+	/**
+	 * called when a new fragment is started for consideration
+	 * @param newFragment
+	 */
+	virtual void startFragment(TextFragment * newFragment) = 0;
+
+	/**
+	 * Called for each token in the current fragment
+	 * @param token The token to be scored
+	 * @return a score which is passed to the TermHighlighter class to influence the mark-up of the text
+	 * (this return value is NOT used to score the fragment)
+	 */
+	virtual float_t getTokenScore(Token * token) = 0;
+	
+
+	/**
+	 * Called when the highlighter has no more tokens for the current fragment - the scorer will typically
+	 * call setScore() on the fragment passed in startFragment to record total info
+	 *
+	 */	
+	virtual float_t getFragmentScore() = 0;
+
+};
+
+}}}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/SimpleFragmenter.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,38 @@
+#include "CLucene/StdHeader.h"
+#include "SimpleFragmenter.h"
+
+CL_NS_DEF2(search,highlight)
+CL_NS_USE(analysis)
+
/// @param fragmentSize target size in bytes of each fragment
SimpleFragmenter::SimpleFragmenter(int32_t fragmentSize)
	: _currentNumFrags(0), _fragmentSize(fragmentSize)
{
}
SimpleFragmenter::~SimpleFragmenter(){
}

/// Called before fragmenting a new text; resets the fragment counter.
/// The original text itself is not needed by this fragmenter.
void SimpleFragmenter::start(const TCHAR*)
{
	_currentNumFrags=1;
}
+
+bool SimpleFragmenter::isNewFragment(const Token * token)
+{
+	bool isNewFrag= token->endOffset()>=(_fragmentSize*_currentNumFrags);
+	if (isNewFrag) {
+		_currentNumFrags++;
+	}
+	return isNewFrag;
+}
+
/// @return size in bytes of each fragment
int32_t SimpleFragmenter::getFragmentSize() const
{
	return _fragmentSize;
}

/// @param size size in bytes of each fragment
void SimpleFragmenter::setFragmentSize(int32_t size)
{
	_fragmentSize = size;
}
+
+CL_NS_END2
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/SimpleFragmenter.h	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,74 @@
+/**
+ * Copyright 2002-2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _lucene_search_highlight_simplefragmenter_
+#define _lucene_search_highlight_simplefragmenter_
+
+#if defined(_LUCENE_PRAGMA_ONCE)
+# pragma once
+#endif
+
+#include "CLucene/analysis/AnalysisHeader.h"
+#include "CLucene/highlighter/Fragmenter.h"
+
+CL_NS_DEF2(search,highlight)
+
/**
 * {@link Fragmenter} implementation which breaks text up into same-size 
 * fragments with no concerns over spotting sentence boundaries.
 */

class SimpleFragmenter:public Fragmenter
{
private:
	LUCENE_STATIC_CONSTANT(int32_t, DEFAULT_FRAGMENT_SIZE =100 );
	int32_t _currentNumFrags;	// fragments started so far for the current text
	int32_t _fragmentSize;		// target fragment size in bytes

public:
	/**
	 * 
	 * @param fragmentSize size in bytes of each fragment
	 */
	SimpleFragmenter(int32_t fragmentSize = DEFAULT_FRAGMENT_SIZE);

	~SimpleFragmenter();

	/* (non-Javadoc)
	 * @see org.apache.lucene.search.highlight.TextFragmenter#start(const TCHAR*)
	 */
	void start(const TCHAR* originalText);

	/* (non-Javadoc)
	 * @see org.apache.lucene.search.highlight.TextFragmenter#isNewFragment(org.apache.lucene.analysis.Token)
	 */
	bool isNewFragment(const CL_NS(analysis)::Token * token);

	/**
	 * @return size in bytes of each fragment
	 */
	int32_t getFragmentSize() const;

	/**
	 * @param size size in bytes of each fragment
	 */
	void setFragmentSize(int32_t size);

};
+
+CL_NS_END2
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/SimpleHTMLEncoder.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,64 @@
+#include "CLucene/StdHeader.h"
+#include "SimpleHTMLEncoder.h"
+CL_NS_DEF2(search,highlight)
+
/// Stateless encoder; nothing to initialize.
SimpleHTMLEncoder::SimpleHTMLEncoder(void)
{
}

SimpleHTMLEncoder::~SimpleHTMLEncoder(void)
{
}

/// Encoder interface entry point: HTML-escapes the given text.
/// @return a newly allocated string owned by the caller.
TCHAR* SimpleHTMLEncoder::encodeText(TCHAR* originalText)
{
	return htmlEncode(originalText);
}
+
+TCHAR* SimpleHTMLEncoder::htmlEncode(TCHAR* plainText) 
+{
+	size_t plainTextLen = _tcslen(plainText);
+	if (plainText == NULL || plainTextLen == 0)
+	{
+		return STRDUP_TtoT(_T(""));
+	}
+
+	CL_NS(util)::StringBuffer result(plainTextLen);
+
+	for (int32_t index=0; index<plainTextLen; index++) 
+	{
+		TCHAR ch = plainText[index];
+
+		switch (ch) 
+		{
+		case '"':
+			result.append(_T("&quot;"));
+			break;
+
+		case '&':
+			result.append(_T("&amp;"));
+			break;
+
+		case '<':
+			result.append(_T("&lt;"));
+			break;
+
+		case '>':
+			result.append(_T("&gt;"));
+			break;
+
+		default:
+			if (ch < 128)
+				result.appendChar(ch);
+			else{
+  	            result.append(_T("&#"));
+				result.appendInt(ch);
+				result.append(_T(";"));
+			}
+		}
+	}
+
+	return result.toString();
+}
+
+CL_NS_END2
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/SimpleHTMLEncoder.h	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,49 @@
+/**
+ * Copyright 2002-2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _lucene_search_highlight_simplehtmlencoder_
+#define _lucene_search_highlight_simplehtmlencoder_
+
+#if defined(_LUCENE_PRAGMA_ONCE)
+# pragma once
+#endif
+
+#include "CLucene/util/StringBuffer.h"
+#include "Encoder.h"
+
+CL_NS_DEF2(search,highlight)
+
/**
 * Simple {@link Encoder} implementation to escape text for HTML output
 *
 */
class SimpleHTMLEncoder:public Encoder
{
public:
	SimpleHTMLEncoder(void);
	~SimpleHTMLEncoder(void);
	
	// Encoder interface: returns a newly allocated, caller-owned string.
	TCHAR* encodeText(TCHAR* originalText);
	
	/**
	 * Encode string into HTML: escapes '"', '&', '<', '>' and emits numeric
	 * character references for non-ASCII characters.
	 */
	static TCHAR* htmlEncode(TCHAR* plainText) ;
};
+
+CL_NS_END2
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/SimpleHTMLFormatter.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,38 @@
+#include "CLucene/StdHeader.h"
+#include "SimpleHTMLFormatter.h"
+
+
+CL_NS_DEF2(search,highlight)
+CL_NS_USE(util)
+
/// Builds a formatter using the given markup tags; both strings are copied.
SimpleHTMLFormatter::SimpleHTMLFormatter(const TCHAR* preTag, const TCHAR* postTag):
	_preTag(stringDuplicate(preTag)),
	_postTag(stringDuplicate(postTag))
{
}

/// Default constructor uses HTML <B>...</B> tags to mark up terms.
SimpleHTMLFormatter::SimpleHTMLFormatter()
{
	_preTag = stringDuplicate(_T("<B>"));
	_postTag = stringDuplicate(_T("</B>"));
}

SimpleHTMLFormatter::~SimpleHTMLFormatter() 
{
	// Release the duplicated tag strings.
	_CLDELETE_CARRAY(_preTag);
	_CLDELETE_CARRAY(_postTag);
}
+
+TCHAR* SimpleHTMLFormatter::highlightTerm(const TCHAR* originalText, const TokenGroup* tokenGroup)
+{
+	if(tokenGroup->getTotalScore()>0){
+		StringBuffer sb;
+		sb.append(_preTag);
+		sb.append(originalText);
+		sb.append(_postTag);
+		return sb.toString();
+	}
+	return stringDuplicate(originalText);
+}
+
+CL_NS_END2
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/SimpleHTMLFormatter.h	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,63 @@
+/**
+ * Copyright 2002-2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _lucene_search_highlight_simplehtmlformatter_
+#define _lucene_search_highlight_simplehtmlformatter_
+
+#if defined(_LUCENE_PRAGMA_ONCE)
+# pragma once
+#endif
+
+#include "CLucene/util/StringBuffer.h"
+#include "CLucene/highlighter/Formatter.h"
+
+CL_NS_DEF2(search,highlight)
+
/**
 * Simple {@link Formatter} implementation to highlight terms with a pre and post tag
 *
 */
class SimpleHTMLFormatter :public Formatter
{
private:
	const TCHAR* _preTag;	// markup placed before a highlighted term (owned)
	const TCHAR* _postTag;	// markup placed after a highlighted term (owned)

public:
	~SimpleHTMLFormatter(); 


	SimpleHTMLFormatter(const TCHAR* preTag, const TCHAR* postTag);

	/**
	 * Default constructor uses HTML: &lt;B&gt; tags to markup terms
	 * 
	 **/
	SimpleHTMLFormatter();

	
	/**
	* Returns the original text enclosed in _preTag and _postTag, if the score is greater 
	* than 0. Otherwise, it returns the original text.
	* It doesn't use the stemmed text nor the startOffset. 
	* It allocates memory for the returned text, and it has to be freed by the caller.
	*/
	TCHAR* highlightTerm(const TCHAR* originalText, const TokenGroup* tokenGroup);
};
+
+CL_NS_END2
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/TextFragment.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,61 @@
+#include "CLucene/StdHeader.h"
+#include "TextFragment.h"
+
+CL_NS_DEF2(search,highlight)
+CL_NS_USE(util)
+
+	TextFragment::TextFragment(int32_t textStartPos, int32_t fragNum)
+	{
+		_textStartPos = textStartPos;
+		_textEndPos=0;
+		_fragNum = fragNum;
+	}
+	TextFragment::~TextFragment(){
+	}
+
	/// Records the score computed for this fragment.
	void TextFragment::setScore(float_t score)
	{
		_score=score;
	}

	/// @return the score last recorded via setScore() or merge().
	float_t TextFragment::getScore() const
	{
		return _score;
	}
+
	/**
	 * @param frag2 Fragment to be merged into this one
	 */
	void TextFragment::merge(const TextFragment * frag2)
	{
		// Extend this fragment to frag2's end and keep the higher score.
		_textEndPos = frag2->_textEndPos;
		_score=max(_score,frag2->_score);
	}
	/**
	 * @param fragment 
	 * @return true if this fragment follows the one passed,
	 * i.e. starts exactly where the other one ended
	 */
	bool TextFragment::follows(const TextFragment * fragment) const
	{
		return _textStartPos == fragment->_textEndPos;
	}

	/**
	 * @return the fragment sequence number
	 */
	int32_t TextFragment::getFragNum() const
	{
		return _fragNum;
	}
+
+	/* Returns the marked-up text for this text fragment 
+	 */
+	TCHAR* TextFragment::toString(StringBuffer* buffer) {
+		TCHAR* ret = _CL_NEWARRAY(TCHAR,_textEndPos-_textStartPos+1);
+		_tcsncpy(ret,buffer->getBuffer()+_textStartPos,_textEndPos-_textStartPos);
+		ret[_textEndPos-_textStartPos]=_T('\0');
+		
+		return ret;
+	}
+
+CL_NS_END2
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/TextFragment.h	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,90 @@
+/**
+ * Copyright 2002-2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _lucene_search_highlight_textfragment_
+#define _lucene_search_highlight_textfragment_
+
+#if defined(_LUCENE_PRAGMA_ONCE)
+# pragma once
+#endif
+
+#include "CLucene/util/StringBuffer.h"
+#include "CLucene/util/VoidList.h"
+
+CL_NS_DEF2(search,highlight)
+
/**
 * Low-level class used to record information about a section of a document 
 * with a score.
 */
class TextFragment:LUCENE_BASE
{
	int32_t _fragNum;		// sequence number of this fragment
	int32_t _textStartPos;	// start offset within the source buffer
	int32_t _textEndPos;	// end offset within the source buffer
	float_t _score;			// score assigned via setScore()/merge()

public:
	TextFragment(int32_t textStartPos, int32_t fragNum);
	~TextFragment();

	void setScore(float_t score);
	float_t getScore() const;

	int32_t textEndPos(){ return _textEndPos; }
	void setTextEndPos(int32_t val){ _textEndPos = val; }

	/**
	 * @param frag2 Fragment to be merged into this one
	 */
	void merge(const TextFragment * frag2);

	/**
	 * @param fragment 
	 * @return true if this fragment follows the one passed
	 */
	bool follows(const TextFragment * fragment) const;

	/**
	 * @return the fragment sequence number
	 */
	int32_t getFragNum() const;

	/* Returns the marked-up text for this text fragment 
	 */
	TCHAR* toString(CL_NS(util)::StringBuffer* buffer);

	/**
	 * Compare weighted terms, according to the term text.
	 * @todo Do we have to take boost factors into account
	 */
	class Compare:LUCENE_BASE, public CL_NS(util)::Compare::_base //<TextFragment*>
	{
	public:
	//todo: this should be more efficient, but will be using a hash table soon, anyway
		bool operator()( TextFragment* t1, TextFragment* t2 ) const;
		size_t operator()( TextFragment* t ) const;
	};
};
+
+/**
+ * Text fragment list.
+ */
+//typedef CL_NS(util)::CLSetList<TextFragment*,TextFragment::Compare,CL_NS(util)::Deletor::Object<TextFragment> > TextFragmentList;
+
+CL_NS_END2
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/TokenGroup.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,104 @@
+#include "CLucene/StdHeader.h"
+#include "TokenGroup.h"
+
+CL_NS_DEF2(search,highlight)
+CL_NS_USE(analysis)
+
/// Starts with an empty group; the span offsets are (re)established by the
/// first addToken() call.
TokenGroup::TokenGroup(void)
{
	numTokens=0;
	startOffset=0;
	endOffset=0;
}

TokenGroup::~TokenGroup(void)
{
}
+
+void TokenGroup::addToken(Token* token, float_t score)
+{
+	if(numTokens < MAX_NUM_TOKENS_PER_GROUP)
+    {	    
+		if(numTokens==0)
+		{
+			startOffset=token->startOffset();		
+			endOffset=token->endOffset();		
+		}
+		else
+		{
+			startOffset=min(startOffset,token->startOffset());		
+			endOffset=max(endOffset,token->endOffset());		
+		}
+		tokens[numTokens].set(token->termText(),token->startOffset(),token->endOffset(),token->type());;
+		scores[numTokens]=score;
+		numTokens++;
+    }
+}
+
/// @param index a value between 0 and getNumTokens()-1
/// @return the "n"th token collected into this group
CL_NS(analysis)::Token& TokenGroup::getToken(int32_t index)
{
	return tokens[index];
}

/// @param index a value between 0 and getNumTokens()-1
/// @return the score recorded with the "n"th token
float_t TokenGroup::getScore(int32_t index) const
{
	return scores[index];
}

/// @return the end position of the group in the original text
int32_t TokenGroup::getEndOffset() const
{
	return endOffset;
}

/// @return the number of tokens currently in this group
int32_t TokenGroup::getNumTokens() const
{
	return numTokens;
}

/// @return the start position of the group in the original text
int32_t TokenGroup::getStartOffset() const
{
	return startOffset;
}
+
+float_t TokenGroup::getTotalScore() const
+{
+	float_t total=0;
+	for (int32_t i = 0; i < numTokens; i++)
+	{
+		total+=scores[i];
+	}
+	return total;
+}
+
+/*void addToken(CL_NS(analysis)::Token* token, float_t score)
+{
+	if(numTokens < MAX_NUM_TOKENS_PER_GROUP)
+  		{
+		if(numTokens==0)
+		{
+			startOffset=token->startOffset();		
+			endOffset=token->endOffset();		
+		}
+		else
+		{
+			startOffset=min(startOffset,token->startOffset());		
+			endOffset=max(endOffset,token->endOffset());		
+		}
+		tokens[numTokens]=token;
+		scores[numTokens]=score;
+		numTokens++;
+	}
+}*/
+
/// @return true if the token starts after this group's current span,
/// i.e. it does not overlap the tokens already collected.
bool TokenGroup::isDistinct(CL_NS(analysis)::Token* token) const
{
	return token->startOffset() > endOffset;
}


/// Empties the group for reuse; the span offsets are reset lazily by the
/// next addToken() call.
void TokenGroup::clear()
{
	numTokens=0;
}
+
+CL_NS_END2
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/TokenGroup.h	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,86 @@
+/**
+ * Copyright 2002-2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _lucene_search_highlight_tokengroup_
+#define _lucene_search_highlight_tokengroup_
+
+#if defined(_LUCENE_PRAGMA_ONCE)
+# pragma once
+#endif
+
+#include "CLucene/analysis/AnalysisHeader.h"
+
+CL_NS_DEF2(search,highlight)
+
/**
 * One, or several overlapping tokens, along with the score(s) and the
 * scope of the original text
 */
class TokenGroup: LUCENE_BASE
{
	LUCENE_STATIC_CONSTANT(int32_t,MAX_NUM_TOKENS_PER_GROUP=50);
	CL_NS(analysis)::Token tokens[MAX_NUM_TOKENS_PER_GROUP];	// copies of grouped tokens
	float_t scores[MAX_NUM_TOKENS_PER_GROUP];					// score per token
	int32_t numTokens;		// number of slots in use
	int32_t startOffset;	// span start in the original text
	int32_t endOffset;		// span end in the original text

public:
	TokenGroup(void);
	~TokenGroup(void);

	// Records a token and its score; extras beyond the cap are dropped.
	void addToken(CL_NS(analysis)::Token* token, float_t score);

	/**
	 * 
	 * @param index a value between 0 and numTokens -1
	 * @return the "n"th token
	 */
	CL_NS(analysis)::Token& getToken(int32_t index);

	/**
	 * 
	 * @param index a value between 0 and numTokens -1
	 * @return the "n"th score
	 */
	float_t getScore(int32_t index) const;

	/**
	 * @return the end position in the original text
	 */
	int32_t getEndOffset() const;

	/**
	 * @return the number of tokens in this group
	 */
	int32_t getNumTokens() const;

	/**
	 * @return the start position in the original text
	 */
	int32_t getStartOffset() const;

	/**
	 * @return all tokens' scores summed up
	 */
	float_t getTotalScore() const;

	// True if the token starts after (does not overlap) this group's span.
	bool isDistinct(CL_NS(analysis)::Token* token)  const;
	void clear();
};
+
+CL_NS_END2
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/TokenSources.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,219 @@
+#include "CLucene/StdHeader.h"
+#include "TokenSources.h"
+
+#include "CLucene/util/VoidList.h"
+
+CL_NS_DEF2(search,highlight)
+CL_NS_USE(analysis)
+CL_NS_USE(index)
+CL_NS_USE(util)
+
+// TokenSources only exposes static helpers; the constructor and destructor
+// intentionally do nothing.
+TokenSources::TokenSources(void)
+{
+}
+
+TokenSources::~TokenSources(void)
+{
+}
+
+// Convenience entry point: first tries the cheap route of rebuilding the
+// token stream from a stored term vector; if the index has none (or it lacks
+// position data) falls back to re-analyzing the stored field content.
+TokenStream* TokenSources::getAnyTokenStream(IndexReader* reader,int32_t docId, TCHAR* field, Analyzer* analyzer)
+{
+	TokenStream* ts=NULL;
+
+	TermFreqVector* tfv=reader->getTermFreqVector(docId,field);
+	if(tfv!=NULL)
+	{
+//		todo: this is actually very dodgy... we try casting
+//		to TermPositionVector, we take the token stream
+//		only if the cast works... should have a way of
+//		knowing what type this is
+		TermPositionVector* tmp = NULL;
+		try{
+             tmp = dynamic_cast<TermPositionVector *> (tfv);
+		}catch(...){
+			//ignore
+		}
+		// NOTE(review): a pointer dynamic_cast returns NULL on failure and
+		// never throws, so the try/catch above is redundant (but harmless).
+		if ( tmp != NULL )
+		    ts=getTokenStream(tmp);
+	}
+	//No token info stored so fall back to analyzing raw content
+	if(ts==NULL)
+	{
+		ts=getTokenStream(reader,docId,field,analyzer);
+	}
+	return ts;
+}
+
+
+// Overload that never trusts stored token positions to be contiguous.
+TokenStream* TokenSources::getTokenStream(TermPositionVector* tpv)
+{
+    //assumes the worst and makes no assumptions about token position sequences.
+    return getTokenStream(tpv,false);   
+}
+
+// Rebuilds a TokenStream from the offset (and, optionally, position) data
+// stored in a term position vector, avoiding a re-analysis of the raw text.
+// Returns NULL when a term carries no offset data (reconstruction is then
+// impossible). The returned StoredTokenStream takes over the token array.
+//
+// @param tpv the stored term position vector for one document field
+// @param tokenPositionsGuaranteedContiguous true only if position numbers
+//        have no overlaps or gaps; enables the fast direct-indexing path.
+TokenStream* TokenSources::getTokenStream(TermPositionVector* tpv, bool tokenPositionsGuaranteedContiguous)
+{
+    //code to reconstruct the original sequence of Tokens
+    const TCHAR** terms=tpv->getTerms();          
+    const int32_t* freq= (int32_t *)tpv->getTermFrequencies();
+	int32_t freqLen = tpv->size();
+
+    // Total number of token instances across all terms.
+    size_t totalTokens=0;
+	{
+		for (int32_t t = 0; t < freqLen; t++)
+			totalTokens+=freq[t];
+	}
+
+    // FIX: the original guarded the allocation with
+    // "if (tokensInOriginalOrder != NULL)" inside the loop, so the array was
+    // never allocated and every later use was silently skipped (or a
+    // StoredTokenStream was built over a NULL array). Allocate it once up
+    // front and NULL-fill so unassigned slots are well defined.
+    Token** tokensInOriginalOrder=_CL_NEWARRAY(Token*, totalTokens+1);
+	for (size_t i = 0; i <= totalTokens; i++)
+		tokensInOriginalOrder[i]=NULL;
+	CLSetList<Token*,Token::OrderCompare>* unsortedTokens = NULL;
+    for (int32_t t = 0; t < freqLen; t++)
+    {
+        TermVectorOffsetInfo** offsets=(TermVectorOffsetInfo**)tpv->getOffsets(t);
+        if(offsets==NULL)
+        {
+            // No offset data stored for this term - cannot reconstruct.
+            // FIX: release what was allocated so far instead of leaking it.
+            // (Tokens already inserted into unsortedTokens are not owned by
+            // the set - created with 'false' - and still leak, as before.)
+            _CLDELETE_ARRAY(tokensInOriginalOrder);
+            _CLDELETE(unsortedTokens);
+            return NULL;
+        }
+        
+        int32_t* pos=NULL;
+		int32_t posLen=0;
+        if(tokenPositionsGuaranteedContiguous)
+        {
+            //try get the token position info to speed up assembly of tokens into sorted sequence
+            pos=(int32_t *)tpv->getTermPositions(t);
+			posLen=1;//todo
+        }
+
+        if(pos==NULL)
+        {	
+            //tokens NOT stored with positions or not guaranteed contiguous - must add to list and sort later
+            if(unsortedTokens==NULL)
+                unsortedTokens=_CLNEW CLSetList<Token*,Token::OrderCompare>(false);
+            for (int32_t tp=0; offsets[tp]!=NULL; tp++)
+            {
+                unsortedTokens->insert(_CLNEW Token(terms[t],
+                    offsets[tp]->getStartOffset(),
+                    offsets[tp]->getEndOffset()));
+            }
+        }
+        else
+        {
+            //We have positions stored and a guarantee that the token position information is contiguous
+            
+            // This may be fast BUT wont work if Tokenizers used which create >1 token in same position or
+            // creates jumps in position numbers - this code would fail under those circumstances
+            
+            //tokens stored with positions - can use this to index straight into sorted array
+            for (int32_t tp = 0; tp < posLen; tp++)
+            {
+                tokensInOriginalOrder[pos[tp]]=_CLNEW Token(terms[t],
+                    offsets[tp]->getStartOffset(),
+                    offsets[tp]->getEndOffset());
+            }                
+        }
+    }
+    //If the field has been stored without position data we must perform a sort        
+    if(unsortedTokens!=NULL)
+    {
+		const size_t numUnsorted = unsortedTokens->size();
+		if ( totalTokens<numUnsorted ){
+			_CLDELETE_ARRAY(tokensInOriginalOrder);
+			tokensInOriginalOrder = _CL_NEWARRAY(Token*,numUnsorted+1);
+		}
+		//the set is ordered by Token::OrderCompare //todo:check that this is true...
+		unsortedTokens->toArray(tokensInOriginalOrder);
+		// FIX: the set object itself was leaked; it does not own the Tokens
+		// (constructed with deleteValue=false), so deleting it is safe.
+		_CLDELETE(unsortedTokens);
+		return _CLNEW StoredTokenStream(tokensInOriginalOrder,numUnsorted);
+    }else
+		return _CLNEW StoredTokenStream(tokensInOriginalOrder,totalTokens);
+}
+
+// Low-level variant: requires term position/offset data to be stored in the
+// index for 'field'; throws CL_ERR_IllegalArgument otherwise.
+TokenStream* TokenSources::getTokenStream(IndexReader* reader,int32_t docId, TCHAR* field)
+{
+	TermFreqVector* tfv=reader->getTermFreqVector(docId,field);
+	if(tfv==NULL)
+	{
+		TCHAR buf[250];
+		_sntprintf(buf,250,_T("%s in doc #%d does not have any term position data stored"),field,docId);
+		_CLTHROWT(CL_ERR_IllegalArgument,buf);
+		return NULL; // not reached; satisfies compilers when exceptions are off
+	}
+
+	// Check whether the freq vector actually carries position data.
+	// FIX: a pointer dynamic_cast returns NULL on failure and never throws,
+	// so the original try/catch was dead code; more importantly, the original
+	// fetched the term freq vector from the reader a SECOND time just to cast
+	// it again - reuse the vector we already hold instead.
+	TermPositionVector* tpv = dynamic_cast<TermPositionVector *> (tfv);
+	if ( tpv == NULL ){
+		TCHAR buf[250];
+		_sntprintf(buf,250,_T("%s in doc #%d does not have any term position data stored"),field,docId);
+		_CLTHROWT(CL_ERR_IllegalArgument,buf);
+		return NULL;
+	}
+	return getTokenStream(tpv);
+}
+
+//convenience method
+//convenience method
+// Re-analyzes the stored field text with 'analyzer'. Throws
+// CL_ERR_IllegalArgument when the field is not stored in the document.
+// NOTE(review): the Document returned by reader->document() is never
+// released here and 'contents' points into it - verify ownership/lifetime
+// expectations against the CLucene IndexReader/Document API.
+TokenStream* TokenSources::getTokenStream(IndexReader* reader,int32_t docId, TCHAR* field,Analyzer* analyzer)
+{
+	CL_NS(document)::Document* doc=reader->document(docId);
+	const TCHAR* contents=doc->get(field);
+	if(contents==NULL)
+	{
+		TCHAR buf[250];
+		_sntprintf(buf,250,_T("Field %s in document #%d is not stored and cannot be analyzed"),field,docId);
+		_CLTHROWT(CL_ERR_IllegalArgument,buf);
+		return NULL;
+	}
+    return analyzer->tokenStream(field,_CLNEW StringReader(contents));
+}
+
+// Wraps a pre-built array of 'len' Token pointers as a TokenStream.
+// The array pointer is stored as-is (no copy is taken).
+TokenSources::StoredTokenStream::StoredTokenStream(CL_NS(analysis)::Token** tokens, size_t len)
+{
+	currentToken = 0;
+    this->tokens=tokens;
+	this->length = len;
+}
+// Copies the next stored token's text/offsets/type into 'token'.
+// @return false once all 'length' tokens have been consumed.
+bool TokenSources::StoredTokenStream::next(CL_NS(analysis)::Token* token)
+{
+    if(currentToken>=length)
+    {
+        return false;
+    }
+	Token* t = tokens[currentToken++];
+
+	// FIX: dropped a stray ';;' (empty statement) at the end of this line.
+	token->set(t->termText(),t->startOffset(),t->endOffset(),t->type());
+    return true;
+}
+// NOTE(review): close() releases nothing; the 'tokens' array handed to the
+// constructor is never freed by this class - check lifetime at call sites.
+void TokenSources::StoredTokenStream::close(){
+	
+}
+
+CL_NS_END2
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/TokenSources.h	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,98 @@
+/**
+ * Copyright 2002-2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _lucene_search_highlight_tokensources_
+#define _lucene_search_highlight_tokensources_
+
+#if defined(_LUCENE_PRAGMA_ONCE)
+# pragma once
+#endif
+
+#include "CLucene/analysis/AnalysisHeader.h"
+#include "CLucene/index/IndexReader.h"
+#include "CLucene/index/TermVector.h"
+#include "CLucene/analysis/AnalysisHeader.h"
+
+CL_NS_DEF2(search,highlight)
+
+/** Static helpers for obtaining a TokenStream for a document field, either
+ *  from stored term vectors or by re-analyzing the stored content. */
+class TokenSources: LUCENE_BASE
+{
+	//an object used to iterate across an array of tokens
+	class StoredTokenStream:public CL_NS(analysis)::TokenStream
+    {
+	public:
+        CL_NS(analysis)::Token** tokens;  // token array supplied at construction
+		size_t length;                    // number of entries in 'tokens'
+        int32_t currentToken;             // cursor for next()
+        StoredTokenStream(CL_NS(analysis)::Token** tokens, size_t len);
+		bool next(CL_NS(analysis)::Token* token);
+		void close();
+    };
+public:
+	TokenSources(void);
+	~TokenSources(void);
+
+	/**
+     * A convenience method that tries a number of approaches to getting a token stream.
+     * The cost of finding there are no termVectors in the index is minimal (1000 invocations still 
+     * registers 0 ms). So this "lazy" (flexible?) approach to coding is probably acceptable
+     * @param reader
+     * @param docId
+     * @param field
+     * @param analyzer
+     * @return null if field not stored correctly 
+     * @throws IOException
+     */
+	static CL_NS(analysis)::TokenStream* getAnyTokenStream(CL_NS(index)::IndexReader* reader,int32_t docId, TCHAR* field, CL_NS(analysis)::Analyzer* analyzer);
+    
+    /** As below, assuming token positions are NOT guaranteed contiguous. */
+    static CL_NS(analysis)::TokenStream* getTokenStream(CL_NS(index)::TermPositionVector* tpv);
+
+    /**
+     * Low level api.
+     * Returns a token stream or null if no offset info available in index.
+     * This can be used to feed the highlighter with a pre-parsed token stream 
+     * 
+     * In my tests the speeds to recreate 1000 token streams using this method are:
+     * - with TermVector offset only data stored - 420  milliseconds 
+     * - with TermVector offset AND position data stored - 271 milliseconds
+     *  (nb timings for TermVector with position data are based on a tokenizer with contiguous
+     *  positions - no overlaps or gaps)
+     * The cost of not using TermPositionVector to store
+     * pre-parsed content and using an analyzer to re-parse the original content: 
+     * - reanalyzing the original content - 980 milliseconds
+     * 
+     * The re-analyze timings will typically vary depending on -
+     * 	1) The complexity of the analyzer code (timings above were using a 
+     * 	   stemmer/lowercaser/stopword combo)
+     *  2) The  number of other fields (Lucene reads ALL fields off the disk 
+     *     when accessing just one document field - can cost dear!)
+     *  3) Use of compression on field storage - could be faster cos of compression (less disk IO)
+     *     or slower (more CPU burn) depending on the content.
+     *
+     * @param tpv
+     * @param tokenPositionsGuaranteedContiguous true if the token position numbers have no overlaps or gaps. If looking
+     * to eek out the last drops of performance, set to true. If in doubt, set to false.
+     */
+    static CL_NS(analysis)::TokenStream* getTokenStream(CL_NS(index)::TermPositionVector* tpv, bool tokenPositionsGuaranteedContiguous);
+
+	/** Low level api: rebuilds the stream from the index; throws
+	 *  CL_ERR_IllegalArgument when no term position data is stored. */
+	static CL_NS(analysis)::TokenStream* getTokenStream(CL_NS(index)::IndexReader* reader,int32_t docId, TCHAR* field);
+
+    //convenience method
+	/** Re-analyzes the stored field content with 'analyzer'; throws
+	 *  CL_ERR_IllegalArgument when the field is not stored. */
+	static CL_NS(analysis)::TokenStream* getTokenStream(CL_NS(index)::IndexReader* reader,int32_t docId, TCHAR* field,CL_NS(analysis)::Analyzer* analyzer);
+};
+
+CL_NS_END2
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/WeightedTerm.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,84 @@
+#include "CLucene/StdHeader.h"
+#include "WeightedTerm.h"
+#include "CLucene/search/Similarity.h"
+
+CL_NS_DEF2(search,highlight)
+
+	// Takes a private duplicate of 'term' (released in the destructor);
+	// the hash cache starts empty (0 = not yet computed).
+	WeightedTerm::WeightedTerm (float_t weight,const TCHAR* term)
+	{
+		_weight=weight;
+		_term = stringDuplicate(term);
+		cachedHashCode = 0;
+	}
+	
+	// Releases the duplicated term string.
+	WeightedTerm::~WeightedTerm()
+	{
+		_CLDELETE_CARRAY(_term);
+	}
+
+	// Private copy constructor (used by clone()); deep-copies the term text.
+	WeightedTerm::WeightedTerm(const WeightedTerm& other) 
+	{
+		_weight = other.getWeight();
+		_term = STRDUP_TtoT(other.getTerm());
+		// FIX: cachedHashCode was left uninitialized, so hashCode() on a
+		// clone could return garbage instead of computing the hash.
+		cachedHashCode = 0;
+	}
+	
+	// Returns a heap-allocated deep copy (term string duplicated) created
+	// via the private copy constructor; caller owns the result.
+	WeightedTerm* WeightedTerm::clone() const{
+		return _CLNEW WeightedTerm(*this);
+	}
+
+	/**
+	 * @return the term value (stemmed). The pointer is owned by this
+	 * WeightedTerm and remains valid until setTerm() or destruction.
+	 */
+	const TCHAR* WeightedTerm::getTerm() const
+	{
+		return _term;
+	}
+
+	/**
+	 * @return the weight (score multiplier) associated with this term
+	 */
+	float_t WeightedTerm::getWeight() const 
+	{
+		return _weight;
+	}
+
+	/**
+	 * Replaces the current term text and invalidates the cached hash.
+	 * @param term the term value (stemmed); duplicated internally
+	 */
+	void WeightedTerm::setTerm(TCHAR* term)
+	{
+		_CLDELETE_CARRAY(this->_term);
+		// FIX: the original duplicated the just-deleted member (_term)
+		// instead of the parameter (term) - a use-after-free that stored
+		// freed memory as the new term.
+		this->_term = STRDUP_TtoT(term);
+		cachedHashCode = 0;
+	}
+
+	/**
+	 * @param aweight the weight associated with this term; invalidates the
+	 * cached hash so the next hashCode() call recomputes it
+	 */
+	void WeightedTerm::setWeight(float_t aweight) {
+		this->_weight = aweight;
+		cachedHashCode = 0;
+	}
+
+	// Lazily computes and caches a hash of the term text combined with the
+	// byte-quantized weight.
+	// NOTE(review): 0 doubles as the "not computed" sentinel, so a value that
+	// genuinely hashes to 0 is recomputed on every call (harmless).
+	size_t WeightedTerm::hashCode(){
+		if ( cachedHashCode == 0 ){
+			cachedHashCode = ( CL_NS(util)::Misc::thashCode(this->_term) ^ CL_NS(search)::Similarity::floatToByte(_weight) );
+		}
+
+		return cachedHashCode;
+	}
+
+	// Strict weak ordering for WeightedTerm pointers: primary key is the
+	// term text, secondary key (on equal text) is the weight.
+	bool WeightedTerm::Compare::operator()( WeightedTerm* t1, WeightedTerm* t2 ) const{
+		int r = _tcscmp(t1->getTerm(), t2->getTerm());
+		if ( r < 0 )
+			return true;
+		else if ( r == 0 )
+			return t1->getWeight() < t2->getWeight();
+		else
+			return false;
+	}
+	// Hash adapter for CLHashSet; delegates to WeightedTerm::hashCode().
+	size_t WeightedTerm::Compare::operator()( WeightedTerm* t ) const{
+		return t->hashCode();
+	}
+
+CL_NS_END2
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/searchengine/oss/cl/clucene/src/clucene/highlighter/WeightedTerm.h	Fri Jun 11 14:43:47 2010 +0300
@@ -0,0 +1,82 @@
+/**
+ * Copyright 2002-2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _lucene_search_highlight_weightedterm_
+#define _lucene_search_highlight_weightedterm_
+
+#if defined(_LUCENE_PRAGMA_ONCE)
+# pragma once
+#endif
+
+#include "CLucene/util/VoidList.h"
+
+CL_NS_DEF2(search,highlight)
+
+/** Lightweight class to hold term and a weight value used for scoring this term 
+ */
+/** Lightweight class to hold term and a weight value used for scoring this term 
+ */
+class WeightedTerm:LUCENE_BASE
+{
+private:
+	float_t _weight; // multiplier
+	TCHAR* _term; //stemmed form
+	size_t cachedHashCode; // lazily computed by hashCode(); 0 = not cached
+	// copying is restricted to clone() so ownership of _term stays explicit
+	WeightedTerm(const WeightedTerm& other);
+public:
+	WeightedTerm (float_t weight,const TCHAR* term);
+	~WeightedTerm();
+
+	/**
+	 * @return the term value (stemmed); owned by this object
+	 */
+	const TCHAR* getTerm() const;
+
+	/**
+	 * @return the weight associated with this term
+	 */
+	float_t getWeight() const ;
+
+	/**
+	 * @param term the term value (stemmed); duplicated internally
+	 */
+	void setTerm(TCHAR* term);
+	/**
+	 * @param weight the weight associated with this term
+	 */
+	void setWeight(float_t aweight);
+
+	size_t hashCode();
+	/** @return a heap-allocated deep copy; caller owns it */
+	WeightedTerm* clone() const;
+
+	/**
+	 * Compare weighted terms, according to the term text.
+	 * @todo Do we have to take boost factors into account
+	 */
+	class Compare:LUCENE_BASE, public CL_NS(util)::Compare::_base //<WeightedTerm*>
+	{
+	public:
+	//todo: this should be more efficient, but will be using a hash table soon, anyway
+		bool operator()( WeightedTerm* t1, WeightedTerm* t2 ) const;
+		size_t operator()( WeightedTerm* t ) const;
+	};
+};
+
+/** CLHashSet of WeightedTerm */
+typedef CL_NS(util)::CLHashSet<WeightedTerm*, WeightedTerm::Compare, CL_NS(util)::Deletor::Object<WeightedTerm> > WeightedTermList;
+
+CL_NS_END2
+
+#endif
+
--- a/searchengine/oss/cl/clucene/src/clucene/util/equators.h	Thu May 27 13:59:44 2010 +0300
+++ b/searchengine/oss/cl/clucene/src/clucene/util/equators.h	Fri Jun 11 14:43:47 2010 +0300
@@ -188,7 +188,7 @@
 	class Object{
 	public:
 		static void doDelete(_kt* obj){
-			_CLLDELETE(obj);
+			_CLVDELETE(obj);
 		}
 	};
 	template<typename _kt>
--- a/searcher/searchclient/traces/OstTraceDefinitions.h	Thu May 27 13:59:44 2010 +0300
+++ b/searcher/searchclient/traces/OstTraceDefinitions.h	Fri Jun 11 14:43:47 2010 +0300
@@ -1,19 +1,3 @@
-/*
-* Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
-* All rights reserved.
-* This component and the accompanying materials are made available
-* under the terms of "Eclipse Public License v1.0"
-* which accompanies this distribution, and is available
-* at the URL "http://www.eclipse.org/legal/epl-v10.html".
-*
-* Initial Contributors:
-* Nokia Corporation - initial contribution.
-*
-* Contributors:
-*
-* Description: 
-*
-*/
 #ifndef __OSTTRACEDEFINITIONS_H__
 #define __OSTTRACEDEFINITIONS_H__
 // OST_TRACE_COMPILER_IN_USE flag has been added by Trace Compiler
--- a/searcher/searchserver/traces/OstTraceDefinitions.h	Thu May 27 13:59:44 2010 +0300
+++ b/searcher/searchserver/traces/OstTraceDefinitions.h	Fri Jun 11 14:43:47 2010 +0300
@@ -1,19 +1,3 @@
-/*
-* Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
-* All rights reserved.
-* This component and the accompanying materials are made available
-* under the terms of "Eclipse Public License v1.0"
-* which accompanies this distribution, and is available
-* at the URL "http://www.eclipse.org/legal/epl-v10.html".
-*
-* Initial Contributors:
-* Nokia Corporation - initial contribution.
-*
-* Contributors:
-*
-* Description: 
-*
-*/
 #ifndef __OSTTRACEDEFINITIONS_H__
 #define __OSTTRACEDEFINITIONS_H__
 // OST_TRACE_COMPILER_IN_USE flag has been added by Trace Compiler
--- a/searchsrv_plat/cpix_framework_api/inc/cdocumentfield.h	Thu May 27 13:59:44 2010 +0300
+++ b/searchsrv_plat/cpix_framework_api/inc/cdocumentfield.h	Fri Jun 11 14:43:47 2010 +0300
@@ -60,7 +60,8 @@
 		{
 		EIndexNo = 16,
 		EIndexTokenized = 32,
-		EIndexUnTokenized = 64
+		EIndexUnTokenized = 64,
+		EIndexFreeText = 128
 		};
 
 	/**
--- a/sis/centrep.pkg	Thu May 27 13:59:44 2010 +0300
+++ b/sis/centrep.pkg	Fri Jun 11 14:43:47 2010 +0300
@@ -1,18 +1,3 @@
-;
-; Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
-; All rights reserved.
-; This component and the accompanying materials are made available
-; under the terms of "Eclipse Public License v1.0"
-; which accompanies this distribution, and is available
-; at the URL "http://www.eclipse.org/legal/epl-v10.html".
-;
-; Initial Contributors:
-; Nokia Corporation - initial contribution.
-;
-; Contributors:
-;
-; Description: 
-;
 ; Languages
 &EN
 
@@ -28,4 +13,7 @@
 ; CenRep ini file
 ;"12345678.txt" - "c:\private\10202be9\12345678.txt"
 "\epoc32\winscw\c\private\10202be9\20029ac7.cre"
--"!:\private\10202be9\20029ac7.cre"
\ No newline at end of file
+-"!:\private\10202be9\20029ac7.cre"
+
+"\epoc32\winscw\c\private\10202be9\2001f6fb.cre"
+-"!:\private\10202be9\2001f6fb.cre"
\ No newline at end of file
--- a/sis/cpixsearch_stub.pkg	Thu May 27 13:59:44 2010 +0300
+++ b/sis/cpixsearch_stub.pkg	Fri Jun 11 14:43:47 2010 +0300
@@ -1,18 +1,3 @@
-;
-; Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
-; All rights reserved.
-; This component and the accompanying materials are made available
-; under the terms of "Eclipse Public License v1.0"
-; which accompanies this distribution, and is available
-; at the URL "http://www.eclipse.org/legal/epl-v10.html".
-;
-; Initial Contributors:
-; Nokia Corporation - initial contribution.
-;
-; Contributors:
-;
-; Description: 
-;
 
 
 
--- a/sis/makeme.bat	Thu May 27 13:59:44 2010 +0300
+++ b/sis/makeme.bat	Fri Jun 11 14:43:47 2010 +0300
@@ -1,18 +1,3 @@
-@rem
-@rem Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
-@rem All rights reserved.
-@rem This component and the accompanying materials are made available
-@rem under the terms of "Eclipse Public License v1.0"
-@rem which accompanies this distribution, and is available
-@rem at the URL "http://www.eclipse.org/legal/epl-v10.html".
-@rem
-@rem Initial Contributors:
-@rem Nokia Corporation - initial contribution.
-@rem
-@rem Contributors:
-@rem
-@rem Description: 
-@rem
 REM echo %EPOCROOT%
 set FOOBAR=%EPOCROOT%
 REM echo %FOOBAR%
--- a/sis/makeme.pl	Thu May 27 13:59:44 2010 +0300
+++ b/sis/makeme.pl	Fri Jun 11 14:43:47 2010 +0300
@@ -1,19 +1,3 @@
-#
-# Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
-# All rights reserved.
-# This component and the accompanying materials are made available
-# under the terms of "Eclipse Public License v1.0"
-# which accompanies this distribution, and is available
-# at the URL "http://www.eclipse.org/legal/epl-v10.html".
-#
-# Initial Contributors:
-# Nokia Corporation - initial contribution.
-#
-# Contributors:
-#
-# Description: 
-#
-
 if (!$ARGV[0]) {
 	printf("ERROR: Target (udeb/urel) needs to be specified.\n"); 
 	printf("Use either 'makeme udeb' or 'makeme urel'\n"); 
--- a/sis/makesis.mk	Thu May 27 13:59:44 2010 +0300
+++ b/sis/makesis.mk	Fri Jun 11 14:43:47 2010 +0300
@@ -1,18 +1,3 @@
-#
-# Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
-# All rights reserved.
-# This component and the accompanying materials are made available
-# under the terms of "Eclipse Public License v1.0"
-# which accompanies this distribution, and is available
-# at the URL "http://www.eclipse.org/legal/epl-v10.html".
-#
-# Initial Contributors:
-# Nokia Corporation - initial contribution.
-#
-# Contributors:
-#
-# Description:  
-#
 # ============================================================================
 #  Name	 : makeSis.mk
 #  Part of  : 
--- a/tsrc/cpixmwtester/conf/cpixmwtester.cfg	Thu May 27 13:59:44 2010 +0300
+++ b/tsrc/cpixmwtester/conf/cpixmwtester.cfg	Fri Jun 11 14:43:47 2010 +0300
@@ -63,4 +63,18 @@
 create CPixMWTester foobar 
 foobar TestUpdateINStatus contacts
 delete foobar
+[Endtest]
+
+[Test]
+title TestAddUnloadlist
+create CPixMWTester foobar 
+foobar TestAddUnloadlist
+delete foobar
+[Endtest]
+
+[Test]
+title TestRemovefromUnloadlist
+create CPixMWTester foobar 
+foobar TestRemovefromUnloadlist
+delete foobar
 [Endtest]
\ No newline at end of file
--- a/tsrc/cpixmwtester/inc/cpixmwtester.h	Thu May 27 13:59:44 2010 +0300
+++ b/tsrc/cpixmwtester/inc/cpixmwtester.h	Fri Jun 11 14:43:47 2010 +0300
@@ -176,6 +176,10 @@
         
         virtual TInt TestUpdateINStatusL( CStifItemParser& aItem );
         
+        virtual TInt TestAddUnloadlistL( CStifItemParser& aItem );
+        
+        virtual TInt TestRemovefromUnloadlistL( CStifItemParser& aItem );
+        
         /**
          * Method used to log version of test class
          */
--- a/tsrc/cpixmwtester/src/cpixmwtesterblocks.cpp	Thu May 27 13:59:44 2010 +0300
+++ b/tsrc/cpixmwtester/src/cpixmwtesterblocks.cpp	Fri Jun 11 14:43:47 2010 +0300
@@ -167,6 +167,8 @@
         ENTRY( "TestResetContent",CCPixMWTester::TestResetContentL ),
         ENTRY( "TestUpdateBLStatus",CCPixMWTester::TestUpdateBLStatusL ),
         ENTRY( "TestUpdateINStatus",CCPixMWTester::TestUpdateINStatusL ),
+        ENTRY( "TestAddUnloadlist",CCPixMWTester::TestAddUnloadlistL ),
+        ENTRY( "TestRemovefromUnloadlist",CCPixMWTester::TestRemovefromUnloadlistL ),
         //ADD NEW ENTRY HERE
         // [test cases entries] - Do not remove
 
@@ -435,6 +437,54 @@
     doLog( iLog, err, KNoErrorString );
     return err;
     }
+
+// -----------------------------------------------------------------------------
+// CCPixMWTester::TestAddUnloadlistL
+// STIF case: adds KTestUid to the blacklist manager's unload list, verifies it
+// can be found there, then removes it again. Returns KErrNone on success,
+// KErrNotFound if the added UID is not found.
+// (aItem is unused; the STIF entry-point signature requires it.)
+// -----------------------------------------------------------------------------
+//
+TInt CCPixMWTester::TestAddUnloadlistL( CStifItemParser& aItem)
+    {
+    TInt err = KErrNone;        
+    CBlacklistMgr* blacklistmanager = CBlacklistMgr::NewL();
+    CleanupStack::PushL( blacklistmanager );
+    //Add an Uid to Blacklist DB
+    blacklistmanager->AddtoUnloadListL( KTestUid );
+    //Check if the Uid is added to database or not
+    TBool found = blacklistmanager->FindfromUnloadListL(KTestUid );
+    
+    if(!found) err = KErrNotFound;
+    //clear the UID from the database
+    blacklistmanager->RemoveFromUnloadListL(KTestUid);
+    CleanupStack::PopAndDestroy( blacklistmanager ); 
+    doLog( iLog, err, KNoErrorString );        
+    return err;
+    }
+
+// -----------------------------------------------------------------------------
+// CCPixMWTester::TestRemovefromUnloadlistL
+// STIF case: adds KTestUid to the unload list, removes it, and verifies the
+// lookup now fails. Returns KErrNone only when the add/remove round-trip
+// behaves as expected, otherwise KErrNotFound.
+// (aItem is unused; the STIF entry-point signature requires it.)
+// -----------------------------------------------------------------------------
+//
+TInt CCPixMWTester::TestRemovefromUnloadlistL( CStifItemParser& aItem)
+    {
+    TInt err = KErrNotFound;        
+    CBlacklistMgr* blacklistmanager = CBlacklistMgr::NewL();
+    CleanupStack::PushL( blacklistmanager );
+    //Add an Uid to Blacklist DB
+    blacklistmanager->AddtoUnloadListL( KTestUid );
+    //Check if the Uid is added to database or not
+    TBool found = blacklistmanager->FindfromUnloadListL(KTestUid );
+    
+    if(found)
+        {
+         //clear the UID from the database
+        blacklistmanager->RemoveFromUnloadListL(KTestUid);
+        found = blacklistmanager->FindfromUnloadListL(KTestUid );
+        if ( !found )  err = KErrNone;
+        }
+    CleanupStack::PopAndDestroy( blacklistmanager ); 
+    doLog( iLog, err, KNoErrorString );        
+    return err;
+    }
 // -----------------------------------------------------------------------------
 // CCPixMWTester::?member_function
 // ?implementation_description
--- a/watchdog/traces/OstTraceDefinitions.h	Thu May 27 13:59:44 2010 +0300
+++ b/watchdog/traces/OstTraceDefinitions.h	Fri Jun 11 14:43:47 2010 +0300
@@ -1,19 +1,3 @@
-/*
-* Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
-* All rights reserved.
-* This component and the accompanying materials are made available
-* under the terms of "Eclipse Public License v1.0"
-* which accompanies this distribution, and is available
-* at the URL "http://www.eclipse.org/legal/epl-v10.html".
-*
-* Initial Contributors:
-* Nokia Corporation - initial contribution.
-*
-* Contributors:
-*
-* Description: 
-*
-*/
 #ifndef __OSTTRACEDEFINITIONS_H__
 #define __OSTTRACEDEFINITIONS_H__
 // OST_TRACE_COMPILER_IN_USE flag has been added by Trace Compiler