/*
 * Author: Florian Corzilius
 *
 * Created on September 4, 2014, 11:24 AM
 */

#pragma once

#include "Cache.h"

namespace carl
{
    template<typename T>
    const typename Cache<T>::Ref Cache<T>::NO_REF = 0;
 
    template<typename T>
    Cache<T>::Cache( size_t _maxCacheSize, double _cacheReductionAmount, double _decay ):
        mMaxCacheSize( _maxCacheSize ),
        mCacheReductionAmount( _cacheReductionAmount ), // TODO: use it, but without the effort of quick select
        mDecay( _decay ),
        mUnusedPositionsInCacheRefs()
    {
        assert( _decay >= 0.9 && _decay <= 1.0 );
        mCache.reserve( _maxCacheSize ); // TODO: maybe no reservation of memory and let it grow dynamically
        mCacheRefs.reserve( _maxCacheSize ); // TODO: maybe no reservation of memory and let it grow dynamically
        // reserve the first entry with index 0 as default
        mCacheRefs.push_back( nullptr );
    }
 
    template<typename T>
    Cache<T>::~Cache()
    {
        while( !mCache.empty() )
        {
            TypeInfoPair<T,Info>* tip = *mCache.begin();
            mCache.erase( mCache.begin() );
            delete tip->first;
            delete tip;
        }
    }
 
    template<typename T>
    std::pair<typename Cache<T>::Ref,bool> Cache<T>::cache( T* _toCache, bool (*_canBeUpdated)( const T&, const T& ), void (*_update)( const T&, const T& ) )
    {
        std::lock_guard<std::recursive_mutex> lock( mMutex );
        if( mCache.size() >= mMaxCacheSize ) // Clean, if the number of elements in the cache exceeds the threshold.
        {
            clean();
        }
        auto newElement = new TypeInfoPair<T,Info>( _toCache, Info( mMaxActivity ) );
        auto ret = mCache.insert( newElement );
        if( !ret.second ) // There is already an equal object in the cache.
        {
            delete newElement; // an equal entry is already cached, so the freshly allocated pair is not needed
            // Try to update the entry in the cache by the information in the given object.
            if( (*_canBeUpdated)( *((*ret.first)->first), *_toCache ) )
            {
                TypeInfoPair<T,Info>* element = *ret.first;
                (*_update)( *element->first, *_toCache );
                mCache.erase( ret.first );
                element->first->rehash();
                auto retB = mCache.insert( element );
                assert( retB.second );
                for( const Ref& ref : element->second.refStoragePositions )
                    mCacheRefs[ref] = *retB.first;
                Info& info = (*retB.first)->second;
                assert( info.refStoragePositions.size() > 0 );
                assert( info.refStoragePositions.front() > 0 );
                info.refStoragePositions.insert( info.refStoragePositions.end(), element->second.refStoragePositions.begin(), element->second.refStoragePositions.end() );
                return std::make_pair( info.refStoragePositions.front(), false );
            }
        }
        else // Create a new entry in the cache.
        {
            if( mUnusedPositionsInCacheRefs.empty() ) // Get a brand new reference.
            {
                assert( mCacheRefs.size() > 0 );
                (*ret.first)->second.refStoragePositions.push_back( mCacheRefs.size() );
                assert( !hasDuplicates( (*ret.first)->second.refStoragePositions ) );
                mCacheRefs.push_back( newElement );
            }
            else // Try to take the reference from the stack of old ones.
            {
                mCacheRefs[mUnusedPositionsInCacheRefs.top()] = newElement;
                assert( mUnusedPositionsInCacheRefs.top() > 0 );
                newElement->second.refStoragePositions.push_back( mUnusedPositionsInCacheRefs.top() );
                assert( !hasDuplicates( newElement->second.refStoragePositions ) );
                mUnusedPositionsInCacheRefs.pop();
            }
            assert( mNumOfUnusedEntries < std::numeric_limits<sint>::max() );
            ++mNumOfUnusedEntries;
        }
        assert( (*ret.first)->second.refStoragePositions.size() > 0 );
        assert( (*ret.first)->second.refStoragePositions.front() > 0 );
        return std::make_pair( (*ret.first)->second.refStoragePositions.front(), ret.second );
    }
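
    // Illustrative usage sketch (editorial addition, not part of the original file). "MyEntry", "canBeUpdated"
    // and "update" are hypothetical client-side names; the sketch only relies on the interface shown above:
    //
    //     Cache<MyEntry> entryCache( 1024, 0.2, 0.95 );
    //     auto res = entryCache.cache( new MyEntry( /* ... */ ), &canBeUpdated, &update );
    //     entryCache.reg( res.first );    // the entry now counts as used and survives clean()
    //     /* ... work with the object cached behind res.first ... */
    //     entryCache.dereg( res.first );  // release it; once unused, clean() may evict it again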
 
    template<typename T>
    void Cache<T>::reg( Ref _refStoragePos )
    {
        std::lock_guard<std::recursive_mutex> lock( mMutex );
        assert( _refStoragePos < mCacheRefs.size() );
        TypeInfoPair<T,Info>* cacheRef = mCacheRefs[_refStoragePos];
        assert( cacheRef != nullptr );
        if( cacheRef->second.usageCount == 0 )
        {
            assert( mNumOfUnusedEntries > 0 );
            --mNumOfUnusedEntries;
        }
        assert( cacheRef->second.usageCount < std::numeric_limits<sint>::max() );
        ++cacheRef->second.usageCount;
    }
 
    template<typename T>
    void Cache<T>::dereg( Ref _refStoragePos )
    {
        std::lock_guard<std::recursive_mutex> lock( mMutex );
        assert( _refStoragePos < mCacheRefs.size() );
        TypeInfoPair<T,Info>* cacheRef = mCacheRefs[_refStoragePos];
        assert( cacheRef != nullptr );
        assert( cacheRef->second.usageCount > 0 );
        --cacheRef->second.usageCount;
        if( cacheRef->second.usageCount == 0 ) // no more usage
        {
            assert( mNumOfUnusedEntries < std::numeric_limits<sint>::max() );
            ++mNumOfUnusedEntries;
            // If the cache still holds at least as many used elements as the desired maximum cache size, remove this entry directly.
            if( mCache.size() - mNumOfUnusedEntries >= mMaxCacheSize )
            {
                auto pos = mCache.find( cacheRef );
                assert( pos != mCache.end() );
                erase( pos );
            }
        }
    }
 
    template<typename T>
    void Cache<T>::rehash( Ref _refStoragePos )
    {
        std::lock_guard<std::recursive_mutex> lock( mMutex );
        assert( _refStoragePos < mCacheRefs.size() );
        TypeInfoPair<T,Info>* cacheRef = mCacheRefs[_refStoragePos];
        assert( cacheRef != nullptr );
        mCache.erase( cacheRef );
        cacheRef->first->rehash();
        const Info& infoB = cacheRef->second;
        auto ret = mCache.insert( cacheRef );
        if( !ret.second ) // The rehashed entry collides with an already existing entry: merge the two entries.
        {
            Info& info = (*ret.first)->second;
            if( infoB.usageCount == 0 )
            {
                assert( mNumOfUnusedEntries > 0 );
                --mNumOfUnusedEntries;
            }
            else if( info.usageCount == 0 )
            {
                assert( mNumOfUnusedEntries > 0 );
                --mNumOfUnusedEntries;
            }
            assert( info.usageCount + infoB.usageCount >= info.usageCount );
            info.usageCount += infoB.usageCount;
            info.refStoragePositions.insert( info.refStoragePositions.end(), infoB.refStoragePositions.begin(), infoB.refStoragePositions.end() );
            assert( !hasDuplicates( info.refStoragePositions ) );
            assert( std::find( infoB.refStoragePositions.begin(), infoB.refStoragePositions.end(), _refStoragePos ) != infoB.refStoragePositions.end() );
            for( const Ref& ref : infoB.refStoragePositions )
            {
                assert( mCacheRefs[ref] != *(ret.first) );
                mCacheRefs[ref] = *(ret.first);
            }
            delete cacheRef->first;
            delete cacheRef;
        }
    }
 
    template<typename T>
    void Cache<T>::clean()
    {
        //CARL_LOG_TRACE( "carl.util.cache", "Cleaning cache..." );
        if( double(mNumOfUnusedEntries) < (double(mCache.size()) * mCacheReductionAmount) )
        {
            if( mNumOfUnusedEntries > 0 )
            {
                // There are fewer entries we could delete than we want to delete: just delete them all.
                for( auto iter = mCache.begin(); iter != mCache.end(); )
                {
                    if( (*iter)->second.usageCount == 0 )
                    {
                        iter = erase( iter );
                    }
                    else
                    {
                        ++iter;
                    }
                }
            }
            assert( mNumOfUnusedEntries == 0 );
        }
        else
        {
            std::vector<typename Container::iterator> noUsageEntries;
            // Calculate the average activity of all unused entries in the cache; it serves as a cheap stand-in for their median.
            double limit = 0.0;
            for( auto iter = mCache.begin(); iter != mCache.end(); ++iter )
            {
                if( (*iter)->second.usageCount == 0 )
                {
                    noUsageEntries.push_back( iter );
                    limit += (*iter)->second.activity;
                }
            }
            limit = limit / double(noUsageEntries.size());
            // Remove all unused entries whose activity does not exceed the calculated limit.
            for( auto iter = noUsageEntries.begin(); iter != noUsageEntries.end(); ++iter )
            {
                if( (**iter)->second.activity <= limit )
                {
                    erase( *iter );
                }
            }
        }
    }
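
    // Editorial note (not from the original file): clean() uses the average activity of the unused entries as
    // its eviction limit. For example, if the unused entries currently have activities 1.0, 2.0 and 9.0, the
    // limit is 4.0, so the first two entries are erased while the third survives this cleaning round.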
 
    template<typename T>
    void Cache<T>::decayActivity()
    {
        std::lock_guard<std::recursive_mutex> lock( mMutex );
        mActivityIncrement *= (1 / mDecay);
    }
 
    template<typename T>
    void Cache<T>::strengthenActivity( Ref _refStoragePos )
    {
        assert( _refStoragePos < mCacheRefs.size() );
        TypeInfoPair<T,Info>* cacheRef = mCacheRefs[_refStoragePos];
        assert( cacheRef != nullptr );
        // update the activity of the cache entry at the given position
        if( (cacheRef->second.activity += mActivityIncrement) > mActivityThreshold )
        {
            std::lock_guard<std::recursive_mutex> lock( mMutex );
            // rescale if the threshold for the maximum activity has been exceeded
            for( auto iter = mCache.begin(); iter != mCache.end(); ++iter )
                (*iter)->second.activity *= mActivityDecrementFactor;
            mActivityIncrement *= mActivityDecrementFactor;
            mMaxActivity *= mActivityDecrementFactor;
        }
        // update the maximum activity
        std::lock_guard<std::recursive_mutex> lock( mMutex );
        if( mMaxActivity < cacheRef->second.activity )
            mMaxActivity = cacheRef->second.activity;
    }
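
    // Editorial note (not from the original file): strengthenActivity() and decayActivity() follow the usual
    // "bump and decay" scheme known from MiniSat-style activity heuristics. Reusing the hypothetical names from
    // the sketch after cache() above, a client would typically bump an entry whenever it is used and decay once
    // per round, so that recently used entries keep a higher activity and are preferred by clean():
    //
    //     entryCache.strengthenActivity( res.first ); // the entry behind res.first was just used
    //     entryCache.decayActivity();                 // end of round: future bumps outweigh older ones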
 
    template<typename T>
    void Cache<T>::print( std::ostream& _out ) const
    {
        _out << "General cache information:" << std::endl;
        _out << "   desired maximum cache size                                 : "  << mMaxCacheSize << std::endl;
        _out << "   number of unused entries                                   : "  << mNumOfUnusedEntries << std::endl;
        _out << "   desired reduction amount when cleaning the cache (not used): "  << mCacheReductionAmount << std::endl;
        _out << "   maximum of all activities                                  : "  << mMaxActivity << std::endl;
        _out << "   the current value of the activity increment                : "  << mActivityIncrement << std::endl;
        _out << "   decay factor for the given activities                      : "  << mDecay << std::endl;
        _out << "   upper bound of the activities                              : "  << mActivityThreshold << std::endl;
        _out << "   scaling factor of the activities                           : "  << mActivityDecrementFactor << std::endl;
        _out << "   current size of the cache                                  : "  << mCache.size() << std::endl;
        _out << "   number of yet involved references                          : "  << mCacheRefs.size() << std::endl;
        _out << "   number of currently freed references                       : "  << mUnusedPositionsInCacheRefs.size() << std::endl;
        _out << "Cache contains:" << std::endl;
        for( auto iter = mCache.begin(); iter != mCache.end(); ++iter )
        {
            assert( (*iter)->first != nullptr );
            _out << "   " << *(*iter)->first << std::endl;
            _out << "                       usage count: " << (*iter)->second.usageCount << std::endl;
            _out << "        reference storage positions:";
            for( Ref ref : (*iter)->second.refStoragePositions )
                _out << "  " << ref;
            _out << std::endl;
            _out << "                          activity: " << (*iter)->second.activity << std::endl;
        }
    }
} // namespace carl