carl  24.04
Computer ARithmetic Library
Cache.tpp
Go to the documentation of this file.
1 /*
2  * File: Cache.tpp
3  * Author: Florian Corzilius
4  *
5  * Created on September 4, 2014, 11:24 AM
6  */
7 
8 #pragma once
9 
10 #include "Cache.h"
11 
12 
13 namespace carl
14 {
template<typename T>
// Sentinel reference value meaning "no cache entry". Slot 0 of mCacheRefs is
// reserved for it by the constructor (it pushes a leading nullptr), so every
// valid reference handed out by cache() is strictly greater than NO_REF.
const typename Cache<T>::Ref Cache<T>::NO_REF = 0;
17 
/**
 * Constructs a cache with the given capacity and eviction parameters.
 * @param _maxCacheSize Desired maximum number of entries; cache() triggers
 *        clean() once this size is reached.
 * @param _cacheReductionAmount Fraction of the cache to free when cleaning
 *        (currently only compared against, see clean()).
 * @param _decay Activity decay factor; must lie in [0.9, 1.0].
 */
template<typename T>
Cache<T>::Cache( size_t _maxCacheSize, double _cacheReductionAmount, double _decay ):
    mMaxCacheSize( _maxCacheSize ),
    mCacheReductionAmount( _cacheReductionAmount ), // TODO: use it, but without the effort of quick select
    mDecay( _decay ),
    mCache(),
    mCacheRefs(),
    mUnusedPositionsInCacheRefs()
{
    assert( _decay >= 0.9 && _decay <= 1.0 );
    mCache.reserve( _maxCacheSize ); // TODO: maybe no reservation of memory and let it grow dynamically
    mCacheRefs.reserve( _maxCacheSize ); // TODO: maybe no reservation of memory and let it grow dynamically
    // reserve the first entry with index 0 as default (the NO_REF sentinel)
    mCacheRefs.push_back( nullptr );
}
33 
34  template<typename T>
35  Cache<T>::~Cache()
36  {
37  mCacheRefs.clear();
38  while( !mCache.empty() )
39  {
40  TypeInfoPair<T,Info>* tip = *mCache.begin();
41  mCache.erase( mCache.begin() );
42  T* t = tip->first;
43  delete tip;
44  delete t;
45  }
46  }
47 
48  template<typename T>
49  std::pair<typename Cache<T>::Ref,bool> Cache<T>::cache( T* _toCache, bool (*_canBeUpdated)( const T&, const T& ), void (*_update)( const T&, const T& ) )
50  {
51  std::lock_guard<std::recursive_mutex> lock( mMutex );
52  if( mCache.size() >= mMaxCacheSize ) // Clean, if the number of elements in the cache exceeds the threshold.
53  {
54  clean();
55  }
56  auto newElement = new TypeInfoPair<T,Info>(_toCache, Info(mMaxActivity));
57  auto ret = mCache.insert( newElement );
58 
59  if( !ret.second ) // There is already an equal object in the cache.
60  {
61  // Try to update the entry in the cache by the information in the given object.
62  if( (*_canBeUpdated)( *((*ret.first)->first), *_toCache ) )
63  {
64  TypeInfoPair<T,Info>* element = *ret.first;
65  (*_update)( *element->first, *_toCache );
66  mCache.erase( ret.first );
67  element->first->rehash();
68  auto retB = mCache.insert( element );
69  assert( retB.second );
70  for( const Ref& ref : element->second.refStoragePositions )
71  mCacheRefs[ref] = *retB.first;
72  delete newElement;
73  Info& info = (*retB.first)->second;
74  assert( info.refStoragePositions.size() > 0);
75  assert( info.refStoragePositions.front() > 0 );
76  info.refStoragePositions.insert( info.refStoragePositions.end(), element->second.refStoragePositions.begin(), element->second.refStoragePositions.end() );
77  return std::make_pair( info.refStoragePositions.front(), false );
78  }
79  else
80  delete newElement;
81  }
82  else // Create a new entry in the cache.
83  {
84  if( mUnusedPositionsInCacheRefs.empty() ) // Get a brand new reference.
85  {
86  assert( mCacheRefs.size() > 0);
87  (*ret.first)->second.refStoragePositions.push_back( mCacheRefs.size() );
88  assert( !hasDuplicates( (*ret.first)->second.refStoragePositions ) );
89  mCacheRefs.push_back( newElement );
90  }
91  else // Try to take the reference from the stack of old ones.
92  {
93  mCacheRefs[mUnusedPositionsInCacheRefs.top()] = newElement;
94  assert( mUnusedPositionsInCacheRefs.top() > 0);
95  newElement->second.refStoragePositions.push_back( mUnusedPositionsInCacheRefs.top() );
96  assert( !hasDuplicates( newElement->second.refStoragePositions ) );
97  mUnusedPositionsInCacheRefs.pop();
98  }
99  assert( mNumOfUnusedEntries < std::numeric_limits<sint>::max() );
100  ++mNumOfUnusedEntries;
101  }
102  assert( (*ret.first)->second.refStoragePositions.size() > 0);
103  assert( (*ret.first)->second.refStoragePositions.front() > 0 );
104  return std::make_pair( (*ret.first)->second.refStoragePositions.front(), ret.second );
105  }
106 
107  template<typename T>
108  void Cache<T>::reg( Ref _refStoragePos )
109  {
110  std::lock_guard<std::recursive_mutex> lock( mMutex );
111  assert( _refStoragePos < mCacheRefs.size() );
112  TypeInfoPair<T,Info>* cacheRef = mCacheRefs[_refStoragePos];
113  assert( cacheRef != nullptr );
114  if( cacheRef->second.usageCount == 0 )
115  {
116  assert( mNumOfUnusedEntries > 0 );
117  --mNumOfUnusedEntries;
118  }
119  assert( cacheRef->second.usageCount < std::numeric_limits<sint>::max() );
120  ++cacheRef->second.usageCount;
121  }
122 
123  template<typename T>
124  void Cache<T>::dereg( Ref _refStoragePos )
125  {
126  std::lock_guard<std::recursive_mutex> lock( mMutex );
127  assert( _refStoragePos < mCacheRefs.size() );
128  TypeInfoPair<T,Info>* cacheRef = mCacheRefs[_refStoragePos];
129  assert( cacheRef != nullptr );
130  assert( cacheRef->second.usageCount > 0 );
131  --cacheRef->second.usageCount;
132  if( cacheRef->second.usageCount == 0 ) // no more usage
133  {
134  assert( mNumOfUnusedEntries < std::numeric_limits<sint>::max() );
135  ++mNumOfUnusedEntries;
136  // If the cache contains more used elements than the maximum desired cache size, remove this entry directly.
137  if( mCache.size() - mNumOfUnusedEntries >= mMaxCacheSize )
138  {
139  erase( cacheRef );
140  }
141  }
142  }
143 
/**
 * Re-inserts the entry at the given reference position after its underlying
 * object has changed (and hence its hash value may have changed). If the
 * rehashed object now equals another entry already in the cache, the two
 * entries are merged: usage counts are added, reference storage positions are
 * concatenated, all slots of the dropped entry are redirected to the
 * surviving one, and the dropped entry (with its object) is deleted.
 * @param _refStoragePos Position in mCacheRefs of the entry to re-hash.
 */
template<typename T>
void Cache<T>::rehash( Ref _refStoragePos )
{
    std::lock_guard<std::recursive_mutex> lock( mMutex );
    assert( _refStoragePos < mCacheRefs.size() );
    TypeInfoPair<T,Info>* cacheRef = mCacheRefs[_refStoragePos];
    assert( cacheRef != nullptr );
    // Remove before mutating: the hash must not change while the element is stored.
    mCache.erase( cacheRef );
    cacheRef->first->rehash();
    const Info& infoB = cacheRef->second;
    auto ret = mCache.insert( cacheRef );
    if( !ret.second )
    {
        // Collision: an equal entry already exists — merge cacheRef into it.
        Info& info = (*ret.first)->second;
        // Two entries become one, so the unused counter drops by exactly one
        // when at least one of the two currently has no users.
        if( infoB.usageCount == 0 )
        {
            assert( mNumOfUnusedEntries > 0 );
            --mNumOfUnusedEntries;
        }
        else if( info.usageCount == 0 )
        {
            assert( mNumOfUnusedEntries > 0 );
            --mNumOfUnusedEntries;
        }
        assert( info.usageCount + infoB.usageCount >= info.usageCount ); // overflow guard
        info.usageCount += infoB.usageCount;
        // Transfer all reference storage positions to the surviving entry...
        info.refStoragePositions.insert( info.refStoragePositions.end(), infoB.refStoragePositions.begin(), infoB.refStoragePositions.end() );
        assert( !hasDuplicates( info.refStoragePositions ) );
        assert( std::find( infoB.refStoragePositions.begin(), infoB.refStoragePositions.end(), _refStoragePos ) != infoB.refStoragePositions.end() );
        // ...and redirect every slot that pointed at the dropped entry.
        for( const Ref& ref : infoB.refStoragePositions )
        {
            assert( mCacheRefs[ref] != *(ret.first) );
            mCacheRefs[ref] = *(ret.first);
        }
        // infoB must not be touched after this point: it aliases *cacheRef.
        delete cacheRef->first;
        delete cacheRef;
    }
}
182 
/**
 * Shrinks the cache by removing entries that are currently unused
 * (usageCount == 0). If the number of unused entries is smaller than the
 * desired reduction amount, all of them are removed; otherwise only those
 * whose activity does not exceed the average activity of the unused entries.
 * NOTE(review): no lock is acquired here — this relies on every caller
 * already holding mMutex (cache() does); confirm before calling elsewhere.
 */
template<typename T>
void Cache<T>::clean()
{
    //CARL_LOG_TRACE( "carl.util.cache", "Cleaning cache..." );
    if( double(mNumOfUnusedEntries) < (double(mCache.size()) * mCacheReductionAmount) )
    {
        if( mNumOfUnusedEntries > 0 )
        {
            // There are less entries we can delete than we want to delete: just delete them all
            for( auto iter = mCache.begin(); iter != mCache.end(); )
            {
                if( (*iter)->second.usageCount == 0 )
                {
                    iter = erase( iter );  // erase() yields the next valid iterator
                }
                else
                {
                    ++iter;
                }
            }
        }
        assert( mNumOfUnusedEntries == 0 );
    }
    else
    {
        std::vector<typename Container::iterator> noUsageEntries;
        // Compute the average activity of all unused entries (despite the
        // original comment, this is the arithmetic mean, not a median).
        double limit = 0.0;
        for( auto iter = mCache.begin(); iter != mCache.end(); ++iter )
        {
            if( (*iter)->second.usageCount == 0 )
            {
                noUsageEntries.push_back( iter );
                limit += (*iter)->second.activity;
            }
        }
        limit = limit / double(noUsageEntries.size());
        // Remove all unused entries whose activity does not exceed the mean.
        // (Erasing one element does not invalidate the other stored iterators.)
        for( auto iter = noUsageEntries.begin(); iter != noUsageEntries.end(); ++iter )
        {
            if( (**iter)->second.activity <= limit )
            {
                erase( *iter );
            }
        }
    }
}
230 
231  template<typename T>
232  void Cache<T>::decayActivity()
233  {
234  std::lock_guard<std::recursive_mutex> lock( mMutex );
235  mActivityIncrement *= (1 / mDecay);
236  }
237 
238  template<typename T>
239  void Cache<T>::strengthenActivity( Ref _refStoragePos )
240  {
241  assert( _refStoragePos < mCacheRefs.size() );
242  TypeInfoPair<T,Info>* cacheRef = mCacheRefs[_refStoragePos];
243  assert( cacheRef != nullptr );
244  // update the activity of the cache entry at the given position
245  if( (cacheRef->second.activity += mActivityIncrement) > mActivityThreshold )
246  {
247  std::lock_guard<std::recursive_mutex> lock( mMutex );
248  // rescale if the threshold for the maximum activity has been exceeded
249  for( auto iter = mCache.begin(); iter != mCache.end(); ++iter )
250  (*iter)->second.activity *= mActivityDecrementFactor;
251  mActivityIncrement *= mActivityDecrementFactor;
252  mMaxActivity *= mActivityDecrementFactor;
253  }
254  // update the maximum activity
255  std::lock_guard<std::recursive_mutex> lock( mMutex );
256  if( mMaxActivity < cacheRef->second.activity )
257  mMaxActivity = cacheRef->second.activity;
258  }
259 
260  template<typename T>
261  void Cache<T>::print( std::ostream& _out ) const
262  {
263  _out << "General cache information:" << std::endl;
264  _out << " desired maximum cache size : " << mMaxCacheSize << std::endl;
265  _out << " number of unused entries : " << mNumOfUnusedEntries << std::endl;
266  _out << " desired reduction amount when cleaning the cache (not used): " << mCacheReductionAmount << std::endl;
267  _out << " maximum of all activities : " << mMaxActivity << std::endl;
268  _out << " the current value of the activity increment : " << mActivityIncrement << std::endl;
269  _out << " decay factor for the given activities : " << mDecay << std::endl;
270  _out << " upper bound of the activities : " << mActivityThreshold << std::endl;
271  _out << " scaling factor of the activities : " << mActivityDecrementFactor << std::endl;
272  _out << " current size of the cache : " << mCache.size() << std::endl;
273  _out << " number of yet involved references : " << mCacheRefs.size() << std::endl;
274  _out << " number of currently freed references : " << mUnusedPositionsInCacheRefs.size() << std::endl;
275  _out << "Cache contains:" << std::endl;
276  for( auto iter = mCache.begin(); iter != mCache.end(); ++iter )
277  {
278  assert( (*iter)->first != nullptr );
279  _out << " " << *(*iter)->first << std::endl;
280  _out << " usage count: " << (*iter)->second.usageCount << std::endl;
281  _out << " reference storage positions:";
282  for( Ref ref : (*iter)->second.refStoragePositions )
283  _out << " " << ref;
284  _out << " activity: " << (*iter)->second.activity << std::endl;
285  }
286  }
287 
288 } // namespace carl