forked from NatronGitHub/Natron
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Cache.h
1902 lines (1619 loc) · 69.9 KB
/
Cache.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/* ***** BEGIN LICENSE BLOCK *****
* This file is part of Natron <https://natrongithub.github.io/>,
* (C) 2018-2021 The Natron developers
* (C) 2013-2018 INRIA and Alexandre Gauthier-Foichat
*
* Natron is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Natron is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Natron. If not, see <http://www.gnu.org/licenses/gpl-2.0.html>
* ***** END LICENSE BLOCK ***** */
#ifndef NATRON_ENGINE_ABSTRACTCACHE_H
#define NATRON_ENGINE_ABSTRACTCACHE_H
// ***** BEGIN PYTHON BLOCK *****
// from <https://docs.python.org/3/c-api/intro.html#include-files>:
// "Since Python may define some pre-processor definitions which affect the standard headers on some systems, you must include Python.h before any standard headers are included."
#include <Python.h>
// ***** END PYTHON BLOCK *****
#include "Global/Macros.h"
#include <vector>
#include <sstream> // stringstream
#include <fstream>
#include <functional>
#include <list>
#include <set>
#include <cstddef>
#include <utility>
#include <algorithm> // min, max
#include <string>
#include <stdexcept>
#include "Global/GlobalDefines.h"
#include "Global/StrUtils.h"
GCC_DIAG_OFF(deprecated)
#include <QtCore/QMutex>
#include <QtCore/QThread>
#include <QtCore/QWaitCondition>
#include <QtCore/QMutexLocker>
#include <QtCore/QObject>
#include <QtCore/QBuffer>
#include <QtCore/QRunnable>
GCC_DIAG_ON(deprecated)
#if !defined(Q_MOC_RUN) && !defined(SBK_RUN)
#include <boost/shared_ptr.hpp>
#include <boost/make_shared.hpp>
#endif
#include "Engine/AppManager.h" //for access to settings
#include "Engine/CacheEntry.h"
#include "Engine/ImageLocker.h"
#include "Engine/LRUHashTable.h"
#include "Engine/MemoryInfo.h" // getSystemTotalRAM
#include "Engine/Settings.h"
#include "Engine/StandardPaths.h"
#include "Engine/EngineFwd.h"
//Beyond that percentage of occupation, the cache will start evicting LRU entries
#define NATRON_CACHE_LIMIT_PERCENT 0.9
#define NATRON_TILE_CACHE_FILE_SIZE_BYTES 2000000000
///When defined, number of opened files, memory size and disk size of the cache are printed whenever there's activity.
//#define NATRON_DEBUG_CACHE
NATRON_NAMESPACE_ENTER
/**
* @brief The point of this thread is to delete the content of the list in a separate thread so the thread calling
* get() doesn't wait for all the entries to be deleted (which can be expensive for large images)
**/
template <typename T>
class DeleterThread
    : public QThread
{
    mutable QMutex _entriesQueueMutex;
    std::list<boost::shared_ptr<T> > _entriesQueue; // entries pending destruction; protected by _entriesQueueMutex
    QWaitCondition _entriesQueueNotEmptyCond;
    CacheAPI* cache; // non-owning back-pointer; notified after each deallocation
    QMutex mustQuitMutex;
    QWaitCondition mustQuitCond;
    bool mustQuit; // protected by mustQuitMutex

public:

    DeleterThread(CacheAPI* cache)
        : QThread()
        , _entriesQueueMutex()
        , _entriesQueue()
        , _entriesQueueNotEmptyCond()
        , cache(cache)
        , mustQuitMutex()
        , mustQuitCond()
        , mustQuit(false)
    {
        setObjectName( QString::fromUtf8("CacheDeleter") );
    }

    virtual ~DeleterThread()
    {
    }

    /**
     * @brief Enqueue entries for destruction on this thread. Starts the thread lazily
     * on first use; otherwise wakes it up. No-op for an empty list.
     **/
    void appendToQueue(const std::list<boost::shared_ptr<T> > & entriesToDelete)
    {
        if ( entriesToDelete.empty() ) {
            return;
        }
        {
            QMutexLocker k(&_entriesQueueMutex);
            _entriesQueue.insert( _entriesQueue.begin(), entriesToDelete.begin(), entriesToDelete.end() );
        }
        if ( !isRunning() ) {
            start();
        } else {
            QMutexLocker k(&_entriesQueueMutex);
            _entriesQueueNotEmptyCond.wakeOne();
        }
    }

    /**
     * @brief Ask the thread to drain its queue and exit, then block until it has done so.
     * Safe to call when the thread was never started.
     **/
    void quitThread()
    {
        if ( !isRunning() ) {
            return;
        }
        QMutexLocker k(&mustQuitMutex);
        assert(!mustQuit);
        mustQuit = true;

        {
            // Push a null sentinel so run() wakes up even if the queue was empty.
            QMutexLocker k2(&_entriesQueueMutex);
            _entriesQueue.push_back( boost::shared_ptr<T>() );
            _entriesQueueNotEmptyCond.wakeOne();
        }
        while (mustQuit) {
            mustQuitCond.wait(&mustQuitMutex);
        }
    }

    /// @return true while there are still entries waiting to be destroyed.
    bool isWorking() const
    {
        QMutexLocker k(&_entriesQueueMutex);

        return !_entriesQueue.empty();
    }

private:

    virtual void run() OVERRIDE FINAL
    {
        for (;; ) {
            bool quit;
            {
                QMutexLocker k(&mustQuitMutex);
                quit = mustQuit;
            }

            {
                boost::shared_ptr<T> front;
                {
                    QMutexLocker k(&_entriesQueueMutex);
                    if ( quit && _entriesQueue.empty() ) {
                        // BUGFIX: unlock through the locker, not the mutex directly.
                        // The previous code called _entriesQueueMutex.unlock() and then
                        // the locker's destructor unlocked the same mutex a second time
                        // on return, which is undefined behavior with QMutex.
                        k.unlock();
                        QMutexLocker k2(&mustQuitMutex);
                        assert(mustQuit);
                        mustQuit = false;
                        mustQuitCond.wakeOne();

                        return;
                    }
                    while ( _entriesQueue.empty() ) {
                        _entriesQueueNotEmptyCond.wait(&_entriesQueueMutex);
                    }

                    assert( !_entriesQueue.empty() );
                    front = _entriesQueue.front();
                    _entriesQueue.pop_front();
                }
                if (front) {
                    front->scheduleForDestruction();
                }
            } // front. After this scope, the image is guaranteed to be freed
            cache->notifyMemoryDeallocated();
        }
    }
};
/**
* @brief The point of this thread is to remove entries that we are sure are no longer needed
* e.g: they may have a hash that can no longer be produced
**/
class CacheCleanerThread
    : public QThread
{
    mutable QMutex _requestQueueMutex;
    struct CleanRequest
    {
        std::string holderID;
        U64 nodeHash;
        bool removeAll;
    };

    std::list<CleanRequest> _requestsQueues; // pending clean requests; protected by _requestQueueMutex
    QWaitCondition _requestsQueueNotEmptyCond;
    CacheAPI* cache; // non-owning back-pointer; performs the actual removal
    QMutex mustQuitMutex;
    QWaitCondition mustQuitCond;
    bool mustQuit; // protected by mustQuitMutex

public:

    CacheCleanerThread(CacheAPI* cache)
        : QThread()
        , _requestQueueMutex()
        , _requestsQueues()
        , _requestsQueueNotEmptyCond()
        , cache(cache)
        , mustQuitMutex()
        , mustQuitCond()
        , mustQuit(false)
    {
        setObjectName( QString::fromUtf8("CacheCleaner") );
    }

    virtual ~CacheCleanerThread()
    {
    }

    /**
     * @brief Enqueue a request to remove entries belonging to holderID whose node hash
     * differs from nodeHash (or all of them when removeAll is set). Starts the thread
     * lazily on first use; otherwise wakes it up.
     **/
    void appendToQueue(const std::string & holderID,
                       U64 nodeHash,
                       bool removeAll)
    {
        {
            QMutexLocker k(&_requestQueueMutex);
            CleanRequest r;
            r.holderID = holderID;
            r.nodeHash = nodeHash;
            r.removeAll = removeAll;
            _requestsQueues.push_back(r);
        }
        if ( !isRunning() ) {
            start();
        } else {
            QMutexLocker k(&_requestQueueMutex);
            _requestsQueueNotEmptyCond.wakeOne();
        }
    }

    /**
     * @brief Ask the thread to drain its queue and exit, then block until it has done so.
     * Safe to call when the thread was never started.
     **/
    void quitThread()
    {
        if ( !isRunning() ) {
            return;
        }
        QMutexLocker k(&mustQuitMutex);
        assert(!mustQuit);
        mustQuit = true;

        {
            // Push a dummy request so run() wakes up even if the queue was empty.
            QMutexLocker k2(&_requestQueueMutex);
            CleanRequest r;
            _requestsQueues.push_back(r);
            _requestsQueueNotEmptyCond.wakeOne();
        }
        while (mustQuit) {
            mustQuitCond.wait(&mustQuitMutex);
        }
    }

    /// @return true while there are still clean requests waiting to be processed.
    bool isWorking() const
    {
        QMutexLocker k(&_requestQueueMutex);

        return !_requestsQueues.empty();
    }

private:

    virtual void run() OVERRIDE FINAL
    {
        for (;; ) {
            bool quit;
            {
                QMutexLocker k(&mustQuitMutex);
                quit = mustQuit;
            }

            {
                CleanRequest front;
                {
                    QMutexLocker k(&_requestQueueMutex);
                    if ( quit && _requestsQueues.empty() ) {
                        // BUGFIX: unlock through the locker, not the mutex directly.
                        // The previous code called _requestQueueMutex.unlock() and then
                        // the locker's destructor unlocked the same mutex a second time
                        // on return, which is undefined behavior with QMutex.
                        k.unlock();
                        QMutexLocker k2(&mustQuitMutex);
                        assert(mustQuit);
                        mustQuit = false;
                        mustQuitCond.wakeOne();

                        return;
                    }
                    while ( _requestsQueues.empty() ) {
                        _requestsQueueNotEmptyCond.wait(&_requestQueueMutex);
                    }

                    assert( !_requestsQueues.empty() );
                    front = _requestsQueues.front();
                    _requestsQueues.pop_front();
                }
                cache->removeAllEntriesWithDifferentNodeHashForHolderPrivate(front.holderID, front.nodeHash, front.removeAll);
            }
        }
    }
};
// Thin QObject wrapper whose only purpose is to emit cache activity signals.
// The Cache class itself is a template and cannot carry Q_OBJECT, so it holds
// one of these and forwards notifications through it.
// NOTE: signal signatures are matched by moc-generated string tables; do not
// rename or re-type them without updating every connect() site.
class CacheSignalEmitter
    : public QObject
{
    Q_OBJECT

public:
    CacheSignalEmitter()
    {
    }

    ~CacheSignalEmitter()
    {
    }

    // Emitted when the in-memory portion of the cache has been cleared.
    void emitSignalClearedInMemoryPortion()
    {
        Q_EMIT clearedInMemoryPortion();
    }

    // Emitted when the on-disk portion of the cache has been cleared.
    void emitClearedDiskPortion()
    {
        Q_EMIT clearedDiskPortion();
    }

    // Emitted when an entry for the given timeline time was added.
    void emitAddedEntry(SequenceTime time)
    {
        Q_EMIT addedEntry(time);
    }

    // Emitted when an entry was removed; 'storage' identifies where it lived.
    void emitRemovedEntry(SequenceTime time,
                          int storage)
    {
        Q_EMIT removedEntry(time, storage);
    }

    // Emitted when an entry migrated between storage locations (e.g. RAM <-> disk).
    void emitEntryStorageChanged(SequenceTime time,
                                 int oldStorage,
                                 int newStorage)
    {
        Q_EMIT entryStorageChanged(time, oldStorage, newStorage);
    }

Q_SIGNALS:

    void clearedInMemoryPortion();

    void clearedDiskPortion();

    void addedEntry(SequenceTime);

    void removedEntry(SequenceTime, int);

    void entryStorageChanged(SequenceTime, int, int);
};
/*
* ValueType must be derived of CacheEntryHelper
*/
template<typename EntryType>
class Cache
: public CacheAPI
{
friend class CacheCleanerThread;
public:
typedef typename EntryType::hash_type hash_type;
typedef typename EntryType::data_t data_t;
typedef typename EntryType::key_t key_t;
typedef typename EntryType::param_t param_t;
typedef boost::shared_ptr<param_t> ParamsTypePtr;
typedef boost::shared_ptr<EntryType> EntryTypePtr;
struct SerializedEntry;
typedef std::list<SerializedEntry> CacheTOC;
public:
#ifdef USE_VARIADIC_TEMPLATES
#ifdef NATRON_CACHE_USE_BOOST
#ifdef NATRON_CACHE_USE_HASH
typedef BoostLRUHashTable<hash_type, EntryTypePtr>, boost::bimaps::unordered_set_of > CacheContainer;
#else
typedef BoostLRUHashTable<hash_type, EntryTypePtr>, boost::bimaps::set_of > CacheContainer;
#endif
typedef typename CacheContainer::container_type::left_iterator CacheIterator;
typedef typename CacheContainer::container_type::left_const_iterator ConstCacheIterator;
static std::list<CachedValue> & getValueFromIterator(CacheIterator it)
{
return it->second;
}
#else // cache use STL
#ifdef NATRON_CACHE_USE_HASH
typedef StlLRUHashTable<hash_type, EntryTypePtr>, std::unordered_map > CacheContainer;
#else
typedef StlLRUHashTable<hash_type, EntryTypePtr>, std::map > CacheContainer;
#endif
typedef typename CacheContainer::key_to_value_type::iterator CacheIterator;
typedef typename CacheContainer::key_to_value_type::const_iterator ConstCacheIterator;
static std::list<EntryTypePtr> & getValueFromIterator(CacheIterator it)
{
return it->second;
}
#endif // NATRON_CACHE_USE_BOOST
#else // !USE_VARIADIC_TEMPLATES
#ifdef NATRON_CACHE_USE_BOOST
#ifdef NATRON_CACHE_USE_HASH
typedef BoostLRUHashTable<hash_type, EntryTypePtr> CacheContainer;
#else
typedef BoostLRUHashTable<hash_type, EntryTypePtr> CacheContainer;
#endif
typedef typename CacheContainer::container_type::left_iterator CacheIterator;
typedef typename CacheContainer::container_type::left_const_iterator ConstCacheIterator;
static std::list<EntryTypePtr> & getValueFromIterator(CacheIterator it)
{
return it->second;
}
#else // cache use STL and tree (std map)
typedef StlLRUHashTable<hash_type, EntryTypePtr> CacheContainer;
typedef typename CacheContainer::key_to_value_type::iterator CacheIterator;
typedef typename CacheContainer::key_to_value_type::const_iterator ConstCacheIterator;
static std::list<EntryTypePtr> & getValueFromIterator(CacheIterator it)
{
return it->second.first;
}
#endif // NATRON_CACHE_USE_BOOST
#endif // USE_VARIADIC_TEMPLATES
private:
std::size_t _maximumInMemorySize; // the maximum size of the in-memory portion of the cache.(in % of the maximum cache size)
std::size_t _maximumCacheSize; // maximum size allowed for the cache
/*mutable because we need to change modify it in the sealEntryInternal function which
is called by an external object that have a const ref to the cache.
*/
mutable std::size_t _memoryCacheSize; // current size of the cache in bytes
mutable std::size_t _diskCacheSize;
mutable QMutex _sizeLock; // protects _memoryCacheSize & _diskCacheSize & _maximumInMemorySize & _maximumCacheSize
mutable QMutex _lock; //protects _memoryCache & _diskCache
mutable QMutex _getLock; //prevents get() and getOrCreate() to be called simultaneously
/*These 2 are mutable because we need to modify the LRU list even
when we call get() and we want this function to be const.*/
mutable CacheContainer _memoryCache;
mutable CacheContainer _diskCache;
const std::string _cacheName;
const unsigned int _version;
/*mutable because it doesn't hold any data, it just emits signals but signals cannot
be const somehow .*/
mutable CacheSignalEmitterPtr _signalEmitter;
///Store the system physical total RAM in a member
std::size_t _maxPhysicalRAM;
bool _tearingDown;
mutable DeleterThread<EntryType> _deleterThread;
mutable QWaitCondition _memoryFullCondition; //< protected by _sizeLock
mutable CacheCleanerThread _cleanerThread;
// If tiled, the cache will consist only of a few large files that each contain tiles of the same size.
// This is useful to cache chunks of data that always have the same size.
mutable QMutex _tileCacheMutex;
bool _isTiled;
std::size_t _tileByteSize;
// True when clearing the cache, protected by _tileCacheMutex
bool _clearingCache;
// Used when the cache is tiled
std::set<TileCacheFilePtr> _cacheFiles;
// When set these are used for fast search of a free tile
TileCacheFileWPtr _nextAvailableCacheFile;
int _nextAvailableCacheFileIndex;
public:
/**
 * @brief Construct a cache with the given identity and size budget.
 * @param cacheName Name used to identify this cache (also used for its on-disk location).
 * @param version Cache format version; bump to invalidate previously serialized entries.
 * @param maximumCacheSize Total budget in bytes (RAM + disk combined).
 * @param maximumInMemoryPercentage Fraction of the total budget allowed to live in RAM.
 **/
Cache(const std::string & cacheName,
      unsigned int version,
      U64 maximumCacheSize, // total size
      double maximumInMemoryPercentage //how much should live in RAM
      )
    : CacheAPI()
    , _maximumInMemorySize(maximumCacheSize * maximumInMemoryPercentage)
    , _maximumCacheSize(maximumCacheSize)
    , _memoryCacheSize(0)
    , _diskCacheSize(0)
    , _sizeLock()
    , _lock()
    , _getLock()
    , _memoryCache()
    , _diskCache()
    , _cacheName(cacheName)
    , _version(version)
    , _signalEmitter()
    , _maxPhysicalRAM( getSystemTotalRAM() ) // sampled once; used for eviction heuristics
    , _tearingDown(false)
    , _deleterThread(this)
    , _memoryFullCondition()
    , _cleanerThread(this)
    , _tileCacheMutex()
    , _isTiled(false) // tile mode is opted into later via setTiled()
    , _tileByteSize(0)
    , _clearingCache(false)
    , _cacheFiles()
    , _nextAvailableCacheFile()
    , _nextAvailableCacheFileIndex(-1) // -1 means "no cached free-tile hint"
{
    _signalEmitter = boost::make_shared<CacheSignalEmitter>();
}
virtual ~Cache()
{
    // Hold the cache lock for the whole teardown so no other thread can
    // look up entries while the containers are being destroyed.
    QMutexLocker locker(&_lock);

    // Entries may check this flag during destruction to skip the usual
    // bookkeeping/signaling that is pointless (or unsafe) mid-teardown.
    _tearingDown = true;
    _memoryCache.clear();
    _diskCache.clear();
}
/// @return whether this cache operates in tile mode (see setTiled()).
virtual bool isTileCache() const OVERRIDE FINAL
{
    QMutexLocker guard(&_tileCacheMutex);

    return _isTiled;
}
/// @return the fixed size, in bytes, of a single tile (0 when not in tile mode).
virtual std::size_t getTileSizeBytes() const OVERRIDE FINAL
{
    QMutexLocker guard(&_tileCacheMutex);

    return _tileByteSize;
}
/**
 * @brief Switch the cache into (or out of) tile mode.
 * In tile mode the cache is backed by a small number of large files, each
 * holding fixed-size tiles of tileByteSize bytes — useful when all cached
 * chunks of data share the same size.
 **/
void setTiled(bool tiled, std::size_t tileByteSize)
{
    QMutexLocker guard(&_tileCacheMutex);
    _isTiled = tiled;
    _tileByteSize = tileByteSize;
}
/// Shut down both background threads, blocking until each has drained its queue.
void waitForDeleterThread()
{
    _deleterThread.quitThread();
    _cleanerThread.quitThread();
}
/**
 * @brief Look up every cache entry whose key matches 'key'.
 * Several entries can share a key while differing in their non-identifying
 * parameters, hence the list output.
 * @param key The key identifying the entries we are looking for.
 * @param [out] returnValue Receives the matching entries when the lookup
 * succeeds; left untouched otherwise.
 * @returns True when at least one matching entry was found, false otherwise.
 **/
bool get(const typename EntryType::key_type & key,
         std::list<EntryTypePtr>* returnValue) const
{
    // Serialize against getOrCreate() so an entry cannot be created by
    // another thread between our lookup and the caller's next action.
    QMutexLocker atomicGuard(&_getLock);

    // The cache containers themselves are protected by _lock.
    QMutexLocker cacheGuard(&_lock);

    return getInternal(key, returnValue);
} // get
private:
/**
 * @brief Retrieve (or reopen) the tile-cache file at 'filepath' and mark the tile
 * at 'dataOffset' as used. Called when restoring serialized tiled entries.
 * @param filepath Path of the cache-part file on disk.
 * @param dataOffset Byte offset of the tile inside the file; must be a multiple of the tile size.
 * @return The cache file holding the tile, or a null pointer when the file no longer exists.
 **/
virtual TileCacheFilePtr getTileCacheFile(const std::string& filepath, std::size_t dataOffset) OVERRIDE FINAL WARN_UNUSED_RETURN
{
    QMutexLocker k(&_tileCacheMutex);

    assert(_isTiled);
    if (!_isTiled) {
        // BUGFIX: the message previously named allocTile(), misleading any caller who hits it.
        throw std::logic_error("getTileCacheFile() but cache is not tiled!");
    }

    // Fast path: the file is already opened by this cache.
    for (std::set<TileCacheFilePtr>::iterator it = _cacheFiles.begin(); it != _cacheFiles.end(); ++it) {
        if ((*it)->file->path() == filepath) {
            int index = dataOffset / _tileByteSize;

            // The dataOffset should be a multiple of the tile size
            assert(_tileByteSize * index == dataOffset);
            // BUGFIX: bounds-check the index like the reopen branch below does.
            assert(index >= 0 && index < (int)(*it)->usedTiles.size());
            assert(!(*it)->usedTiles[index]);
            (*it)->usedTiles[index] = true;

            return *it;
        }
    }

    if (!fileExists(filepath)) {
        return TileCacheFilePtr();
    } else {
        // Reopen an existing cache-part file from disk and claim the requested tile.
        TileCacheFilePtr ret = boost::make_shared<TileCacheFile>();
        ret->file = boost::make_shared<MemoryFile>(filepath, MemoryFile::eFileOpenModeEnumIfExistsKeepElseFail);
        std::size_t nTilesPerFile = std::floor( ( (double)NATRON_TILE_CACHE_FILE_SIZE_BYTES ) / _tileByteSize );
        ret->usedTiles.resize(nTilesPerFile, false);
        int index = dataOffset / _tileByteSize;

        // The dataOffset should be a multiple of the tile size
        assert(_tileByteSize * index == dataOffset);
        assert(index >= 0 && index < (int)ret->usedTiles.size());
        assert(!ret->usedTiles[index]);
        ret->usedTiles[index] = true;

        _cacheFiles.insert(ret);

        return ret;
    }
}
/**
 * @brief Relevant only for tiled caches. This will allocate the memory required for a tile in the cache and lock it.
 * Note that the calling entry should have exactly the size of a tile in the cache.
 * In return, a pointer to a memory file is returned and the output parameter dataOffset will be set to the offset - in bytes - where the
 * contiguous memory block for this tile begin relative to the start of the data of the memory file.
 * This function may throw exceptions in case of failure.
 * To retrieve the exact pointer of the block of memory for this tile use tileFile->file->data() + dataOffset
 **/
virtual TileCacheFilePtr allocTile(std::size_t *dataOffset) OVERRIDE FINAL
{
    QMutexLocker k(&_tileCacheMutex);

    assert(_isTiled);
    if (!_isTiled) {
        throw std::logic_error("allocTile() but cache is not tiled!");
    }
    // First, search for a file with available space.
    // If not found create one
    TileCacheFilePtr foundAvailableFile;
    int foundTileIndex = -1;
    {
        // Fast path: freeTile() records a hint (file + index) of the most
        // recently freed slot; consume it if both parts are still valid.
        foundAvailableFile = _nextAvailableCacheFile.lock();
        if (_nextAvailableCacheFileIndex != -1 && foundAvailableFile) {
            foundTileIndex = _nextAvailableCacheFileIndex;
            *dataOffset = foundTileIndex * _tileByteSize;
            // The hint is single-use: clear it so the next allocation scans again.
            _nextAvailableCacheFileIndex = -1;
            _nextAvailableCacheFile.reset();
        } else {
            foundTileIndex = -1;
            foundAvailableFile.reset();
        }
    }

    if (foundTileIndex == -1) {
        // Slow path: linear scan over every file's used-tile bitmap for a free slot.
        for (std::set<TileCacheFilePtr>::iterator it = _cacheFiles.begin(); it != _cacheFiles.end(); ++it) {
            for (std::size_t i = 0; i < (*it)->usedTiles.size(); ++i) {
                if (!(*it)->usedTiles[i]) {
                    foundTileIndex = i;
                    *dataOffset = i * _tileByteSize;
                    break;
                }
            }
            if (foundTileIndex != -1) {
                foundAvailableFile = *it;
                break;
            }
        }
    }

    if (!foundAvailableFile) {
        // Create a file if all space is taken
        foundAvailableFile = boost::make_shared<TileCacheFile>();
        // Name each cache part after its creation rank: CachePart0, CachePart1, ...
        int nCacheFiles = (int)_cacheFiles.size();
        std::stringstream cacheFilePathSs;
        cacheFilePathSs << getCachePath().toStdString() << "/CachePart" << nCacheFiles;
        std::string cacheFilePath = cacheFilePathSs.str();
        foundAvailableFile->file = boost::make_shared<MemoryFile>(cacheFilePath, MemoryFile::eFileOpenModeEnumIfExistsKeepElseCreate);

        // Size the file to the largest whole number of tiles that fits the budget.
        std::size_t nTilesPerFile = std::floor(((double)NATRON_TILE_CACHE_FILE_SIZE_BYTES) / _tileByteSize);
        std::size_t cacheFileSize = nTilesPerFile * _tileByteSize;
        foundAvailableFile->file->resize(cacheFileSize);
        foundAvailableFile->usedTiles.resize(nTilesPerFile, false);
        *dataOffset = 0;
        foundTileIndex = 0;
        _cacheFiles.insert(foundAvailableFile);
        // Tile 0 is being handed out right now, so the next free slot is tile 1.
        _nextAvailableCacheFile = foundAvailableFile;
        _nextAvailableCacheFileIndex = 1;
    }

    // Notify the memory file that this portion of the file is valid
    foundAvailableFile->usedTiles[foundTileIndex] = true;

    return foundAvailableFile;
}
/**
 * @brief Free a tile from the cache that was previously allocated with allocTile. It will be made available again for other entries.
 * @param file The cache file returned by allocTile.
 * @param dataOffset Byte offset of the tile inside that file; must be a multiple of the tile size.
 **/
virtual void freeTile(const TileCacheFilePtr& file, std::size_t dataOffset) OVERRIDE FINAL
{
    QMutexLocker k(&_tileCacheMutex);

    assert(_isTiled);
    if (!_isTiled) {
        // BUGFIX: the message previously named allocTile(), misleading any caller who hits it.
        throw std::logic_error("freeTile() but cache is not tiled!");
    }

    std::set<TileCacheFilePtr>::iterator foundTileFile = _cacheFiles.find(file);
    assert(foundTileFile != _cacheFiles.end());
    if (foundTileFile == _cacheFiles.end()) {
        return;
    }
    int index = dataOffset / _tileByteSize;

    // The dataOffset should be a multiple of the tile size
    assert(_tileByteSize * index == dataOffset);
    assert(index >= 0 && index < (int)(*foundTileFile)->usedTiles.size());
    assert((*foundTileFile)->usedTiles[index]);
    (*foundTileFile)->usedTiles[index] = false;

    // If the file does not have any tile associated, remove it
    // A use_count of 2 means that the tile file is only referenced by the cache itself and the entry calling
    // the freeTile() function, hence once its freed, no tile should be using it anymore
    if ((*foundTileFile).use_count() <= 2) {
        // Do not remove the file except if we are clearing the cache
        if (_clearingCache) {
            (*foundTileFile)->file->remove();
            _cacheFiles.erase(foundTileFile);
        } else {
            // Invalidate this portion of the cache
            (*foundTileFile)->file->flush(MemoryFile::eFlushTypeInvalidate, (*foundTileFile)->file->data() + dataOffset, _tileByteSize);
        }
    } else {
        // Record a single-use hint so the next allocTile() can skip the linear scan.
        _nextAvailableCacheFile = *foundTileFile;
        _nextAvailableCacheFileIndex = index;
    }
}
/**
 * @brief Allocate a brand new entry for key/params, evicting LRU entries first so
 * the cache stays within its memory and disk budgets, then seal it into the cache.
 * Must be called WITHOUT _lock held.
 * @param key The unique key identifying the new entry.
 * @param params Non-identifying parameters cached alongside the entry.
 * @param entryLocker If non-null, the freshly created entry is locked through it
 * before being exposed to other threads, so the caller can safely allocateMemory().
 * @param [out] returnValue Receives the new entry, or a null pointer when allocation failed.
 **/
void createInternal(const typename EntryType::key_type & key,
                    const ParamsTypePtr & params,
                    ImageLockerHelper<EntryType>* entryLocker,
                    EntryTypePtr* returnValue) const
{
    //_lock must not be taken here

    ///Before allocating the memory check that there's enough space to fit in memory
    appPTR->checkCacheFreeMemoryIsGoodEnough();

    ///Just in case, we don't allow more than X files to be removed at once.
    int safeCounter = 0;
    ///If too many files are opened, fall-back on RAM storage.
    // NOTE(review): _isTiled is read here without _tileCacheMutex; it is only
    // written by setTiled(), presumably before any entry is created — confirm.
    while (!_isTiled && appPTR->isNCacheFilesOpenedCapped() && safeCounter < 1000) {
#ifdef NATRON_DEBUG_CACHE
        qDebug() << "Reached maximum cache files opened limit,clearing last recently used one...";
#endif
        if ( !evictLRUDiskEntry() ) {
            break;
        }
        ++safeCounter;
    }

    U64 memoryCacheSize, maximumInMemorySize;
    {
        QMutexLocker k(&_sizeLock);
        memoryCacheSize = _memoryCacheSize;
        // Clamp to 1 so the occupation ratios below never divide by zero.
        maximumInMemorySize = std::max( (std::size_t)1, _maximumInMemorySize );
    }
    {
        QMutexLocker locker(&_lock);
        std::list<EntryTypePtr> entriesToBeDeleted;
        double occupationPercentage = (double)memoryCacheSize / maximumInMemorySize;

        ///While the current cache size can't fit the new entry, erase the last recently used entries.
        ///Also if the total free RAM is under the limit of the system free RAM to keep free, erase LRU entries.
        while (occupationPercentage > NATRON_CACHE_LIMIT_PERCENT) {
            std::list<EntryTypePtr> deleted;
            if ( !tryEvictInMemoryEntry(deleted) ) {
                break;
            }

            for (typename std::list<EntryTypePtr>::iterator it = deleted.begin(); it != deleted.end(); ++it) {
                entriesToBeDeleted.push_back(*it);
                memoryCacheSize -= (*it)->size();
            }
            occupationPercentage = (double)memoryCacheSize / maximumInMemorySize;
        }

        if ( !entriesToBeDeleted.empty() ) {
            ///Launch a separate thread whose function will be to delete all the entries to be deleted
            _deleterThread.appendToQueue(entriesToBeDeleted);

            ///Clearing the list here will not delete the objects pointing to by the shared_ptr's because we made a copy
            ///that the separate thread will delete
            entriesToBeDeleted.clear();
        }
    }
    {
        //If _maximumcacheSize == 0 we don't return 1 otherwise we would cause a deadlock
        QMutexLocker k(&_sizeLock);
        double occupationPercentage = _maximumCacheSize == 0 ? 0.99 : (double)_memoryCacheSize / _maximumCacheSize;

        //_memoryCacheSize member will get updated while images are being destroyed by the parallel thread.
        //we wait for cache memory occupation to be < 100% to be sure we don't hit swap here
        while ( occupationPercentage >= 1. && _deleterThread.isWorking() ) {
            _memoryFullCondition.wait(&_sizeLock);
            occupationPercentage = _maximumCacheSize == 0 ? 0.99 : (double)_memoryCacheSize / _maximumCacheSize;
        }
    }

    if (_isTiled) {
        QMutexLocker locker(&_lock);

        // For tiled caches, we insert directly into the disk cache, so make sure there is room for it
        std::list<EntryTypePtr> entriesToBeDeleted;
        U64 diskCacheSize, maximumDiskCacheSize;
        {
            QMutexLocker k(&_sizeLock);
            diskCacheSize = _diskCacheSize;
            maximumDiskCacheSize = std::max( (std::size_t)1, _maximumCacheSize - _maximumInMemorySize );
        }
        double diskPercentage = (double)diskCacheSize / maximumDiskCacheSize;
        while (diskPercentage >= NATRON_CACHE_LIMIT_PERCENT) {
            std::list<EntryTypePtr> deleted;
            if ( !tryEvictDiskEntry(deleted) ) {
                break;
            }

            for (typename std::list<EntryTypePtr>::iterator it = deleted.begin(); it != deleted.end(); ++it) {
                diskCacheSize -= (*it)->size();
                entriesToBeDeleted.push_back(*it);
            }
            diskPercentage = (double)diskCacheSize / maximumDiskCacheSize;
        }

        if ( !entriesToBeDeleted.empty() ) {
            ///Launch a separate thread whose function will be to delete all the entries to be deleted
            _deleterThread.appendToQueue(entriesToBeDeleted);

            ///Clearing the list here will not delete the objects pointing to by the shared_ptr's because we made a copy
            ///that the separate thread will delete
            entriesToBeDeleted.clear();
        }
    }
    {
        QMutexLocker locker(&_lock);
        try {
            returnValue->reset( new EntryType(key, params, this ) );

            ///Don't call allocateMemory() here because we're still under the lock and we might force tons of threads to wait unnecesserarily
        } catch (const std::bad_alloc &) {
            *returnValue = EntryTypePtr();
        }

        if (*returnValue) {
            // For a tiled cache, all entries must have the same size.
            // BUGFIX: this assert used to run before the null check and would
            // dereference a null pointer in debug builds when allocation failed.
            assert(!_isTiled || (*returnValue)->getSizeInBytesFromParams() == _tileByteSize);

            // If there is a lock, lock it before exposing the entry to other threads
            if (entryLocker) {
                entryLocker->lock(*returnValue);
            }
            sealEntry(*returnValue, _isTiled ? false : true);
        }
    }
} // createInternal
public:
void swapOrInsert(const EntryTypePtr& entryToBeEvicted,
const EntryTypePtr& newEntry)
{
QMutexLocker locker(&_lock);
const typename EntryType::key_type& key = entryToBeEvicted->getKey();
typename EntryType::hash_type hash = entryToBeEvicted->getHashKey();
///find a matching value in the internal memory container
CacheIterator memoryCached = _memoryCache(hash);
if ( memoryCached != _memoryCache.end() ) {
std::list<EntryTypePtr> & ret = getValueFromIterator(memoryCached);
for (typename std::list<EntryTypePtr>::iterator it = ret.begin(); it != ret.end(); ++it) {
if ( ( (*it)->getKey() == key ) && ( (*it)->getParams() == entryToBeEvicted->getParams() ) ) {
ret.erase(it);
break;
}
}
///Append it
ret.push_back(newEntry);
} else {
///Look in disk cache
CacheIterator diskCached = _diskCache(hash);
if ( diskCached != _diskCache.end() ) {
///Remove the old entry
std::list<EntryTypePtr> & ret = getValueFromIterator(diskCached);
for (typename std::list<EntryTypePtr>::iterator it = ret.begin(); it != ret.end(); ++it) {
if ( ( (*it)->getKey() == key ) && ( (*it)->getParams() == entryToBeEvicted->getParams() ) ) {
ret.erase(it);
break;
}
}
}
///Insert in mem cache
_memoryCache.insert(hash, newEntry);
}
}
/**
* @brief Look-up the cache for an entry whose key matches the 'key' and 'params'.
* Unlike get(...) this function creates a new entry if it couldn't be found.
* Note that this function also takes extra parameters which go along the value_type
* and will be cached. These parameters aren't taken into account for the computation of
* the hash key. It is a safe place to cache any extra data that is relative to an entry,
* but doesn't make it an identifier of that entry. The base class just adds the necessary
* info for the cache to be able to instantiate a new entry (that is the cost and the elements count).
*
* @param key The key identifying the entry we're looking for.
*
* @param params The non unique parameters. They do not help to identify they entry, rather
* this class can be used to cache other parameters along with the value_type.
*
* @param imageLocker A pointer to an ImageLockerHelperBase which will lock the image if it was freshly
* created so that you can call allocateMemory() safely without another thread accessing it.
*
* @param [out] returnValue The returnValue, contains the cache entry.
* Internally the allocation of the new entry might fail on the requested device,
* e.g: if you ask for an entry with a large cost, the cache will try to put the
* entry on disk to preserve it, but if the allocation failed it will fallback
* on RAM instead.
*
* Either way the returnValue parameter can never be NULL.
* @returns True if the cache successfully found an entry matching the key.
* False otherwise.
**/
bool getOrCreate(const typename EntryType::key_type & key,
const ParamsTypePtr & params,
ImageLockerHelper<EntryType>* locker,
EntryTypePtr* returnValue) const
{
///Make sure the shared_ptrs live in this list and are destroyed not while under the lock
///so that the memory freeing (which might be expensive for large images) doesn't happen while under the lock
{
///Be atomic, so it cannot be created by another thread in the meantime
QMutexLocker getlocker(&_getLock);
std::list<EntryTypePtr> entries;
bool didGetSucceed;
{
QMutexLocker locker(&_lock);
didGetSucceed = getInternal(key, &entries);
}
if (didGetSucceed) {
for (typename std::list<EntryTypePtr>::iterator it = entries.begin(); it != entries.end(); ++it) {
if (*(*it)->getParams() == *params) {
*returnValue = *it;
return true;
}
}
}
createInternal(key, params, locker, returnValue);
return false;