comparison mupdf-source/source/fitz/store.c @ 2:b50eed0cc0ef upstream

ADD: MuPDF v1.26.7: the MuPDF source as downloaded by a default build of PyMuPDF 1.26.4. The directory name has changed: the expanded directory no longer carries a version number.
author Franz Glasner <fzglas.hg@dom66.de>
date Mon, 15 Sep 2025 11:43:07 +0200
1:1d09e1dec1d9 2:b50eed0cc0ef
1 // Copyright (C) 2004-2025 Artifex Software, Inc.
2 //
3 // This file is part of MuPDF.
4 //
5 // MuPDF is free software: you can redistribute it and/or modify it under the
6 // terms of the GNU Affero General Public License as published by the Free
7 // Software Foundation, either version 3 of the License, or (at your option)
8 // any later version.
9 //
10 // MuPDF is distributed in the hope that it will be useful, but WITHOUT ANY
11 // WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
12 // FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
13 // details.
14 //
15 // You should have received a copy of the GNU Affero General Public License
16 // along with MuPDF. If not, see <https://www.gnu.org/licenses/agpl-3.0.en.html>
17 //
18 // Alternative licensing terms are available from the licensor.
19 // For commercial licensing, see <https://www.artifex.com/> or contact
20 // Artifex Software, Inc., 39 Mesa Street, Suite 108A, San Francisco,
21 // CA 94129, USA, for further information.
22
23 #include "mupdf/fitz.h"
24
25 #include <assert.h>
26 #include <limits.h>
27 #include <stdio.h>
28 #include <string.h>
29
30 typedef struct fz_item
31 {
32 void *key;
33 fz_storable *val;
34 size_t size;
35 struct fz_item *next;
36 struct fz_item *prev;
37 fz_store *store;
38 const fz_store_type *type;
39 } fz_item;
40
41 /* Every entry in fz_store is protected by the alloc lock */
42 struct fz_store
43 {
44 int refs;
45
46 /* Every item in the store is kept in a doubly linked list, ordered
47 * by usage (so LRU entries are at the end). */
48 fz_item *head;
49 fz_item *tail;
50
51 /* We have a hash table that allows us to quickly find a subset of the
52 * entries (those whose keys are indirect objects). */
53 fz_hash_table *hash;
54
55 /* We keep track of the size of the store, and keep it below max. */
56 size_t max;
57 size_t size;
58
59 int defer_reap_count;
60 int needs_reaping;
61 int scavenging;
62 };
63
64 void
65 fz_new_store_context(fz_context *ctx, size_t max)
66 {
67 fz_store *store;
68 store = fz_malloc_struct(ctx, fz_store);
69 fz_try(ctx)
70 {
71 store->hash = fz_new_hash_table(ctx, 4096, sizeof(fz_store_hash), FZ_LOCK_ALLOC, NULL);
72 }
73 fz_catch(ctx)
74 {
75 fz_free(ctx, store);
76 fz_rethrow(ctx);
77 }
78 store->refs = 1;
79 store->head = NULL;
80 store->tail = NULL;
81 store->size = 0;
82 store->max = max;
83 store->defer_reap_count = 0;
84 store->needs_reaping = 0;
85 ctx->store = store;
86 }
87
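The store limit above is normally chosen once, when the context is created: fz_new_context passes its max_store argument through to fz_new_store_context. A minimal sketch of that entry point (illustrative only, not part of store.c; the 64 MiB figure is arbitrary):

#include "mupdf/fitz.h"

int main(void)
{
	/* Cap the resource store at 64 MiB. FZ_STORE_UNLIMITED and
	 * FZ_STORE_DEFAULT are the usual alternatives. */
	fz_context *ctx = fz_new_context(NULL, NULL, 64 << 20);
	if (!ctx)
		return 1;

	/* ... open documents and render pages here; decoded resources are
	 * cached in the store and evicted LRU-first once the cap is hit ... */

	fz_drop_context(ctx);
	return 0;
}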
88 void *
89 fz_keep_storable(fz_context *ctx, const fz_storable *sc)
90 {
91 /* Explicitly drop const to allow us to use const
92 * sanely throughout the code. */
93 fz_storable *s = (fz_storable *)sc;
94
95 return fz_keep_imp(ctx, s, &s->refs);
96 }
97
98 void *fz_keep_key_storable(fz_context *ctx, const fz_key_storable *sc)
99 {
100 return fz_keep_storable(ctx, &sc->storable);
101 }
102
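For orientation, fz_keep_storable and fz_drop_storable (further down) work on any struct that embeds an fz_storable as its first member, named storable. A hedged sketch of such a type, assuming the FZ_INIT_STORABLE helper from the fitz headers; the my_image names are hypothetical:

typedef struct
{
	fz_storable storable; /* must be the first member */
	int width, height;
	unsigned char *samples;
} my_image;

static void
my_drop_image_imp(fz_context *ctx, fz_storable *stor)
{
	my_image *img = (my_image *)stor;
	fz_free(ctx, img->samples);
	fz_free(ctx, img);
}

static my_image *
my_new_image(fz_context *ctx, int w, int h)
{
	my_image *img = fz_malloc_struct(ctx, my_image);
	FZ_INIT_STORABLE(img, 1, my_drop_image_imp);
	img->width = w;
	img->height = h;
	return img;
}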
103 /*
104 Entered with FZ_LOCK_ALLOC held.
105 Drops FZ_LOCK_ALLOC.
106 */
107 static void
108 do_reap(fz_context *ctx)
109 {
110 fz_store *store = ctx->store;
111 fz_item *item, *prev, *remove;
112
113 if (store == NULL)
114 {
115 fz_unlock(ctx, FZ_LOCK_ALLOC);
116 return;
117 }
118
119 fz_assert_lock_held(ctx, FZ_LOCK_ALLOC);
120
121 ctx->store->needs_reaping = 0;
122
123 FZ_LOG_DUMP_STORE(ctx, "Before reaping store:\n");
124
125 /* Reap the items */
126 remove = NULL;
127 for (item = store->tail; item; item = prev)
128 {
129 prev = item->prev;
130
131 if (item->type->needs_reap == NULL || item->type->needs_reap(ctx, item->key) == 0)
132 continue;
133
134 /* We have to drop it */
135 store->size -= item->size;
136
137 /* Unlink from the linked list */
138 if (item->next)
139 item->next->prev = item->prev;
140 else
141 store->tail = item->prev;
142 if (item->prev)
143 item->prev->next = item->next;
144 else
145 store->head = item->next;
146
147 /* Remove from the hash table */
148 if (item->type->make_hash_key)
149 {
150 fz_store_hash hash = { NULL };
151 hash.drop = item->val->drop;
152 if (item->type->make_hash_key(ctx, &hash, item->key))
153 fz_hash_remove(ctx, store->hash, &hash);
154 }
155
156 /* Store whether to drop this value or not in 'prev' */
157 if (item->val->refs > 0)
158 (void)Memento_dropRef(item->val);
159 item->prev = (item->val->refs > 0 && --item->val->refs == 0) ? item : NULL;
160
161 /* Store it in our removal chain - just singly linked */
162 item->next = remove;
163 remove = item;
164 }
165 fz_unlock(ctx, FZ_LOCK_ALLOC);
166
167 /* Now drop the remove chain */
168 for (item = remove; item != NULL; item = remove)
169 {
170 remove = item->next;
171
172 /* Drop a reference to the value (freeing if required) */
173 if (item->prev)
174 item->val->drop(ctx, item->val);
175
176 /* Always drop the key, then free the item itself */
177 item->type->drop_key(ctx, item->key);
178 fz_free(ctx, item);
179 }
180 FZ_LOG_DUMP_STORE(ctx, "After reaping store:\n");
181 }
182
183 void fz_drop_key_storable(fz_context *ctx, const fz_key_storable *sc)
184 {
185 /* Explicitly drop const to allow us to use const
186 * sanely throughout the code. */
187 fz_key_storable *s = (fz_key_storable *)sc;
188 int drop;
189 int unlock = 1;
190
191 if (s == NULL)
192 return;
193
194 fz_lock(ctx, FZ_LOCK_ALLOC);
195 assert(s->storable.refs != 0);
196 if (s->storable.refs > 0)
197 {
198 (void)Memento_dropRef(s);
199 drop = --s->storable.refs == 0;
200 if (!drop && s->storable.refs == s->store_key_refs)
201 {
202 if (ctx->store->defer_reap_count > 0)
203 {
204 ctx->store->needs_reaping = 1;
205 }
206 else
207 {
208 do_reap(ctx);
209 unlock = 0;
210 }
211 }
212 }
213 else
214 drop = 0;
215 if (unlock)
216 fz_unlock(ctx, FZ_LOCK_ALLOC);
217 /*
218 If we are dropping the last reference to an object, then
219 it cannot possibly be in the store (as the store always
220 keeps a ref to everything in it, and doesn't drop via
221 this method). So we can simply drop the storable object
222 itself without any operations on the fz_store.
223 */
224 if (drop)
225 s->storable.drop(ctx, &s->storable);
226 }
227
228 void *fz_keep_key_storable_key(fz_context *ctx, const fz_key_storable *sc)
229 {
230 /* Explicitly drop const to allow us to use const
231 * sanely throughout the code. */
232 fz_key_storable *s = (fz_key_storable *)sc;
233
234 if (s == NULL)
235 return NULL;
236
237 fz_lock(ctx, FZ_LOCK_ALLOC);
238 if (s->storable.refs > 0)
239 {
240 (void)Memento_takeRef(s);
241 ++s->storable.refs;
242 ++s->store_key_refs;
243 }
244 fz_unlock(ctx, FZ_LOCK_ALLOC);
245 return s;
246 }
247
248 void fz_drop_key_storable_key(fz_context *ctx, const fz_key_storable *sc)
249 {
250 /* Explicitly drop const to allow us to use const
251 * sanely throughout the code. */
252 fz_key_storable *s = (fz_key_storable *)sc;
253 int drop;
254
255 if (s == NULL)
256 return;
257
258 fz_lock(ctx, FZ_LOCK_ALLOC);
259 assert(s->store_key_refs > 0 && s->storable.refs >= s->store_key_refs);
260 (void)Memento_dropRef(s);
261 drop = --s->storable.refs == 0;
262 --s->store_key_refs;
263 fz_unlock(ctx, FZ_LOCK_ALLOC);
264 /*
265 If we are dropping the last reference to an object, then
266 it cannot possibly be in the store (as the store always
267 keeps a ref to everything in it, and doesn't drop via
268 this method). So we can simply drop the storable object
269 itself without any operations on the fz_store.
270 */
271 if (drop)
272 s->storable.drop(ctx, &s->storable);
273 }
274
275 static void
276 evict(fz_context *ctx, fz_item *item)
277 {
278 fz_store *store = ctx->store;
279 int drop;
280
281 store->size -= item->size;
282 /* Unlink from the linked list */
283 if (item->next)
284 item->next->prev = item->prev;
285 else
286 store->tail = item->prev;
287 if (item->prev)
288 item->prev->next = item->next;
289 else
290 store->head = item->next;
291
292 /* Drop a reference to the value (freeing if required) */
293 if (item->val->refs > 0)
294 (void)Memento_dropRef(item->val);
295 drop = (item->val->refs > 0 && --item->val->refs == 0);
296
297 /* Remove from the hash table */
298 if (item->type->make_hash_key)
299 {
300 fz_store_hash hash = { NULL };
301 hash.drop = item->val->drop;
302 if (item->type->make_hash_key(ctx, &hash, item->key))
303 fz_hash_remove(ctx, store->hash, &hash);
304 }
305 fz_unlock(ctx, FZ_LOCK_ALLOC);
306 if (drop)
307 item->val->drop(ctx, item->val);
308
309 /* Always drop the key, then free the item itself */
310 item->type->drop_key(ctx, item->key);
311 fz_free(ctx, item);
312 fz_lock(ctx, FZ_LOCK_ALLOC);
313 }
314
315 static size_t
316 ensure_space(fz_context *ctx, size_t tofree)
317 {
318 fz_item *item, *prev;
319 size_t count;
320 fz_store *store = ctx->store;
321 fz_item *to_be_freed = NULL;
322
323 fz_assert_lock_held(ctx, FZ_LOCK_ALLOC);
324
325 /* First check that we *can* free tofree; if not, we'd rather not
326 * cache this. */
327 count = 0;
328 for (item = store->tail; item; item = item->prev)
329 {
330 if (item->val->refs == 1)
331 {
332 count += item->size;
333 if (count >= tofree)
334 break;
335 }
336 }
337
338 /* If we ran out of items to search, then we can never free enough */
339 if (item == NULL)
340 {
341 return 0;
342 }
343
344 /* Now move all the items to be freed onto 'to_be_freed' */
345 count = 0;
346 for (item = store->tail; item; item = prev)
347 {
348 prev = item->prev;
349 if (item->val->refs != 1)
350 continue;
351
352 store->size -= item->size;
353
354 /* Unlink from the linked list */
355 if (item->next)
356 item->next->prev = item->prev;
357 else
358 store->tail = item->prev;
359 if (item->prev)
360 item->prev->next = item->next;
361 else
362 store->head = item->next;
363
364 /* Remove from the hash table */
365 if (item->type->make_hash_key)
366 {
367 fz_store_hash hash = { NULL };
368 hash.drop = item->val->drop;
369 if (item->type->make_hash_key(ctx, &hash, item->key))
370 fz_hash_remove(ctx, store->hash, &hash);
371 }
372
373 /* Link into to_be_freed */
374 item->next = to_be_freed;
375 to_be_freed = item;
376
377 count += item->size;
378 if (count >= tofree)
379 break;
380 }
381
382 /* Now we can safely drop the lock and free our pending items. These
383 * have all been removed from both the store list and the hash table,
384 * so they can't be 'found' by anyone else in the meantime. */
385
386 while (to_be_freed)
387 {
388 int drop;
389
390 item = to_be_freed;
391 to_be_freed = to_be_freed->next;
392
393 /* Drop a reference to the value (freeing if required) */
394 if (item->val->refs > 0)
395 (void)Memento_dropRef(item->val);
396 drop = (item->val->refs > 0 && --item->val->refs == 0);
397
398 fz_unlock(ctx, FZ_LOCK_ALLOC);
399 if (drop)
400 item->val->drop(ctx, item->val);
401
402 /* Always drop the key, then free the item itself */
403 item->type->drop_key(ctx, item->key);
404 fz_free(ctx, item);
405 fz_lock(ctx, FZ_LOCK_ALLOC);
406 }
407
408 return count;
409 }
410
411 static void
412 touch(fz_store *store, fz_item *item)
413 {
414 if (item->next != item)
415 {
416 /* Already in the list - unlink it */
417 if (item->next)
418 item->next->prev = item->prev;
419 else
420 store->tail = item->prev;
421 if (item->prev)
422 item->prev->next = item->next;
423 else
424 store->head = item->next;
425 }
426 /* Now relink it at the start of the LRU chain */
427 item->next = store->head;
428 if (item->next)
429 item->next->prev = item;
430 else
431 store->tail = item;
432 store->head = item;
433 item->prev = NULL;
434 }
435
436 void *
437 fz_store_item(fz_context *ctx, void *key, void *val_, size_t itemsize, const fz_store_type *type)
438 {
439 fz_item *item = NULL;
440 size_t size;
441 fz_storable *val = (fz_storable *)val_;
442 fz_store *store = ctx->store;
443 fz_store_hash hash = { NULL };
444 int use_hash = 0;
445
446 if (!store)
447 return NULL;
448
449 /* If we fail for any reason, we swallow the exception and continue.
450 * All the caller will see is that we failed to store
451 * the item. */
452
453 item = Memento_label(fz_malloc_no_throw(ctx, sizeof (fz_item)), "fz_item");
454 if (!item)
455 return NULL;
456 memset(item, 0, sizeof (fz_item));
457
458 if (type->make_hash_key)
459 {
460 hash.drop = val->drop;
461 use_hash = type->make_hash_key(ctx, &hash, key);
462 }
463
464 type->keep_key(ctx, key);
465 fz_lock(ctx, FZ_LOCK_ALLOC);
466
467 /* Fill out the item. To start with, we always set item->next == item
468 * and item->prev == item. This is so that we can spot items that have
469 * been put into the hash table without having made it into the linked
470 * list yet. */
471 item->key = key;
472 item->val = val;
473 item->size = itemsize;
474 item->next = item;
475 item->prev = item;
476 item->type = type;
477
478 /* If we can index it fast, put it into the hash table. This serves
479 * to check whether we have one there already. */
480 if (use_hash)
481 {
482 fz_item *existing = NULL;
483
484 fz_try(ctx)
485 {
486 /* May drop and retake the lock */
487 existing = fz_hash_insert(ctx, store->hash, &hash, item);
488 }
489 fz_catch(ctx)
490 {
491 /* Any error here means that item never made it into the
492 * hash - so no one else can have a reference. */
493 fz_unlock(ctx, FZ_LOCK_ALLOC);
494 fz_free(ctx, item);
495 type->drop_key(ctx, key);
496 return NULL;
497 }
498 if (existing)
499 {
500 /* There was one there already! Take a new reference
501 * to the existing one, and drop our current one. */
502 fz_warn(ctx, "found duplicate %s in the store", type->name);
503 touch(store, existing);
504 if (existing->val->refs > 0)
505 {
506 (void)Memento_takeRef(existing->val);
507 existing->val->refs++;
508 }
509 fz_unlock(ctx, FZ_LOCK_ALLOC);
510 fz_free(ctx, item);
511 type->drop_key(ctx, key);
512 return existing->val;
513 }
514 }
515
516 /* Now bump the ref */
517 if (val->refs > 0)
518 {
519 (void)Memento_takeRef(val);
520 val->refs++;
521 }
522
523 /* If we haven't got an infinite store, check for space within it */
524 if (store->max != FZ_STORE_UNLIMITED)
525 {
526 /* FIXME: Overflow? */
527 size = store->size + itemsize;
528 if (size > store->max)
529 {
530 FZ_LOG_STORE(ctx, "Store size exceeded: item=%zu, size=%zu, max=%zu\n",
531 itemsize, store->size, store->max);
532 while (size > store->max)
533 {
534 size_t saved;
535
536 /* First, do any outstanding reaping, even if defer_reap_count > 0 */
537 if (store->needs_reaping)
538 {
539 do_reap(ctx); /* Drops alloc lock */
540 fz_lock(ctx, FZ_LOCK_ALLOC);
541 }
542 size = store->size + itemsize;
543 if (size <= store->max)
544 break;
545
546 /* ensure_space may drop, then retake the lock */
547 saved = ensure_space(ctx, size - store->max);
548 size -= saved;
549 if (saved == 0)
550 {
551 /* Failed to free any space. */
552 /* We used to 'unstore' it here, but that's wrong.
553 * If we've already spent the memory to malloc it
554 * then not putting it in the store just means that
555 * a resource used multiple times will just be malloced
556 * again. Better to put it in the store, have the
557 * store account for it, and for it to potentially be reused.
558 * When the caller drops the reference to it, it can then
559 * be dropped from the store on the next attempt to store
560 * anything else. */
561 break;
562 }
563 }
564 FZ_LOG_DUMP_STORE(ctx, "After eviction:\n");
565 }
566 }
567 store->size += itemsize;
568
569 /* Regardless of whether it's indexed, it goes into the linked list */
570 touch(store, item);
571 fz_unlock(ctx, FZ_LOCK_ALLOC);
572
573 return NULL;
574 }
575
576 void *
577 fz_find_item(fz_context *ctx, fz_store_drop_fn *drop, void *key, const fz_store_type *type)
578 {
579 fz_item *item;
580 fz_store *store = ctx->store;
581 fz_store_hash hash = { NULL };
582 int use_hash = 0;
583
584 if (!store)
585 return NULL;
586
587 if (!key)
588 return NULL;
589
590 if (type->make_hash_key)
591 {
592 hash.drop = drop;
593 use_hash = type->make_hash_key(ctx, &hash, key);
594 }
595
596 fz_lock(ctx, FZ_LOCK_ALLOC);
597 if (use_hash)
598 {
599 /* We can find objects keyed on indirect objects quickly */
600 item = fz_hash_find(ctx, store->hash, &hash);
601 }
602 else
603 {
604 /* Others we have to hunt for slowly */
605 for (item = store->head; item; item = item->next)
606 {
607 if (item->val->drop == drop && !type->cmp_key(ctx, item->key, key))
608 break;
609 }
610 }
611 if (item)
612 {
613 /* LRU the block. This also serves to ensure that any item
614 * picked up from the hash before it has made it into the
615 * linked list does not get whipped out again due to the
616 * store being full. */
617 touch(store, item);
618 /* And bump the refcount before returning */
619 if (item->val->refs > 0)
620 {
621 (void)Memento_takeRef(item->val);
622 item->val->refs++;
623 }
624 fz_unlock(ctx, FZ_LOCK_ALLOC);
625 return (void *)item->val;
626 }
627 fz_unlock(ctx, FZ_LOCK_ALLOC);
628
629 return NULL;
630 }
631
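fz_find_item and fz_store_item are designed to be used together by resource loaders: look the key up first, and if a freshly built value collides with one that another thread stored in the meantime, adopt the stored copy. A sketch of that pattern; my_type, my_key, my_value, my_build_value, my_value_size, my_drop_value and my_drop_value_imp are hypothetical stand-ins for a concrete fz_store_type and its value type:

my_value *
lookup_or_build(fz_context *ctx, my_key *key)
{
	my_value *val, *existing;

	/* Fast path: fz_find_item returns a new reference, or NULL on a miss. */
	val = fz_find_item(ctx, my_drop_value_imp, key, &my_type);
	if (val)
		return val;

	val = my_build_value(ctx, key);

	/* Offer it to the store. A non-NULL return is an already-stored
	 * duplicate (returned with a reference taken for us), so keep that
	 * one and drop ours. */
	existing = fz_store_item(ctx, key, val, my_value_size(ctx, val), &my_type);
	if (existing)
	{
		my_drop_value(ctx, val);
		val = existing;
	}
	return val;
}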
632 void
633 fz_remove_item(fz_context *ctx, fz_store_drop_fn *drop, void *key, const fz_store_type *type)
634 {
635 fz_item *item;
636 fz_store *store = ctx->store;
637 int dodrop;
638 fz_store_hash hash = { NULL };
639 int use_hash = 0;
640
641 if (type->make_hash_key)
642 {
643 hash.drop = drop;
644 use_hash = type->make_hash_key(ctx, &hash, key);
645 }
646
647 fz_lock(ctx, FZ_LOCK_ALLOC);
648 if (use_hash)
649 {
650 /* We can find objects keyed on indirect objects quickly */
651 item = fz_hash_find(ctx, store->hash, &hash);
652 if (item)
653 fz_hash_remove(ctx, store->hash, &hash);
654 }
655 else
656 {
657 /* Others we have to hunt for slowly */
658 for (item = store->head; item; item = item->next)
659 if (item->val->drop == drop && !type->cmp_key(ctx, item->key, key))
660 break;
661 }
662 if (item)
663 {
664 /* Momentarily things can be in the hash table without being
665 * in the list. Don't attempt to unlink these. We indicate
666 * such items by setting item->next == item. */
667 if (item->next != item)
668 {
669 if (item->next)
670 item->next->prev = item->prev;
671 else
672 store->tail = item->prev;
673 if (item->prev)
674 item->prev->next = item->next;
675 else
676 store->head = item->next;
677 }
678 if (item->val->refs > 0)
679 (void)Memento_dropRef(item->val);
680 dodrop = (item->val->refs > 0 && --item->val->refs == 0);
681 fz_unlock(ctx, FZ_LOCK_ALLOC);
682 if (dodrop)
683 item->val->drop(ctx, item->val);
684 type->drop_key(ctx, item->key);
685 fz_free(ctx, item);
686 }
687 else
688 fz_unlock(ctx, FZ_LOCK_ALLOC);
689 }
690
691 void
692 fz_empty_store(fz_context *ctx)
693 {
694 fz_store *store = ctx->store;
695
696 if (store == NULL)
697 return;
698
699 fz_lock(ctx, FZ_LOCK_ALLOC);
700 /* Run through all the items in the store */
701 while (store->head)
702 evict(ctx, store->head); /* Drops then retakes lock */
703 fz_unlock(ctx, FZ_LOCK_ALLOC);
704 }
705
706 fz_store *
707 fz_keep_store_context(fz_context *ctx)
708 {
709 if (ctx == NULL || ctx->store == NULL)
710 return NULL;
711 return fz_keep_imp(ctx, ctx->store, &ctx->store->refs);
712 }
713
714 void
715 fz_drop_store_context(fz_context *ctx)
716 {
717 if (!ctx)
718 return;
719 if (fz_drop_imp(ctx, ctx->store, &ctx->store->refs))
720 {
721 fz_empty_store(ctx);
722 fz_drop_hash_table(ctx, ctx->store->hash);
723 fz_free(ctx, ctx->store);
724 ctx->store = NULL;
725 }
726 }
727
728 static void
729 fz_debug_store_item(fz_context *ctx, void *state, void *key_, int keylen, void *item_)
730 {
731 unsigned char *key = key_;
732 fz_item *item = item_;
733 int i;
734 char buf[256];
735 fz_output *out = (fz_output *)state;
736 fz_unlock(ctx, FZ_LOCK_ALLOC);
737 item->type->format_key(ctx, buf, sizeof buf, item->key);
738 fz_lock(ctx, FZ_LOCK_ALLOC);
739 fz_write_printf(ctx, out, "STORE\thash[");
740 for (i=0; i < keylen; ++i)
741 fz_write_printf(ctx, out,"%02x", key[i]);
742 fz_write_printf(ctx, out, "][refs=%d][size=%d] key=%s val=%p\n", item->val->refs, (int)item->size, buf, (void *)item->val);
743 }
744
745 static void
746 fz_debug_store_locked(fz_context *ctx, fz_output *out)
747 {
748 fz_item *item, *next;
749 char buf[256];
750 fz_store *store = ctx->store;
751 size_t list_total = 0;
752
753 fz_write_printf(ctx, out, "STORE\t-- resource store contents --\n");
754
755 for (item = store->head; item; item = next)
756 {
757 next = item->next;
758 if (next)
759 {
760 (void)Memento_takeRef(next->val);
761 next->val->refs++;
762 }
763 fz_unlock(ctx, FZ_LOCK_ALLOC);
764 item->type->format_key(ctx, buf, sizeof buf, item->key);
765 fz_lock(ctx, FZ_LOCK_ALLOC);
766 fz_write_printf(ctx, out, "STORE\tstore[*][refs=%d][size=%d] key=%s val=%p\n",
767 item->val->refs, (int)item->size, buf, (void *)item->val);
768 list_total += item->size;
769 if (next)
770 {
771 (void)Memento_dropRef(next->val);
772 next->val->refs--;
773 }
774 }
775
776 fz_write_printf(ctx, out, "STORE\t-- resource store hash contents --\n");
777 fz_hash_for_each(ctx, store->hash, out, fz_debug_store_item);
778 fz_write_printf(ctx, out, "STORE\t-- end --\n");
779
780 fz_write_printf(ctx, out, "STORE\tmax=%zu, size=%zu, actual size=%zu\n", store->max, store->size, list_total);
781 }
782
783 void
784 fz_debug_store(fz_context *ctx, fz_output *out)
785 {
786 fz_lock(ctx, FZ_LOCK_ALLOC);
787 fz_debug_store_locked(ctx, out);
788 fz_unlock(ctx, FZ_LOCK_ALLOC);
789 }
790
791 /*
792 Consider if we have blocks of the following sizes in the store, from oldest
793 to newest:
794
795 A 32
796 B 64
797 C 128
798 D 256
799
800 Further suppose we need to free 97 bytes. Naively freeing blocks until we have
801 freed enough would mean we'd free A, B and C, when we could have freed just C.
802
803 We are forced into an n^2 algorithm by the need to drop the lock as part of the
804 eviction, so we might as well embrace it and go for a solution that properly
805 drops just C.
806
807 The algorithm used is to scan the list of blocks from oldest to newest, counting
808 how many bytes we can free in the blocks we pass. We stop this scan when we have
809 found enough blocks. We then free the largest block. This releases the lock
810 momentarily, which means we have to start the scan process all over again, so
811 we repeat. This guarantees we evict only a minimal number of blocks, but does mean we
812 scan more blocks than we'd ideally like.
813 */
814 static int
815 scavenge(fz_context *ctx, size_t tofree)
816 {
817 fz_store *store = ctx->store;
818 size_t freed = 0;
819 fz_item *item;
820
821 if (store->scavenging)
822 return 0;
823
824 store->scavenging = 1;
825
826 do
827 {
828 /* Count through a suffix of objects in the store until
829 * we find enough to give us what we need to evict. */
830 size_t suffix_size = 0;
831 fz_item *largest = NULL;
832
833 for (item = store->tail; item; item = item->prev)
834 {
835 if (item->val->refs == 1 && (item->val->droppable == NULL || item->val->droppable(ctx, item->val)))
836 {
837 /* This one is evictable */
838 suffix_size += item->size;
839 if (largest == NULL || item->size > largest->size)
840 largest = item;
841 if (suffix_size >= tofree - freed)
842 break;
843 }
844 }
845
846 /* If there are no evictable blocks, we can't find anything to free. */
847 if (largest == NULL)
848 break;
849
850 /* Free largest. */
851 if (freed == 0) {
852 FZ_LOG_DUMP_STORE(ctx, "Before scavenge:\n");
853 }
854 freed += largest->size;
855 evict(ctx, largest); /* Drops then retakes lock */
856 }
857 while (freed < tofree);
858
859 if (freed != 0) {
860 FZ_LOG_DUMP_STORE(ctx, "After scavenge:\n");
861 }
862 store->scavenging = 0;
863 /* Success is managing to evict any blocks */
864 return freed != 0;
865 }
866
867 void
868 fz_drop_storable(fz_context *ctx, const fz_storable *sc)
869 {
870 /* Explicitly drop const to allow us to use const
871 * sanely throughout the code. */
872 fz_storable *s = (fz_storable *)sc;
873 int num;
874
875 if (s == NULL)
876 return;
877
878 fz_lock(ctx, FZ_LOCK_ALLOC);
879 /* Drop the ref, and leave num as being the number of
880 * refs left (-1 meaning "statically allocated"). */
881 if (s->refs > 0)
882 {
883 (void)Memento_dropIntRef(s);
884 num = --s->refs;
885 }
886 else
887 num = -1;
888
889 /* If we have just 1 ref left, it's possible that
890 * this ref is held by the store. If the store is
891 * oversized, we ought to throw any such references
892 * away to try to bring the store down to a "legal"
893 * size. Run a scavenge to check for this case. */
894 if (ctx->store->max != FZ_STORE_UNLIMITED)
895 if (num == 1 && ctx->store->size > ctx->store->max)
896 scavenge(ctx, ctx->store->size - ctx->store->max);
897 fz_unlock(ctx, FZ_LOCK_ALLOC);
898
899 /* If we have no references to an object left, then
900 * it cannot possibly be in the store (as the store always
901 * keeps a ref to everything in it, and doesn't drop via
902 * this method). So we can simply drop the storable object
903 * itself without any operations on the fz_store.
904 */
905 if (num == 0)
906 s->drop(ctx, s);
907 }
908
909 int fz_store_scavenge_external(fz_context *ctx, size_t size, int *phase)
910 {
911 int ret;
912
913 fz_lock(ctx, FZ_LOCK_ALLOC);
914 ret = fz_store_scavenge(ctx, size, phase);
915 fz_unlock(ctx, FZ_LOCK_ALLOC);
916
917 return ret;
918 }
919
920 int fz_store_scavenge(fz_context *ctx, size_t size, int *phase)
921 {
922 fz_store *store;
923 size_t max;
924
925 store = ctx->store;
926 if (store == NULL)
927 return 0;
928
929 #ifdef DEBUG_SCAVENGING
930 fz_write_printf(ctx, fz_stdout(ctx), "Scavenging: store=%zu size=%zu phase=%d\n", store->size, size, *phase);
931 fz_debug_store_locked(ctx, fz_stdout(ctx));
932 Memento_stats();
933 #endif
934 do
935 {
936 size_t tofree;
937
938 /* Calculate 'max' as the maximum size of the store for this phase */
939 if (*phase >= 16)
940 max = 0;
941 else if (store->max != FZ_STORE_UNLIMITED)
942 max = store->max / 16 * (16 - *phase);
943 else
944 max = store->size / (16 - *phase) * (15 - *phase);
945 (*phase)++;
946
947 /* Slightly baroque calculations to avoid overflow */
948 if (size > SIZE_MAX - store->size)
949 tofree = SIZE_MAX - max;
950 else if (size + store->size > max)
951 continue;
952 else
953 tofree = size + store->size - max;
954
955 if (scavenge(ctx, tofree))
956 {
957 #ifdef DEBUG_SCAVENGING
958 fz_write_printf(ctx, fz_stdout(ctx), "scavenged: store=%zu\n", store->size);
959 fz_debug_store(ctx, fz_stdout(ctx));
960 Memento_stats();
961 #endif
962 return 1;
963 }
964 }
965 while (max > 0);
966
967 #ifdef DEBUG_SCAVENGING
968 fz_write_printf(ctx, fz_stdout(ctx), "scavenging failed\n");
969 fz_debug_store(ctx, fz_stdout(ctx));
970 Memento_listBlocks();
971 #endif
972 return 0;
973 }
974
975 int
976 fz_shrink_store(fz_context *ctx, unsigned int percent)
977 {
978 int success;
979 fz_store *store;
980 size_t new_size;
981
982 if (percent >= 100)
983 return 1;
984
985 store = ctx->store;
986 if (store == NULL)
987 return 0;
988
989 #ifdef DEBUG_SCAVENGING
990 fz_write_printf(ctx, fz_stdout(ctx), "fz_shrink_store: %zu\n", store->size/(1024*1024));
991 #endif
992 fz_lock(ctx, FZ_LOCK_ALLOC);
993
994 new_size = (size_t)(((uint64_t)store->size * percent) / 100);
995 if (store->size > new_size)
996 scavenge(ctx, store->size - new_size);
997
998 success = (store->size <= new_size) ? 1 : 0;
999 fz_unlock(ctx, FZ_LOCK_ALLOC);
1000 #ifdef DEBUG_SCAVENGING
1001 fz_write_printf(ctx, fz_stdout(ctx), "fz_shrink_store after: %zu\n", store->size/(1024*1024));
1002 #endif
1003
1004 return success;
1005 }
1006
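fz_shrink_store gives an application a direct lever for memory pressure: it asks the store to evict unshared items until at most the given percentage of its current size remains. A small sketch of such a hook; the handler name is hypothetical:

/* Called from an application-defined low-memory handler. */
static void
my_on_low_memory(fz_context *ctx)
{
	/* Try to shed half of the store; only items whose sole remaining
	 * reference is the store's own can be evicted. */
	if (!fz_shrink_store(ctx, 50))
		fz_warn(ctx, "resource store could not be shrunk to 50%% of its size");
}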
1007 void fz_filter_store(fz_context *ctx, fz_store_filter_fn *fn, void *arg, const fz_store_type *type)
1008 {
1009 fz_store *store;
1010 fz_item *item, *prev, *remove;
1011
1012 store = ctx->store;
1013 if (store == NULL)
1014 return;
1015
1016 fz_lock(ctx, FZ_LOCK_ALLOC);
1017
1018 /* Filter the items */
1019 remove = NULL;
1020 for (item = store->tail; item; item = prev)
1021 {
1022 prev = item->prev;
1023 if (item->type != type)
1024 continue;
1025
1026 if (fn(ctx, arg, item->key) == 0)
1027 continue;
1028
1029 /* We have to drop it */
1030 store->size -= item->size;
1031
1032 /* Unlink from the linked list */
1033 if (item->next)
1034 item->next->prev = item->prev;
1035 else
1036 store->tail = item->prev;
1037 if (item->prev)
1038 item->prev->next = item->next;
1039 else
1040 store->head = item->next;
1041
1042 /* Remove from the hash table */
1043 if (item->type->make_hash_key)
1044 {
1045 fz_store_hash hash = { NULL };
1046 hash.drop = item->val->drop;
1047 if (item->type->make_hash_key(ctx, &hash, item->key))
1048 fz_hash_remove(ctx, store->hash, &hash);
1049 }
1050
1051 /* Store whether to drop this value or not in 'prev' */
1052 if (item->val->refs > 0)
1053 (void)Memento_dropRef(item->val);
1054 item->prev = (item->val->refs > 0 && --item->val->refs == 0) ? item : NULL;
1055
1056 /* Store it in our removal chain - just singly linked */
1057 item->next = remove;
1058 remove = item;
1059 }
1060 fz_unlock(ctx, FZ_LOCK_ALLOC);
1061
1062 /* Now drop the remove chain */
1063 for (item = remove; item != NULL; item = remove)
1064 {
1065 remove = item->next;
1066
1067 /* Drop a reference to the value (freeing if required) */
1068 if (item->prev) /* See above for our abuse of prev here */
1069 item->val->drop(ctx, item->val);
1070
1071 /* Always drop the key, then free the item itself */
1072 item->type->drop_key(ctx, item->key);
1073 fz_free(ctx, item);
1074 }
1075 }
1076
1077 void fz_defer_reap_start(fz_context *ctx)
1078 {
1079 if (ctx->store == NULL)
1080 return;
1081
1082 fz_lock(ctx, FZ_LOCK_ALLOC);
1083 ctx->store->defer_reap_count++;
1084 fz_unlock(ctx, FZ_LOCK_ALLOC);
1085 }
1086
1087 void fz_defer_reap_end(fz_context *ctx)
1088 {
1089 int reap;
1090
1091 if (ctx->store == NULL)
1092 return;
1093
1094 fz_lock(ctx, FZ_LOCK_ALLOC);
1095 --ctx->store->defer_reap_count;
1096 reap = ctx->store->defer_reap_count == 0 && ctx->store->needs_reaping;
1097 if (reap)
1098 do_reap(ctx); /* Drops FZ_LOCK_ALLOC */
1099 else
1100 fz_unlock(ctx, FZ_LOCK_ALLOC);
1101 }
1102
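The defer-reap pair above lets a caller batch many fz_drop_key_storable calls without paying for a reap scan after each one; any needed reap runs once, inside the outermost fz_defer_reap_end. A sketch of the bracketing pattern, with drop_many_objects standing in for whatever actually releases the keys:

fz_defer_reap_start(ctx);
fz_try(ctx)
{
	drop_many_objects(ctx); /* hypothetical: releases many key-storable refs */
}
fz_always(ctx)
{
	fz_defer_reap_end(ctx); /* performs any deferred reap now */
}
fz_catch(ctx)
{
	fz_rethrow(ctx);
}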
1103 #ifdef ENABLE_STORE_LOGGING
1104
1105 void fz_log_dump_store(fz_context *ctx, const char *fmt, ...)
1106 {
1107 fz_output *out;
1108 va_list args;
1109 va_start(args, fmt);
1110 out = fz_new_log_for_module(ctx, "STORE");
1111 fz_write_vprintf(ctx, out, fmt, args);
1112 va_end(args);
1113 fz_debug_store(ctx, out);
1114 fz_write_printf(ctx, out, "STORE\tEND\n");
1115 fz_close_output(ctx, out);
1116 fz_drop_output(ctx, out);
1117 }
1118
1119 #endif