1 /*-------------------------------------------------------------------------
4 * POSTGRES free space map for quickly finding free space in relations
7 * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
8 * Portions Copyright (c) 1994, Regents of the University of California
11 * $Header: /cvsroot/pgsql/src/backend/storage/freespace/freespace.c,v 1.13 2002/09/04 20:31:25 momjian Exp $
16 * The only really interesting aspect of this code is the heuristics for
17 * deciding how much information we can afford to keep about each relation,
18 * given that we have a limited amount of workspace in shared memory.
19 * These currently work as follows:
21 * The number of distinct relations tracked is limited by a configuration
22 * variable (MaxFSMRelations). When this would be exceeded, we discard the
23 * least frequently used relation. A previously-unknown relation is always
24 * entered into the map with useCount 1 on first reference, even if this
25 * causes an existing entry with higher useCount to be discarded. This may
26 * cause a little bit of thrashing among the bottom entries in the list,
27 * but if we didn't do it then there'd be no way for a relation not in the
28 * map to get in once the map is full. Note we allow a relation to be in the
29 * map even if no pages are currently stored for it: this allows us to track
30 * its useCount & threshold, which may eventually go high enough to give it
31 * priority for page storage.
33 * The total number of pages tracked is also set by a configuration variable
34 * (MaxFSMPages). We allocate these dynamically among the known relations,
35 * giving preference to the more-frequently-referenced relations and those
36 * with smaller tuples. This is done by a thresholding mechanism: for each
37 * relation we keep track of a current target threshold, and allow only pages
38 * with free space >= threshold to be stored in the map. The threshold starts
39 * at BLCKSZ/2 (a somewhat arbitrary choice) for a newly entered relation.
40 * On each GetFreeSpace reference, the threshold is moved towards the
41 * request size using a slow exponential moving average; this means that
42 * for heavily used relations, the threshold will approach the average
43 * freespace request size (average tuple size). Whenever we run out of
44 * storage space in the map, we double the threshold of all existing relations
45 * (but not to more than BLCKSZ, so as to prevent runaway thresholds).
46 * Infrequently used relations will thus tend to have large thresholds.
48 * XXX this thresholding mechanism is experimental; need to see how well
49 * it works in practice.
51 *-------------------------------------------------------------------------
57 #include "storage/freespace.h"
58 #include "storage/itemid.h"
59 #include "storage/lwlock.h"
60 #include "storage/shmem.h"
64 * Shared free-space-map objects
66 * Note: we handle pointers to these items as pointers, not as SHMEM_OFFSETs.
67 * This assumes that all processes accessing the map will have the shared
68 * memory segment mapped at the same place in their address space.
70 typedef struct FSMHeader FSMHeader;
71 typedef struct FSMRelation FSMRelation;
72 typedef struct FSMChunk FSMChunk;
74 /* Header for whole map */
/* NOTE(review): the `struct FSMHeader {` opener and closing brace are not
 * visible in this chunk; the fields below belong to that struct. */
77 HTAB *relHash; /* hashtable of FSMRelation entries */
78 FSMRelation *relList; /* FSMRelations in useCount order */
79 FSMRelation *relListTail; /* tail of FSMRelation list */
80 int numRels; /* number of FSMRelations now in use */
81 FSMChunk *freeChunks; /* linked list of currently-free chunks */
82 int numFreeChunks; /* number of free chunks */
86 * Per-relation struct --- this is an entry in the shared hash table.
87 * The hash key is the RelFileNode value (hence, we look at the physical
88 * relation ID, not the logical ID, which is appropriate).
92 RelFileNode key; /* hash key (must be first) */
93 FSMRelation *nextRel; /* next rel in useCount order */
94 FSMRelation *priorRel; /* prior rel in useCount order */
95 int useCount; /* use count for prioritizing rels */
96 Size threshold; /* minimum amount of free space to keep */
97 int nextPage; /* index (from 0) to start next search at */
98 int numPages; /* total number of pages we have info about */
100 int numChunks; /* number of FSMChunks allocated to rel */
101 FSMChunk *relChunks; /* linked list of page info chunks */
105 * Info about individual pages in a relation is stored in chunks to reduce
106 * allocation overhead. Note that we allow any chunk of a relation's list
107 * to be partly full, not only the last chunk; this speeds deletion of
108 * individual page entries. When the total number of unused slots reaches
109 * CHUNKPAGES, we compact out the unused slots so that we can return a chunk
110 * to the freelist; but there's no point in doing the compaction before that.
113 #define CHUNKPAGES 32 /* each chunk can store this many pages */
117 FSMChunk *next; /* linked-list link */
118 int numPages; /* number of pages described here */
119 BlockNumber pages[CHUNKPAGES]; /* page numbers within relation */
120 ItemLength bytes[CHUNKPAGES]; /* free space available on each page */
/* GUC-configured capacity limits and the pointer to the shared map. */
125 int MaxFSMRelations; /* these are set by guc.c */
128 static FSMHeader *FreeSpaceMap; /* points to FSMHeader in shared memory */
/* Forward declarations for the internal (lock-held) routines below. */
131 static FSMRelation *lookup_fsm_rel(RelFileNode *rel);
132 static FSMRelation *create_fsm_rel(RelFileNode *rel);
133 static void delete_fsm_rel(FSMRelation *fsmrel);
134 static void link_fsm_rel_after(FSMRelation *fsmrel, FSMRelation *oldrel);
135 static void unlink_fsm_rel(FSMRelation *fsmrel);
136 static void free_chunk_chain(FSMChunk *fchunk);
137 static BlockNumber find_free_space(FSMRelation *fsmrel, Size spaceNeeded);
138 static void fsm_record_free_space(FSMRelation *fsmrel, BlockNumber page,
140 static bool lookup_fsm_page_entry(FSMRelation *fsmrel, BlockNumber page,
141 FSMChunk **outChunk, int *outChunkRelIndex);
142 static bool insert_fsm_page_entry(FSMRelation *fsmrel,
143 BlockNumber page, Size spaceAvail,
144 FSMChunk *chunk, int chunkRelIndex);
145 static bool push_fsm_page_entry(BlockNumber page, Size spaceAvail,
146 FSMChunk *chunk, int chunkRelIndex);
147 static void delete_fsm_page_entry(FSMRelation *fsmrel, FSMChunk *chunk,
149 static void compact_fsm_page_list(FSMRelation *fsmrel);
150 static void acquire_fsm_free_space(void);
159 * InitFreeSpaceMap -- Initialize the freespace module.
161 * This must be called once during shared memory initialization.
162 * It builds the empty free space map table. FreeSpaceLock must also be
163 * initialized at some point, but is not touched here --- we assume there is
164 * no need for locking, since only the calling process can be accessing shared
168 InitFreeSpaceMap(void)
175 /* Create table header */
176 FreeSpaceMap = (FSMHeader *) ShmemAlloc(sizeof(FSMHeader));
177 if (FreeSpaceMap == NULL)
178 elog(FATAL, "Insufficient shared memory for free space map");
179 MemSet(FreeSpaceMap, 0, sizeof(FSMHeader));
181 /* Create hashtable for FSMRelations */
182 info.keysize = sizeof(RelFileNode);
183 info.entrysize = sizeof(FSMRelation);
184 info.hash = tag_hash;
/* Initial table size is a fraction of the cap; the table can grow. */
186 FreeSpaceMap->relHash = ShmemInitHash("Free Space Map Hash",
187 MaxFSMRelations / 10,
190 (HASH_ELEM | HASH_FUNCTION));
192 if (!FreeSpaceMap->relHash)
193 elog(FATAL, "Insufficient shared memory for free space map");
195 /* Allocate FSMChunks and fill up the free-chunks list */
/* Ceiling division: enough chunks to cover MaxFSMPages page slots. */
196 nchunks = (MaxFSMPages - 1) / CHUNKPAGES + 1;
197 FreeSpaceMap->numFreeChunks = nchunks;
199 chunks = (FSMChunk *) ShmemAlloc(nchunks * sizeof(FSMChunk));
201 elog(FATAL, "Insufficient shared memory for free space map");
/* Thread all chunks into a singly linked freelist. */
204 while (nchunks-- > 0)
206 chunks->next = prevchunk;
210 FreeSpaceMap->freeChunks = prevchunk;
214 * Estimate amount of shmem space needed for FSM.
/* Returns the sum of header, hash-table, and chunk-array space; must track
 * the allocations actually made in InitFreeSpaceMap above. */
217 FreeSpaceShmemSize(void)
223 size = MAXALIGN(sizeof(FSMHeader));
225 /* hash table, including the FSMRelation objects */
226 size += hash_estimate_size(MaxFSMRelations, sizeof(FSMRelation));
228 /* FSMChunk objects */
/* Same ceiling-division formula as InitFreeSpaceMap. */
229 nchunks = (MaxFSMPages - 1) / CHUNKPAGES + 1;
231 size += MAXALIGN(nchunks * sizeof(FSMChunk));
237 * GetPageWithFreeSpace - try to find a page in the given relation with
238 * at least the specified amount of free space.
240 * If successful, return the block number; if not, return InvalidBlockNumber.
242 * The caller must be prepared for the possibility that the returned page
243 * will turn out to have too little space available by the time the caller
244 * gets a lock on it. In that case, the caller should report the actual
245 * amount of free space available on that page (via RecordFreeSpace) and
246 * then try again. If InvalidBlockNumber is returned, extend the relation.
249 GetPageWithFreeSpace(RelFileNode *rel, Size spaceNeeded)
252 BlockNumber freepage;
254 LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);
257 * We always add a rel to the hashtable when it is inquired about.
259 fsmrel = create_fsm_rel(rel);
262 * Adjust the threshold towards the space request. This essentially
263 * implements an exponential moving average with an equivalent period
264 * of about 63 requests. Ignore silly requests, however, to ensure
265 * that the average stays in bounds.
267 * In theory, if the threshold increases here we should immediately
268 * delete any pages that fall below the new threshold. In practice it
269 * seems OK to wait until we have a need to compact space.
271 if (spaceNeeded > 0 && spaceNeeded < BLCKSZ)
273 int cur_avg = (int) fsmrel->threshold;
/* EMA step: move 1/32 of the way toward the current request size. */
275 cur_avg += ((int) spaceNeeded - cur_avg) / 32;
276 fsmrel->threshold = (Size) cur_avg;
278 freepage = find_free_space(fsmrel, spaceNeeded);
279 LWLockRelease(FreeSpaceLock);
284 * RecordFreeSpace - record the amount of free space available on a page.
286 * The FSM is at liberty to forget about the page instead, if the amount of
287 * free space is too small to be interesting. The only guarantee is that
288 * a subsequent request to get a page with more than spaceAvail space free
289 * will not return this page.
292 RecordFreeSpace(RelFileNode *rel, BlockNumber page, Size spaceAvail)
296 /* Sanity check: ensure spaceAvail will fit into ItemLength */
297 AssertArg(spaceAvail < BLCKSZ);
299 LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);
302 * We choose not to add rels to the hashtable unless they've been
303 * inquired about with GetPageWithFreeSpace. Also, a Record operation
304 * does not increase the useCount or change threshold, only Get does.
306 fsmrel = lookup_fsm_rel(rel);
/* If the rel isn't in the map, the info is simply discarded. */
308 fsm_record_free_space(fsmrel, page, spaceAvail);
309 LWLockRelease(FreeSpaceLock);
313 * RecordAndGetPageWithFreeSpace - combo form to save one lock and
314 * hash table lookup cycle.
/* Records oldSpaceAvail for oldPage, then searches for a page with at
 * least spaceNeeded free, all under a single FreeSpaceLock acquisition. */
317 RecordAndGetPageWithFreeSpace(RelFileNode *rel,
323 BlockNumber freepage;
325 /* Sanity check: ensure spaceAvail will fit into ItemLength */
326 AssertArg(oldSpaceAvail < BLCKSZ);
328 LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);
331 * We always add a rel to the hashtable when it is inquired about.
333 fsmrel = create_fsm_rel(rel);
336 * Adjust the threshold towards the space request, same as in
337 * GetPageWithFreeSpace.
339 * Note that we do this before storing data for oldPage, which means this
340 * isn't exactly equivalent to Record followed by Get; but it seems
341 * appropriate to adjust the threshold first.
343 if (spaceNeeded > 0 && spaceNeeded < BLCKSZ)
345 int cur_avg = (int) fsmrel->threshold;
347 cur_avg += ((int) spaceNeeded - cur_avg) / 32;
348 fsmrel->threshold = (Size) cur_avg;
351 fsm_record_free_space(fsmrel, oldPage, oldSpaceAvail);
353 freepage = find_free_space(fsmrel, spaceNeeded);
354 LWLockRelease(FreeSpaceLock);
359 * MultiRecordFreeSpace - record available-space info about multiple pages
360 * of a relation in one call.
362 * First, if minPage <= maxPage, the FSM must discard any entries it has for
363 * pages in that page number range (inclusive). This allows obsolete info
364 * to be discarded. Second, if nPages > 0, record the page numbers and free
365 * space amounts in the given arrays. As with RecordFreeSpace, the FSM is at
366 * liberty to discard some of the information. However, it *must* discard
367 * previously stored info in the minPage..maxPage range (for example, this
368 * case is used to remove info about deleted pages during relation truncation).
371 MultiRecordFreeSpace(RelFileNode *rel,
381 LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);
382 fsmrel = lookup_fsm_rel(rel);
386 * Remove entries in specified range
388 if (minPage <= maxPage)
394 /* Use lookup to locate first entry >= minPage */
395 lookup_fsm_page_entry(fsmrel, minPage, &chunk, &chunkRelIndex);
396 /* Set free space to 0 for each page within range */
398 while (chunk && !done)
400 int numPages = chunk->numPages;
402 for (; chunkRelIndex < numPages; chunkRelIndex++)
404 if (chunk->pages[chunkRelIndex] > maxPage)
/* Passed the end of the range; stop scanning. */
409 chunk->bytes[chunkRelIndex] = 0;
414 /* Now compact out the zeroed entries */
/* (Zeroed entries fall below any nonzero threshold and are dropped.) */
415 compact_fsm_page_list(fsmrel);
419 * Add new entries, if appropriate.
421 * XXX we could probably be smarter about this than doing it
422 * completely separately for each one. FIXME later.
424 * One thing we can do is short-circuit the process entirely if a
425 * page (a) has too little free space to be recorded, and (b) is
426 * within the minPage..maxPage range --- then we deleted any old
427 * entry above, and we aren't going to make a new one. This is
428 * particularly useful since in most cases, all the passed pages
429 * will in fact be in the minPage..maxPage range.
431 for (i = 0; i < nPages; i++)
433 BlockNumber page = pages[i];
434 Size avail = spaceAvail[i];
436 if (avail >= fsmrel->threshold ||
437 page < minPage || page > maxPage)
438 fsm_record_free_space(fsmrel, page, avail);
441 LWLockRelease(FreeSpaceLock);
445 * FreeSpaceMapForgetRel - forget all about a relation.
447 * This is called when a relation is deleted. Although we could just let
448 * the rel age out of the map, it's better to reclaim and reuse the space
452 FreeSpaceMapForgetRel(RelFileNode *rel)
456 LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);
457 fsmrel = lookup_fsm_rel(rel);
/* Nothing to do if the rel was never entered in the map. */
459 delete_fsm_rel(fsmrel);
460 LWLockRelease(FreeSpaceLock);
464 * FreeSpaceMapForgetDatabase - forget all relations of a database.
466 * This is called during DROP DATABASE. As above, might as well reclaim
467 * map space sooner instead of later.
469 * XXX when we implement tablespaces, target Oid will need to be tablespace
470 * ID not database ID.
473 FreeSpaceMapForgetDatabase(Oid dbid)
478 LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);
479 for (fsmrel = FreeSpaceMap->relList; fsmrel; fsmrel = fsmrel->nextRel)
481 nextrel = fsmrel->nextRel; /* in case we delete it */
482 if (fsmrel->key.tblNode == dbid)
483 delete_fsm_rel(fsmrel);
485 LWLockRelease(FreeSpaceLock);
490 * Internal routines. These all assume the caller holds the FreeSpaceLock.
494 * Lookup a relation in the hash table. If not present, return NULL.
495 * The relation's useCount is not changed.
498 lookup_fsm_rel(RelFileNode *rel)
/* Plain HASH_FIND probe keyed by RelFileNode; no side effects. */
502 fsmrel = (FSMRelation *) hash_search(FreeSpaceMap->relHash,
513 * Lookup a relation in the hash table, creating an entry if not present.
515 * On successful lookup, the relation's useCount is incremented, possibly
516 * causing it to move up in the priority list.
519 create_fsm_rel(RelFileNode *rel)
525 fsmrel = (FSMRelation *) hash_search(FreeSpaceMap->relHash,
530 elog(ERROR, "FreeSpaceMap hashtable out of memory");
534 /* New hashtable entry, initialize it (hash_search set the key) */
535 fsmrel->useCount = 1;
536 fsmrel->threshold = BLCKSZ / 2; /* starting point for new entry */
537 fsmrel->nextPage = 0;
538 fsmrel->numPages = 0;
539 fsmrel->numChunks = 0;
540 fsmrel->relChunks = NULL;
541 /* Discard lowest-priority existing rel, if we are over limit */
542 if (FreeSpaceMap->numRels >= MaxFSMRelations)
543 delete_fsm_rel(FreeSpaceMap->relListTail);
546 * Add new entry in front of any others with useCount 1 (since it
547 * is more recently used than them).
549 oldrel = FreeSpaceMap->relListTail;
550 while (oldrel && oldrel->useCount <= 1)
551 oldrel = oldrel->priorRel;
552 link_fsm_rel_after(fsmrel, oldrel);
553 FreeSpaceMap->numRels++;
559 /* Existing entry, advance its useCount */
560 if (++(fsmrel->useCount) >= INT_MAX / 2)
562 /* When useCounts threaten to overflow, reduce 'em all 2X */
563 for (oldrel = FreeSpaceMap->relList;
565 oldrel = oldrel->nextRel)
566 oldrel->useCount >>= 1;
568 /* If warranted, move it up the priority list */
569 oldrel = fsmrel->priorRel;
570 myCount = fsmrel->useCount;
571 if (oldrel && oldrel->useCount <= myCount)
/* Unlink, then walk toward the head past all lower-or-equal counts. */
573 unlink_fsm_rel(fsmrel);
574 while (oldrel && oldrel->useCount <= myCount)
575 oldrel = oldrel->priorRel;
576 link_fsm_rel_after(fsmrel, oldrel);
584 * Remove an existing FSMRelation entry.
/* Returns the rel's chunks to the freelist, delinks it from the priority
 * list, and removes its hashtable entry. */
587 delete_fsm_rel(FSMRelation *fsmrel)
591 free_chunk_chain(fsmrel->relChunks);
592 unlink_fsm_rel(fsmrel);
593 FreeSpaceMap->numRels--;
594 result = (FSMRelation *) hash_search(FreeSpaceMap->relHash,
595 (void *) &(fsmrel->key),
/* The entry must exist; failure here means the table is inconsistent. */
599 elog(ERROR, "FreeSpaceMap hashtable corrupted");
603 * Link a FSMRelation into the priority list just after the given existing
604 * entry (or at the head of the list, if oldrel is NULL).
607 link_fsm_rel_after(FSMRelation *fsmrel, FSMRelation *oldrel)
/* Head-insertion case (oldrel == NULL). */
612 fsmrel->priorRel = NULL;
613 fsmrel->nextRel = FreeSpaceMap->relList;
614 FreeSpaceMap->relList = fsmrel;
615 if (fsmrel->nextRel != NULL)
616 fsmrel->nextRel->priorRel = fsmrel;
/* List was empty: new entry is also the tail. */
618 FreeSpaceMap->relListTail = fsmrel;
622 /* insert after oldrel */
623 fsmrel->priorRel = oldrel;
624 fsmrel->nextRel = oldrel->nextRel;
625 oldrel->nextRel = fsmrel;
626 if (fsmrel->nextRel != NULL)
627 fsmrel->nextRel->priorRel = fsmrel;
/* Inserted after old tail: update tail pointer. */
629 FreeSpaceMap->relListTail = fsmrel;
634 * Delink a FSMRelation from the priority list.
/* Standard doubly-linked-list removal; fixes head/tail pointers when the
 * removed entry was first/last. Does not free anything. */
637 unlink_fsm_rel(FSMRelation *fsmrel)
639 if (fsmrel->priorRel != NULL)
640 fsmrel->priorRel->nextRel = fsmrel->nextRel;
642 FreeSpaceMap->relList = fsmrel->nextRel;
643 if (fsmrel->nextRel != NULL)
644 fsmrel->nextRel->priorRel = fsmrel->priorRel;
646 FreeSpaceMap->relListTail = fsmrel->priorRel;
650 * Return all the FSMChunks in the chain starting at fchunk to the freelist.
651 * (Caller must handle unlinking them from wherever they were.)
654 free_chunk_chain(FSMChunk *fchunk)
/* Walk to the chain's last link, counting chunks along the way. */
663 while (lchunk->next != NULL)
666 lchunk = lchunk->next;
/* Splice the whole chain onto the front of the global freelist. */
668 lchunk->next = FreeSpaceMap->freeChunks;
669 FreeSpaceMap->freeChunks = fchunk;
670 FreeSpaceMap->numFreeChunks += nchunks;
674 * Look to see if a page with at least the specified amount of space is
675 * available in the given FSMRelation. If so, return its page number,
676 * and advance the nextPage counter so that the next inquiry will return
677 * a different page if possible. Return InvalidBlockNumber if no success.
680 find_free_space(FSMRelation *fsmrel, Size spaceNeeded)
682 int pagesToCheck, /* outer loop counter */
683 pageIndex, /* current page index relative to relation */
684 chunkRelIndex; /* current page index relative to curChunk */
/* Resume scanning where the previous successful search left off, so
 * successive requests spread across the rel's pages. */
687 pageIndex = fsmrel->nextPage;
688 /* Last operation may have left nextPage pointing past end */
689 if (pageIndex >= fsmrel->numPages)
691 curChunk = fsmrel->relChunks;
692 chunkRelIndex = pageIndex;
694 for (pagesToCheck = fsmrel->numPages; pagesToCheck > 0; pagesToCheck--)
697 * Make sure we are in the right chunk. First time through, we
698 * may have to advance through multiple chunks; subsequent loops
699 * should do this at most once.
701 while (chunkRelIndex >= curChunk->numPages)
703 chunkRelIndex -= curChunk->numPages;
704 curChunk = curChunk->next;
706 /* Check the next page */
707 if ((Size) curChunk->bytes[chunkRelIndex] >= spaceNeeded)
709 fsmrel->nextPage = pageIndex + 1;
710 return curChunk->pages[chunkRelIndex];
712 /* Advance pageIndex and chunkRelIndex, wrapping around if needed */
713 if (++pageIndex >= fsmrel->numPages)
716 curChunk = fsmrel->relChunks;
723 return InvalidBlockNumber; /* nothing found */
727 * fsm_record_free_space - guts of RecordFreeSpace, which is also used by
728 * RecordAndGetPageWithFreeSpace.
731 fsm_record_free_space(FSMRelation *fsmrel, BlockNumber page, Size spaceAvail)
736 if (lookup_fsm_page_entry(fsmrel, page, &chunk, &chunkRelIndex))
738 /* Found an existing entry for page; update or delete it */
739 if (spaceAvail >= fsmrel->threshold)
740 chunk->bytes[chunkRelIndex] = (ItemLength) spaceAvail;
/* Below threshold: forget this page entirely. */
742 delete_fsm_page_entry(fsmrel, chunk, chunkRelIndex);
747 * No existing entry; add one if spaceAvail exceeds threshold.
749 * CORNER CASE: if we have to do acquire_fsm_free_space then our own
750 * threshold will increase, possibly meaning that we shouldn't
751 * store the page after all. Loop to redo the test if that
752 * happens. The loop also covers the possibility that
753 * acquire_fsm_free_space must be executed more than once to free
754 * any space (ie, thresholds must be more than doubled).
756 while (spaceAvail >= fsmrel->threshold)
758 if (insert_fsm_page_entry(fsmrel, page, spaceAvail,
759 chunk, chunkRelIndex))
761 /* No space, acquire some and recheck threshold */
762 acquire_fsm_free_space();
763 if (spaceAvail < fsmrel->threshold)
767 * Need to redo the lookup since our own page list may well
768 * have lost entries, so position is not correct anymore.
770 if (lookup_fsm_page_entry(fsmrel, page,
771 &chunk, &chunkRelIndex))
/* The entry can't exist here: we just failed to insert it. */
772 elog(ERROR, "fsm_record_free_space: unexpected match");
778 * Look for an entry for a specific page (block number) in a FSMRelation.
779 * Returns TRUE if a matching entry exists, else FALSE.
781 * The output arguments *outChunk, *outChunkRelIndex are set to indicate where
782 * the entry exists (if TRUE result) or could be inserted (if FALSE result).
783 * *chunk is set to NULL if there is no place to insert, ie, the entry would
784 * need to be added to a new chunk.
787 lookup_fsm_page_entry(FSMRelation *fsmrel, BlockNumber page,
788 FSMChunk **outChunk, int *outChunkRelIndex)
/* Page entries are kept sorted by block number across the chunk chain,
 * which is what makes the early-exit comparisons below valid. */
793 for (chunk = fsmrel->relChunks; chunk; chunk = chunk->next)
795 int numPages = chunk->numPages;
797 /* Can skip the chunk quickly if page must be after last in chunk */
798 if (numPages > 0 && page <= chunk->pages[numPages - 1])
800 for (chunkRelIndex = 0; chunkRelIndex < numPages; chunkRelIndex++)
802 if (page <= chunk->pages[chunkRelIndex])
805 *outChunkRelIndex = chunkRelIndex;
806 return (page == chunk->pages[chunkRelIndex]);
809 /* Should not get here, given above test */
814 * If we are about to fall off the end, and there's space
815 * available in the end chunk, return a pointer to it.
817 if (chunk->next == NULL && numPages < CHUNKPAGES)
820 *outChunkRelIndex = numPages;
826 * Adding the page would require a new chunk (or, perhaps, compaction
827 * of available free space --- not my problem here).
830 *outChunkRelIndex = 0;
835 * Insert a new page entry into a FSMRelation's list at given position
836 * (chunk == NULL implies at end).
838 * If there is no space available to insert the entry, return FALSE.
841 insert_fsm_page_entry(FSMRelation *fsmrel, BlockNumber page, Size spaceAvail,
842 FSMChunk *chunk, int chunkRelIndex)
844 /* Outer loop handles retry after compacting rel's page list */
847 if (fsmrel->numPages >= fsmrel->numChunks * CHUNKPAGES)
849 /* No free space within chunk list, so need another chunk */
/* Pop a chunk off the global freelist; fail if none remain. */
852 if ((newChunk = FreeSpaceMap->freeChunks) == NULL)
853 return false; /* can't do it */
854 FreeSpaceMap->freeChunks = newChunk->next;
855 FreeSpaceMap->numFreeChunks--;
856 newChunk->next = NULL;
857 newChunk->numPages = 0;
858 if (fsmrel->relChunks == NULL)
859 fsmrel->relChunks = newChunk;
/* Otherwise append the new chunk at the end of the rel's chain. */
862 FSMChunk *priorChunk = fsmrel->relChunks;
864 while (priorChunk->next != NULL)
865 priorChunk = priorChunk->next;
866 priorChunk->next = newChunk;
871 /* Original search found that new page belongs at end */
878 * Try to insert it the easy way, ie, just move down subsequent
882 push_fsm_page_entry(page, spaceAvail, chunk, chunkRelIndex))
885 fsmrel->nextPage++; /* don't return same page twice running */
890 * There is space available, but evidently it's before the place
891 * where the page entry needs to go. Compact the list and try
892 * again. This will require us to redo the search for the
893 * appropriate place. Furthermore, compact_fsm_page_list deletes
894 * empty end chunks, so we may need to repeat the action of
895 * grabbing a new end chunk.
897 compact_fsm_page_list(fsmrel);
898 if (lookup_fsm_page_entry(fsmrel, page, &chunk, &chunkRelIndex))
899 elog(ERROR, "insert_fsm_page_entry: entry already exists!");
904 * Auxiliary routine for insert_fsm_page_entry: try to push entries to the
905 * right to insert at chunk/chunkRelIndex. Return TRUE if successful.
906 * Note that the FSMRelation's own fields are not updated.
909 push_fsm_page_entry(BlockNumber page, Size spaceAvail,
910 FSMChunk *chunk, int chunkRelIndex)
914 if (chunk->numPages >= CHUNKPAGES)
916 if (chunk->next == NULL)
917 return false; /* no space */
918 /* try to push chunk's last item to next chunk */
/* Recurses down the chain; bounded by the number of chunks. */
919 if (!push_fsm_page_entry(chunk->pages[CHUNKPAGES - 1],
920 chunk->bytes[CHUNKPAGES - 1],
923 /* successfully pushed it */
/* Shift entries right within this chunk to open the insertion slot. */
926 for (i = chunk->numPages; i > chunkRelIndex; i--)
928 chunk->pages[i] = chunk->pages[i - 1];
929 chunk->bytes[i] = chunk->bytes[i - 1];
932 chunk->pages[chunkRelIndex] = page;
933 chunk->bytes[chunkRelIndex] = (ItemLength) spaceAvail;
938 * Delete one page entry from a FSMRelation's list. Compact free space
939 * in the list, but only if a chunk can be returned to the freelist.
942 delete_fsm_page_entry(FSMRelation *fsmrel, FSMChunk *chunk, int chunkRelIndex)
947 Assert(chunk && chunkRelIndex >= 0 && chunkRelIndex < chunk->numPages);
948 /* Compact out space in this chunk */
949 lim = --chunk->numPages;
950 for (i = chunkRelIndex; i < lim; i++)
952 chunk->pages[i] = chunk->pages[i + 1];
953 chunk->bytes[i] = chunk->bytes[i + 1];
955 /* Compact the whole list if a chunk can be freed */
/* i.e., the remaining pages fit into one fewer chunk. */
957 if (fsmrel->numPages <= (fsmrel->numChunks - 1) * CHUNKPAGES)
958 compact_fsm_page_list(fsmrel);
962 * Remove any pages with free space less than the current threshold for the
963 * FSMRelation, compact out free slots in non-last chunks, and return any
964 * completely freed chunks to the freelist.
967 compact_fsm_page_list(FSMRelation *fsmrel)
969 Size threshold = fsmrel->threshold;
/* Two-finger compaction: src scans every entry, dst writes the keepers
 * densely into the front of the same chunk chain. */
977 srcChunk = dstChunk = fsmrel->relChunks;
978 srcIndex = dstIndex = 0;
979 dstPages = 0; /* total pages kept */
980 dstChunkCnt = 1; /* includes current dstChunk */
982 while (srcChunk != NULL)
984 int srcPages = srcChunk->numPages;
986 while (srcIndex < srcPages)
988 if ((Size) srcChunk->bytes[srcIndex] >= threshold)
990 if (dstIndex >= CHUNKPAGES)
993 * At this point srcChunk must be pointing to a later
994 * chunk, so it's OK to overwrite dstChunk->numPages.
996 dstChunk->numPages = dstIndex;
997 dstChunk = dstChunk->next;
1001 dstChunk->pages[dstIndex] = srcChunk->pages[srcIndex];
1002 dstChunk->bytes[dstIndex] = srcChunk->bytes[srcIndex];
1008 srcChunk = srcChunk->next;
1014 /* No chunks to be kept at all */
1015 fsmrel->nextPage = 0;
1016 fsmrel->numPages = 0;
1017 fsmrel->numChunks = 0;
1018 fsmrel->relChunks = NULL;
1019 free_chunk_chain(dstChunk);
1023 /* we deliberately don't change nextPage here */
1024 fsmrel->numPages = dstPages;
1025 fsmrel->numChunks = dstChunkCnt;
1026 dstChunk->numPages = dstIndex;
/* Everything past the last dst chunk is now unused; free it. */
1027 free_chunk_chain(dstChunk->next);
1028 dstChunk->next = NULL;
1033 * Acquire some free space by raising the thresholds of all FSMRelations.
1035 * Note there is no guarantee as to how much space will be freed by a single
1036 * invocation; caller may repeat if necessary.
1039 acquire_fsm_free_space(void)
1041 FSMRelation *fsmrel;
1043 for (fsmrel = FreeSpaceMap->relList; fsmrel; fsmrel = fsmrel->nextRel)
/* Double every threshold, capped at BLCKSZ per the file-header design. */
1045 fsmrel->threshold *= 2;
1046 /* Limit thresholds to BLCKSZ so they can't get ridiculously large */
1047 if (fsmrel->threshold > BLCKSZ)
1048 fsmrel->threshold = BLCKSZ;
1049 /* Release any pages that don't meet the new threshold */
1050 compact_fsm_page_list(fsmrel);
1055 #ifdef FREESPACE_DEBUG
1057 * Dump contents of freespace map for debugging.
1059 * We assume caller holds the FreeSpaceLock, or is otherwise unconcerned
1060 * about other processes.
1065 FSMRelation *fsmrel;
1066 FSMRelation *prevrel = NULL;
/* Walk the priority list, printing each rel's entries and verifying the
 * redundant counters and back-links as we go. */
1073 for (fsmrel = FreeSpaceMap->relList; fsmrel; fsmrel = fsmrel->nextRel)
1076 fprintf(stderr, "Map %d: rel %u/%u useCount %d threshold %u nextPage %d\nMap= ",
1077 relNum, fsmrel->key.tblNode, fsmrel->key.relNode,
1078 fsmrel->useCount, fsmrel->threshold, fsmrel->nextPage);
1079 nChunks = nPages = 0;
1080 for (chunk = fsmrel->relChunks; chunk; chunk = chunk->next)
1082 int numPages = chunk->numPages;
1085 for (chunkRelIndex = 0; chunkRelIndex < numPages; chunkRelIndex++)
1088 fprintf(stderr, " %u:%u",
1089 chunk->pages[chunkRelIndex],
1090 chunk->bytes[chunkRelIndex]);
1093 fprintf(stderr, "\n");
1094 /* Cross-check local counters and list links */
1095 if (nPages != fsmrel->numPages)
1096 fprintf(stderr, "DumpFreeSpace: %d pages in rel, but numPages = %d\n",
1097 nPages, fsmrel->numPages);
1098 if (nChunks != fsmrel->numChunks)
1099 fprintf(stderr, "DumpFreeSpace: %d chunks in rel, but numChunks = %d\n",
1100 nChunks, fsmrel->numChunks);
1101 if (prevrel != fsmrel->priorRel)
1102 fprintf(stderr, "DumpFreeSpace: broken list links\n");
1105 if (prevrel != FreeSpaceMap->relListTail)
1106 fprintf(stderr, "DumpFreeSpace: broken list links\n");
1107 /* Cross-check global counters */
1108 if (relNum != FreeSpaceMap->numRels)
1109 fprintf(stderr, "DumpFreeSpace: %d rels in list, but numRels = %d\n",
1110 relNum, FreeSpaceMap->numRels);
/* Finally, verify the free-chunk list length against its counter. */
1112 for (chunk = FreeSpaceMap->freeChunks; chunk; chunk = chunk->next)
1114 if (nChunks != FreeSpaceMap->numFreeChunks)
1115 fprintf(stderr, "DumpFreeSpace: %d chunks in list, but numFreeChunks = %d\n",
1116 nChunks, FreeSpaceMap->numFreeChunks);
1119 #endif /* FREESPACE_DEBUG */