/*-------------------------------------------------------------------------
 *
 * rewriteheap.c
 *	  Support functions to rewrite tables.
 *
 * These functions provide a facility to completely rewrite a heap, while
 * preserving visibility information and update chains.
 *
 * The caller is responsible for creating the new heap, all catalog
 * changes, supplying the tuples to be written to the new heap, and
 * rebuilding indexes.  The caller must hold AccessExclusiveLock on the
 * target table, because we assume no one else is writing into it.
 *
 * To use the facility:
 *
 * begin_heap_rewrite
 * while (fetch next tuple)
 * {
 *     if (tuple is dead)
 *         rewrite_heap_dead_tuple
 *     else
 *     {
 *         // do any transformations here if required
 *         rewrite_heap_tuple
 *     }
 * }
 * end_heap_rewrite
 *
 * The contents of the new relation shouldn't be relied on until after
 * end_heap_rewrite is called.
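 *
 * As a more concrete sketch (illustrative only: the scan setup and the
 * visibility test are simplified, and the variable names are invented
 * here), a caller such as CLUSTER might drive the facility like this:
 *
 *     rwstate = begin_heap_rewrite(NewHeap, OldestXmin, FreezeXid, use_wal);
 *     scan = heap_beginscan(OldHeap, SnapshotAny, 0, (ScanKey) NULL);
 *     while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
 *     {
 *         if (tuple is DEAD according to HeapTupleSatisfiesVacuum)
 *             rewrite_heap_dead_tuple(rwstate, tuple);
 *         else
 *         {
 *             copy = heap_copytuple(tuple);  // rewrite scribbles on copy
 *             rewrite_heap_tuple(rwstate, tuple, copy);
 *             heap_freetuple(copy);
 *         }
 *     }
 *     heap_endscan(scan);
 *     end_heap_rewrite(rwstate);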
 *
 *
 * IMPLEMENTATION
 *
 * This would be a fairly trivial affair, except that we need to maintain
 * the ctid chains that link versions of an updated tuple together.
 * Since the newly stored tuples will have tids different from the original
 * ones, if we just copied t_ctid fields to the new table the links would
 * be wrong.  When we are required to copy a (presumably recently-dead or
 * delete-in-progress) tuple whose ctid doesn't point to itself, we have
 * to substitute the correct ctid instead.
 *
 * For each ctid reference from A -> B, we might encounter either A first
 * or B first.  (Note that a tuple in the middle of a chain is both A and B
 * of different pairs.)
 *
 * If we encounter A first, we'll store the tuple in the unresolved_tups
 * hash table.  When we later encounter B, we remove A from the hash table,
 * fix the ctid to point to the new location of B, and insert both A and B
 * to the new heap.
 *
 * If we encounter B first, we can insert B to the new heap right away.
 * We then add an entry to the old_new_tid_map hash table showing B's
 * original tid (in the old heap) and new tid (in the new heap).  When
 * we later encounter A, we get the new location of B from the table,
 * and can write A immediately with the correct ctid.
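 *
 * For example (an invented scenario, purely for illustration): suppose
 * tuple A at old TID (0,3) was updated to produce B at old TID (2,7), so
 * A's t_ctid is (2,7) and A's xmax equals B's xmin.  If the scan delivers
 * A first, we stash A's rewritten image in unresolved_tups under the key
 * (B's xmin, (2,7)); when B arrives we insert it, learn its new TID, set
 * A's ctid to that TID, and insert A as well.  If the scan delivers B
 * first, we insert B immediately and record (B's xmin, (2,7)) -> B's new
 * TID in old_new_tid_map, so that A can be written with the correct ctid
 * as soon as we see it.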
 *
 * Entries in the hash tables can be removed as soon as the later tuple
 * is encountered.  That helps to keep the memory usage down.  At the end,
 * both tables are usually empty; we should have encountered both A and B
 * of each pair.  However, it's possible for A to be RECENTLY_DEAD and B
 * entirely DEAD according to HeapTupleSatisfiesVacuum, because the test
 * for deadness using OldestXmin is not exact.  In such a case we might
 * encounter B first, and skip it, and find A later.  Then A would be added
 * to unresolved_tups, and stay there until end of the rewrite.  Since
 * this case is very unusual, we don't worry about the memory usage.
 *
 * Using in-memory hash tables means that we use some memory for each live
 * update chain in the table, from the time we find one end of the
 * reference until we find the other end.  That shouldn't be a problem in
 * practice, but if you do something like an UPDATE without a where-clause
 * on a large table, and then run CLUSTER in the same transaction, you
 * could run out of memory.  It doesn't seem worthwhile to add support for
 * spill-to-disk, as there shouldn't be that many RECENTLY_DEAD tuples in a
 * table under normal circumstances.  Furthermore, in the typical scenario
 * of CLUSTERing on an unchanging key column, we'll see all the versions
 * of a given tuple together anyway, and so the peak memory usage is only
 * proportional to the number of RECENTLY_DEAD versions of a single row,
 * not to the number in the whole table.  Note that if we do fail halfway
 * through a CLUSTER, the old table is still valid, so failure is not
 * catastrophic.
 *
 * We can't use the normal heap_insert function to insert into the new
 * heap, because heap_insert overwrites the visibility information.
 * We use a special-purpose raw_heap_insert function instead, which
 * is optimized for bulk inserting a lot of tuples, knowing that we have
 * exclusive access to the heap.  raw_heap_insert builds new pages in
 * local storage.  When a page is full, or at the end of the process,
 * we insert it to WAL as a single record and then write it to disk
 * directly through smgr.  Note, however, that any data sent to the new
 * heap's TOAST table will go through the normal bufmgr.
 *
 *
 * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994-5, Regents of the University of California
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/access/heap/rewriteheap.c,v 1.22 2010/04/28 16:10:40 heikki Exp $
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/heapam.h"
#include "access/rewriteheap.h"
#include "access/transam.h"
#include "access/tuptoaster.h"
#include "storage/bufmgr.h"
#include "storage/smgr.h"
#include "utils/memutils.h"
#include "utils/rel.h"


/*
 * State associated with a rewrite operation.  This is opaque to the user
 * of the rewrite facility.
 */
typedef struct RewriteStateData
{
    Relation    rs_new_rel;     /* destination heap */
    Page        rs_buffer;      /* page currently being built */
    BlockNumber rs_blockno;     /* block where page will go */
    bool        rs_buffer_valid;    /* T if any tuples in buffer */
    bool        rs_use_wal;     /* must we WAL-log inserts? */
    TransactionId rs_oldest_xmin;   /* oldest xmin used by caller to
                                     * determine tuple visibility */
    TransactionId rs_freeze_xid;    /* Xid that will be used as freeze cutoff
                                     * point */
    MemoryContext rs_cxt;       /* for hash tables and entries and tuples in
                                 * them */
    HTAB       *rs_unresolved_tups; /* unmatched A tuples */
    HTAB       *rs_old_new_tid_map; /* unmatched B tuples */
} RewriteStateData;

/*
 * The lookup keys for the hash tables are tuple TID and xmin (we must check
 * both to avoid false matches from dead tuples).  Beware that there is
 * probably some padding space in this struct; it must be zeroed out for
 * correct hashtable operation.
 */
typedef struct
{
    TransactionId xmin;         /* tuple xmin */
    ItemPointerData tid;        /* tuple location in old heap */
} TidHashKey;

/*
 * Entry structures for the hash tables
 */
typedef struct
{
    TidHashKey  key;            /* expected xmin/old location of B tuple */
    ItemPointerData old_tid;    /* A's location in the old heap */
    HeapTuple   tuple;          /* A's tuple contents */
} UnresolvedTupData;

typedef UnresolvedTupData *UnresolvedTup;

typedef struct
{
    TidHashKey  key;            /* actual xmin/old location of B tuple */
    ItemPointerData new_tid;    /* where we put it in the new heap */
} OldToNewMappingData;

typedef OldToNewMappingData *OldToNewMapping;


/* prototypes for internal functions */
static void raw_heap_insert(RewriteState state, HeapTuple tup);


/*
 * Begin a rewrite of a table
 *
 * new_heap		new, locked heap relation to insert tuples to
 * oldest_xmin	xid used by the caller to determine which tuples are dead
 * freeze_xid	xid before which tuples will be frozen
 * use_wal		should the inserts to the new heap be WAL-logged?
 *
 * Returns an opaque RewriteState, allocated in current memory context,
 * to be used in subsequent calls to the other functions.
 */
RewriteState
begin_heap_rewrite(Relation new_heap, TransactionId oldest_xmin,
                   TransactionId freeze_xid, bool use_wal)
{
    RewriteState state;
    MemoryContext rw_cxt;
    MemoryContext old_cxt;
    HASHCTL     hash_ctl;

    /*
     * To ease cleanup, make a separate context that will contain the
     * RewriteState struct itself plus all subsidiary data.
     */
    rw_cxt = AllocSetContextCreate(CurrentMemoryContext,
                                   "Table rewrite",
                                   ALLOCSET_DEFAULT_MINSIZE,
                                   ALLOCSET_DEFAULT_INITSIZE,
                                   ALLOCSET_DEFAULT_MAXSIZE);
    old_cxt = MemoryContextSwitchTo(rw_cxt);

    /* Create and fill in the state struct */
    state = palloc0(sizeof(RewriteStateData));

    state->rs_new_rel = new_heap;
    state->rs_buffer = (Page) palloc(BLCKSZ);
    /* new_heap needn't be empty, just locked */
    state->rs_blockno = RelationGetNumberOfBlocks(new_heap);
    state->rs_buffer_valid = false;
    state->rs_use_wal = use_wal;
    state->rs_oldest_xmin = oldest_xmin;
    state->rs_freeze_xid = freeze_xid;
    state->rs_cxt = rw_cxt;

    /* Initialize hash tables used to track update chains */
    memset(&hash_ctl, 0, sizeof(hash_ctl));
    hash_ctl.keysize = sizeof(TidHashKey);
    hash_ctl.entrysize = sizeof(UnresolvedTupData);
    hash_ctl.hcxt = state->rs_cxt;
    hash_ctl.hash = tag_hash;

    state->rs_unresolved_tups =
        hash_create("Rewrite / Unresolved ctids",
                    128,        /* arbitrary initial size */
                    &hash_ctl,
                    HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);

    hash_ctl.entrysize = sizeof(OldToNewMappingData);

    state->rs_old_new_tid_map =
        hash_create("Rewrite / Old to new tid map",
                    128,        /* arbitrary initial size */
                    &hash_ctl,
                    HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);

    MemoryContextSwitchTo(old_cxt);

    return state;
}

/*
 * End a rewrite.
 *
 * state and any other resources are freed.
 */
void
end_heap_rewrite(RewriteState state)
{
    HASH_SEQ_STATUS seq_status;
    UnresolvedTup unresolved;

    /*
     * Write any remaining tuples in the UnresolvedTups table.  If we have
     * any left, they should in fact be dead, but let's err on the safe side.
     *
     * XXX arguably this is wasted code, since such tuples ought to be dead
     * already; but writing them is cheap insurance.
     */
    hash_seq_init(&seq_status, state->rs_unresolved_tups);

    while ((unresolved = hash_seq_search(&seq_status)) != NULL)
    {
        ItemPointerSetInvalid(&unresolved->tuple->t_data->t_ctid);
        raw_heap_insert(state, unresolved->tuple);
    }

    /* Write the last page, if any */
    if (state->rs_buffer_valid)
    {
        if (state->rs_use_wal)
            log_newpage(&state->rs_new_rel->rd_node,
                        MAIN_FORKNUM,
                        state->rs_blockno,
                        state->rs_buffer);
        RelationOpenSmgr(state->rs_new_rel);
        smgrextend(state->rs_new_rel->rd_smgr, MAIN_FORKNUM, state->rs_blockno,
                   (char *) state->rs_buffer, true);
    }

    /*
     * If the rel isn't temp, must fsync before commit.  We use heap_sync to
     * ensure that the toast table gets fsync'd too.
     *
     * It's obvious that we must do this when not WAL-logging.  It's less
     * obvious that we have to do it even if we did WAL-log the pages.  The
     * reason is the same as in tablecmds.c's copy_relation_data(): we're
     * writing data that's not in shared buffers, and so a CHECKPOINT
     * occurring during the rewriteheap operation won't have fsync'd data we
     * wrote before the checkpoint.
     */
    if (!state->rs_new_rel->rd_istemp)
        heap_sync(state->rs_new_rel);

    /* Deleting the context frees everything */
    MemoryContextDelete(state->rs_cxt);
}

/*
 * Add a tuple to the new heap.
 *
 * Visibility information is copied from the original tuple, except that
 * we "freeze" very-old tuples.  Note that since we scribble on new_tuple,
 * it had better be temp storage not a pointer to the original tuple.
 *
 * state		opaque state as returned by begin_heap_rewrite
 * old_tuple	original tuple in the old heap
 * new_tuple	new, rewritten tuple to be inserted to new heap
 */
void
rewrite_heap_tuple(RewriteState state,
                   HeapTuple old_tuple, HeapTuple new_tuple)
{
    MemoryContext old_cxt;
    ItemPointerData old_tid;
    TidHashKey  hashkey;
    bool        found;
    bool        free_new;

    old_cxt = MemoryContextSwitchTo(state->rs_cxt);

    /*
     * Copy the original tuple's visibility information into new_tuple.
     *
     * XXX we might later need to copy some t_infomask2 bits, too?  Right
     * now, we intentionally clear the HOT status bits.
     */
    memcpy(&new_tuple->t_data->t_choice.t_heap,
           &old_tuple->t_data->t_choice.t_heap,
           sizeof(HeapTupleFields));

    new_tuple->t_data->t_infomask &= ~HEAP_XACT_MASK;
    new_tuple->t_data->t_infomask2 &= ~HEAP2_XACT_MASK;
    new_tuple->t_data->t_infomask |=
        old_tuple->t_data->t_infomask & HEAP_XACT_MASK;

    /*
     * While we have our hands on the tuple, we may as well freeze any
     * very-old xmin or xmax, so that future VACUUM effort can be saved.
     *
     * Note we abuse heap_freeze_tuple() a bit here, since it's expecting to
     * be given a pointer to a tuple in a disk buffer.  It happens though
     * that we can get the right things to happen by passing InvalidBuffer
     * for the buffer.
     */
    heap_freeze_tuple(new_tuple->t_data, state->rs_freeze_xid, InvalidBuffer);

    /*
     * Invalid ctid means that ctid should point to the tuple itself.  We'll
     * override it later if the tuple is part of an update chain.
     */
    ItemPointerSetInvalid(&new_tuple->t_data->t_ctid);

    /*
     * If the tuple has been updated, check the old-to-new mapping hash
     * table.
     */
    if (!(old_tuple->t_data->t_infomask & (HEAP_XMAX_INVALID |
                                           HEAP_IS_LOCKED)) &&
        !(ItemPointerEquals(&(old_tuple->t_self),
                            &(old_tuple->t_data->t_ctid))))
    {
        OldToNewMapping mapping;

        memset(&hashkey, 0, sizeof(hashkey));
        hashkey.xmin = HeapTupleHeaderGetXmax(old_tuple->t_data);
        hashkey.tid = old_tuple->t_data->t_ctid;

        mapping = (OldToNewMapping)
            hash_search(state->rs_old_new_tid_map, &hashkey,
                        HASH_FIND, NULL);

        if (mapping != NULL)
        {
            /*
             * We've already copied the tuple that t_ctid points to, so we
             * can set the ctid of this tuple to point to the new location,
             * and insert it right away.
             */
            new_tuple->t_data->t_ctid = mapping->new_tid;

            /* We don't need the mapping entry anymore */
            hash_search(state->rs_old_new_tid_map, &hashkey,
                        HASH_REMOVE, &found);
            Assert(found);
        }
        else
        {
            /*
             * We haven't seen the tuple t_ctid points to yet.  Stash this
             * tuple into unresolved_tups to be written later.
             */
            UnresolvedTup unresolved;

            unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
                                     HASH_ENTER, &found);
            Assert(!found);

            unresolved->old_tid = old_tuple->t_self;
            unresolved->tuple = heap_copytuple(new_tuple);

            /*
             * We can't do anything more now, since we don't know where the
             * tuple will be written.
             */
            MemoryContextSwitchTo(old_cxt);
            return;
        }
    }

    /*
     * Now we will write the tuple, and then check to see if it is the B
     * tuple in any new or known pair.  When we resolve a known pair, we
     * will be able to write that pair's A tuple, and then we have to check
     * if it resolves some other pair.  Hence, we need a loop here.
     */
    old_tid = old_tuple->t_self;
    free_new = false;

    for (;;)
    {
        ItemPointerData new_tid;

        /* Insert the tuple and find out where it's put in new_heap */
        raw_heap_insert(state, new_tuple);
        new_tid = new_tuple->t_self;

        /*
         * If the tuple is the updated version of a row, and the prior
         * version wouldn't be DEAD yet, then we need to either resolve the
         * prior version (if it's waiting in rs_unresolved_tups), or make an
         * entry in rs_old_new_tid_map (so we can resolve it when we do see
         * it).  The previous tuple's xmax would equal this one's xmin, so
         * it's RECENTLY_DEAD if and only if the xmin is not before
         * OldestXmin.
         */
        if ((new_tuple->t_data->t_infomask & HEAP_UPDATED) &&
            !TransactionIdPrecedes(HeapTupleHeaderGetXmin(new_tuple->t_data),
                                   state->rs_oldest_xmin))
        {
            /*
             * Okay, this is B in an update pair.  See if we've seen A.
             */
            UnresolvedTup unresolved;

            memset(&hashkey, 0, sizeof(hashkey));
            hashkey.xmin = HeapTupleHeaderGetXmin(new_tuple->t_data);
            hashkey.tid = old_tid;

            unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
                                     HASH_FIND, NULL);

            if (unresolved != NULL)
            {
                /*
                 * We have seen and memorized the previous tuple already.
                 * Now that we know where we inserted the tuple its t_ctid
                 * points to, fix its t_ctid and insert it to the new heap.
                 */
                if (free_new)
                    heap_freetuple(new_tuple);
                new_tuple = unresolved->tuple;
                free_new = true;
                old_tid = unresolved->old_tid;
                new_tuple->t_data->t_ctid = new_tid;

                /*
                 * We don't need the hash entry anymore, but don't free its
                 * tuple just yet.
                 */
                hash_search(state->rs_unresolved_tups, &hashkey,
                            HASH_REMOVE, &found);
                Assert(found);

                /* loop back to insert the previous tuple in the chain */
                continue;
            }
            else
            {
                /*
                 * Remember the new tid of this tuple.  We'll use it to set
                 * the ctid when we find the previous tuple in the chain.
                 */
                OldToNewMapping mapping;

                mapping = hash_search(state->rs_old_new_tid_map, &hashkey,
                                      HASH_ENTER, &found);
                Assert(!found);

                mapping->new_tid = new_tid;
            }
        }

        /* Done with this (chain of) tuples, for now */
        if (free_new)
            heap_freetuple(new_tuple);
        break;
    }

    MemoryContextSwitchTo(old_cxt);
}

/*
 * Register a dead tuple with an ongoing rewrite.  Dead tuples are not
 * copied to the new table, but we still make note of them so that we
 * can release some resources earlier.
 */
void
rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
{
    /*
     * If we have already seen an earlier tuple in the update chain that
     * points to this tuple, let's forget about that earlier tuple.  It's in
     * fact dead as well, our simple xmax < OldestXmin test in
     * HeapTupleSatisfiesVacuum just wasn't enough to detect it.  It happens
     * when xmin of a tuple is greater than xmax, which sounds
     * counter-intuitive but is perfectly valid.
     *
     * We don't bother to try to detect the situation the other way round,
     * when we encounter the dead tuple first and then the recently dead one
     * that points to it.  If that happens, we'll have some unmatched entries
     * in the UnresolvedTups hash table at the end.  That can happen anyway,
     * because a vacuum might have removed the dead tuple in the chain
     * before we saw it.
     */
    UnresolvedTup unresolved;
    TidHashKey  hashkey;
    bool        found;

    memset(&hashkey, 0, sizeof(hashkey));
    hashkey.xmin = HeapTupleHeaderGetXmin(old_tuple->t_data);
    hashkey.tid = old_tuple->t_self;

    unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
                             HASH_FIND, NULL);

    if (unresolved != NULL)
    {
        /* Need to free the contained tuple as well as the hashtable entry */
        heap_freetuple(unresolved->tuple);
        hash_search(state->rs_unresolved_tups, &hashkey,
                    HASH_REMOVE, &found);
        Assert(found);
    }
}

/*
 * Insert a tuple to the new relation.  This has to track heap_insert and
 * its subsidiary functions!
 *
 * t_self of the tuple is set to the new TID of the tuple.  If t_ctid of the
 * tuple is invalid on entry, it's replaced with the new TID as well (in
 * the inserted data only, not in the caller's copy).
 */
static void
raw_heap_insert(RewriteState state, HeapTuple tup)
{
    Page        page = state->rs_buffer;
    Size        pageFreeSpace,
                saveFreeSpace;
    Size        len;
    OffsetNumber newoff;
    HeapTuple   heaptup;

    /*
     * If the new tuple is too big for storage or contains already toasted
     * out-of-line attributes from some other relation, invoke the toaster.
     *
     * Note: below this point, heaptup is the data we actually intend to
     * store into the relation; tup is the caller's original untoasted data.
     */
    if (state->rs_new_rel->rd_rel->relkind == RELKIND_TOASTVALUE)
    {
        /* toast table entries should never be recursively toasted */
        Assert(!HeapTupleHasExternal(tup));
        heaptup = tup;
    }
    else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
        heaptup = toast_insert_or_update(state->rs_new_rel, tup, NULL,
                                         HEAP_INSERT_SKIP_FSM |
                                         (state->rs_use_wal ?
                                          0 : HEAP_INSERT_SKIP_WAL));
    else
        heaptup = tup;

    len = MAXALIGN(heaptup->t_len);     /* be conservative */

    /*
     * If we're going to fail for oversize tuple, do it right away
     */
    if (len > MaxHeapTupleSize)
        ereport(ERROR,
                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                 errmsg("row is too big: size %lu, maximum size %lu",
                        (unsigned long) len,
                        (unsigned long) MaxHeapTupleSize)));

    /* Compute desired extra freespace due to fillfactor option */
    saveFreeSpace = RelationGetTargetPageFreeSpace(state->rs_new_rel,
                                                   HEAP_DEFAULT_FILLFACTOR);

    /* Now we can check to see if there's enough free space already. */
    if (state->rs_buffer_valid)
    {
        pageFreeSpace = PageGetHeapFreeSpace(page);

        if (len + saveFreeSpace > pageFreeSpace)
        {
            /* Doesn't fit, so write out the existing page */

            /* XLOG stuff */
            if (state->rs_use_wal)
                log_newpage(&state->rs_new_rel->rd_node,
                            MAIN_FORKNUM,
                            state->rs_blockno,
                            page);

            /*
             * Now write the page.  We say isTemp = true even if it's not a
             * temp table, because there's no need for smgr to schedule an
             * fsync for this write; we'll do it ourselves in
             * end_heap_rewrite.
             */
            RelationOpenSmgr(state->rs_new_rel);
            smgrextend(state->rs_new_rel->rd_smgr, MAIN_FORKNUM,
                       state->rs_blockno, (char *) page, true);

            state->rs_blockno++;
            state->rs_buffer_valid = false;
        }
    }

    if (!state->rs_buffer_valid)
    {
        /* Initialize a new empty page */
        PageInit(page, BLCKSZ, 0);
        state->rs_buffer_valid = true;
    }

    /* And now we can insert the tuple into the page */
    newoff = PageAddItem(page, (Item) heaptup->t_data, len,
                         InvalidOffsetNumber, false, true);
    if (newoff == InvalidOffsetNumber)
        elog(ERROR, "failed to add tuple");

    /* Update caller's t_self to the actual position where it was stored */
    ItemPointerSet(&(tup->t_self), state->rs_blockno, newoff);

    /*
     * Insert the correct position into CTID of the stored tuple, too, if
     * the caller didn't supply a valid CTID.
     */
    if (!ItemPointerIsValid(&tup->t_data->t_ctid))
    {
        ItemId      newitemid;
        HeapTupleHeader onpage_tup;

        newitemid = PageGetItemId(page, newoff);
        onpage_tup = (HeapTupleHeader) PageGetItem(page, newitemid);

        onpage_tup->t_ctid = tup->t_self;
    }

    /* If heaptup is a private copy, release it. */
    if (heaptup != tup)
        heap_freetuple(heaptup);
}