1 /*-------------------------------------------------------------------------
4 * System catalog cache for tuples matching a key.
6 * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.125 2005/10/15 02:49:30 momjian Exp $
13 *-------------------------------------------------------------------------
17 #include "access/genam.h"
18 #include "access/hash.h"
19 #include "access/heapam.h"
20 #include "access/valid.h"
21 #include "catalog/pg_opclass.h"
22 #include "catalog/pg_operator.h"
23 #include "catalog/pg_type.h"
24 #include "catalog/indexing.h"
25 #include "miscadmin.h"
27 #include "storage/ipc.h" /* for on_proc_exit */
29 #include "utils/builtins.h"
30 #include "utils/fmgroids.h"
31 #include "utils/catcache.h"
32 #include "utils/memutils.h"
33 #include "utils/relcache.h"
34 #include "utils/resowner.h"
35 #include "utils/syscache.h"
38 /* #define CACHEDEBUG */ /* turns DEBUG elogs on */
41 * Constants related to size of the catcache.
43 * NCCBUCKETS must be a power of two and must be less than 64K (because
44 * SharedInvalCatcacheMsg crams hash indexes into a uint16 field). In
45 * practice it should be a lot less, anyway, to avoid chewing up too much
46 * space on hash bucket headers.
48 * MAXCCTUPLES could be as small as a few hundred, if per-backend memory
49 * consumption is at a premium.
51 #define NCCBUCKETS 256 /* Hash buckets per CatCache */
52 #define MAXCCTUPLES 5000 /* Maximum # of tuples in all caches */
55 * Given a hash value and the size of the hash table, find the bucket
56 * in which the hash value belongs. Since the hash table must contain
57 * a power-of-2 number of elements, this is a simple bitmask.
59 #define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
63 * variables, macros and other stuff
67 #define CACHE1_elog(a,b) elog(a,b)
68 #define CACHE2_elog(a,b,c) elog(a,b,c)
69 #define CACHE3_elog(a,b,c,d) elog(a,b,c,d)
70 #define CACHE4_elog(a,b,c,d,e) elog(a,b,c,d,e)
71 #define CACHE5_elog(a,b,c,d,e,f) elog(a,b,c,d,e,f)
72 #define CACHE6_elog(a,b,c,d,e,f,g) elog(a,b,c,d,e,f,g)
74 #define CACHE1_elog(a,b)
75 #define CACHE2_elog(a,b,c)
76 #define CACHE3_elog(a,b,c,d)
77 #define CACHE4_elog(a,b,c,d,e)
78 #define CACHE5_elog(a,b,c,d,e,f)
79 #define CACHE6_elog(a,b,c,d,e,f,g)
82 /* Cache management header --- pointer is NULL until created */
83 static CatCacheHeader *CacheHdr = NULL;
86 static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
88 static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache,
92 static void CatCachePrintStats(void);
94 static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
95 static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
96 static void CatalogCacheInitializeCache(CatCache *cache);
97 static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
98 uint32 hashValue, Index hashIndex,
100 static void CatalogCacheCleanup(CatCTup *savect);
101 static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys);
105 * internal support functions
109 * Look up the hash and equality functions for system types that are used
110 * as cache key fields.
112 * XXX this should be replaced by catalog lookups,
113 * but that seems to pose considerable risk of circularity...
/*
 * GetCCHashEqFuncs
 *
 * Map a catcache key column's type OID to the hash function pointer and
 * the equality-operator procedure OID used to compare keys of that type.
 * Only a hard-wired list of system types is supported; any other keytype
 * is a FATAL error (the NULL/InvalidOid stores after the elog exist only
 * to silence compiler warnings, since elog(FATAL) does not return).
 *
 * NOTE(review): the switch/case scaffolding and several eqfunc
 * assignments are elided in this extract; only representative
 * hashfunc/eqfunc assignments are visible below.
 */
116 GetCCHashEqFuncs(Oid keytype, PGFunction *hashfunc, RegProcedure *eqfunc)
121 *hashfunc = hashchar;
125 *hashfunc = hashchar;
129 *hashfunc = hashname;
133 *hashfunc = hashint2;
137 *hashfunc = hashint2vector;
138 *eqfunc = F_INT2VECTOREQ;
141 *hashfunc = hashint4;
145 *hashfunc = hashtext;
150 case REGPROCEDUREOID:
159 *hashfunc = hashoidvector;
160 *eqfunc = F_OIDVECTOREQ;
/* unsupported key type: report and bail out */
163 elog(FATAL, "type %u not supported as catcache key", keytype);
164 *hashfunc = NULL; /* keep compiler quiet */
165 *eqfunc = InvalidOid;
171 * CatalogCacheComputeHashValue
173 * Compute the hash value associated with a given set of lookup keys
/*
 * CatalogCacheComputeHashValue
 *
 * Compute a uint32 hash over the first nkeys scan-key arguments, by
 * calling each key column's configured hash function and combining the
 * per-key hashes with distinct left shifts (9/6/3/0 bits for keys 4..1)
 * so that equal values in different key positions hash differently.
 * Presumably the elided switch falls through from case 4 down to case 1,
 * per the upstream coding pattern — the case labels are not visible here.
 */
176 CatalogCacheComputeHashValue(CatCache *cache, int nkeys, ScanKey cur_skey)
178 uint32 hashValue = 0;
180 CACHE4_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
189 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
190 cur_skey[3].sk_argument)) << 9;
194 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
195 cur_skey[2].sk_argument)) << 6;
199 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
200 cur_skey[1].sk_argument)) << 3;
204 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
205 cur_skey[0].sk_argument));
/* nkeys outside 1..4 is a caller bug */
208 elog(FATAL, "wrong number of hash keys: %d", nkeys);
216 * CatalogCacheComputeTupleHashValue
218 * Compute the hash value associated with a given tuple to be cached
/*
 * CatalogCacheComputeTupleHashValue
 *
 * Compute the hash value for a tuple that is about to be cached: build a
 * scankey from the tuple's key columns (treating the OID system attribute
 * specially, since it is not fetched by ordinary attribute access), then
 * delegate to CatalogCacheComputeHashValue.  The switch presumably falls
 * through from the highest key number down to key 1; the fallthrough
 * labels and the fastgetattr branches of each ?: are elided here.
 */
221 CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
223 ScanKeyData cur_skey[4];
226 /* Copy pre-initialized overhead data for scankey */
227 memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
229 /* Now extract key fields from tuple, insert into scankey */
230 switch (cache->cc_nkeys)
233 cur_skey[3].sk_argument =
234 (cache->cc_key[3] == ObjectIdAttributeNumber)
235 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
243 cur_skey[2].sk_argument =
244 (cache->cc_key[2] == ObjectIdAttributeNumber)
245 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
253 cur_skey[1].sk_argument =
254 (cache->cc_key[1] == ObjectIdAttributeNumber)
255 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
263 cur_skey[0].sk_argument =
264 (cache->cc_key[0] == ObjectIdAttributeNumber)
265 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
273 elog(FATAL, "wrong number of hash keys: %d", cache->cc_nkeys);
277 return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
281 #ifdef CATCACHE_STATS
/*
 * CatCachePrintStats
 *
 * Debug-only (CATCACHE_STATS): dump per-cache and aggregate hit/load/
 * invalidation counters at DEBUG2.  Registered as an on_proc_exit
 * callback from InitCatCache.  The derived quantities in the elogs are:
 * hits = positive + negative hits; loads = searches - hits, split into
 * newloads and (searches - hits - newloads) failed loads.
 */
284 CatCachePrintStats(void)
287 long cc_searches = 0;
289 long cc_neg_hits = 0;
290 long cc_newloads = 0;
292 long cc_discards = 0;
293 long cc_lsearches = 0;
296 elog(DEBUG2, "catcache stats dump: %d/%d tuples in catcaches",
297 CacheHdr->ch_ntup, CacheHdr->ch_maxtup);
299 for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
301 if (cache->cc_ntup == 0 && cache->cc_searches == 0)
302 continue; /* don't print unused caches */
303 elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards, %ld lsrch, %ld lhits",
310 cache->cc_hits + cache->cc_neg_hits,
312 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
313 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
/* accumulate totals across all caches for the summary line */
318 cc_searches += cache->cc_searches;
319 cc_hits += cache->cc_hits;
320 cc_neg_hits += cache->cc_neg_hits;
321 cc_newloads += cache->cc_newloads;
322 cc_invals += cache->cc_invals;
323 cc_discards += cache->cc_discards;
324 cc_lsearches += cache->cc_lsearches;
325 cc_lhits += cache->cc_lhits;
327 elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards, %ld lsrch, %ld lhits",
332 cc_hits + cc_neg_hits,
334 cc_searches - cc_hits - cc_neg_hits - cc_newloads,
335 cc_searches - cc_hits - cc_neg_hits,
341 #endif   /* CATCACHE_STATS */
347 * Unlink and delete the given cache entry
349 * NB: if it is a member of a CatCList, the CatCList is deleted too.
350 * Both the cache entry and the list had better have zero refcount.
/*
 * Caller must have verified refcount == 0 (asserted below).  Removal from
 * both the global LRU list and the hash-bucket chain keeps the entry from
 * being found again; the tuple's palloc'd data is then freed.  The elided
 * tail presumably frees the CatCTup itself and decrements the tuple
 * counters — not visible in this extract.
 */
353 CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
355 Assert(ct->refcount == 0);
356 Assert(ct->my_cache == cache);
/* a member of a CatCList drags the whole list down with it */
359 CatCacheRemoveCList(cache, ct->c_list);
361 /* delink from linked lists */
362 DLRemove(&ct->lrulist_elem);
363 DLRemove(&ct->cache_elem);
365 /* free associated tuple data */
366 if (ct->tuple.t_data != NULL)
367 pfree(ct->tuple.t_data);
375 * CatCacheRemoveCList
377 * Unlink and delete the given cache list entry
/*
 * Caller must have verified refcount == 0.  Each member CatCTup is
 * detached from the list (the elided loop body presumably clears
 * ct->c_list and may remove now-dead members); then the list itself is
 * delinked from the cache's list-of-lists and its dummy tuple data freed.
 */
380 CatCacheRemoveCList(CatCache *cache, CatCList *cl)
384 Assert(cl->refcount == 0);
385 Assert(cl->my_cache == cache);
387 /* delink from member tuples */
388 for (i = cl->n_members; --i >= 0;)
390 CatCTup *ct = cl->members[i];
392 Assert(ct->c_list == cl);
396 /* delink from linked list */
397 DLRemove(&cl->cache_elem);
399 /* free associated tuple data */
400 if (cl->tuple.t_data != NULL)
401 pfree(cl->tuple.t_data);
407 * CatalogCacheIdInvalidate
409 * Invalidate entries in the specified cache, given a hash value and
410 * item pointer. Positive entries are deleted if they match the item
411 * pointer. Negative entries must be deleted if they match the hash
412 * value (since we do not have the exact key of the tuple that's being
413 * inserted). But this should only rarely result in loss of a cache
414 * entry that could have been kept.
416 * Note that it's not very relevant whether the tuple identified by
417 * the item pointer is being inserted or deleted. We don't expect to
418 * find matching positive entries in the one case, and we don't expect
419 * to find matching negative entries in the other; but we will do the
420 * right things in any case.
422 * This routine is only quasi-public: it should only be used by inval.c.
/*
 * CatalogCacheIdInvalidate
 *
 * Process a shared-inval message for cache `cacheId`: zap every CatCList
 * in that cache (in-use lists are only marked dead — see refcount check),
 * then walk the one hash bucket selected by the message's hash value and
 * remove (or mark dead, if pinned) each entry whose item pointer matches,
 * plus negative entries matching on hash value alone.  Quasi-public:
 * intended to be called only from inval.c.
 */
425 CatalogCacheIdInvalidate(int cacheId,
434 Assert(ItemPointerIsValid(pointer));
435 CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: called");
438 * inspect caches to find the proper cache
440 for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
446 if (cacheId != ccp->id)
450 * We don't bother to check whether the cache has finished
451 * initialization yet; if not, there will be no entries in it so no
456 * Invalidate *all* CatCLists in this cache; it's too hard to tell
457 * which searches might still be correct, so just zap 'em all.
459 for (elt = DLGetHead(&ccp->cc_lists); elt; elt = nextelt)
461 CatCList *cl = (CatCList *) DLE_VAL(elt);
/* fetch successor before possibly deleting the current element */
463 nextelt = DLGetSucc(elt);
465 if (cl->refcount > 0)
468 CatCacheRemoveCList(ccp, cl);
472 * inspect the proper hash bucket for tuple matches
474 hashIndex = HASH_INDEX(hashValue, ccp->cc_nbuckets);
476 for (elt = DLGetHead(&ccp->cc_bucket[hashIndex]); elt; elt = nextelt)
478 CatCTup *ct = (CatCTup *) DLE_VAL(elt);
480 nextelt = DLGetSucc(elt);
482 if (hashValue != ct->hash_value)
483 continue; /* ignore non-matching hash values */
486 ItemPointerEquals(pointer, &ct->tuple.t_self))
/* pinned entries can't be freed now; presumably marked dead instead */
488 if (ct->refcount > 0 ||
489 (ct->c_list && ct->c_list->refcount > 0))
492 /* list, if any, was marked dead above */
493 Assert(ct->c_list == NULL || ct->c_list->dead);
496 CatCacheRemoveCTup(ccp, ct);
497 CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: invalidated");
498 #ifdef CATCACHE_STATS
501 /* could be multiple matches, so keep looking! */
504 break; /* need only search this one cache */
508 /* ----------------------------------------------------------------
510 * ----------------------------------------------------------------
515 * Standard routine for creating cache context if it doesn't exist yet
517 * There are a lot of places (probably far more than necessary) that check
518 * whether CacheMemoryContext exists yet and want to create it if not.
519 * We centralize knowledge of exactly how to create it here.
/*
 * CreateCacheMemoryContext
 *
 * Create CacheMemoryContext (as a child of TopMemoryContext) if it does
 * not already exist.  Idempotent: the existence check makes redundant
 * calls harmless, which is why many callers invoke this defensively.
 */
522 CreateCacheMemoryContext(void)
525 * Purely for paranoia, check that context doesn't exist; caller probably
528 if (!CacheMemoryContext)
529 CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
530 "CacheMemoryContext",
531 ALLOCSET_DEFAULT_MINSIZE,
532 ALLOCSET_DEFAULT_INITSIZE,
533 ALLOCSET_DEFAULT_MAXSIZE);
540 * Clean up catcaches at end of main transaction (either commit or abort)
542 * As of PostgreSQL 8.1, catcache pins should get released by the
543 * ResourceOwner mechanism. This routine is just a debugging
544 * cross-check that no pins remain.
/*
 * AtEOXact_CatCache
 *
 * End-of-transaction sanity check: since 8.1 catcache pins are released
 * by the ResourceOwner mechanism, so this is assert-only code verifying
 * that no CatCList or CatCTup still carries a nonzero refcount.  The
 * whole body is compiled away unless USE_ASSERT_CHECKING is defined;
 * the isCommit parameter is presumably unused in that case.
 */
547 AtEOXact_CatCache(bool isCommit)
549 #ifdef USE_ASSERT_CHECKING
555 /* Check CatCLists */
556 for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
558 for (elt = DLGetHead(&ccp->cc_lists); elt; elt = DLGetSucc(elt))
560 CatCList *cl = (CatCList *) DLE_VAL(elt);
/* magic-number check guards against stray pointers */
562 Assert(cl->cl_magic == CL_MAGIC);
563 Assert(cl->refcount == 0);
568 /* Check individual tuples */
569 for (elt = DLGetHead(&CacheHdr->ch_lrulist); elt; elt = DLGetSucc(elt))
571 CatCTup *ct = (CatCTup *) DLE_VAL(elt);
573 Assert(ct->ct_magic == CT_MAGIC);
574 Assert(ct->refcount == 0);
584 * Reset one catalog cache to empty.
586 * This is not very efficient if the target cache is nearly empty.
587 * However, it shouldn't need to be efficient; we don't invoke it often.
/*
 * ResetCatalogCache
 *
 * Empty one catcache entirely: remove every CatCList, then every CatCTup
 * in every hash bucket.  Entries that are still pinned (refcount > 0,
 * either on the entry or on its parent list) cannot be freed immediately
 * and are presumably marked dead instead, to be reaped when released.
 */
590 ResetCatalogCache(CatCache *cache)
596 /* Remove each list in this cache, or at least mark it dead */
597 for (elt = DLGetHead(&cache->cc_lists); elt; elt = nextelt)
599 CatCList *cl = (CatCList *) DLE_VAL(elt);
/* fetch successor before the current element may be deleted */
601 nextelt = DLGetSucc(elt);
603 if (cl->refcount > 0)
606 CatCacheRemoveCList(cache, cl);
609 /* Remove each tuple in this cache, or at least mark it dead */
610 for (i = 0; i < cache->cc_nbuckets; i++)
612 for (elt = DLGetHead(&cache->cc_bucket[i]); elt; elt = nextelt)
614 CatCTup *ct = (CatCTup *) DLE_VAL(elt);
616 nextelt = DLGetSucc(elt);
618 if (ct->refcount > 0 ||
619 (ct->c_list && ct->c_list->refcount > 0))
622 /* list, if any, was marked dead above */
623 Assert(ct->c_list == NULL || ct->c_list->dead);
626 CatCacheRemoveCTup(cache, ct);
627 #ifdef CATCACHE_STATS
637 * Reset all caches when a shared cache inval event forces it
/*
 * ResetCatalogCaches
 *
 * Flush every catcache in response to a shared-cache-invalidation reset
 * event: simply walk the cache-header chain and reset each cache.
 */
640 ResetCatalogCaches(void)
644 CACHE1_elog(DEBUG2, "ResetCatalogCaches called");
646 for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
647 ResetCatalogCache(cache);
649 CACHE1_elog(DEBUG2, "end of ResetCatalogCaches call");
653 * CatalogCacheFlushRelation
655 * This is called by RelationFlushRelation() to clear out cached information
656 * about a relation being dropped. (This could be a DROP TABLE command,
657 * or a temp table being dropped at end of transaction, or a table created
658 * during the current transaction that is being dropped because of abort.)
659 * Remove all cache entries relevant to the specified relation OID.
661 * A special case occurs when relId is itself one of the cacheable system
662 * tables --- although those'll never be dropped, they can get flushed from
663 * the relcache (VACUUM causes this, for example). In that case we need
664 * to flush all cache entries that came from that table. (At one point we
665 * also tried to force re-execution of CatalogCacheInitializeCache for
666 * the cache(s) on that table. This is a bad idea since it leads to all
667 * kinds of trouble if a cache flush occurs while loading cache entries.
668 * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
669 * rather than relying on the relcache to keep a tupdesc for us. Of course
670 * this assumes the tupdesc of a cachable system table will not change...)
/*
 * CatalogCacheFlushRelation
 *
 * Drop all cache entries that pertain to relation `relId` (see the block
 * comment above for the rationale).  Three cases per cache: (1) the cache
 * stores tuples OF the target relation itself -> reset the whole cache;
 * (2) the cache has no relation-OID key column (cc_reloidattr == 0) ->
 * skip it; (3) otherwise scan every bucket and remove entries whose
 * relation-OID key equals relId, marking pinned ones dead instead.
 */
673 CatalogCacheFlushRelation(Oid relId)
677 CACHE2_elog(DEBUG2, "CatalogCacheFlushRelation called for %u", relId);
679 for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
683 /* We can ignore uninitialized caches, since they must be empty */
684 if (cache->cc_tupdesc == NULL)
687 /* Does this cache store tuples of the target relation itself? */
688 if (cache->cc_tupdesc->attrs[0]->attrelid == relId)
690 /* Yes, so flush all its contents */
691 ResetCatalogCache(cache);
695 /* Does this cache store tuples associated with relations at all? */
696 if (cache->cc_reloidattr == 0)
697 continue; /* nope, leave it alone */
699 /* Yes, scan the tuples and remove those related to relId */
700 for (i = 0; i < cache->cc_nbuckets; i++)
705 for (elt = DLGetHead(&cache->cc_bucket[i]); elt; elt = nextelt)
707 CatCTup *ct = (CatCTup *) DLE_VAL(elt);
/* grab successor first: current entry may be removed below */
710 nextelt = DLGetSucc(elt);
713 * Negative entries are never considered related to a rel,
714 * even if the rel is part of their lookup key.
/* OID system attribute needs special extraction, unlike user columns */
719 if (cache->cc_reloidattr == ObjectIdAttributeNumber)
720 tupRelid = HeapTupleGetOid(&ct->tuple);
726 DatumGetObjectId(fastgetattr(&ct->tuple,
727 cache->cc_reloidattr,
733 if (tupRelid == relId)
735 if (ct->refcount > 0 ||
736 (ct->c_list && ct->c_list->refcount > 0))
739 /* parent list must be considered dead too */
741 ct->c_list->dead = true;
744 CatCacheRemoveCTup(cache, ct);
745 #ifdef CATCACHE_STATS
753 CACHE1_elog(DEBUG2, "end of CatalogCacheFlushRelation call");
759 * This allocates and initializes a cache for a system catalog relation.
760 * Actually, the cache is only partially initialized to avoid opening the
761 * relation. The relation will be opened and the rest of the cache
762 * structure initialized on the first access.
765 #define InitCatCache_DEBUG2 \
767 elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
768 cp->cc_reloid, cp->cc_indexoid, cp->id, \
769 cp->cc_nkeys, cp->cc_nbuckets); \
772 #define InitCatCache_DEBUG2
/*
 * NOTE(review): this is the body of InitCatCache; its signature is elided
 * above this point in the extract (per the prototype context it takes the
 * cache id, relation/index OIDs, reloidattr, nkeys and the key attribute
 * numbers — confirm against the full source).  It partially initializes
 * a new CatCache: allocates it in CacheMemoryContext, fills the fields
 * known without opening the relation, and links it into CacheHdr's chain.
 * The relation itself is opened lazily by CatalogCacheInitializeCache.
 */
784 MemoryContext oldcxt;
788 * first switch to the cache context so our allocations do not vanish at
789 * the end of a transaction
791 if (!CacheMemoryContext)
792 CreateCacheMemoryContext();
794 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
797 * if first time through, initialize the cache group header, including
798 * global LRU list header
800 if (CacheHdr == NULL)
802 CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
803 CacheHdr->ch_caches = NULL;
804 CacheHdr->ch_ntup = 0;
805 CacheHdr->ch_maxtup = MAXCCTUPLES;
806 DLInitList(&CacheHdr->ch_lrulist);
807 #ifdef CATCACHE_STATS
/* dump statistics at backend exit when stats are enabled */
808 on_proc_exit(CatCachePrintStats, 0);
813 * allocate a new cache structure
815 * Note: we assume zeroing initializes the Dllist headers correctly
/* header and bucket array are allocated as one chunk */
817 cp = (CatCache *) palloc0(sizeof(CatCache) + NCCBUCKETS * sizeof(Dllist));
820 * initialize the cache's relation information for the relation
821 * corresponding to this cache, and initialize some of the new cache's
822 * other internal fields. But don't open the relation yet.
825 cp->cc_relname = "(not known yet)";
826 cp->cc_reloid = reloid;
827 cp->cc_indexoid = indexoid;
828 cp->cc_relisshared = false; /* temporary */
829 cp->cc_tupdesc = (TupleDesc) NULL;
830 cp->cc_reloidattr = reloidattr;
832 cp->cc_nbuckets = NCCBUCKETS;
833 cp->cc_nkeys = nkeys;
834 for (i = 0; i < nkeys; ++i)
835 cp->cc_key[i] = key[i];
838 * new cache is initialized as far as we can go for now. print some
839 * debugging information, if appropriate.
844 * add completed cache to top of group header's list
846 cp->cc_next = CacheHdr->ch_caches;
847 CacheHdr->ch_caches = cp;
850 * back to the old context before we return...
852 MemoryContextSwitchTo(oldcxt);
858 * CatalogCacheInitializeCache
860 * This function does final initialization of a catcache: obtain the tuple
861 * descriptor and set up the hash and equality function links. We assume
862 * that the relcache entry can be opened at this point!
865 #define CatalogCacheInitializeCache_DEBUG1 \
866 elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
869 #define CatalogCacheInitializeCache_DEBUG2 \
871 if (cache->cc_key[i] > 0) { \
872 elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
873 i+1, cache->cc_nkeys, cache->cc_key[i], \
874 tupdesc->attrs[cache->cc_key[i] - 1]->atttypid); \
876 elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
877 i+1, cache->cc_nkeys, cache->cc_key[i]); \
881 #define CatalogCacheInitializeCache_DEBUG1
882 #define CatalogCacheInitializeCache_DEBUG2
/*
 * CatalogCacheInitializeCache
 *
 * Finish initializing a catcache on first use: open the catalog relation
 * (without lock — only the tupdesc is needed and it is assumed stable),
 * copy its tuple descriptor into CacheMemoryContext so the cache does not
 * depend on the relcache, record relname/relisshared, and set up per-key
 * hash functions, equality fmgr info, and scankey templates.  Setting
 * cc_tupdesc last marks the cache fully initialized, since callers test
 * cc_tupdesc == NULL to decide whether to call this function.
 */
886 CatalogCacheInitializeCache(CatCache *cache)
889 MemoryContext oldcxt;
893 CatalogCacheInitializeCache_DEBUG1;
896 * Open the relation without locking --- we only need the tupdesc, which
897 * we assume will never change ...
899 relation = heap_open(cache->cc_reloid, NoLock);
900 Assert(RelationIsValid(relation));
903 * switch to the cache context so our allocations do not vanish at the end
906 Assert(CacheMemoryContext != NULL);
908 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
911 * copy the relcache's tuple descriptor to permanent cache storage
913 tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
916 * save the relation's name and relisshared flag, too (cc_relname is used
917 * only for debugging purposes)
919 cache->cc_relname = pstrdup(RelationGetRelationName(relation));
920 cache->cc_relisshared = RelationGetForm(relation)->relisshared;
923 * return to the caller's memory context and close the rel
925 MemoryContextSwitchTo(oldcxt);
927 heap_close(relation, NoLock);
929 CACHE3_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
930 cache->cc_relname, cache->cc_nkeys);
933 * initialize cache's key information
935 for (i = 0; i < cache->cc_nkeys; ++i)
940 CatalogCacheInitializeCache_DEBUG2;
/* positive attnums are user columns; look up their type from the tupdesc */
942 if (cache->cc_key[i] > 0)
943 keytype = tupdesc->attrs[cache->cc_key[i] - 1]->atttypid;
/* the OID system attribute is the only supported system column key */
946 if (cache->cc_key[i] != ObjectIdAttributeNumber)
947 elog(FATAL, "only sys attr supported in caches is OID");
951 GetCCHashEqFuncs(keytype,
952 &cache->cc_hashfunc[i],
/* NAME keys get special C-string handling at lookup time */
955 cache->cc_isname[i] = (keytype == NAMEOID);
958 * Do equality-function lookup (we assume this won't need a catalog
959 * lookup for any supported type)
961 fmgr_info_cxt(eqfunc,
962 &cache->cc_skey[i].sk_func,
965 /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
966 cache->cc_skey[i].sk_attno = cache->cc_key[i];
968 /* Fill in sk_strategy as well --- always standard equality */
969 cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
970 cache->cc_skey[i].sk_subtype = InvalidOid;
972 CACHE4_elog(DEBUG2, "CatalogCacheInit %s %d %p",
979 * mark this cache fully initialized
981 cache->cc_tupdesc = tupdesc;
985 * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
987 * The only reason to call this routine is to ensure that the relcache
988 * has created entries for all the catalogs and indexes referenced by
989 * catcaches. Therefore, open the index too. An exception is the indexes
990 * on pg_am, which we don't use (cf. IndexScanOK).
/*
 * InitCatCachePhase2
 *
 * Force full initialization of the catcache and, except for the pg_am
 * caches (whose indexes are never used — cf. IndexScanOK), open the
 * supporting index once so the relcache builds an entry for it.  The
 * index_close call presumably follows the index_open but is elided here.
 */
993 InitCatCachePhase2(CatCache *cache)
995 if (cache->cc_tupdesc == NULL)
996 CatalogCacheInitializeCache(cache);
998 if (cache->id != AMOID &&
1003 idesc = index_open(cache->cc_indexoid);
1012 * This function checks for tuples that will be fetched by
1013 * IndexSupportInitialize() during relcache initialization for
1014 * certain system indexes that support critical syscaches.
1015 * We can't use an indexscan to fetch these, else we'll get into
1016 * infinite recursion. A plain heap scan will work, however.
1018 * Once we have completed relcache initialization (signaled by
1019 * criticalRelcachesBuilt), we don't have to worry anymore.
/*
 * IndexScanOK
 *
 * Decide whether a catcache miss may be satisfied by an index scan, or
 * must use a heap scan to avoid infinite recursion during relcache
 * bootstrap (see the block comment above).  Returns false to force a
 * heap scan for: pg_index searches before criticalRelcachesBuilt; all
 * pg_am searches (always — the table is tiny); and OPEROID lookups of
 * the hardwired OID-comparison operators during bootstrap.
 */
1022 IndexScanOK(CatCache *cache, ScanKey cur_skey)
1024 if (cache->id == INDEXRELID)
1027 * Since the OIDs of indexes aren't hardwired, it's painful to figure
1028 * out which is which. Just force all pg_index searches to be heap
1029 * scans while building the relcaches.
1031 if (!criticalRelcachesBuilt)
1034 else if (cache->id == AMOID ||
1035 cache->id == AMNAME)
1038 * Always do heap scans in pg_am, because it's so small there's not
1039 * much point in an indexscan anyway. We *must* do this when
1040 * initially building critical relcache entries, but we might as well
1041 * just always do it.
1045 else if (cache->id == OPEROID)
1047 if (!criticalRelcachesBuilt)
1049 /* Looking for an OID comparison function? */
1050 Oid lookup_oid = DatumGetObjectId(cur_skey[0].sk_argument);
1052 if (lookup_oid >= MIN_OIDCMP && lookup_oid <= MAX_OIDCMP)
1057 /* Normal case, allow index scan */
1064 * This call searches a system cache for a tuple, opening the relation
1065 * if necessary (on the first access to a particular cache).
1067 * The result is NULL if not found, or a pointer to a HeapTuple in
1068 * the cache. The caller must not modify the tuple, and must call
1069 * ReleaseCatCache() when done with it.
1071 * The search key values should be expressed as Datums of the key columns'
1072 * datatype(s). (Pass zeroes for any unused parameters.) As a special
1073 * exception, the passed-in key for a NAME column can be just a C string;
1074 * the caller need not go to the trouble of converting it to a fully
1078 SearchCatCache(CatCache *cache,
1084 ScanKeyData cur_skey[4];
1090 SysScanDesc scandesc;
1094 * one-time startup overhead for each cache
1096 if (cache->cc_tupdesc == NULL)
1097 CatalogCacheInitializeCache(cache);
1099 #ifdef CATCACHE_STATS
1100 cache->cc_searches++;
1104 * initialize the search key information
1106 memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
1107 cur_skey[0].sk_argument = v1;
1108 cur_skey[1].sk_argument = v2;
1109 cur_skey[2].sk_argument = v3;
1110 cur_skey[3].sk_argument = v4;
1113 * find the hash bucket in which to look for the tuple
1115 hashValue = CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
1116 hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1119 * scan the hash bucket until we find a match or exhaust our tuples
1121 for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);
1123 elt = DLGetSucc(elt))
1127 ct = (CatCTup *) DLE_VAL(elt);
1130 continue; /* ignore dead entries */
1132 if (ct->hash_value != hashValue)
1133 continue; /* quickly skip entry if wrong hash val */
1136 * see if the cached tuple matches our key.
1138 HeapKeyTest(&ct->tuple,
1147 * we found a match in the cache: move it to the front of the global
1148 * LRU list. We also move it to the front of the list for its
1149 * hashbucket, in order to speed subsequent searches. (The most
1150 * frequently accessed elements in any hashbucket will tend to be near
1151 * the front of the hashbucket's list.)
1153 DLMoveToFront(&ct->lrulist_elem);
1154 DLMoveToFront(&ct->cache_elem);
1157 * If it's a positive entry, bump its refcount and return it. If it's
1158 * negative, we can report failure to the caller.
1162 ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
1164 ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
1166 CACHE3_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
1167 cache->cc_relname, hashIndex);
1169 #ifdef CATCACHE_STATS
1177 CACHE3_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
1178 cache->cc_relname, hashIndex);
1180 #ifdef CATCACHE_STATS
1181 cache->cc_neg_hits++;
1189 * Tuple was not found in cache, so we have to try to retrieve it directly
1190 * from the relation. If found, we will add it to the cache; if not
1191 * found, we will add a negative cache entry instead.
1193 * NOTE: it is possible for recursive cache lookups to occur while reading
1194 * the relation --- for example, due to shared-cache-inval messages being
1195 * processed during heap_open(). This is OK. It's even possible for one
1196 * of those lookups to find and enter the very same tuple we are trying to
1197 * fetch here. If that happens, we will enter a second copy of the tuple
1198 * into the cache. The first copy will never be referenced again, and
1199 * will eventually age out of the cache, so there's no functional problem.
1200 * This case is rare enough that it's not worth expending extra cycles to
1203 relation = heap_open(cache->cc_reloid, AccessShareLock);
1205 scandesc = systable_beginscan(relation,
1207 IndexScanOK(cache, cur_skey),
1214 while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1216 ct = CatalogCacheCreateEntry(cache, ntp,
1217 hashValue, hashIndex,
1219 /* immediately set the refcount to 1 */
1220 ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
1222 ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
1223 break; /* assume only one match */
1226 systable_endscan(scandesc);
1228 heap_close(relation, AccessShareLock);
1231 * If tuple was not found, we need to build a negative cache entry
1232 * containing a fake tuple. The fake tuple has the correct key columns,
1233 * but nulls everywhere else.
1235 * In bootstrap mode, we don't build negative entries, because the cache
1236 * invalidation mechanism isn't alive and can't clear them if the tuple
1237 * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
1238 * cache inval for that.)
1242 if (IsBootstrapProcessingMode())
1245 ntp = build_dummy_tuple(cache, cache->cc_nkeys, cur_skey);
1246 ct = CatalogCacheCreateEntry(cache, ntp,
1247 hashValue, hashIndex,
1249 heap_freetuple(ntp);
1251 CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1252 cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1253 CACHE3_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
1254 cache->cc_relname, hashIndex);
1257 * We are not returning the negative entry to the caller, so leave its
1264 CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1265 cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1266 CACHE3_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
1267 cache->cc_relname, hashIndex);
1269 #ifdef CATCACHE_STATS
1270 cache->cc_newloads++;
1279 * Decrement the reference count of a catcache entry (releasing the
1280 * hold grabbed by a successful SearchCatCache).
1282 * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1283 * will be freed as soon as their refcount goes to zero. In combination
1284 * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1285 * to catch references to already-released catcache entries.
/*
 * ReleaseCatCache
 *
 * Drop one reference to a catcache entry previously returned by
 * SearchCatCache.  The CatCTup is recovered from the tuple pointer by
 * subtracting the embedded tuple's offset within the struct; the magic
 * number assert guards against being handed a non-cache tuple.  Once the
 * refcount reaches zero, an entry already marked dead (the elided
 * condition presumably tests ct->dead) is physically removed.
 */
1288 ReleaseCatCache(HeapTuple tuple)
1290 CatCTup *ct = (CatCTup *) (((char *) tuple) -
1291 offsetof(CatCTup, tuple));
1293 /* Safety checks to ensure we were handed a cache entry */
1294 Assert(ct->ct_magic == CT_MAGIC);
1295 Assert(ct->refcount > 0);
/* let the resource owner forget the pin it was remembering */
1298 ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);
1301 #ifndef CATCACHE_FORCE_RELEASE
1304 ct->refcount == 0 &&
1305 (ct->c_list == NULL || ct->c_list->refcount == 0))
1306 CatCacheRemoveCTup(ct->my_cache, ct);
1311 * SearchCatCacheList
1313 * Generate a list of all tuples matching a partial key (that is,
1314 * a key specifying just the first K of the cache's N key columns).
1316 * The caller must not modify the list object or the pointed-to tuples,
1317 * and must call ReleaseCatCacheList() when done with the list.
1320 SearchCatCacheList(CatCache *cache,
1327 ScanKeyData cur_skey[4];
1332 List *volatile ctlist;
1333 ListCell *ctlist_item;
1337 MemoryContext oldcxt;
1341 * one-time startup overhead for each cache
1343 if (cache->cc_tupdesc == NULL)
1344 CatalogCacheInitializeCache(cache);
1346 Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
/*
 * Tail of SearchCatCacheList(): find — or build by scanning the catalog —
 * a CatCList holding all tuples matching a partial key (nkeys < cc_nkeys).
 *
 * NOTE(review): this excerpt is line-sampled.  The function header, local
 * declarations, the PG_TRY/PG_CATCH scaffolding, and several interior lines
 * (loop conditions, some if-bodies) are not visible here; comments below
 * describe only what the visible fragments establish.
 */
1348 #ifdef CATCACHE_STATS
1349 cache->cc_lsearches++;
/*
 * Build the search key: copy the cache's prototype ScanKey array, then
 * plug the caller-supplied key datums into the four argument slots.
 */
1353 * initialize the search key information
1355 memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
1356 cur_skey[0].sk_argument = v1;
1357 cur_skey[1].sk_argument = v2;
1358 cur_skey[2].sk_argument = v3;
1359 cur_skey[3].sk_argument = v4;
1362 * compute a hash value of the given keys for faster search. We don't
1363 * presently divide the CatCList items into buckets, but this still lets
1364 * us skip non-matching items quickly most of the time.
1366 lHashValue = CatalogCacheComputeHashValue(cache, nkeys, cur_skey);
/*
 * Phase 1: look for an existing list.  Walk cc_lists, rejecting entries
 * cheaply on deadness, hash value, and key count before the full key test.
 */
1369 * scan the items until we find a match or exhaust our list
1371 for (elt = DLGetHead(&cache->cc_lists);
1373 elt = DLGetSucc(elt))
1377 cl = (CatCList *) DLE_VAL(elt);
1380 continue; /* ignore dead entries */
1382 if (cl->hash_value != lHashValue)
1383 continue; /* quickly skip entry if wrong hash val */
1386 * see if the cached list matches our key.
1388 if (cl->nkeys != nkeys)
/* full comparison of key columns against the list's stored dummy tuple */
1390 HeapKeyTest(&cl->tuple,
1399 * We found a matching list: mark it as touched since the last
1400 * CatalogCacheCleanup() sweep. Also move the list to the front of
1401 * the cache's list-of-lists, to speed subsequent searches. (We do not
1402 * move the members to the fronts of their hashbucket lists, however,
1403 * since there's no point in that unless they are searched for
1407 DLMoveToFront(&cl->cache_elem);
/*
 * Enlarge the resource-owner array BEFORE bumping the refcount, so the
 * remember step below cannot fail partway.
 */
1409 /* Bump the list's refcount and return it */
1410 ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
1412 ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1414 CACHE2_elog(DEBUG2, "SearchCatCacheList(%s): found list",
1417 #ifdef CATCACHE_STATS
/*
 * Phase 2: cache miss — scan the catalog relation and collect member
 * tuples, reusing existing CatCTup entries where possible.
 */
1425 * List was not found in cache, so we have to build it by reading the
1426 * relation. For each matching tuple found in the relation, use an
1427 * existing cache entry if possible, else build a new one.
1429 * We have to bump the member refcounts temporarily to ensure they won't get
1430 * dropped from the cache while loading other members. We use a PG_TRY
1431 * block to ensure we can undo those refcounts if we get an error before
1432 * we finish constructing the CatCList.
1434 ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
1441 SysScanDesc scandesc;
1443 relation = heap_open(cache->cc_reloid, AccessShareLock);
1445 scandesc = systable_beginscan(relation,
1452 /* The list will be ordered iff we are doing an index scan */
1453 ordered = (scandesc->irel != NULL);
1455 while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1461 * See if there's an entry for this tuple already.
1464 hashValue = CatalogCacheComputeTupleHashValue(cache, ntp);
1465 hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
/* probe the per-cache hash bucket; match on TID identity, not key value */
1467 for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);
1469 elt = DLGetSucc(elt))
1471 ct = (CatCTup *) DLE_VAL(elt);
1473 if (ct->dead || ct->negative)
1474 continue; /* ignore dead and negative entries */
1476 if (ct->hash_value != hashValue)
1477 continue; /* quickly skip entry if wrong hash val */
1479 if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
1480 continue; /* not same tuple */
1483 * Found a match, but can't use it if it belongs to another
1489 /* Found a match, so move it to front */
1490 DLMoveToFront(&ct->lrulist_elem);
1497 /* We didn't find a usable entry, so make a new one */
1498 ct = CatalogCacheCreateEntry(cache, ntp,
1499 hashValue, hashIndex,
1503 /* Careful here: add entry to ctlist, then bump its refcount */
1504 /* This way leaves state correct if lappend runs out of memory */
1505 ctlist = lappend(ctlist, ct);
1509 systable_endscan(scandesc);
1511 heap_close(relation, AccessShareLock);
/*
 * Phase 3: allocate the CatCList itself in CacheMemoryContext, keyed by a
 * dummy tuple that stores only the search-key columns.
 */
1514 * Now we can build the CatCList entry. First we need a dummy tuple
1515 * containing the key values...
1517 ntp = build_dummy_tuple(cache, nkeys, cur_skey);
1518 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1519 nmembers = list_length(ctlist);
1521 palloc(sizeof(CatCList) + nmembers * sizeof(CatCTup *));
1522 heap_copytuple_with_tuple(ntp, &cl->tuple);
1523 MemoryContextSwitchTo(oldcxt);
1524 heap_freetuple(ntp);
1527 * We are now past the last thing that could trigger an elog before we
1528 * have finished building the CatCList and remembering it in the
1529 * resource owner. So it's OK to fall out of the PG_TRY, and indeed
1530 * we'd better do so before we start marking the members as belonging
/*
 * Error-recovery path (presumably inside PG_CATCH — the enclosing lines
 * are elided): drop the temporary member refcounts taken above, removing
 * entries that thereby become garbage.
 */
1537 foreach(ctlist_item, ctlist)
1539 ct = (CatCTup *) lfirst(ctlist_item);
1540 Assert(ct->c_list == NULL);
1541 Assert(ct->refcount > 0);
1544 #ifndef CATCACHE_FORCE_RELEASE
1547 ct->refcount == 0 &&
1548 (ct->c_list == NULL || ct->c_list->refcount == 0))
1549 CatCacheRemoveCTup(cache, ct);
/* Normal path: initialize the CatCList header fields. */
1556 cl->cl_magic = CL_MAGIC;
1557 cl->my_cache = cache;
1558 DLInitElem(&cl->cache_elem, cl);
1559 cl->refcount = 0; /* for the moment */
1561 cl->ordered = ordered;
1562 cl->touched = false; /* we already moved members to front */
1564 cl->hash_value = lHashValue;
1565 cl->n_members = nmembers;
/*
 * Transfer members from ctlist into the members[] array, releasing the
 * temporary refcounts as we go.
 */
1568 foreach(ctlist_item, ctlist)
1570 cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
1571 Assert(ct->c_list == NULL);
1573 /* release the temporary refcount on the member */
1574 Assert(ct->refcount > 0);
1576 /* mark list dead if any members already dead */
1580 Assert(i == nmembers);
1582 DLAddHead(&cache->cc_lists, &cl->cache_elem);
1584 /* Finally, bump the list's refcount and return it */
1586 ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1588 CACHE3_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
1589 cache->cc_relname, nmembers);
1595 * ReleaseCatCacheList
1597 * Decrement the reference count of a catcache list.
/*
 * Counterpart of SearchCatCacheList: drops one reference held by the
 * current resource owner.  NOTE(review): the decrement of list->refcount
 * and part of the removal condition are on elided lines; only the visible
 * fragments are annotated here.
 */
1600 ReleaseCatCacheList(CatCList *list)
1602 /* Safety checks to ensure we were handed a cache entry */
1603 Assert(list->cl_magic == CL_MAGIC);
1604 Assert(list->refcount > 0);
/* un-register the reference from the current resource owner's tracking */
1606 ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);
/*
 * If the list has been invalidated (dead) and this was the last
 * reference, physically remove it now.  With CATCACHE_FORCE_RELEASE the
 * removal is unconditional, to help catch refcount bugs.
 */
1609 #ifndef CATCACHE_FORCE_RELEASE
1612 list->refcount == 0)
1613 CatCacheRemoveCList(list->my_cache, list);
1618 * CatalogCacheCreateEntry
1619 * Create a new CatCTup entry, copying the given HeapTuple and other
1620 * supplied data into it. The new entry initially has refcount 0.
/*
 * hashValue/hashIndex must already be computed by the caller; negative
 * marks the entry as a "key not present" stub.  NOTE(review): the
 * `return ct;` and some declaration lines are outside this excerpt.
 */
1623 CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
1624 uint32 hashValue, Index hashIndex, bool negative)
1627 MemoryContext oldcxt;
/*
 * Both the header and the tuple copy must live in CacheMemoryContext so
 * they survive past the current query's memory context resets.
 */
1630 * Allocate CatCTup header in cache memory, and copy the tuple there too.
1632 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1633 ct = (CatCTup *) palloc(sizeof(CatCTup));
1634 heap_copytuple_with_tuple(ntp, &ct->tuple);
1635 MemoryContextSwitchTo(oldcxt);
1638 * Finish initializing the CatCTup header, and add it to the cache's
1639 * linked lists and counts.
1641 ct->ct_magic = CT_MAGIC;
1642 ct->my_cache = cache;
/* entry participates in two lists: global LRU and per-cache hash bucket */
1643 DLInitElem(&ct->lrulist_elem, (void *) ct);
1644 DLInitElem(&ct->cache_elem, (void *) ct);
1646 ct->refcount = 0; /* for the moment */
1648 ct->negative = negative;
1649 ct->hash_value = hashValue;
1651 DLAddHead(&CacheHdr->ch_lrulist, &ct->lrulist_elem);
1652 DLAddHead(&cache->cc_bucket[hashIndex], &ct->cache_elem);
1655 CacheHdr->ch_ntup++;
1658 * If we've exceeded the desired size of the caches, try to throw away the
1659 * least recently used entry(s). NB: be careful not to throw away the
1660 * newly-built entry...
/* pass ct so the cleanup sweep never evicts the entry we just made */
1662 if (CacheHdr->ch_ntup > CacheHdr->ch_maxtup)
1663 CatalogCacheCleanup(ct);
1669 * CatalogCacheCleanup
1670 * Try to reduce the size of the catcaches when they get too big
1672 * savect can be NULL, or a specific CatCTup not to remove even if it
1673 * has zero refcount.
/*
 * Evicts unreferenced tuples in global LRU order until the total tuple
 * count drops to the target.  NOTE(review): some condition lines (e.g. the
 * full eviction test and the savect exclusion) are elided in this excerpt.
 */
1676 CatalogCacheCleanup(CatCTup *savect)
1684 * Each time we have to do this, try to cut the cache size down to about
1685 * 90% of the maximum.
/* integer arithmetic: target = floor(maxtup * 0.9) */
1687 tup_target = (CacheHdr->ch_maxtup * 9) / 10;
1690 * Our strategy for managing CatCLists is that, each time we have to throw
1691 * away some cache entries, we first move-to-front all the members of
1692 * CatCLists that have been touched since the last cleanup sweep. Then we
1693 * do strict LRU elimination by individual tuples, zapping a list if any
1694 * of its members gets zapped. Before PostgreSQL 8.1, we moved members to
1695 * front each time their owning list was touched, which was arguably more
1696 * fair in balancing list members against standalone tuples --- but the
1697 * overhead for large lists was horrendous. This scheme is more heavily
1698 * biased towards preserving lists, but that is not necessarily bad
/* Pass 1: refresh LRU position of members of recently-touched live lists */
1701 for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
1703 for (elt = DLGetHead(&ccp->cc_lists); elt; elt = DLGetSucc(elt))
1705 CatCList *cl = (CatCList *) DLE_VAL(elt);
1707 Assert(cl->cl_magic == CL_MAGIC);
1708 if (cl->touched && !cl->dead)
1712 for (i = 0; i < cl->n_members; i++)
1713 DLMoveToFront(&cl->members[i]->lrulist_elem);
/* consumed the "touched" flag until the next SearchCatCacheList hit */
1715 cl->touched = false;
/* Pass 2: evict from the LRU tail; save prev pointer before removal */
1719 /* Now get rid of unreferenced tuples in reverse global LRU order */
1720 for (elt = DLGetTail(&CacheHdr->ch_lrulist); elt; elt = prevelt)
1722 CatCTup *ct = (CatCTup *) DLE_VAL(elt);
1724 prevelt = DLGetPred(elt);
/* only evict entries pinned by neither a caller nor a referenced list */
1726 if (ct->refcount == 0 &&
1727 (ct->c_list == NULL || ct->c_list->refcount == 0) &&
1730 #ifdef CATCACHE_STATS
1731 ct->my_cache->cc_discards++;
1733 CatCacheRemoveCTup(ct->my_cache, ct);
1735 /* Quit when we've removed enough tuples */
1736 if (CacheHdr->ch_ntup <= tup_target)
1744 * Generate a palloc'd HeapTuple that contains the specified key
1745 * columns, and NULLs for other columns.
1747 * This is used to store the keys for negative cache entries and CatCList
1748 * entries, which don't have real tuples associated with them.
/*
 * NOTE(review): return statement and a few declarations (values/nulls
 * pointers, loop index, the else branch for system attributes) lie on
 * elided lines outside this excerpt.
 */
1751 build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
1754 TupleDesc tupDesc = cache->cc_tupdesc;
1757 Oid tupOid = InvalidOid;
/* scratch NAME buffers; at most 4 keys, matching the ScanKey array size */
1758 NameData tempNames[4];
/* start with every column NULL ('n'); fill in only the key columns */
1761 values = (Datum *) palloc(tupDesc->natts * sizeof(Datum));
1762 nulls = (char *) palloc(tupDesc->natts * sizeof(char));
1764 memset(values, 0, tupDesc->natts * sizeof(Datum));
1765 memset(nulls, 'n', tupDesc->natts * sizeof(char));
1767 for (i = 0; i < nkeys; i++)
1769 int attindex = cache->cc_key[i];
1770 Datum keyval = skeys[i].sk_argument;
1775 * Here we must be careful in case the caller passed a C string
1776 * where a NAME is wanted: convert the given argument to a
1777 * correctly padded NAME. Otherwise the memcpy() done in
1778 * heap_formtuple could fall off the end of memory.
1780 if (cache->cc_isname[i])
1782 Name newval = &tempNames[i];
1784 namestrcpy(newval, DatumGetCString(keyval));
1785 keyval = NameGetDatum(newval);
/* ordinary user column: attindex is a 1-based attribute number */
1787 values[attindex - 1] = keyval;
1788 nulls[attindex - 1] = ' ';
/* system-attribute key: only OID is supported as a cache key */
1792 Assert(attindex == ObjectIdAttributeNumber);
1793 tupOid = DatumGetObjectId(keyval);
1797 ntp = heap_formtuple(tupDesc, values, nulls);
/* OID lives in the tuple header, not a user column, so set it separately */
1798 if (tupOid != InvalidOid)
1799 HeapTupleSetOid(ntp, tupOid);
1809 * PrepareToInvalidateCacheTuple()
1811 * This is part of a rather subtle chain of events, so pay attention:
1813 * When a tuple is inserted or deleted, it cannot be flushed from the
1814 * catcaches immediately, for reasons explained at the top of cache/inval.c.
1815 * Instead we have to add entry(s) for the tuple to a list of pending tuple
1816 * invalidations that will be done at the end of the command or transaction.
1818 * The lists of tuples that need to be flushed are kept by inval.c. This
1819 * routine is a helper routine for inval.c. Given a tuple belonging to
1820 * the specified relation, find all catcaches it could be in, compute the
1821 * correct hash value for each such catcache, and call the specified function
1822 * to record the cache id, hash value, and tuple ItemPointer in inval.c's
1823 * lists. CatalogCacheIdInvalidate will be called later, if appropriate,
1824 * using the recorded information.
1826 * Note that it is irrelevant whether the given tuple is actually loaded
1827 * into the catcache at the moment. Even if it's not there now, it might
1828 * be by the end of the command, or there might be a matching negative entry
1829 * to flush --- or other backends' caches might have such entries --- so
1830 * we have to make list entries to flush it later.
1832 * Also note that it's not an error if there are no catcaches for the
1833 * specified relation. inval.c doesn't know exactly which rels have
1834 * catcaches --- it will call this routine for any tuple that's in a
/*
 * NOTE(review): return type, the `tuple` parameter declaration, and a few
 * interior lines (the continue for non-matching reloids, the ItemPointer
 * argument to the callback) are elided in this excerpt.
 */
1838 PrepareToInvalidateCacheTuple(Relation relation,
1840 void (*function) (int, uint32, ItemPointer, Oid))
1845 CACHE1_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
/* sanity checks on all inputs before touching cache state */
1850 Assert(RelationIsValid(relation));
1851 Assert(HeapTupleIsValid(tuple));
1852 Assert(PointerIsValid(function));
1853 Assert(CacheHdr != NULL);
1855 reloid = RelationGetRelid(relation);
1859 * if the cache contains tuples from the specified relation
1860 * compute the tuple's hash value in this cache,
1861 * and call the passed function to register the information.
/* walk every catcache; more than one may be keyed on this relation */
1865 for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
1867 /* Just in case cache hasn't finished initialization yet... */
1868 if (ccp->cc_tupdesc == NULL)
1869 CatalogCacheInitializeCache(ccp);
1871 if (ccp->cc_reloid != reloid)
/*
 * Register (cache id, hash, tuple TID, database) with inval.c; shared
 * catalogs use InvalidOid (0) so all databases see the invalidation.
 */
1874 (*function) (ccp->id,
1875 CatalogCacheComputeTupleHashValue(ccp, tuple),
1877 ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId);
1883 * Subroutines for warning about reference leaks. These are exported so
1884 * that resowner.c can call them.
/*
 * Emit a WARNING identifying a catcache tuple whose reference was still
 * held at resource-owner cleanup time.  NOTE(review): the final elog
 * argument (the refcount) and closing brace are on elided lines.
 */
1887 PrintCatCacheLeakWarning(HeapTuple tuple)
/*
 * Recover the CatCTup header from the embedded tuple pointer by backing
 * up over the offset of the `tuple` field (container-of idiom).
 */
1889 CatCTup *ct = (CatCTup *) (((char *) tuple) -
1890 offsetof(CatCTup, tuple));
1892 /* Safety check to ensure we were handed a cache entry */
1893 Assert(ct->ct_magic == CT_MAGIC);
1895 elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%u has count %d",
1896 ct->my_cache->cc_relname, ct->my_cache->id,
1897 ItemPointerGetBlockNumber(&(tuple->t_self)),
1898 ItemPointerGetOffsetNumber(&(tuple->t_self)),
/*
 * Companion to PrintCatCacheLeakWarning: report a CatCList whose refcount
 * was still nonzero at resource-owner cleanup.  (Closing brace lies past
 * this excerpt.)
 */
1903 PrintCatCacheListLeakWarning(CatCList *list)
1905 elog(WARNING, "cache reference leak: cache %s (%d), list %p has count %d",
1906 list->my_cache->cc_relname, list->my_cache->id,
1907 list, list->refcount);