1 /*-------------------------------------------------------------------------
4 * System catalog cache for tuples matching a key.
6 * Copyright (c) 1994, Regents of the University of California
10 * $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.55 1999/12/16 22:19:54 wieck Exp $
12 *-------------------------------------------------------------------------
15 #include "access/genam.h"
16 #include "access/heapam.h"
17 #include "access/valid.h"
18 #include "catalog/pg_operator.h"
19 #include "catalog/pg_type.h"
20 #include "catalog/catname.h"
21 #include "catalog/indexing.h"
22 #include "miscadmin.h"
23 #include "utils/builtins.h"
24 #include "utils/catcache.h"
25 #include "utils/syscache.h"
27 static void CatCacheRemoveCTup(CatCache *cache, Dlelem *e);
28 static Index CatalogCacheComputeHashIndex(struct catcache * cacheInP);
29 static Index CatalogCacheComputeTupleHashIndex(struct catcache * cacheInOutP,
30 Relation relation, HeapTuple tuple);
31 static void CatalogCacheInitializeCache(struct catcache * cache,
33 static long comphash(long l, char *v);
36 * variables, macros and other stuff
38 * note CCSIZE allocates 51 buckets .. one was already allocated in
39 * the catcache structure.
/*
 * Debug-logging macros: the first set expands to real elog() calls, the
 * second set expands to nothing.  The guarding #ifdef/#else/#endif lines
 * are not visible here -- presumably CACHEDEBUG-controlled (an #endif
 * marked CACHEDEBUG appears later in this file); confirm in full source.
 */
44 #define CACHE1_elog(a,b) elog(a,b)
45 #define CACHE2_elog(a,b,c) elog(a,b,c)
46 #define CACHE3_elog(a,b,c,d) elog(a,b,c,d)
47 #define CACHE4_elog(a,b,c,d,e) elog(a,b,c,d,e)
48 #define CACHE5_elog(a,b,c,d,e,f) elog(a,b,c,d,e,f)
49 #define CACHE6_elog(a,b,c,d,e,f,g) elog(a,b,c,d,e,f,g)
51 #define CACHE1_elog(a,b)
52 #define CACHE2_elog(a,b,c)
53 #define CACHE3_elog(a,b,c,d)
54 #define CACHE4_elog(a,b,c,d,e)
55 #define CACHE5_elog(a,b,c,d,e,f)
56 #define CACHE6_elog(a,b,c,d,e,f,g)
59 static CatCache *Caches = NULL; /* head of list of caches */
61 GlobalMemory CacheCxt; /* context in which caches are allocated */
62 /* CacheCxt is global because relcache uses it too. */
66 * EQPROC is used in CatalogCacheInitializeCache
67 * XXX this should be replaced by catalog lookups soon
/*
 * Hardwired mapping from system type OIDs to the OID of the matching
 * equality procedure.  EQPROC subtracts 16, and the table holds 15
 * entries, so it covers type OIDs 16..30; the 0l slots are type OIDs
 * with no cache-supported equality function.
 */
70 static long eqproc[] = {
71 F_BOOLEQ, 0l, F_CHAREQ, F_NAMEEQ, 0l,
72 F_INT2EQ, F_KEYFIRSTEQ, F_INT4EQ, 0l, F_TEXTEQ,
73 F_OIDEQ, 0l, 0l, 0l, F_OID8EQ
76 #define EQPROC(SYSTEMTYPEOID) eqproc[(SYSTEMTYPEOID)-16]
78 /* ----------------------------------------------------------------
79 * internal support functions
80 * ----------------------------------------------------------------
82 /* --------------------------------
83 * CatalogCacheInitializeCache
84 * --------------------------------
/*
 * Debug hooks for CatalogCacheInitializeCache: DEBUG1 reports entry and
 * which relation (or relname) the cache is being initialized for; DEBUG2
 * reports each key attribute as it is loaded.  The second, empty pair of
 * definitions is the debugging-disabled variant (guard lines elided here).
 * No comments may be inserted inside the macros themselves because of the
 * backslash line continuations.
 */
87 #define CatalogCacheInitializeCache_DEBUG1 \
89 elog(DEBUG, "CatalogCacheInitializeCache: cache @%08lx", cache); \
91 elog(DEBUG, "CatalogCacheInitializeCache: called w/relation(inval)"); \
93 elog(DEBUG, "CatalogCacheInitializeCache: called w/relname %s", \
97 #define CatalogCacheInitializeCache_DEBUG2 \
99 if (cache->cc_key[i] > 0) { \
100 elog(DEBUG, "CatalogCacheInitializeCache: load %d/%d w/%d, %d", \
101 i+1, cache->cc_nkeys, cache->cc_key[i], \
102 relation->rd_att->attrs[cache->cc_key[i] - 1]->attlen); \
104 elog(DEBUG, "CatalogCacheInitializeCache: load %d/%d w/%d", \
105 i+1, cache->cc_nkeys, cache->cc_key[i]); \
110 #define CatalogCacheInitializeCache_DEBUG1
111 #define CatalogCacheInitializeCache_DEBUG2
/*
 * CatalogCacheInitializeCache
 *		Complete the lazy initialization of a catcache: open the cached
 *		relation (unless the caller passed one), record its OID and tuple
 *		descriptor in the cache, and fill in the per-key length, equality
 *		procedure, and scankey data.  Also resolves the cache's index OID
 *		the first time through.  All work is done in the long-lived Cache
 *		memory context so the results survive transaction end.
 */
115 CatalogCacheInitializeCache(struct catcache * cache,
118 MemoryContext oldcxt;
123 CatalogCacheInitializeCache_DEBUG1;
126 * first switch to the cache context so our allocations
127 * do not vanish at the end of a transaction
/* create the global Cache context on first use */
131 CacheCxt = CreateGlobalMemory("Cache");
132 oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
135 * If no relation was passed we must open it to get access to
136 * its fields. If one of the other caches has already opened
137 * it we use heap_open() instead of heap_openr().
138 * XXX is that really worth the trouble of checking?
141 if (!RelationIsValid(relation))
146 * scan the caches to see if any other cache has opened the relation
149 for (cp = Caches; cp; cp = cp->cc_next)
151 if (strncmp(cp->cc_relname, cache->cc_relname, NAMEDATALEN) == 0)
153 if (cp->relationId != InvalidOid)
159 * open the relation by name or by id
/* another cache already knows the OID: open by OID (cheaper) */
163 relation = heap_open(cp->relationId, NoLock);
/* otherwise fall back to opening by relation name */
165 relation = heap_openr(cache->cc_relname, NoLock);
171 * initialize the cache's relation id
174 Assert(RelationIsValid(relation));
175 cache->relationId = RelationGetRelid(relation);
176 tupdesc = cache->cc_tupdesc = RelationGetDescr(relation);
178 CACHE3_elog(DEBUG, "CatalogCacheInitializeCache: relid %u, %d keys",
179 cache->relationId, cache->cc_nkeys);
182 * initialize cache's key information
185 for (i = 0; i < cache->cc_nkeys; ++i)
187 CatalogCacheInitializeCache_DEBUG2;
/* positive cc_key is a regular attribute number; look up its type info */
189 if (cache->cc_key[i] > 0)
193 * Yoiks. The implementation of the hashing code and the
194 * implementation of int28's are at loggerheads. The right
195 * thing to do is to throw out the implementation of int28's
196 * altogether; until that happens, we do the right thing here
197 * to guarantee that the hash key generator doesn't try to
198 * dereference an int2 by mistake.
201 if (tupdesc->attrs[cache->cc_key[i] - 1]->atttypid == INT28OID)
202 cache->cc_klen[i] = sizeof(short);
204 cache->cc_klen[i] = tupdesc->attrs[cache->cc_key[i] - 1]->attlen;
/* equality proc comes from the hardwired EQPROC table, keyed by type OID */
206 cache->cc_skey[i].sk_procedure = EQPROC(tupdesc->attrs[cache->cc_key[i] - 1]->atttypid);
208 fmgr_info(cache->cc_skey[i].sk_procedure,
209 &cache->cc_skey[i].sk_func);
210 cache->cc_skey[i].sk_nargs = cache->cc_skey[i].sk_func.fn_nargs;
212 CACHE5_elog(DEBUG, "CatalogCacheInit %s %d %d %x",
213 RelationGetRelationName(relation),
215 tupdesc->attrs[cache->cc_key[i] - 1]->attlen,
221 * close the relation if we opened it
225 heap_close(relation, NoLock);
228 * initialize index information for the cache. this
229 * should only be done once per cache.
/*
 * NOTE(review): 'relation' is dereferenced by RelationGetForm() below
 * after the heap_close() above -- verify against the full file that the
 * relcache entry is still valid here (heap_close with NoLock may leave
 * the Relation usable, but this ordering deserves a check).
 */
232 if (cache->cc_indname != NULL && cache->indexId == InvalidOid)
234 if (RelationGetForm(relation)->relhasindex)
238 * If the index doesn't exist we are in trouble.
240 relation = index_openr(cache->cc_indname);
242 cache->indexId = RelationGetRelid(relation);
243 index_close(relation);
/* no usable index: disable index scans for this cache permanently */
246 cache->cc_indname = NULL;
250 * return to the proper memory context
253 MemoryContextSwitchTo(oldcxt);
256 /* --------------------------------
259 * XXX temporary function
260 * --------------------------------
/*
 * CatalogCacheSetId
 *		Assign the cache identifier used by the syscache invalidation
 *		machinery.  id must be non-negative or InvalidCatalogCacheId.
 *		(Marked XXX temporary in the comment above.)
 */
264 CatalogCacheSetId(CatCache *cacheInOutP, int id)
266 Assert(id == InvalidCatalogCacheId || id >= 0);
267 cacheInOutP->id = id;
274 * Compute a hash value, somehow.
276 * XXX explain algorithm here.
278 * l is length of the attribute value, v
279 * v is the attribute value ("Datum")
/*
 * comphash
 *		Compute a hash value for one key attribute.  l is the attribute
 *		length (from cc_klen), v points at the Datum value.  Values whose
 *		length equals NAMEDATALEN get special name handling (see comment
 *		below); the branches for other lengths are elided in this view.
 */
283 comphash(long l, char *v)
288 CACHE3_elog(DEBUG, "comphash (%d,%x)", l, v);
298 if (l == NAMEDATALEN)
302 * if it's a name, make sure that the values are null-padded.
304 * Note that this other fixed-length types can also have the same
305 * typelen so this may break them - XXX
319 /* --------------------------------
320 * CatalogCacheComputeHashIndex
321 * --------------------------------
/*
 * CatalogCacheComputeHashIndex
 *		Compute the hash bucket number for the search keys currently
 *		loaded into cacheInP->cc_skey[].  The switch on cc_nkeys
 *		apparently falls through from case 4 down to case 1, XORing in
 *		each key's comphash() shifted by 9/6/3/0 bits, then reduces
 *		modulo the bucket count cc_size.
 */
324 CatalogCacheComputeHashIndex(struct catcache * cacheInP)
329 CACHE6_elog(DEBUG, "CatalogCacheComputeHashIndex %s %d %d %d %x",
330 cacheInP->cc_relname,
332 cacheInP->cc_klen[0],
333 cacheInP->cc_klen[1],
336 switch (cacheInP->cc_nkeys)
339 hashIndex ^= comphash(cacheInP->cc_klen[3],
340 (char *) cacheInP->cc_skey[3].sk_argument) << 9;
343 hashIndex ^= comphash(cacheInP->cc_klen[2],
344 (char *) cacheInP->cc_skey[2].sk_argument) << 6;
347 hashIndex ^= comphash(cacheInP->cc_klen[1],
348 (char *) cacheInP->cc_skey[1].sk_argument) << 3;
351 hashIndex ^= comphash(cacheInP->cc_klen[0],
352 (char *) cacheInP->cc_skey[0].sk_argument);
/* any nkeys outside 1..4 is a coding error in cache setup */
355 elog(FATAL, "CCComputeHashIndex: %d cc_nkeys", cacheInP->cc_nkeys);
358 hashIndex %= cacheInP->cc_size;
362 /* --------------------------------
363 * CatalogCacheComputeTupleHashIndex
364 * --------------------------------
/*
 * CatalogCacheComputeTupleHashIndex
 *		Given an actual catalog tuple, load the cache's scankey arguments
 *		from the tuple's key attributes (taking t_oid directly when the
 *		key is the OID system attribute, otherwise extracting the
 *		attribute -- extraction call partly elided here), then delegate
 *		to CatalogCacheComputeHashIndex.  Initializes the cache first if
 *		its relation has not been opened yet.
 */
367 CatalogCacheComputeTupleHashIndex(struct catcache * cacheInOutP,
373 if (cacheInOutP->relationId == InvalidOid)
374 CatalogCacheInitializeCache(cacheInOutP, relation);
375 switch (cacheInOutP->cc_nkeys)
/* case 4: load key 4 from the tuple */
378 cacheInOutP->cc_skey[3].sk_argument =
379 (cacheInOutP->cc_key[3] == ObjectIdAttributeNumber)
380 ? (Datum) tuple->t_data->t_oid
382 cacheInOutP->cc_key[3],
383 RelationGetDescr(relation),
/* case 3: load key 3 (falls through from above) */
388 cacheInOutP->cc_skey[2].sk_argument =
389 (cacheInOutP->cc_key[2] == ObjectIdAttributeNumber)
390 ? (Datum) tuple->t_data->t_oid
392 cacheInOutP->cc_key[2],
393 RelationGetDescr(relation),
/* case 2: load key 2 */
398 cacheInOutP->cc_skey[1].sk_argument =
399 (cacheInOutP->cc_key[1] == ObjectIdAttributeNumber)
400 ? (Datum) tuple->t_data->t_oid
402 cacheInOutP->cc_key[1],
403 RelationGetDescr(relation),
/* case 1: load key 1 */
408 cacheInOutP->cc_skey[0].sk_argument =
409 (cacheInOutP->cc_key[0] == ObjectIdAttributeNumber)
410 ? (Datum) tuple->t_data->t_oid
412 cacheInOutP->cc_key[0],
413 RelationGetDescr(relation),
/* nkeys outside 1..4 indicates a broken cache definition */
418 elog(FATAL, "CCComputeTupleHashIndex: %d cc_nkeys",
419 cacheInOutP->cc_nkeys
424 return CatalogCacheComputeHashIndex(cacheInOutP);
427 /* --------------------------------
429 * --------------------------------
/*
 * CatCacheRemoveCTup
 *		Unlink and free a cache entry.  Each cached tuple is represented
 *		by two cross-linked Dlelem's -- one on a hash-bucket list, one on
 *		the LRU list, each CatCTup's ct_node pointing at the partner --
 *		so removal frees both (only one DLFreeElem is visible here; the
 *		other and the tuple free are in elided lines).
 */
432 CatCacheRemoveCTup(CatCache *cache, Dlelem *elt)
439 ct = (CatCTup *) DLE_VAL(elt);
443 other_elt = ct->ct_node;
444 other_ct = (CatCTup *) DLE_VAL(other_elt);
446 DLFreeElem(other_elt);
454 /* --------------------------------
455 * CatalogCacheIdInvalidate()
457 * Invalidate a tuple given a cache id. In this case the id should always
458 * be found (whether the cache has opened its relation or not). Of course,
459 * if the cache has yet to open its relation, there will be no tuples so
461 * --------------------------------
/*
 * CatalogCacheIdInvalidate
 *		Invalidate (remove) the cache entry for a given tuple, identified
 *		by cache id, hash bucket index, and the tuple's ItemPointer.
 *		Walks the cache list to find the cache with the matching id, then
 *		scans that hash bucket for an entry whose t_self matches.
 */
464 CatalogCacheIdInvalidate(int cacheId, /* XXX */
471 MemoryContext oldcxt;
477 Assert(hashIndex < NCCBUCK);
478 Assert(ItemPointerIsValid(pointer));
479 CACHE1_elog(DEBUG, "CatalogCacheIdInvalidate: called");
482 * switch to the cache context for our memory allocations
/* create the global Cache context on first use */
486 CacheCxt = CreateGlobalMemory("Cache");
487 oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
490 * inspect every cache that could contain the tuple
493 for (ccp = Caches; ccp; ccp = ccp->cc_next)
/* skip caches whose id doesn't match */
495 if (cacheId != ccp->id)
498 * inspect the hash bucket until we find a match or exhaust
501 for (elt = DLGetHead(ccp->cc_cache[hashIndex]);
503 elt = DLGetSucc(elt))
505 ct = (CatCTup *) DLE_VAL(elt);
/* match on the tuple's self pointer (TID) */
506 if (ItemPointerEquals(pointer, &ct->ct_tup->t_self))
511 * if we found a matching tuple, invalidate it.
517 CatCacheRemoveCTup(ccp, elt);
519 CACHE1_elog(DEBUG, "CatalogCacheIdInvalidate: invalidated");
522 if (cacheId != InvalidCatalogCacheId)
527 * return to the proper memory context
530 MemoryContextSwitchTo(oldcxt);
531 /* sendpm('I', "Invalidated tuple"); */
534 /* ----------------------------------------------------------------
538 * InitIndexedSysCache
541 * RelationInvalidateCatalogCacheTuple
542 * ----------------------------------------------------------------
544 /* --------------------------------
546 * --------------------------------
/*
 * ResetSystemCache (function header lines elided above)
 *		Purge every tuple from every catalog cache: for each cache, walk
 *		each hash bucket and remove all entries.  Also resets cc_ntup and
 *		the busy flag so a cache left inconsistent by an elog(ERROR) is
 *		recovered.
 */
551 MemoryContext oldcxt;
552 struct catcache *cache;
554 CACHE1_elog(DEBUG, "ResetSystemCache called");
557 * first switch to the cache context so our allocations
558 * do not vanish at the end of a transaction
/* create the global Cache context on first use */
562 CacheCxt = CreateGlobalMemory("Cache");
564 oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
567 * here we purge the contents of all the caches
569 * for each system cache
570 * for each hash bucket
571 * for each tuple in hash bucket
575 for (cache = Caches; PointerIsValid(cache); cache = cache->cc_next)
579 for (hash = 0; hash < NCCBUCK; hash += 1)
/* grab the successor BEFORE removing elt, since removal frees it */
584 for (elt = DLGetHead(cache->cc_cache[hash]); elt; elt = nextelt)
586 nextelt = DLGetSucc(elt);
587 CatCacheRemoveCTup(cache, elt);
/* sanity check: the per-cache tuple count must never go negative */
588 if (cache->cc_ntup < 0)
590 "ResetSystemCache: cc_ntup<0 (software error)");
593 cache->cc_ntup = 0; /* in case of WARN error above */
594 cache->busy = false; /* to recover from recursive-use error */
597 CACHE1_elog(DEBUG, "end of ResetSystemCache call");
600 * back to the old context before we return...
603 MemoryContextSwitchTo(oldcxt);
606 /* --------------------------------
607 * SystemCacheRelationFlushed
609 * This is called by RelationFlushRelation() to clear out cached information
610 * about a relation being dropped. (This could be a DROP TABLE command,
611 * or a temp table being dropped at end of transaction, or a table created
612 * during the current transaction that is being dropped because of abort.)
613 * Remove all cache entries relevant to the specified relation OID.
615 * A special case occurs when relId is itself one of the cacheable system
616 * tables --- although those'll never be dropped, they can get flushed from
617 * the relcache (VACUUM causes this, for example). In that case we need to
618 * force the next SearchSysCache() call to reinitialize the cache itself,
619 * because we have info (such as cc_tupdesc) that is pointing at the about-
620 * to-be-deleted relcache entry.
621 * --------------------------------
/*
 * (See the block comment above: called from RelationFlushRelation when a
 * relation's relcache entry is dropped.  Flushes cache contents -- the
 * ResetSystemCache call itself is in elided lines -- and additionally
 * marks any cache built ON relId as uninitialized so its cc_tupdesc etc.
 * get rebuilt rather than pointing at the dead relcache entry.)
 */
624 SystemCacheRelationFlushed(Oid relId)
626 struct catcache *cache;
629 * XXX Ideally we'd search the caches and just zap entries that actually
630 * refer to the indicated relation. For now, we take the brute-force
631 * approach: just flush the caches entirely.
636 * If relcache is dropping a system relation's cache entry, mark the
637 * associated cache structures invalid, so we can rebuild them from
638 * scratch (not just repopulate them) next time they are used.
640 for (cache = Caches; PointerIsValid(cache); cache = cache->cc_next)
/* InvalidOid forces CatalogCacheInitializeCache on next use */
642 if (cache->relationId == relId)
643 cache->relationId = InvalidOid;
647 /* --------------------------------
648 * InitIndexedSysCache
650 * This allocates and initializes a cache for a system catalog relation.
651 * Actually, the cache is only partially initialized to avoid opening the
652 * relation. The relation will be opened and the rest of the cache
653 * structure initialized on the first access.
654 * --------------------------------
/*
 * Debug hook for InitSysCache: dumps the new cache's identity and each
 * key's scankey fields.  The empty definition below is the
 * debugging-disabled variant (guard lines elided).  No comments may be
 * inserted inside the macro because of the backslash continuations.
 */
657 #define InitSysCache_DEBUG1 \
659 elog(DEBUG, "InitSysCache: rid=%u id=%d nkeys=%d size=%d\n", \
660 cp->relationId, cp->id, cp->cc_nkeys, cp->cc_size); \
661 for (i = 0; i < nkeys; i += 1) \
663 elog(DEBUG, "InitSysCache: key=%d len=%d skey=[%d %d %d %d]\n", \
664 cp->cc_key[i], cp->cc_klen[i], \
665 cp->cc_skey[i].sk_flags, \
666 cp->cc_skey[i].sk_attno, \
667 cp->cc_skey[i].sk_procedure, \
668 cp->cc_skey[i].sk_argument); \
673 #define InitSysCache_DEBUG1
/*
 * InitSysCache
 *		Allocate and partially initialize a catalog cache for relation
 *		'relname' (optionally indexed via 'iname', searched with
 *		iScanfuncP).  Deliberately does NOT open the relation: relationId
 *		is left InvalidOid so CatalogCacheInitializeCache finishes the
 *		job on first access.  The cache lives in the global Cache memory
 *		context and is pushed onto the global Caches list.
 */
677 InitSysCache(char *relname,
682 HeapTuple (*iScanfuncP) ())
686 MemoryContext oldcxt;
690 indname = (iname) ? iname : NULL;
693 * first switch to the cache context so our allocations
694 * do not vanish at the end of a transaction
/* create the global Cache context on first use */
698 CacheCxt = CreateGlobalMemory("Cache");
700 oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
703 * allocate a new cache structure
706 cp = (CatCache *) palloc(sizeof(CatCache));
707 MemSet((char *) cp, 0, sizeof(CatCache));
710 * initialize the cache buckets (each bucket is a list header)
711 * and the LRU tuple list
716 * We can only do this optimization because the number of hash
717 * buckets never changes. Without it, we call malloc() too much.
718 * We could move this to dllist.c, but the way we do this is not
719 * dynamic/portabl, so why allow other routines to use it.
/* one malloc for all NCCBUCK+1 bucket headers, then hand out slices */
721 Dllist *cache_begin = malloc((NCCBUCK + 1) * sizeof(Dllist));
723 for (i = 0; i <= NCCBUCK; ++i)
725 cp->cc_cache[i] = &cache_begin[i];
726 cp->cc_cache[i]->dll_head = 0;
727 cp->cc_cache[i]->dll_tail = 0;
731 cp->cc_lrulist = DLNewList();
734 * Caches is the pointer to the head of the list of all the
735 * system caches. here we add the new cache to the top of the list.
738 cp->cc_next = Caches; /* list of caches (single link) */
742 * initialize the cache's relation information for the relation
743 * corresponding to this cache and initialize some of the the new
744 * cache's other internal fields.
/* relationId/indexId stay invalid until first access opens the rel */
747 cp->relationId = InvalidOid;
748 cp->indexId = InvalidOid;
749 cp->cc_relname = relname;
750 cp->cc_indname = indname;
751 cp->cc_tupdesc = (TupleDesc) NULL;
754 cp->cc_maxtup = MAXTUP;
755 cp->cc_size = NCCBUCK;
756 cp->cc_nkeys = nkeys;
757 cp->cc_iscanfunc = iScanfuncP;
760 * initialize the cache's key information
763 for (i = 0; i < nkeys; ++i)
765 cp->cc_key[i] = key[i];
/* a zero key attribute number is never legal */
767 elog(FATAL, "InitSysCache: called with 0 key[%d]", i);
/* negative keys must be the OID system attribute */
770 if (key[i] != ObjectIdAttributeNumber)
771 elog(FATAL, "InitSysCache: called with %d key[%d]", key[i], i);
774 cp->cc_klen[i] = sizeof(Oid);
777 * ScanKeyEntryData and struct skey are equivalent. It
778 * looks like a move was made to obsolete struct skey, but
779 * it didn't reach this file. Someday we should clean up
780 * this code and consolidate to ScanKeyEntry - mer 10 Nov
783 ScanKeyEntryInitialize(&cp->cc_skey[i],
786 (RegProcedure) F_OIDEQ,
792 cp->cc_skey[i].sk_attno = key[i];
796 * all done. new cache is initialized. print some debugging
797 * information, if appropriate.
803 * back to the old context before we return...
806 MemoryContextSwitchTo(oldcxt);
811 /* --------------------------------
812 * SearchSelfReferences
814 * This call searches for self-referencing information,
815 * which causes infinite recursion in the system catalog cache.
816 * This code short-circuits the normal index lookup for cache loads
817 * in those cases and replaces it with a heap scan.
819 * cache should already be initailized
820 * --------------------------------
/*
 * SearchSelfReferences
 *		Break infinite recursion for lookups that the syscache machinery
 *		itself depends on.  Two cases are short-circuited with private
 *		static copies of the needed tuples, loaded once via plain heap
 *		scans: (1) the INDEXRELID cache entry for pg_index's own index,
 *		and (2) OPEROID entries in the MIN_OIDCMP..MAX_OIDCMP range.
 *		Returns the private tuple, or falls through (elided returns)
 *		when the lookup is not self-referencing.
 */
823 SearchSelfReferences(struct catcache * cache)
828 if (cache->id == INDEXRELID)
/* cached once per backend, in static storage */
830 static Oid indexSelfOid = InvalidOid;
831 static HeapTuple indexSelfTuple = NULL;
833 if (!OidIsValid(indexSelfOid))
835 /* Find oid of pg_index_indexrelid_index */
836 rel = heap_openr(RelationRelationName, AccessShareLock);
837 ntp = ClassNameIndexScan(rel, IndexRelidIndex);
838 if (!HeapTupleIsValid(ntp))
839 elog(ERROR, "SearchSelfReferences: %s not found in %s",
840 IndexRelidIndex, RelationRelationName);
841 indexSelfOid = ntp->t_data->t_oid;
843 heap_close(rel, AccessShareLock);
845 /* Looking for something other than pg_index_indexrelid_index? */
846 if ((Oid)cache->cc_skey[0].sk_argument != indexSelfOid)
849 /* Do we need to load our private copy of the tuple? */
850 if (!HeapTupleIsValid(indexSelfTuple))
853 MemoryContext oldcxt;
856 CacheCxt = CreateGlobalMemory("Cache");
/* plain heap scan -- deliberately NOT an index scan, to avoid recursion */
857 rel = heap_open(cache->relationId, AccessShareLock);
858 sd = heap_beginscan(rel, false, SnapshotNow, 1, cache->cc_skey);
859 ntp = heap_getnext(sd, 0);
860 if (!HeapTupleIsValid(ntp))
861 elog(ERROR, "SearchSelfReferences: tuple not found");
/* copy into the Cache context so the static pointer stays valid */
862 oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
863 indexSelfTuple = heap_copytuple(ntp);
864 MemoryContextSwitchTo(oldcxt);
866 heap_close(rel, AccessShareLock);
868 return indexSelfTuple;
870 else if (cache->id == OPEROID)
872 /* bootstrapping this requires preloading a range of rows. bjm */
873 static HeapTuple operatorSelfTuple[MAX_OIDCMP-MIN_OIDCMP+1];
874 Oid lookup_oid = (Oid)cache->cc_skey[0].sk_argument;
/* only operators in the comparison-operator OID range are special */
876 if (lookup_oid < MIN_OIDCMP || lookup_oid > MAX_OIDCMP)
879 if (!HeapTupleIsValid(operatorSelfTuple[lookup_oid-MIN_OIDCMP]))
882 MemoryContext oldcxt;
885 CacheCxt = CreateGlobalMemory("Cache");
886 rel = heap_open(cache->relationId, AccessShareLock);
887 sd = heap_beginscan(rel, false, SnapshotNow, 1, cache->cc_skey);
888 ntp = heap_getnext(sd, 0);
889 if (!HeapTupleIsValid(ntp))
890 elog(ERROR, "SearchSelfReferences: tuple not found");
891 oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
892 operatorSelfTuple[lookup_oid-MIN_OIDCMP] = heap_copytuple(ntp);
893 MemoryContextSwitchTo(oldcxt);
895 heap_close(rel, AccessShareLock);
897 return operatorSelfTuple[lookup_oid-MIN_OIDCMP];
904 /* --------------------------------
907 * This call searches a system cache for a tuple, opening the relation
908 * if necessary (the first access to a particular cache).
909 * --------------------------------
/*
 * SearchSysCache
 *		Look up a tuple by key values v1..v4.  Order of operations:
 *		lazily initialize the cache; load the search keys; short-circuit
 *		self-referencing lookups; probe the hash bucket; on a hit, move
 *		the entry to the front of both the LRU and bucket lists and
 *		return; on a miss, scan the relation (index scan when available
 *		and not bootstrapping, else sequential), copy the tuple into the
 *		Cache context, insert it into the cache, and evict the LRU entry
 *		if the cache exceeds cc_maxtup.
 */
912 SearchSysCache(struct catcache * cache,
926 MemoryContext oldcxt;
932 if (cache->relationId == InvalidOid)
933 CatalogCacheInitializeCache(cache, NULL);
936 * initialize the search key information
939 cache->cc_skey[0].sk_argument = v1;
940 cache->cc_skey[1].sk_argument = v2;
941 cache->cc_skey[2].sk_argument = v3;
942 cache->cc_skey[3].sk_argument = v4;
945 * resolve self referencing informtion
/* caller always gets a palloc'd copy it can free */
947 if ((ntp = SearchSelfReferences(cache)))
948 return heap_copytuple(ntp);
951 * find the hash bucket in which to look for the tuple
954 hash = CatalogCacheComputeHashIndex(cache);
957 * scan the hash bucket until we find a match or exhaust our tuples
960 for (elt = DLGetHead(cache->cc_cache[hash]);
962 elt = DLGetSucc(elt))
966 ct = (CatCTup *) DLE_VAL(elt);
968 * see if the cached tuple matches our key.
969 * (should we be worried about time ranges? -cim 10/2/90)
972 HeapKeyTest(ct->ct_tup,
982 * if we found a tuple in the cache, move it to the top of the
983 * lru list, and return it. We also move it to the front of the
984 * list for its hashbucket, in order to speed subsequent searches.
985 * (The most frequently accessed elements in any hashbucket will
986 * tend to be near the front of the hashbucket's list.)
/* ct_node cross-links the bucket element to its LRU-list twin */
991 Dlelem *old_lru_elt = ((CatCTup *) DLE_VAL(elt))->ct_node;
993 DLMoveToFront(old_lru_elt);
997 relation = heap_open(cache->relationId, NoLock);
998 CACHE3_elog(DEBUG, "SearchSysCache(%s): found in bucket %d",
999 RelationGetRelationName(relation), hash);
1000 heap_close(relation, NoLock);
1001 #endif /* CACHEDEBUG */
1007 * Tuple was not found in cache, so we have to try and
1008 * retrieve it directly from the relation. If it's found,
1009 * we add it to the cache.
1011 * To guard against possible infinite recursion, we mark this cache
1012 * "busy" while trying to load a new entry for it. It is OK to
1013 * recursively invoke SearchSysCache for a different cache, but
1014 * a recursive call for the same cache will error out. (We could
1015 * store the specific key(s) being looked for, and consider only
1016 * a recursive request for the same key to be an error, but this
1017 * simple scheme is sufficient for now.)
1023 elog(ERROR, "SearchSysCache: recursive use of cache %d", cache->id);
1028 * open the relation associated with the cache
1031 relation = heap_open(cache->relationId, AccessShareLock);
1032 CACHE2_elog(DEBUG, "SearchSysCache(%s)",
1033 RelationGetRelationName(relation));
1036 * Switch to the cache memory context.
1041 CacheCxt = CreateGlobalMemory("Cache");
1043 oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
1046 * Scan the relation to find the tuple. If there's an index, and
1047 * if this isn't bootstrap (initdb) time, use the index.
1050 CACHE2_elog(DEBUG, "SearchSysCache: performing scan (override==%d)",
1053 if ((RelationGetForm(relation))->relhasindex
1054 && !IsBootstrapProcessingMode())
1057 * Switch back to old memory context so memory not freed
1058 * in the scan function will go away at transaction end.
1059 * wieck - 10/18/1996
1062 MemoryContextSwitchTo(oldcxt);
1063 Assert(cache->cc_iscanfunc);
/* dispatch to the cache's index-scan function with nkeys arguments */
1064 switch (cache->cc_nkeys)
1067 ntp = cache->cc_iscanfunc(relation, v1, v2, v3, v4);
1070 ntp = cache->cc_iscanfunc(relation, v1, v2, v3);
1073 ntp = cache->cc_iscanfunc(relation, v1, v2);
1076 ntp = cache->cc_iscanfunc(relation, v1);
1080 * Back to Cache context. If we got a tuple copy it
1082 * wieck - 10/18/1996
1085 MemoryContextSwitchTo((MemoryContext) CacheCxt);
1086 if (HeapTupleIsValid(ntp))
1087 ntp = heap_copytuple(ntp);
1094 * As above do the lookup in the callers memory
1096 * wieck - 10/18/1996
/* sequential-scan path: no usable index, or bootstrap mode */
1099 MemoryContextSwitchTo(oldcxt);
1101 sd = heap_beginscan(relation, 0, SnapshotNow,
1102 cache->cc_nkeys, cache->cc_skey);
1104 ntp = heap_getnext(sd, 0);
1106 MemoryContextSwitchTo((MemoryContext) CacheCxt);
1108 if (HeapTupleIsValid(ntp))
1110 CACHE1_elog(DEBUG, "SearchSysCache: found tuple");
/* copy while in the Cache context so the cached copy is long-lived */
1111 ntp = heap_copytuple(ntp);
1114 MemoryContextSwitchTo(oldcxt);
1118 MemoryContextSwitchTo((MemoryContext) CacheCxt);
/* lookup finished: clear the recursion guard */
1121 cache->busy = false;
1124 * scan is complete. if tup is valid, we copy it and add the copy to
1128 if (HeapTupleIsValid(ntp))
1131 * allocate a new cache tuple holder, store the pointer
1132 * to the heap tuple there and initialize the list pointers.
1138 * this is a little cumbersome here because we want the Dlelem's
1139 * in both doubly linked lists to point to one another. That makes
1140 * it easier to remove something from both the cache bucket and
1141 * the lru list at the same time
1143 nct = (CatCTup *) malloc(sizeof(CatCTup));
1145 elt = DLNewElem(nct)
;
1146 nct2 = (CatCTup *) malloc(sizeof(CatCTup));
1148 lru_elt = DLNewElem(nct2);
/* cross-link the two holders so either list element finds the other */
1149 nct2->ct_node = elt;
1150 nct->ct_node = lru_elt;
1152 DLAddHead(cache->cc_lrulist, lru_elt);
1153 DLAddHead(cache->cc_cache[hash], elt);
1156 * If we've exceeded the desired size of this cache,
1157 * throw away the least recently used entry.
1160 if (++cache->cc_ntup > cache->cc_maxtup)
1164 elt = DLGetTail(cache->cc_lrulist);
1165 ct = (CatCTup *) DLE_VAL(elt);
1167 if (ct != nct) /* shouldn't be possible, but be safe... */
1169 CACHE2_elog(DEBUG, "SearchSysCache(%s): Overflow, LRU removal",
1170 RelationGetRelationName(relation));
1172 CatCacheRemoveCTup(cache, elt);
1176 CACHE4_elog(DEBUG, "SearchSysCache(%s): Contains %d/%d tuples",
1177 RelationGetRelationName(relation),
1178 cache->cc_ntup, cache->cc_maxtup);
1179 CACHE3_elog(DEBUG, "SearchSysCache(%s): put in bucket %d",
1180 RelationGetRelationName(relation), hash);
1184 * close the relation, switch back to the original memory context
1185 * and return the tuple we found (or NULL)
1188 heap_close(relation, AccessShareLock);
1190 MemoryContextSwitchTo(oldcxt);
1194 /* --------------------------------
1195 * RelationInvalidateCatalogCacheTuple()
1197 * Invalidate a tuple from a specific relation. This call determines the
1198 * cache in question and calls CatalogCacheIdInvalidate(). It is -ok-
1199 * if the relation cannot be found, it simply means this backend has yet
1201 * --------------------------------
/*
 * RelationInvalidateCatalogCacheTuple
 *		For every cache built on 'relation', invoke 'function' (cache id,
 *		hash bucket, tuple TID) for the given tuple.  Used to broadcast
 *		or apply invalidations; CatalogCacheIdInvalidate is the default
 *		worker.
 */
1204 RelationInvalidateCatalogCacheTuple(Relation relation,
1206 void (*function) (int, Index, ItemPointer))
1208 struct catcache *ccp;
1209 MemoryContext oldcxt;
1216 Assert(RelationIsValid(relation));
1217 Assert(HeapTupleIsValid(tuple));
/*
 * NOTE(review): this Assert requires a non-NULL 'function', yet the loop
 * below defends against a NULL 'function' with a default -- one of the
 * two is redundant; verify intent against the full file.
 */
1218 Assert(PointerIsValid(function));
1219 CACHE1_elog(DEBUG, "RelationInvalidateCatalogCacheTuple: called");
1222 * switch to the cache memory context
1226 CacheCxt = CreateGlobalMemory("Cache");
1227 oldcxt = MemoryContextSwitchTo((MemoryContext) CacheCxt);
1231 * if the cache contains tuples from the specified relation
1232 * call the invalidation function on the tuples
1233 * in the proper hash bucket
1236 relationId = RelationGetRelid(relation);
1238 for (ccp = Caches; ccp; ccp = ccp->cc_next)
/* only caches over this relation are affected */
1240 if (relationId != ccp->relationId)
1244 /* OPT inline simplification of CatalogCacheIdInvalidate */
1245 if (!PointerIsValid(function))
1246 function = CatalogCacheIdInvalidate;
1249 (*function) (ccp->id,
1250 CatalogCacheComputeTupleHashIndex(ccp, relation, tuple),
1255 * return to the proper memory context
1258 MemoryContextSwitchTo(oldcxt);
1260 /* sendpm('I', "Invalidated tuple"); */