OSDN Git Service

918ab7c081ab6f11f5288bca8fc60b2e470ebde6
[pg-rex/syncrep.git] / src / backend / utils / cache / catcache.c
1 /*-------------------------------------------------------------------------
2  *
3  * catcache.c
4  *        System catalog cache for tuples matching a key.
5  *
6  * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  *        $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.125 2005/10/15 02:49:30 momjian Exp $
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16
17 #include "access/genam.h"
18 #include "access/hash.h"
19 #include "access/heapam.h"
20 #include "access/valid.h"
21 #include "catalog/pg_opclass.h"
22 #include "catalog/pg_operator.h"
23 #include "catalog/pg_type.h"
24 #include "catalog/indexing.h"
25 #include "miscadmin.h"
26 #ifdef CATCACHE_STATS
27 #include "storage/ipc.h"                /* for on_proc_exit */
28 #endif
29 #include "utils/builtins.h"
30 #include "utils/fmgroids.h"
31 #include "utils/catcache.h"
32 #include "utils/memutils.h"
33 #include "utils/relcache.h"
34 #include "utils/resowner.h"
35 #include "utils/syscache.h"
36
37
38  /* #define CACHEDEBUG */       /* turns DEBUG elogs on */
39
40 /*
41  * Constants related to size of the catcache.
42  *
43  * NCCBUCKETS must be a power of two and must be less than 64K (because
44  * SharedInvalCatcacheMsg crams hash indexes into a uint16 field).      In
45  * practice it should be a lot less, anyway, to avoid chewing up too much
46  * space on hash bucket headers.
47  *
48  * MAXCCTUPLES could be as small as a few hundred, if per-backend memory
49  * consumption is at a premium.
50  */
51 #define NCCBUCKETS 256                  /* Hash buckets per CatCache */
52 #define MAXCCTUPLES 5000                /* Maximum # of tuples in all caches */
53
54 /*
55  * Given a hash value and the size of the hash table, find the bucket
56  * in which the hash value belongs. Since the hash table must contain
57  * a power-of-2 number of elements, this is a simple bitmask.
58  */
59 #define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
60
61
62 /*
63  *              variables, macros and other stuff
64  */
65
66 #ifdef CACHEDEBUG
67 #define CACHE1_elog(a,b)                                elog(a,b)
68 #define CACHE2_elog(a,b,c)                              elog(a,b,c)
69 #define CACHE3_elog(a,b,c,d)                    elog(a,b,c,d)
70 #define CACHE4_elog(a,b,c,d,e)                  elog(a,b,c,d,e)
71 #define CACHE5_elog(a,b,c,d,e,f)                elog(a,b,c,d,e,f)
72 #define CACHE6_elog(a,b,c,d,e,f,g)              elog(a,b,c,d,e,f,g)
73 #else
74 #define CACHE1_elog(a,b)
75 #define CACHE2_elog(a,b,c)
76 #define CACHE3_elog(a,b,c,d)
77 #define CACHE4_elog(a,b,c,d,e)
78 #define CACHE5_elog(a,b,c,d,e,f)
79 #define CACHE6_elog(a,b,c,d,e,f,g)
80 #endif
81
82 /* Cache management header --- pointer is NULL until created */
83 static CatCacheHeader *CacheHdr = NULL;
84
85
86 static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
87                                                          ScanKey cur_skey);
88 static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache,
89                                                                   HeapTuple tuple);
90
91 #ifdef CATCACHE_STATS
92 static void CatCachePrintStats(void);
93 #endif
94 static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
95 static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
96 static void CatalogCacheInitializeCache(CatCache *cache);
97 static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
98                                                 uint32 hashValue, Index hashIndex,
99                                                 bool negative);
100 static void CatalogCacheCleanup(CatCTup *savect);
101 static HeapTuple build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys);
102
103
104 /*
105  *                                      internal support functions
106  */
107
108 /*
109  * Look up the hash and equality functions for system types that are used
110  * as cache key fields.
111  *
112  * XXX this should be replaced by catalog lookups,
113  * but that seems to pose considerable risk of circularity...
114  */
115 static void
116 GetCCHashEqFuncs(Oid keytype, PGFunction *hashfunc, RegProcedure *eqfunc)
117 {
118         switch (keytype)
119         {
120                 case BOOLOID:
121                         *hashfunc = hashchar;
122                         *eqfunc = F_BOOLEQ;
123                         break;
124                 case CHAROID:
125                         *hashfunc = hashchar;
126                         *eqfunc = F_CHAREQ;
127                         break;
128                 case NAMEOID:
129                         *hashfunc = hashname;
130                         *eqfunc = F_NAMEEQ;
131                         break;
132                 case INT2OID:
133                         *hashfunc = hashint2;
134                         *eqfunc = F_INT2EQ;
135                         break;
136                 case INT2VECTOROID:
137                         *hashfunc = hashint2vector;
138                         *eqfunc = F_INT2VECTOREQ;
139                         break;
140                 case INT4OID:
141                         *hashfunc = hashint4;
142                         *eqfunc = F_INT4EQ;
143                         break;
144                 case TEXTOID:
145                         *hashfunc = hashtext;
146                         *eqfunc = F_TEXTEQ;
147                         break;
148                 case OIDOID:
149                 case REGPROCOID:
150                 case REGPROCEDUREOID:
151                 case REGOPEROID:
152                 case REGOPERATOROID:
153                 case REGCLASSOID:
154                 case REGTYPEOID:
155                         *hashfunc = hashoid;
156                         *eqfunc = F_OIDEQ;
157                         break;
158                 case OIDVECTOROID:
159                         *hashfunc = hashoidvector;
160                         *eqfunc = F_OIDVECTOREQ;
161                         break;
162                 default:
163                         elog(FATAL, "type %u not supported as catcache key", keytype);
164                         *hashfunc = NULL;       /* keep compiler quiet */
165                         *eqfunc = InvalidOid;
166                         break;
167         }
168 }
169
170 /*
171  *              CatalogCacheComputeHashValue
172  *
173  * Compute the hash value associated with a given set of lookup keys
174  */
175 static uint32
176 CatalogCacheComputeHashValue(CatCache *cache, int nkeys, ScanKey cur_skey)
177 {
178         uint32          hashValue = 0;
179
180         CACHE4_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
181                                 cache->cc_relname,
182                                 nkeys,
183                                 cache);
184
185         switch (nkeys)
186         {
187                 case 4:
188                         hashValue ^=
189                                 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
190                                                                                           cur_skey[3].sk_argument)) << 9;
191                         /* FALLTHROUGH */
192                 case 3:
193                         hashValue ^=
194                                 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
195                                                                                           cur_skey[2].sk_argument)) << 6;
196                         /* FALLTHROUGH */
197                 case 2:
198                         hashValue ^=
199                                 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
200                                                                                           cur_skey[1].sk_argument)) << 3;
201                         /* FALLTHROUGH */
202                 case 1:
203                         hashValue ^=
204                                 DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
205                                                                                                    cur_skey[0].sk_argument));
206                         break;
207                 default:
208                         elog(FATAL, "wrong number of hash keys: %d", nkeys);
209                         break;
210         }
211
212         return hashValue;
213 }
214
215 /*
216  *              CatalogCacheComputeTupleHashValue
217  *
218  * Compute the hash value associated with a given tuple to be cached
219  */
220 static uint32
221 CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
222 {
223         ScanKeyData cur_skey[4];
224         bool            isNull = false;
225
226         /* Copy pre-initialized overhead data for scankey */
227         memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
228
229         /* Now extract key fields from tuple, insert into scankey */
230         switch (cache->cc_nkeys)
231         {
232                 case 4:
233                         cur_skey[3].sk_argument =
234                                 (cache->cc_key[3] == ObjectIdAttributeNumber)
235                                 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
236                                 : fastgetattr(tuple,
237                                                           cache->cc_key[3],
238                                                           cache->cc_tupdesc,
239                                                           &isNull);
240                         Assert(!isNull);
241                         /* FALLTHROUGH */
242                 case 3:
243                         cur_skey[2].sk_argument =
244                                 (cache->cc_key[2] == ObjectIdAttributeNumber)
245                                 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
246                                 : fastgetattr(tuple,
247                                                           cache->cc_key[2],
248                                                           cache->cc_tupdesc,
249                                                           &isNull);
250                         Assert(!isNull);
251                         /* FALLTHROUGH */
252                 case 2:
253                         cur_skey[1].sk_argument =
254                                 (cache->cc_key[1] == ObjectIdAttributeNumber)
255                                 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
256                                 : fastgetattr(tuple,
257                                                           cache->cc_key[1],
258                                                           cache->cc_tupdesc,
259                                                           &isNull);
260                         Assert(!isNull);
261                         /* FALLTHROUGH */
262                 case 1:
263                         cur_skey[0].sk_argument =
264                                 (cache->cc_key[0] == ObjectIdAttributeNumber)
265                                 ? ObjectIdGetDatum(HeapTupleGetOid(tuple))
266                                 : fastgetattr(tuple,
267                                                           cache->cc_key[0],
268                                                           cache->cc_tupdesc,
269                                                           &isNull);
270                         Assert(!isNull);
271                         break;
272                 default:
273                         elog(FATAL, "wrong number of hash keys: %d", cache->cc_nkeys);
274                         break;
275         }
276
277         return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
278 }
279
280
281 #ifdef CATCACHE_STATS
282
283 static void
284 CatCachePrintStats(void)
285 {
286         CatCache   *cache;
287         long            cc_searches = 0;
288         long            cc_hits = 0;
289         long            cc_neg_hits = 0;
290         long            cc_newloads = 0;
291         long            cc_invals = 0;
292         long            cc_discards = 0;
293         long            cc_lsearches = 0;
294         long            cc_lhits = 0;
295
296         elog(DEBUG2, "catcache stats dump: %d/%d tuples in catcaches",
297                  CacheHdr->ch_ntup, CacheHdr->ch_maxtup);
298
299         for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
300         {
301                 if (cache->cc_ntup == 0 && cache->cc_searches == 0)
302                         continue;                       /* don't print unused caches */
303                 elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards, %ld lsrch, %ld lhits",
304                          cache->cc_relname,
305                          cache->cc_indexoid,
306                          cache->cc_ntup,
307                          cache->cc_searches,
308                          cache->cc_hits,
309                          cache->cc_neg_hits,
310                          cache->cc_hits + cache->cc_neg_hits,
311                          cache->cc_newloads,
312                          cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
313                          cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
314                          cache->cc_invals,
315                          cache->cc_discards,
316                          cache->cc_lsearches,
317                          cache->cc_lhits);
318                 cc_searches += cache->cc_searches;
319                 cc_hits += cache->cc_hits;
320                 cc_neg_hits += cache->cc_neg_hits;
321                 cc_newloads += cache->cc_newloads;
322                 cc_invals += cache->cc_invals;
323                 cc_discards += cache->cc_discards;
324                 cc_lsearches += cache->cc_lsearches;
325                 cc_lhits += cache->cc_lhits;
326         }
327         elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld discards, %ld lsrch, %ld lhits",
328                  CacheHdr->ch_ntup,
329                  cc_searches,
330                  cc_hits,
331                  cc_neg_hits,
332                  cc_hits + cc_neg_hits,
333                  cc_newloads,
334                  cc_searches - cc_hits - cc_neg_hits - cc_newloads,
335                  cc_searches - cc_hits - cc_neg_hits,
336                  cc_invals,
337                  cc_discards,
338                  cc_lsearches,
339                  cc_lhits);
340 }
341 #endif   /* CATCACHE_STATS */
342
343
344 /*
345  *              CatCacheRemoveCTup
346  *
347  * Unlink and delete the given cache entry
348  *
349  * NB: if it is a member of a CatCList, the CatCList is deleted too.
350  * Both the cache entry and the list had better have zero refcount.
351  */
352 static void
353 CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
354 {
355         Assert(ct->refcount == 0);
356         Assert(ct->my_cache == cache);
357
358         if (ct->c_list)
359                 CatCacheRemoveCList(cache, ct->c_list);
360
361         /* delink from linked lists */
362         DLRemove(&ct->lrulist_elem);
363         DLRemove(&ct->cache_elem);
364
365         /* free associated tuple data */
366         if (ct->tuple.t_data != NULL)
367                 pfree(ct->tuple.t_data);
368         pfree(ct);
369
370         --cache->cc_ntup;
371         --CacheHdr->ch_ntup;
372 }
373
374 /*
375  *              CatCacheRemoveCList
376  *
377  * Unlink and delete the given cache list entry
378  */
379 static void
380 CatCacheRemoveCList(CatCache *cache, CatCList *cl)
381 {
382         int                     i;
383
384         Assert(cl->refcount == 0);
385         Assert(cl->my_cache == cache);
386
387         /* delink from member tuples */
388         for (i = cl->n_members; --i >= 0;)
389         {
390                 CatCTup    *ct = cl->members[i];
391
392                 Assert(ct->c_list == cl);
393                 ct->c_list = NULL;
394         }
395
396         /* delink from linked list */
397         DLRemove(&cl->cache_elem);
398
399         /* free associated tuple data */
400         if (cl->tuple.t_data != NULL)
401                 pfree(cl->tuple.t_data);
402         pfree(cl);
403 }
404
405
406 /*
407  *      CatalogCacheIdInvalidate
408  *
409  *      Invalidate entries in the specified cache, given a hash value and
410  *      item pointer.  Positive entries are deleted if they match the item
411  *      pointer.  Negative entries must be deleted if they match the hash
412  *      value (since we do not have the exact key of the tuple that's being
413  *      inserted).      But this should only rarely result in loss of a cache
414  *      entry that could have been kept.
415  *
416  *      Note that it's not very relevant whether the tuple identified by
417  *      the item pointer is being inserted or deleted.  We don't expect to
418  *      find matching positive entries in the one case, and we don't expect
419  *      to find matching negative entries in the other; but we will do the
420  *      right things in any case.
421  *
422  *      This routine is only quasi-public: it should only be used by inval.c.
423  */
424 void
425 CatalogCacheIdInvalidate(int cacheId,
426                                                  uint32 hashValue,
427                                                  ItemPointer pointer)
428 {
429         CatCache   *ccp;
430
431         /*
432          * sanity checks
433          */
434         Assert(ItemPointerIsValid(pointer));
435         CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: called");
436
437         /*
438          * inspect caches to find the proper cache
439          */
440         for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
441         {
442                 Index           hashIndex;
443                 Dlelem     *elt,
444                                    *nextelt;
445
446                 if (cacheId != ccp->id)
447                         continue;
448
449                 /*
450                  * We don't bother to check whether the cache has finished
451                  * initialization yet; if not, there will be no entries in it so no
452                  * problem.
453                  */
454
455                 /*
456                  * Invalidate *all* CatCLists in this cache; it's too hard to tell
457                  * which searches might still be correct, so just zap 'em all.
458                  */
459                 for (elt = DLGetHead(&ccp->cc_lists); elt; elt = nextelt)
460                 {
461                         CatCList   *cl = (CatCList *) DLE_VAL(elt);
462
463                         nextelt = DLGetSucc(elt);
464
465                         if (cl->refcount > 0)
466                                 cl->dead = true;
467                         else
468                                 CatCacheRemoveCList(ccp, cl);
469                 }
470
471                 /*
472                  * inspect the proper hash bucket for tuple matches
473                  */
474                 hashIndex = HASH_INDEX(hashValue, ccp->cc_nbuckets);
475
476                 for (elt = DLGetHead(&ccp->cc_bucket[hashIndex]); elt; elt = nextelt)
477                 {
478                         CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
479
480                         nextelt = DLGetSucc(elt);
481
482                         if (hashValue != ct->hash_value)
483                                 continue;               /* ignore non-matching hash values */
484
485                         if (ct->negative ||
486                                 ItemPointerEquals(pointer, &ct->tuple.t_self))
487                         {
488                                 if (ct->refcount > 0 ||
489                                         (ct->c_list && ct->c_list->refcount > 0))
490                                 {
491                                         ct->dead = true;
492                                         /* list, if any, was marked dead above */
493                                         Assert(ct->c_list == NULL || ct->c_list->dead);
494                                 }
495                                 else
496                                         CatCacheRemoveCTup(ccp, ct);
497                                 CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: invalidated");
498 #ifdef CATCACHE_STATS
499                                 ccp->cc_invals++;
500 #endif
501                                 /* could be multiple matches, so keep looking! */
502                         }
503                 }
504                 break;                                  /* need only search this one cache */
505         }
506 }
507
508 /* ----------------------------------------------------------------
509  *                                         public functions
510  * ----------------------------------------------------------------
511  */
512
513
514 /*
515  * Standard routine for creating cache context if it doesn't exist yet
516  *
517  * There are a lot of places (probably far more than necessary) that check
518  * whether CacheMemoryContext exists yet and want to create it if not.
519  * We centralize knowledge of exactly how to create it here.
520  */
521 void
522 CreateCacheMemoryContext(void)
523 {
524         /*
525          * Purely for paranoia, check that context doesn't exist; caller probably
526          * did so already.
527          */
528         if (!CacheMemoryContext)
529                 CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
530                                                                                                    "CacheMemoryContext",
531                                                                                                    ALLOCSET_DEFAULT_MINSIZE,
532                                                                                                    ALLOCSET_DEFAULT_INITSIZE,
533                                                                                                    ALLOCSET_DEFAULT_MAXSIZE);
534 }
535
536
537 /*
538  *              AtEOXact_CatCache
539  *
540  * Clean up catcaches at end of main transaction (either commit or abort)
541  *
542  * As of PostgreSQL 8.1, catcache pins should get released by the
543  * ResourceOwner mechanism.  This routine is just a debugging
544  * cross-check that no pins remain.
545  */
546 void
547 AtEOXact_CatCache(bool isCommit)
548 {
549 #ifdef USE_ASSERT_CHECKING
550         if (assert_enabled)
551         {
552                 CatCache   *ccp;
553                 Dlelem     *elt;
554
555                 /* Check CatCLists */
556                 for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
557                 {
558                         for (elt = DLGetHead(&ccp->cc_lists); elt; elt = DLGetSucc(elt))
559                         {
560                                 CatCList   *cl = (CatCList *) DLE_VAL(elt);
561
562                                 Assert(cl->cl_magic == CL_MAGIC);
563                                 Assert(cl->refcount == 0);
564                                 Assert(!cl->dead);
565                         }
566                 }
567
568                 /* Check individual tuples */
569                 for (elt = DLGetHead(&CacheHdr->ch_lrulist); elt; elt = DLGetSucc(elt))
570                 {
571                         CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
572
573                         Assert(ct->ct_magic == CT_MAGIC);
574                         Assert(ct->refcount == 0);
575                         Assert(!ct->dead);
576                 }
577         }
578 #endif
579 }
580
581 /*
582  *              ResetCatalogCache
583  *
584  * Reset one catalog cache to empty.
585  *
586  * This is not very efficient if the target cache is nearly empty.
587  * However, it shouldn't need to be efficient; we don't invoke it often.
588  */
589 static void
590 ResetCatalogCache(CatCache *cache)
591 {
592         Dlelem     *elt,
593                            *nextelt;
594         int                     i;
595
596         /* Remove each list in this cache, or at least mark it dead */
597         for (elt = DLGetHead(&cache->cc_lists); elt; elt = nextelt)
598         {
599                 CatCList   *cl = (CatCList *) DLE_VAL(elt);
600
601                 nextelt = DLGetSucc(elt);
602
603                 if (cl->refcount > 0)
604                         cl->dead = true;
605                 else
606                         CatCacheRemoveCList(cache, cl);
607         }
608
609         /* Remove each tuple in this cache, or at least mark it dead */
610         for (i = 0; i < cache->cc_nbuckets; i++)
611         {
612                 for (elt = DLGetHead(&cache->cc_bucket[i]); elt; elt = nextelt)
613                 {
614                         CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
615
616                         nextelt = DLGetSucc(elt);
617
618                         if (ct->refcount > 0 ||
619                                 (ct->c_list && ct->c_list->refcount > 0))
620                         {
621                                 ct->dead = true;
622                                 /* list, if any, was marked dead above */
623                                 Assert(ct->c_list == NULL || ct->c_list->dead);
624                         }
625                         else
626                                 CatCacheRemoveCTup(cache, ct);
627 #ifdef CATCACHE_STATS
628                         cache->cc_invals++;
629 #endif
630                 }
631         }
632 }
633
634 /*
635  *              ResetCatalogCaches
636  *
637  * Reset all caches when a shared cache inval event forces it
638  */
639 void
640 ResetCatalogCaches(void)
641 {
642         CatCache   *cache;
643
644         CACHE1_elog(DEBUG2, "ResetCatalogCaches called");
645
646         for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
647                 ResetCatalogCache(cache);
648
649         CACHE1_elog(DEBUG2, "end of ResetCatalogCaches call");
650 }
651
652 /*
653  *              CatalogCacheFlushRelation
654  *
655  *      This is called by RelationFlushRelation() to clear out cached information
656  *      about a relation being dropped.  (This could be a DROP TABLE command,
657  *      or a temp table being dropped at end of transaction, or a table created
658  *      during the current transaction that is being dropped because of abort.)
659  *      Remove all cache entries relevant to the specified relation OID.
660  *
661  *      A special case occurs when relId is itself one of the cacheable system
662  *      tables --- although those'll never be dropped, they can get flushed from
663  *      the relcache (VACUUM causes this, for example).  In that case we need
664  *      to flush all cache entries that came from that table.  (At one point we
665  *      also tried to force re-execution of CatalogCacheInitializeCache for
666  *      the cache(s) on that table.  This is a bad idea since it leads to all
667  *      kinds of trouble if a cache flush occurs while loading cache entries.
668  *      We now avoid the need to do it by copying cc_tupdesc out of the relcache,
669  *      rather than relying on the relcache to keep a tupdesc for us.  Of course
670  *      this assumes the tupdesc of a cachable system table will not change...)
671  */
672 void
673 CatalogCacheFlushRelation(Oid relId)
674 {
675         CatCache   *cache;
676
677         CACHE2_elog(DEBUG2, "CatalogCacheFlushRelation called for %u", relId);
678
679         for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
680         {
681                 int                     i;
682
683                 /* We can ignore uninitialized caches, since they must be empty */
684                 if (cache->cc_tupdesc == NULL)
685                         continue;
686
687                 /* Does this cache store tuples of the target relation itself? */
688                 if (cache->cc_tupdesc->attrs[0]->attrelid == relId)
689                 {
690                         /* Yes, so flush all its contents */
691                         ResetCatalogCache(cache);
692                         continue;
693                 }
694
695                 /* Does this cache store tuples associated with relations at all? */
696                 if (cache->cc_reloidattr == 0)
697                         continue;                       /* nope, leave it alone */
698
699                 /* Yes, scan the tuples and remove those related to relId */
700                 for (i = 0; i < cache->cc_nbuckets; i++)
701                 {
702                         Dlelem     *elt,
703                                            *nextelt;
704
705                         for (elt = DLGetHead(&cache->cc_bucket[i]); elt; elt = nextelt)
706                         {
707                                 CatCTup    *ct = (CatCTup *) DLE_VAL(elt);
708                                 Oid                     tupRelid;
709
710                                 nextelt = DLGetSucc(elt);
711
712                                 /*
713                                  * Negative entries are never considered related to a rel,
714                                  * even if the rel is part of their lookup key.
715                                  */
716                                 if (ct->negative)
717                                         continue;
718
719                                 if (cache->cc_reloidattr == ObjectIdAttributeNumber)
720                                         tupRelid = HeapTupleGetOid(&ct->tuple);
721                                 else
722                                 {
723                                         bool            isNull;
724
725                                         tupRelid =
726                                                 DatumGetObjectId(fastgetattr(&ct->tuple,
727                                                                                                          cache->cc_reloidattr,
728                                                                                                          cache->cc_tupdesc,
729                                                                                                          &isNull));
730                                         Assert(!isNull);
731                                 }
732
733                                 if (tupRelid == relId)
734                                 {
735                                         if (ct->refcount > 0 ||
736                                                 (ct->c_list && ct->c_list->refcount > 0))
737                                         {
738                                                 ct->dead = true;
739                                                 /* parent list must be considered dead too */
740                                                 if (ct->c_list)
741                                                         ct->c_list->dead = true;
742                                         }
743                                         else
744                                                 CatCacheRemoveCTup(cache, ct);
745 #ifdef CATCACHE_STATS
746                                         cache->cc_invals++;
747 #endif
748                                 }
749                         }
750                 }
751         }
752
753         CACHE1_elog(DEBUG2, "end of CatalogCacheFlushRelation call");
754 }
755
756 /*
757  *              InitCatCache
758  *
759  *      This allocates and initializes a cache for a system catalog relation.
760  *      Actually, the cache is only partially initialized to avoid opening the
761  *      relation.  The relation will be opened and the rest of the cache
762  *      structure initialized on the first access.
763  */
764 #ifdef CACHEDEBUG
765 #define InitCatCache_DEBUG2 \
766 do { \
767         elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
768                  cp->cc_reloid, cp->cc_indexoid, cp->id, \
769                  cp->cc_nkeys, cp->cc_nbuckets); \
770 } while(0)
771 #else
772 #define InitCatCache_DEBUG2
773 #endif
774
775 CatCache *
776 InitCatCache(int id,
777                          Oid reloid,
778                          Oid indexoid,
779                          int reloidattr,
780                          int nkeys,
781                          const int *key)
782 {
783         CatCache   *cp;
784         MemoryContext oldcxt;
785         int                     i;
786
787         /*
788          * first switch to the cache context so our allocations do not vanish at
789          * the end of a transaction
790          */
791         if (!CacheMemoryContext)
792                 CreateCacheMemoryContext();
793
794         oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
795
796         /*
797          * if first time through, initialize the cache group header, including
798          * global LRU list header
799          */
800         if (CacheHdr == NULL)
801         {
802                 CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
803                 CacheHdr->ch_caches = NULL;
804                 CacheHdr->ch_ntup = 0;
805                 CacheHdr->ch_maxtup = MAXCCTUPLES;
806                 DLInitList(&CacheHdr->ch_lrulist);
807 #ifdef CATCACHE_STATS
808                 on_proc_exit(CatCachePrintStats, 0);
809 #endif
810         }
811
812         /*
813          * allocate a new cache structure
814          *
815          * Note: we assume zeroing initializes the Dllist headers correctly
816          */
817         cp = (CatCache *) palloc0(sizeof(CatCache) + NCCBUCKETS * sizeof(Dllist));
818
819         /*
820          * initialize the cache's relation information for the relation
821          * corresponding to this cache, and initialize some of the new cache's
822          * other internal fields.  But don't open the relation yet.
823          */
824         cp->id = id;
825         cp->cc_relname = "(not known yet)";
826         cp->cc_reloid = reloid;
827         cp->cc_indexoid = indexoid;
828         cp->cc_relisshared = false; /* temporary */
829         cp->cc_tupdesc = (TupleDesc) NULL;
830         cp->cc_reloidattr = reloidattr;
831         cp->cc_ntup = 0;
832         cp->cc_nbuckets = NCCBUCKETS;
833         cp->cc_nkeys = nkeys;
834         for (i = 0; i < nkeys; ++i)
835                 cp->cc_key[i] = key[i];
836
837         /*
838          * new cache is initialized as far as we can go for now. print some
839          * debugging information, if appropriate.
840          */
841         InitCatCache_DEBUG2;
842
843         /*
844          * add completed cache to top of group header's list
845          */
846         cp->cc_next = CacheHdr->ch_caches;
847         CacheHdr->ch_caches = cp;
848
849         /*
850          * back to the old context before we return...
851          */
852         MemoryContextSwitchTo(oldcxt);
853
854         return cp;
855 }
856
/*
 *		CatalogCacheInitializeCache
 *
 * This function does final initialization of a catcache: obtain the tuple
 * descriptor and set up the hash and equality function links.  We assume
 * that the relcache entry can be opened at this point!
 *
 * This is the deferred half of the work that InitCatCache postponed so as
 * not to open the relation during early backend startup.  Completion is
 * signaled by setting cache->cc_tupdesc non-NULL (done last, below).
 */
#ifdef CACHEDEBUG
#define CatalogCacheInitializeCache_DEBUG1 \
	elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
		 cache->cc_reloid)

/* NB: this macro references the locals i, tupdesc, and cache by capture */
#define CatalogCacheInitializeCache_DEBUG2 \
do { \
		if (cache->cc_key[i] > 0) { \
			elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
				i+1, cache->cc_nkeys, cache->cc_key[i], \
				 tupdesc->attrs[cache->cc_key[i] - 1]->atttypid); \
		} else { \
			elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
				i+1, cache->cc_nkeys, cache->cc_key[i]); \
		} \
} while(0)
#else
#define CatalogCacheInitializeCache_DEBUG1
#define CatalogCacheInitializeCache_DEBUG2
#endif

static void
CatalogCacheInitializeCache(CatCache *cache)
{
	Relation	relation;
	MemoryContext oldcxt;
	TupleDesc	tupdesc;
	int			i;

	CatalogCacheInitializeCache_DEBUG1;

	/*
	 * Open the relation without locking --- we only need the tupdesc, which
	 * we assume will never change ...
	 */
	relation = heap_open(cache->cc_reloid, NoLock);
	Assert(RelationIsValid(relation));

	/*
	 * switch to the cache context so our allocations do not vanish at the end
	 * of a transaction
	 */
	Assert(CacheMemoryContext != NULL);

	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	/*
	 * copy the relcache's tuple descriptor to permanent cache storage
	 * (the relcache's own copy could be freed if the rel entry is rebuilt)
	 */
	tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));

	/*
	 * save the relation's name and relisshared flag, too (cc_relname is used
	 * only for debugging purposes)
	 */
	cache->cc_relname = pstrdup(RelationGetRelationName(relation));
	cache->cc_relisshared = RelationGetForm(relation)->relisshared;

	/*
	 * return to the caller's memory context and close the rel
	 */
	MemoryContextSwitchTo(oldcxt);

	heap_close(relation, NoLock);

	CACHE3_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
				cache->cc_relname, cache->cc_nkeys);

	/*
	 * initialize cache's key information: for each key column, look up the
	 * key datatype and install matching hash and equality support functions
	 */
	for (i = 0; i < cache->cc_nkeys; ++i)
	{
		Oid			keytype;
		RegProcedure eqfunc;

		CatalogCacheInitializeCache_DEBUG2;

		if (cache->cc_key[i] > 0)
			keytype = tupdesc->attrs[cache->cc_key[i] - 1]->atttypid;
		else
		{
			/* Negative attnums are system columns; only OID is supported */
			if (cache->cc_key[i] != ObjectIdAttributeNumber)
				elog(FATAL, "only sys attr supported in caches is OID");
			keytype = OIDOID;
		}

		GetCCHashEqFuncs(keytype,
						 &cache->cc_hashfunc[i],
						 &eqfunc);

		/* NAME keys get special treatment (C-string inputs allowed) */
		cache->cc_isname[i] = (keytype == NAMEOID);

		/*
		 * Do equality-function lookup (we assume this won't need a catalog
		 * lookup for any supported type)
		 */
		fmgr_info_cxt(eqfunc,
					  &cache->cc_skey[i].sk_func,
					  CacheMemoryContext);

		/* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
		cache->cc_skey[i].sk_attno = cache->cc_key[i];

		/* Fill in sk_strategy as well --- always standard equality */
		cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
		cache->cc_skey[i].sk_subtype = InvalidOid;

		CACHE4_elog(DEBUG2, "CatalogCacheInit %s %d %p",
					cache->cc_relname,
					i,
					cache);
	}

	/*
	 * mark this cache fully initialized (this must be the last step, since
	 * cc_tupdesc != NULL is the "initialized" test used by callers)
	 */
	cache->cc_tupdesc = tupdesc;
}
983
984 /*
985  * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
986  *
987  * The only reason to call this routine is to ensure that the relcache
988  * has created entries for all the catalogs and indexes referenced by
989  * catcaches.  Therefore, open the index too.  An exception is the indexes
990  * on pg_am, which we don't use (cf. IndexScanOK).
991  */
992 void
993 InitCatCachePhase2(CatCache *cache)
994 {
995         if (cache->cc_tupdesc == NULL)
996                 CatalogCacheInitializeCache(cache);
997
998         if (cache->id != AMOID &&
999                 cache->id != AMNAME)
1000         {
1001                 Relation        idesc;
1002
1003                 idesc = index_open(cache->cc_indexoid);
1004                 index_close(idesc);
1005         }
1006 }
1007
1008
1009 /*
1010  *              IndexScanOK
1011  *
1012  *              This function checks for tuples that will be fetched by
1013  *              IndexSupportInitialize() during relcache initialization for
1014  *              certain system indexes that support critical syscaches.
1015  *              We can't use an indexscan to fetch these, else we'll get into
1016  *              infinite recursion.  A plain heap scan will work, however.
1017  *
1018  *              Once we have completed relcache initialization (signaled by
1019  *              criticalRelcachesBuilt), we don't have to worry anymore.
1020  */
1021 static bool
1022 IndexScanOK(CatCache *cache, ScanKey cur_skey)
1023 {
1024         if (cache->id == INDEXRELID)
1025         {
1026                 /*
1027                  * Since the OIDs of indexes aren't hardwired, it's painful to figure
1028                  * out which is which.  Just force all pg_index searches to be heap
1029                  * scans while building the relcaches.
1030                  */
1031                 if (!criticalRelcachesBuilt)
1032                         return false;
1033         }
1034         else if (cache->id == AMOID ||
1035                          cache->id == AMNAME)
1036         {
1037                 /*
1038                  * Always do heap scans in pg_am, because it's so small there's not
1039                  * much point in an indexscan anyway.  We *must* do this when
1040                  * initially building critical relcache entries, but we might as well
1041                  * just always do it.
1042                  */
1043                 return false;
1044         }
1045         else if (cache->id == OPEROID)
1046         {
1047                 if (!criticalRelcachesBuilt)
1048                 {
1049                         /* Looking for an OID comparison function? */
1050                         Oid                     lookup_oid = DatumGetObjectId(cur_skey[0].sk_argument);
1051
1052                         if (lookup_oid >= MIN_OIDCMP && lookup_oid <= MAX_OIDCMP)
1053                                 return false;
1054                 }
1055         }
1056
1057         /* Normal case, allow index scan */
1058         return true;
1059 }
1060
/*
 *	SearchCatCache
 *
 *		This call searches a system cache for a tuple, opening the relation
 *		if necessary (on the first access to a particular cache).
 *
 *		The result is NULL if not found, or a pointer to a HeapTuple in
 *		the cache.	The caller must not modify the tuple, and must call
 *		ReleaseCatCache() when done with it.
 *
 * The search key values should be expressed as Datums of the key columns'
 * datatype(s).  (Pass zeroes for any unused parameters.)  As a special
 * exception, the passed-in key for a NAME column can be just a C string;
 * the caller need not go to the trouble of converting it to a fully
 * null-padded NAME.
 */
HeapTuple
SearchCatCache(CatCache *cache,
			   Datum v1,
			   Datum v2,
			   Datum v3,
			   Datum v4)
{
	ScanKeyData cur_skey[4];
	uint32		hashValue;
	Index		hashIndex;
	Dlelem	   *elt;
	CatCTup    *ct;
	Relation	relation;
	SysScanDesc scandesc;
	HeapTuple	ntp;

	/*
	 * one-time startup overhead for each cache
	 */
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

#ifdef CATCACHE_STATS
	cache->cc_searches++;
#endif

	/*
	 * initialize the search key information: copy the cache's prototype
	 * scankeys (set up in CatalogCacheInitializeCache) and plug in the
	 * caller's key values
	 */
	memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
	cur_skey[0].sk_argument = v1;
	cur_skey[1].sk_argument = v2;
	cur_skey[2].sk_argument = v3;
	cur_skey[3].sk_argument = v4;

	/*
	 * find the hash bucket in which to look for the tuple
	 */
	hashValue = CatalogCacheComputeHashValue(cache, cache->cc_nkeys, cur_skey);
	hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

	/*
	 * scan the hash bucket until we find a match or exhaust our tuples
	 */
	for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);
		 elt;
		 elt = DLGetSucc(elt))
	{
		bool		res;

		ct = (CatCTup *) DLE_VAL(elt);

		if (ct->dead)
			continue;					/* ignore dead entries */

		if (ct->hash_value != hashValue)
			continue;					/* quickly skip entry if wrong hash val */

		/*
		 * see if the cached tuple matches our key.
		 * (HeapKeyTest is a macro; it assigns the result into res.)
		 */
		HeapKeyTest(&ct->tuple,
					cache->cc_tupdesc,
					cache->cc_nkeys,
					cur_skey,
					res);
		if (!res)
			continue;

		/*
		 * we found a match in the cache: move it to the front of the global
		 * LRU list.  We also move it to the front of the list for its
		 * hashbucket, in order to speed subsequent searches.  (The most
		 * frequently accessed elements in any hashbucket will tend to be near
		 * the front of the hashbucket's list.)
		 */
		DLMoveToFront(&ct->lrulist_elem);
		DLMoveToFront(&ct->cache_elem);

		/*
		 * If it's a positive entry, bump its refcount and return it. If it's
		 * negative, we can report failure to the caller.  The refcount is
		 * also recorded with the current resource owner, so it gets cleaned
		 * up if the caller errors out before releasing.
		 */
		if (!ct->negative)
		{
			ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
			ct->refcount++;
			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);

			CACHE3_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
						cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
			cache->cc_hits++;
#endif

			return &ct->tuple;
		}
		else
		{
			CACHE3_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
						cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
			cache->cc_neg_hits++;
#endif

			return NULL;
		}
	}

	/*
	 * Tuple was not found in cache, so we have to try to retrieve it directly
	 * from the relation.  If found, we will add it to the cache; if not
	 * found, we will add a negative cache entry instead.
	 *
	 * NOTE: it is possible for recursive cache lookups to occur while reading
	 * the relation --- for example, due to shared-cache-inval messages being
	 * processed during heap_open().  This is OK.  It's even possible for one
	 * of those lookups to find and enter the very same tuple we are trying to
	 * fetch here.  If that happens, we will enter a second copy of the tuple
	 * into the cache.		The first copy will never be referenced again, and
	 * will eventually age out of the cache, so there's no functional problem.
	 * This case is rare enough that it's not worth expending extra cycles to
	 * detect.
	 */
	relation = heap_open(cache->cc_reloid, AccessShareLock);

	/* Use the cache's index if safe; IndexScanOK forces a heap scan during
	 * relcache bootstrap to avoid infinite recursion */
	scandesc = systable_beginscan(relation,
								  cache->cc_indexoid,
								  IndexScanOK(cache, cur_skey),
								  SnapshotNow,
								  cache->cc_nkeys,
								  cur_skey);

	ct = NULL;

	while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
	{
		ct = CatalogCacheCreateEntry(cache, ntp,
									 hashValue, hashIndex,
									 false);
		/* immediately set the refcount to 1 */
		ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
		ct->refcount++;
		ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
		break;							/* assume only one match */
	}

	systable_endscan(scandesc);

	heap_close(relation, AccessShareLock);

	/*
	 * If tuple was not found, we need to build a negative cache entry
	 * containing a fake tuple.  The fake tuple has the correct key columns,
	 * but nulls everywhere else.
	 *
	 * In bootstrap mode, we don't build negative entries, because the cache
	 * invalidation mechanism isn't alive and can't clear them if the tuple
	 * gets created later.	(Bootstrap doesn't do UPDATEs, so it doesn't need
	 * cache inval for that.)
	 */
	if (ct == NULL)
	{
		if (IsBootstrapProcessingMode())
			return NULL;

		ntp = build_dummy_tuple(cache, cache->cc_nkeys, cur_skey);
		ct = CatalogCacheCreateEntry(cache, ntp,
									 hashValue, hashIndex,
									 true);
		/* CatalogCacheCreateEntry copied the dummy tuple, so free ours */
		heap_freetuple(ntp);

		CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
					cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
		CACHE3_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
					cache->cc_relname, hashIndex);

		/*
		 * We are not returning the negative entry to the caller, so leave its
		 * refcount zero.
		 */

		return NULL;
	}

	CACHE4_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
				cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
	CACHE3_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
				cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
	cache->cc_newloads++;
#endif

	return &ct->tuple;
}
1275
/*
 *	ReleaseCatCache
 *
 *	Decrement the reference count of a catcache entry (releasing the
 *	hold grabbed by a successful SearchCatCache).
 *
 *	NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
 *	will be freed as soon as their refcount goes to zero.  In combination
 *	with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
 *	to catch references to already-released catcache entries.
 */
void
ReleaseCatCache(HeapTuple tuple)
{
	/*
	 * Recover the CatCTup address from the tuple pointer: the HeapTupleData
	 * is embedded in the CatCTup, so back up by its offset.
	 */
	CatCTup    *ct = (CatCTup *) (((char *) tuple) -
								  offsetof(CatCTup, tuple));

	/* Safety checks to ensure we were handed a cache entry */
	Assert(ct->ct_magic == CT_MAGIC);
	Assert(ct->refcount > 0);

	ct->refcount--;
	ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);

	/*
	 * Physically remove the entry once it is unpinned (and its parent list,
	 * if any, is unpinned too).  Normally this only happens for entries
	 * already marked dead by invalidation; with CATCACHE_FORCE_RELEASE the
	 * "dead" condition is compiled out, so every unpinned entry is freed
	 * immediately (debugging aid, see header comment).
	 */
	if (
#ifndef CATCACHE_FORCE_RELEASE
		ct->dead &&
#endif
		ct->refcount == 0 &&
		(ct->c_list == NULL || ct->c_list->refcount == 0))
		CatCacheRemoveCTup(ct->my_cache, ct);
}
1308
1309
1310 /*
1311  *      SearchCatCacheList
1312  *
1313  *              Generate a list of all tuples matching a partial key (that is,
1314  *              a key specifying just the first K of the cache's N key columns).
1315  *
1316  *              The caller must not modify the list object or the pointed-to tuples,
1317  *              and must call ReleaseCatCacheList() when done with the list.
1318  */
1319 CatCList *
1320 SearchCatCacheList(CatCache *cache,
1321                                    int nkeys,
1322                                    Datum v1,
1323                                    Datum v2,
1324                                    Datum v3,
1325                                    Datum v4)
1326 {
1327         ScanKeyData cur_skey[4];
1328         uint32          lHashValue;
1329         Dlelem     *elt;
1330         CatCList   *cl;
1331         CatCTup    *ct;
1332         List       *volatile ctlist;
1333         ListCell   *ctlist_item;
1334         int                     nmembers;
1335         bool            ordered;
1336         HeapTuple       ntp;
1337         MemoryContext oldcxt;
1338         int                     i;
1339
1340         /*
1341          * one-time startup overhead for each cache
1342          */
1343         if (cache->cc_tupdesc == NULL)
1344                 CatalogCacheInitializeCache(cache);
1345
1346         Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
1347
1348 #ifdef CATCACHE_STATS
1349         cache->cc_lsearches++;
1350 #endif
1351
1352         /*
1353          * initialize the search key information
1354          */
1355         memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
1356         cur_skey[0].sk_argument = v1;
1357         cur_skey[1].sk_argument = v2;
1358         cur_skey[2].sk_argument = v3;
1359         cur_skey[3].sk_argument = v4;
1360
1361         /*
1362          * compute a hash value of the given keys for faster search.  We don't
1363          * presently divide the CatCList items into buckets, but this still lets
1364          * us skip non-matching items quickly most of the time.
1365          */
1366         lHashValue = CatalogCacheComputeHashValue(cache, nkeys, cur_skey);
1367
1368         /*
1369          * scan the items until we find a match or exhaust our list
1370          */
1371         for (elt = DLGetHead(&cache->cc_lists);
1372                  elt;
1373                  elt = DLGetSucc(elt))
1374         {
1375                 bool            res;
1376
1377                 cl = (CatCList *) DLE_VAL(elt);
1378
1379                 if (cl->dead)
1380                         continue;                       /* ignore dead entries */
1381
1382                 if (cl->hash_value != lHashValue)
1383                         continue;                       /* quickly skip entry if wrong hash val */
1384
1385                 /*
1386                  * see if the cached list matches our key.
1387                  */
1388                 if (cl->nkeys != nkeys)
1389                         continue;
1390                 HeapKeyTest(&cl->tuple,
1391                                         cache->cc_tupdesc,
1392                                         nkeys,
1393                                         cur_skey,
1394                                         res);
1395                 if (!res)
1396                         continue;
1397
1398                 /*
1399                  * We found a matching list: mark it as touched since the last
1400                  * CatalogCacheCleanup() sweep.  Also move the list to the front of
1401                  * the cache's list-of-lists, to speed subsequent searches. (We do not
1402                  * move the members to the fronts of their hashbucket lists, however,
1403                  * since there's no point in that unless they are searched for
1404                  * individually.)
1405                  */
1406                 cl->touched = true;
1407                 DLMoveToFront(&cl->cache_elem);
1408
1409                 /* Bump the list's refcount and return it */
1410                 ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
1411                 cl->refcount++;
1412                 ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1413
1414                 CACHE2_elog(DEBUG2, "SearchCatCacheList(%s): found list",
1415                                         cache->cc_relname);
1416
1417 #ifdef CATCACHE_STATS
1418                 cache->cc_lhits++;
1419 #endif
1420
1421                 return cl;
1422         }
1423
1424         /*
1425          * List was not found in cache, so we have to build it by reading the
1426          * relation.  For each matching tuple found in the relation, use an
1427          * existing cache entry if possible, else build a new one.
1428          *
1429          * We have to bump the member refcounts temporarily to ensure they won't get
1430          * dropped from the cache while loading other members. We use a PG_TRY
1431          * block to ensure we can undo those refcounts if we get an error before
1432          * we finish constructing the CatCList.
1433          */
1434         ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
1435
1436         ctlist = NIL;
1437
1438         PG_TRY();
1439         {
1440                 Relation        relation;
1441                 SysScanDesc scandesc;
1442
1443                 relation = heap_open(cache->cc_reloid, AccessShareLock);
1444
1445                 scandesc = systable_beginscan(relation,
1446                                                                           cache->cc_indexoid,
1447                                                                           true,
1448                                                                           SnapshotNow,
1449                                                                           nkeys,
1450                                                                           cur_skey);
1451
1452                 /* The list will be ordered iff we are doing an index scan */
1453                 ordered = (scandesc->irel != NULL);
1454
1455                 while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1456                 {
1457                         uint32          hashValue;
1458                         Index           hashIndex;
1459
1460                         /*
1461                          * See if there's an entry for this tuple already.
1462                          */
1463                         ct = NULL;
1464                         hashValue = CatalogCacheComputeTupleHashValue(cache, ntp);
1465                         hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1466
1467                         for (elt = DLGetHead(&cache->cc_bucket[hashIndex]);
1468                                  elt;
1469                                  elt = DLGetSucc(elt))
1470                         {
1471                                 ct = (CatCTup *) DLE_VAL(elt);
1472
1473                                 if (ct->dead || ct->negative)
1474                                         continue;       /* ignore dead and negative entries */
1475
1476                                 if (ct->hash_value != hashValue)
1477                                         continue;       /* quickly skip entry if wrong hash val */
1478
1479                                 if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
1480                                         continue;       /* not same tuple */
1481
1482                                 /*
1483                                  * Found a match, but can't use it if it belongs to another
1484                                  * list already
1485                                  */
1486                                 if (ct->c_list)
1487                                         continue;
1488
1489                                 /* Found a match, so move it to front */
1490                                 DLMoveToFront(&ct->lrulist_elem);
1491
1492                                 break;
1493                         }
1494
1495                         if (elt == NULL)
1496                         {
1497                                 /* We didn't find a usable entry, so make a new one */
1498                                 ct = CatalogCacheCreateEntry(cache, ntp,
1499                                                                                          hashValue, hashIndex,
1500                                                                                          false);
1501                         }
1502
1503                         /* Careful here: add entry to ctlist, then bump its refcount */
1504                         /* This way leaves state correct if lappend runs out of memory */
1505                         ctlist = lappend(ctlist, ct);
1506                         ct->refcount++;
1507                 }
1508
1509                 systable_endscan(scandesc);
1510
1511                 heap_close(relation, AccessShareLock);
1512
1513                 /*
1514                  * Now we can build the CatCList entry.  First we need a dummy tuple
1515                  * containing the key values...
1516                  */
1517                 ntp = build_dummy_tuple(cache, nkeys, cur_skey);
1518                 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1519                 nmembers = list_length(ctlist);
1520                 cl = (CatCList *)
1521                         palloc(sizeof(CatCList) + nmembers * sizeof(CatCTup *));
1522                 heap_copytuple_with_tuple(ntp, &cl->tuple);
1523                 MemoryContextSwitchTo(oldcxt);
1524                 heap_freetuple(ntp);
1525
1526                 /*
1527                  * We are now past the last thing that could trigger an elog before we
1528                  * have finished building the CatCList and remembering it in the
1529                  * resource owner.      So it's OK to fall out of the PG_TRY, and indeed
1530                  * we'd better do so before we start marking the members as belonging
1531                  * to the list.
1532                  */
1533
1534         }
1535         PG_CATCH();
1536         {
1537                 foreach(ctlist_item, ctlist)
1538                 {
1539                         ct = (CatCTup *) lfirst(ctlist_item);
1540                         Assert(ct->c_list == NULL);
1541                         Assert(ct->refcount > 0);
1542                         ct->refcount--;
1543                         if (
1544 #ifndef CATCACHE_FORCE_RELEASE
1545                                 ct->dead &&
1546 #endif
1547                                 ct->refcount == 0 &&
1548                                 (ct->c_list == NULL || ct->c_list->refcount == 0))
1549                                 CatCacheRemoveCTup(cache, ct);
1550                 }
1551
1552                 PG_RE_THROW();
1553         }
1554         PG_END_TRY();
1555
1556         cl->cl_magic = CL_MAGIC;
1557         cl->my_cache = cache;
1558         DLInitElem(&cl->cache_elem, cl);
1559         cl->refcount = 0;                       /* for the moment */
1560         cl->dead = false;
1561         cl->ordered = ordered;
1562         cl->touched = false;            /* we already moved members to front */
1563         cl->nkeys = nkeys;
1564         cl->hash_value = lHashValue;
1565         cl->n_members = nmembers;
1566
1567         i = 0;
1568         foreach(ctlist_item, ctlist)
1569         {
1570                 cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
1571                 Assert(ct->c_list == NULL);
1572                 ct->c_list = cl;
1573                 /* release the temporary refcount on the member */
1574                 Assert(ct->refcount > 0);
1575                 ct->refcount--;
1576                 /* mark list dead if any members already dead */
1577                 if (ct->dead)
1578                         cl->dead = true;
1579         }
1580         Assert(i == nmembers);
1581
1582         DLAddHead(&cache->cc_lists, &cl->cache_elem);
1583
1584         /* Finally, bump the list's refcount and return it */
1585         cl->refcount++;
1586         ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1587
1588         CACHE3_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
1589                                 cache->cc_relname, nmembers);
1590
1591         return cl;
1592 }
1593
1594 /*
1595  *      ReleaseCatCacheList
1596  *
1597  *      Decrement the reference count of a catcache list.
1598  */
1599 void
1600 ReleaseCatCacheList(CatCList *list)
1601 {
1602         /* Safety checks to ensure we were handed a cache entry */
1603         Assert(list->cl_magic == CL_MAGIC);
1604         Assert(list->refcount > 0);
1605         list->refcount--;
1606         ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);
1607
1608         if (
1609 #ifndef CATCACHE_FORCE_RELEASE
1610                 list->dead &&
1611 #endif
1612                 list->refcount == 0)
1613                 CatCacheRemoveCList(list->my_cache, list);
1614 }
1615
1616
1617 /*
1618  * CatalogCacheCreateEntry
1619  *              Create a new CatCTup entry, copying the given HeapTuple and other
1620  *              supplied data into it.  The new entry initially has refcount 0.
1621  */
1622 static CatCTup *
1623 CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
1624                                                 uint32 hashValue, Index hashIndex, bool negative)
1625 {
1626         CatCTup    *ct;
1627         MemoryContext oldcxt;
1628
1629         /*
1630          * Allocate CatCTup header in cache memory, and copy the tuple there too.
1631          */
1632         oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1633         ct = (CatCTup *) palloc(sizeof(CatCTup));
1634         heap_copytuple_with_tuple(ntp, &ct->tuple);
1635         MemoryContextSwitchTo(oldcxt);
1636
1637         /*
1638          * Finish initializing the CatCTup header, and add it to the cache's
1639          * linked lists and counts.
1640          */
1641         ct->ct_magic = CT_MAGIC;
1642         ct->my_cache = cache;
1643         DLInitElem(&ct->lrulist_elem, (void *) ct);
1644         DLInitElem(&ct->cache_elem, (void *) ct);
1645         ct->c_list = NULL;
1646         ct->refcount = 0;                       /* for the moment */
1647         ct->dead = false;
1648         ct->negative = negative;
1649         ct->hash_value = hashValue;
1650
1651         DLAddHead(&CacheHdr->ch_lrulist, &ct->lrulist_elem);
1652         DLAddHead(&cache->cc_bucket[hashIndex], &ct->cache_elem);
1653
1654         cache->cc_ntup++;
1655         CacheHdr->ch_ntup++;
1656
1657         /*
1658          * If we've exceeded the desired size of the caches, try to throw away the
1659          * least recently used entry(s).  NB: be careful not to throw away the
1660          * newly-built entry...
1661          */
1662         if (CacheHdr->ch_ntup > CacheHdr->ch_maxtup)
1663                 CatalogCacheCleanup(ct);
1664
1665         return ct;
1666 }
1667
/*
 * CatalogCacheCleanup
 *		Try to reduce the size of the catcaches when they get too big
 *
 * savect can be NULL, or a specific CatCTup not to remove even if it
 * has zero refcount.
 */
static void
CatalogCacheCleanup(CatCTup *savect)
{
	int			tup_target;		/* stop evicting once ch_ntup drops to this */
	CatCache   *ccp;
	Dlelem	   *elt,
			   *prevelt;

	/*
	 * Each time we have to do this, try to cut the cache size down to about
	 * 90% of the maximum.
	 */
	tup_target = (CacheHdr->ch_maxtup * 9) / 10;

	/*
	 * Our strategy for managing CatCLists is that, each time we have to throw
	 * away some cache entries, we first move-to-front all the members of
	 * CatCLists that have been touched since the last cleanup sweep. Then we
	 * do strict LRU elimination by individual tuples, zapping a list if any
	 * of its members gets zapped.  Before PostgreSQL 8.1, we moved members to
	 * front each time their owning list was touched, which was arguably more
	 * fair in balancing list members against standalone tuples --- but the
	 * overhead for large lists was horrendous.  This scheme is more heavily
	 * biased towards preserving lists, but that is not necessarily bad
	 * either.
	 */
	for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
	{
		for (elt = DLGetHead(&ccp->cc_lists); elt; elt = DLGetSucc(elt))
		{
			CatCList   *cl = (CatCList *) DLE_VAL(elt);

			Assert(cl->cl_magic == CL_MAGIC);
			if (cl->touched && !cl->dead)
			{
				int			i;

				/* promote every member of a recently-used list at once */
				for (i = 0; i < cl->n_members; i++)
					DLMoveToFront(&cl->members[i]->lrulist_elem);
			}
			/* reset the flag so the next sweep starts fresh */
			cl->touched = false;
		}
	}

	/* Now get rid of unreferenced tuples in reverse global LRU order */
	for (elt = DLGetTail(&CacheHdr->ch_lrulist); elt; elt = prevelt)
	{
		CatCTup    *ct = (CatCTup *) DLE_VAL(elt);

		/* fetch predecessor first: elt is freed if we remove ct below */
		prevelt = DLGetPred(elt);

		/*
		 * Evict only entries that are unpinned, not members of a pinned
		 * list, and not the entry the caller asked us to preserve.
		 */
		if (ct->refcount == 0 &&
			(ct->c_list == NULL || ct->c_list->refcount == 0) &&
			ct != savect)
		{
#ifdef CATCACHE_STATS
			ct->my_cache->cc_discards++;
#endif
			CatCacheRemoveCTup(ct->my_cache, ct);

			/* Quit when we've removed enough tuples */
			if (CacheHdr->ch_ntup <= tup_target)
				break;
		}
	}
}
1741
1742 /*
1743  * build_dummy_tuple
1744  *              Generate a palloc'd HeapTuple that contains the specified key
1745  *              columns, and NULLs for other columns.
1746  *
1747  * This is used to store the keys for negative cache entries and CatCList
1748  * entries, which don't have real tuples associated with them.
1749  */
1750 static HeapTuple
1751 build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
1752 {
1753         HeapTuple       ntp;
1754         TupleDesc       tupDesc = cache->cc_tupdesc;
1755         Datum      *values;
1756         char       *nulls;
1757         Oid                     tupOid = InvalidOid;
1758         NameData        tempNames[4];
1759         int                     i;
1760
1761         values = (Datum *) palloc(tupDesc->natts * sizeof(Datum));
1762         nulls = (char *) palloc(tupDesc->natts * sizeof(char));
1763
1764         memset(values, 0, tupDesc->natts * sizeof(Datum));
1765         memset(nulls, 'n', tupDesc->natts * sizeof(char));
1766
1767         for (i = 0; i < nkeys; i++)
1768         {
1769                 int                     attindex = cache->cc_key[i];
1770                 Datum           keyval = skeys[i].sk_argument;
1771
1772                 if (attindex > 0)
1773                 {
1774                         /*
1775                          * Here we must be careful in case the caller passed a C string
1776                          * where a NAME is wanted: convert the given argument to a
1777                          * correctly padded NAME.  Otherwise the memcpy() done in
1778                          * heap_formtuple could fall off the end of memory.
1779                          */
1780                         if (cache->cc_isname[i])
1781                         {
1782                                 Name            newval = &tempNames[i];
1783
1784                                 namestrcpy(newval, DatumGetCString(keyval));
1785                                 keyval = NameGetDatum(newval);
1786                         }
1787                         values[attindex - 1] = keyval;
1788                         nulls[attindex - 1] = ' ';
1789                 }
1790                 else
1791                 {
1792                         Assert(attindex == ObjectIdAttributeNumber);
1793                         tupOid = DatumGetObjectId(keyval);
1794                 }
1795         }
1796
1797         ntp = heap_formtuple(tupDesc, values, nulls);
1798         if (tupOid != InvalidOid)
1799                 HeapTupleSetOid(ntp, tupOid);
1800
1801         pfree(values);
1802         pfree(nulls);
1803
1804         return ntp;
1805 }
1806
1807
1808 /*
1809  *      PrepareToInvalidateCacheTuple()
1810  *
1811  *      This is part of a rather subtle chain of events, so pay attention:
1812  *
1813  *      When a tuple is inserted or deleted, it cannot be flushed from the
1814  *      catcaches immediately, for reasons explained at the top of cache/inval.c.
1815  *      Instead we have to add entry(s) for the tuple to a list of pending tuple
1816  *      invalidations that will be done at the end of the command or transaction.
1817  *
1818  *      The lists of tuples that need to be flushed are kept by inval.c.  This
1819  *      routine is a helper routine for inval.c.  Given a tuple belonging to
1820  *      the specified relation, find all catcaches it could be in, compute the
1821  *      correct hash value for each such catcache, and call the specified function
1822  *      to record the cache id, hash value, and tuple ItemPointer in inval.c's
1823  *      lists.  CatalogCacheIdInvalidate will be called later, if appropriate,
1824  *      using the recorded information.
1825  *
1826  *      Note that it is irrelevant whether the given tuple is actually loaded
1827  *      into the catcache at the moment.  Even if it's not there now, it might
1828  *      be by the end of the command, or there might be a matching negative entry
1829  *      to flush --- or other backends' caches might have such entries --- so
1830  *      we have to make list entries to flush it later.
1831  *
1832  *      Also note that it's not an error if there are no catcaches for the
1833  *      specified relation.  inval.c doesn't know exactly which rels have
1834  *      catcaches --- it will call this routine for any tuple that's in a
1835  *      system relation.
1836  */
1837 void
1838 PrepareToInvalidateCacheTuple(Relation relation,
1839                                                           HeapTuple tuple,
1840                                                         void (*function) (int, uint32, ItemPointer, Oid))
1841 {
1842         CatCache   *ccp;
1843         Oid                     reloid;
1844
1845         CACHE1_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
1846
1847         /*
1848          * sanity checks
1849          */
1850         Assert(RelationIsValid(relation));
1851         Assert(HeapTupleIsValid(tuple));
1852         Assert(PointerIsValid(function));
1853         Assert(CacheHdr != NULL);
1854
1855         reloid = RelationGetRelid(relation);
1856
1857         /* ----------------
1858          *      for each cache
1859          *         if the cache contains tuples from the specified relation
1860          *                 compute the tuple's hash value in this cache,
1861          *                 and call the passed function to register the information.
1862          * ----------------
1863          */
1864
1865         for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
1866         {
1867                 /* Just in case cache hasn't finished initialization yet... */
1868                 if (ccp->cc_tupdesc == NULL)
1869                         CatalogCacheInitializeCache(ccp);
1870
1871                 if (ccp->cc_reloid != reloid)
1872                         continue;
1873
1874                 (*function) (ccp->id,
1875                                          CatalogCacheComputeTupleHashValue(ccp, tuple),
1876                                          &tuple->t_self,
1877                                          ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId);
1878         }
1879 }
1880
1881
1882 /*
1883  * Subroutines for warning about reference leaks.  These are exported so
1884  * that resowner.c can call them.
1885  */
1886 void
1887 PrintCatCacheLeakWarning(HeapTuple tuple)
1888 {
1889         CatCTup    *ct = (CatCTup *) (((char *) tuple) -
1890                                                                   offsetof(CatCTup, tuple));
1891
1892         /* Safety check to ensure we were handed a cache entry */
1893         Assert(ct->ct_magic == CT_MAGIC);
1894
1895         elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%u has count %d",
1896                  ct->my_cache->cc_relname, ct->my_cache->id,
1897                  ItemPointerGetBlockNumber(&(tuple->t_self)),
1898                  ItemPointerGetOffsetNumber(&(tuple->t_self)),
1899                  ct->refcount);
1900 }
1901
1902 void
1903 PrintCatCacheListLeakWarning(CatCList *list)
1904 {
1905         elog(WARNING, "cache reference leak: cache %s (%d), list %p has count %d",
1906                  list->my_cache->cc_relname, list->my_cache->id,
1907                  list, list->refcount);
1908 }