Change target version to PG11 and fix the SPEC file
[pghintplan/pg_hint_plan.git] / pg_stat_statements.c
index 2dd7979..8f74903 100644
 /*-------------------------------------------------------------------------
  *
  * pg_stat_statements.c
- *             Track statement execution times across a whole database cluster.
+ * 
+ * Part of pg_stat_statements.c in PostgreSQL 10.
  *
- * Execution costs are totalled for each distinct source query, and kept in
- * a shared hashtable. (We track only as many distinct queries as will fit
- * in the designated amount of shared memory.)
- *
- * As of Postgres 9.2, this module normalizes query entries.  Normalization
- * is a process whereby similar queries, typically differing only in their
- * constants (though the exact rules are somewhat more subtle than that) are
- * recognized as equivalent, and are tracked as a single entry.  This is
- * particularly useful for non-prepared queries.
- *
- * Normalization is implemented by fingerprinting queries, selectively
- * serializing those fields of each query tree's nodes that are judged to be
- * essential to the query.     This is referred to as a query jumble.  This is
- * distinct from a regular serialization in that various extraneous
- * information is ignored as irrelevant or not essential to the query, such
- * as the collations of Vars and, most notably, the values of constants.
- *
- * This jumble is acquired at the end of parse analysis of each query, and
- * a 32-bit hash of it is stored into the query's Query.queryId field.
- * The server then copies this value around, making it available in plan
- * tree(s) generated from the query.  The executor can then use this value
- * to blame query costs on the proper queryId.
- *
- * Note about locking issues: to create or delete an entry in the shared
- * hashtable, one must hold pgss->lock exclusively.  Modifying any field
- * in an entry except the counters requires the same.  To look up an entry,
- * one must hold the lock shared.  To read or update the counters within
- * an entry, one must hold the lock shared or exclusive (so the entry doesn't
- * disappear!) and also take the entry's mutex spinlock.
- *
- *
- * Copyright (c) 2008-2013, PostgreSQL Global Development Group
- *
- * IDENTIFICATION
- *       contrib/pg_stat_statements/pg_stat_statements.c
+ * Copyright (c) 2008-2017, PostgreSQL Global Development Group
  *
  *-------------------------------------------------------------------------
  */
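
A note on the normalization machinery the removed header text describes: queries are fingerprinted by hashing ("jumbling") everything judged essential while skipping constant values, and the recorded constant locations are later used to build a normalized query text. The standalone sketch below is not PostgreSQL code — it uses a toy tokenizer, an FNV-1a hash, and hypothetical names — but it shows the idea: two queries differing only in a literal get the same fingerprint and the same normalized form.

/*
 * Simplified, self-contained sketch of the "jumble and normalize" idea.
 * Instead of walking a parsed Query tree it tokenizes a flat SQL string,
 * hashes every character except numeric literals, and rewrites those
 * literals as '?'.  The hash and tokenizer are toy stand-ins.
 */
#include <stdio.h>
#include <stdint.h>
#include <ctype.h>

/* FNV-1a, a toy stand-in for the 32-bit hash stored in Query.queryId */
static uint32_t
jumble_hash(uint32_t h, const char *buf, size_t len)
{
    for (size_t i = 0; i < len; i++)
    {
        h ^= (unsigned char) buf[i];
        h *= 16777619u;
    }
    return h;
}

int
main(void)
{
    const char *queries[] = {
        "SELECT * FROM t WHERE id = 42",
        "SELECT * FROM t WHERE id = 7",
    };

    for (int q = 0; q < 2; q++)
    {
        const char *src = queries[q];
        char        norm[256];
        size_t      n = 0;
        uint32_t    h = 2166136261u;

        for (size_t i = 0; src[i] != '\0';)
        {
            if (isdigit((unsigned char) src[i]))
            {
                /* constant: skip it in the hash, emit a placeholder */
                while (isdigit((unsigned char) src[i]))
                    i++;
                norm[n++] = '?';
            }
            else
            {
                /* everything else is "essential" and feeds the hash */
                h = jumble_hash(h, &src[i], 1);
                norm[n++] = src[i++];
            }
        }
        norm[n] = '\0';

        /* both inputs print the same queryid and normalized text */
        printf("queryid=%08x  %s\n", (unsigned) h, norm);
    }
    return 0;
}

The real implementation walks the post-parse-analysis Query tree rather than the SQL text, which is what makes the fingerprint robust to formatting and keyword-case differences.
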
 #include "postgres.h"
 
-#ifdef NOT_USED
+#include <sys/stat.h>
 
-#include <unistd.h>
-
-#endif
 #include "access/hash.h"
-#ifdef NOT_USED
-#include "executor/instrument.h"
-#include "funcapi.h"
-#include "mb/pg_wchar.h"
-#include "miscadmin.h"
-#include "parser/analyze.h"
-#include "parser/parsetree.h"
-#endif
 #include "parser/scanner.h"
-#ifdef NOT_USED
-#include "pgstat.h"
-#include "storage/fd.h"
-#include "storage/ipc.h"
-#include "storage/spin.h"
-#include "tcop/utility.h"
-#include "utils/builtins.h"
-
-
-PG_MODULE_MAGIC;
-
-/* Location of stats file */
-#define PGSS_DUMP_FILE "global/pg_stat_statements.stat"
-
-/* This constant defines the magic number in the stats file header */
-static const uint32 PGSS_FILE_HEADER = 0x20120328;
-
-/* XXX: Should USAGE_EXEC reflect execution time and/or buffer usage? */
-#define USAGE_EXEC(duration)   (1.0)
-#define USAGE_INIT                             (1.0)   /* including initial planning */
-#define ASSUMED_MEDIAN_INIT            (10.0)  /* initial assumed median usage */
-#define USAGE_DECREASE_FACTOR  (0.99)  /* decreased every entry_dealloc */
-#define STICKY_DECREASE_FACTOR (0.50)  /* factor for sticky entries */
-#define USAGE_DEALLOC_PERCENT  5               /* free this % of entries at once */
-
-#define JUMBLE_SIZE                            1024    /* query serialization buffer size */
-
-/*
- * Hashtable key that defines the identity of a hashtable entry.  We separate
- * queries by user and by database even if they are otherwise identical.
- *
- * Presently, the query encoding is fully determined by the source database
- * and so we don't really need it to be in the key.  But that might not always
- * be true. Anyway it's notationally convenient to pass it as part of the key.
- */
-typedef struct pgssHashKey
-{
-       Oid                     userid;                 /* user OID */
-       Oid                     dbid;                   /* database OID */
-       int                     encoding;               /* query encoding */
-       uint32          queryid;                /* query identifier */
-} pgssHashKey;
-
-/*
- * The actual stats counters kept within pgssEntry.
- */
-typedef struct Counters
-{
-       int64           calls;                  /* # of times executed */
-       double          total_time;             /* total execution time, in msec */
-       int64           rows;                   /* total # of retrieved or affected rows */
-       int64           shared_blks_hit;        /* # of shared buffer hits */
-       int64           shared_blks_read;               /* # of shared disk blocks read */
-       int64           shared_blks_dirtied;    /* # of shared disk blocks dirtied */
-       int64           shared_blks_written;    /* # of shared disk blocks written */
-       int64           local_blks_hit; /* # of local buffer hits */
-       int64           local_blks_read;        /* # of local disk blocks read */
-       int64           local_blks_dirtied;             /* # of local disk blocks dirtied */
-       int64           local_blks_written;             /* # of local disk blocks written */
-       int64           temp_blks_read; /* # of temp blocks read */
-       int64           temp_blks_written;              /* # of temp blocks written */
-       double          blk_read_time;  /* time spent reading, in msec */
-       double          blk_write_time; /* time spent writing, in msec */
-       double          usage;                  /* usage factor */
-} Counters;
-
-/*
- * Statistics per statement
- *
- * NB: see the file read/write code before changing field order here.
- */
-typedef struct pgssEntry
-{
-       pgssHashKey key;                        /* hash key of entry - MUST BE FIRST */
-       Counters        counters;               /* the statistics for this query */
-       int                     query_len;              /* # of valid bytes in query string */
-       slock_t         mutex;                  /* protects the counters only */
-       char            query[1];               /* VARIABLE LENGTH ARRAY - MUST BE LAST */
-       /* Note: the allocated length of query[] is actually pgss->query_size */
-} pgssEntry;
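
The query[1] member above is the pre-C99 idiom for a variable-length trailing array: each entry is allocated with extra room, and the comment records that the usable length is really pgss->query_size. A minimal standalone sketch of that allocation pattern, with plain malloc standing in for the shared-memory allocator and toyEntry as a hypothetical name:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>

/* Toy stand-in for pgssEntry: fixed header plus variable-length text */
typedef struct toyEntry
{
    int         query_len;      /* # of valid bytes in query[] */
    char        query[1];       /* VARIABLE LENGTH ARRAY - MUST BE LAST */
} toyEntry;

static toyEntry *
toy_entry_alloc(const char *query, int query_len)
{
    /* allocate the header plus query_len bytes plus the terminating NUL */
    toyEntry   *e = malloc(offsetof(toyEntry, query) + query_len + 1);

    if (e == NULL)
        return NULL;
    e->query_len = query_len;
    memcpy(e->query, query, query_len);
    e->query[query_len] = '\0';
    return e;
}

int
main(void)
{
    const char *q = "SELECT 1";
    toyEntry   *e = toy_entry_alloc(q, (int) strlen(q));

    if (e != NULL)
    {
        printf("len=%d text=%s\n", e->query_len, e->query);
        free(e);
    }
    return 0;
}
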
 
-/*
- * Global shared state
- */
-typedef struct pgssSharedState
-{
-       LWLockId        lock;                   /* protects hashtable search/modification */
-       int                     query_size;             /* max query length in bytes */
-       double          cur_median_usage;               /* current median usage in hashtable */
-} pgssSharedState;
-
-/*
- * Struct for tracking locations/lengths of constants during normalization
- */
-typedef struct pgssLocationLen
-{
-       int                     location;               /* start offset in query text */
-       int                     length;                 /* length in bytes, or -1 to ignore */
-} pgssLocationLen;
-
-/*
- * Working state for computing a query jumble and producing a normalized
- * query string
- */
-typedef struct pgssJumbleState
-{
-       /* Jumble of current query tree */
-       unsigned char *jumble;
-
-       /* Number of bytes used in jumble[] */
-       Size            jumble_len;
-
-       /* Array of locations of constants that should be removed */
-       pgssLocationLen *clocations;
-
-       /* Allocated length of clocations array */
-       int                     clocations_buf_size;
-
-       /* Current number of valid entries in clocations array */
-       int                     clocations_count;
-} pgssJumbleState;
-
-/*---- Local variables ----*/
-
-/* Current nesting depth of ExecutorRun+ProcessUtility calls */
-static int     nested_level = 0;
-
-/* Saved hook values in case of unload */
-static shmem_startup_hook_type prev_shmem_startup_hook = NULL;
-static post_parse_analyze_hook_type prev_post_parse_analyze_hook = NULL;
-static ExecutorStart_hook_type prev_ExecutorStart = NULL;
-static ExecutorRun_hook_type prev_ExecutorRun = NULL;
-static ExecutorFinish_hook_type prev_ExecutorFinish = NULL;
-static ExecutorEnd_hook_type prev_ExecutorEnd = NULL;
-static ProcessUtility_hook_type prev_ProcessUtility = NULL;
-
-/* Links to shared memory state */
-static pgssSharedState *pgss = NULL;
-static HTAB *pgss_hash = NULL;
-
-/*---- GUC variables ----*/
-
-typedef enum
-{
-       PGSS_TRACK_NONE,                        /* track no statements */
-       PGSS_TRACK_TOP,                         /* only top level statements */
-       PGSS_TRACK_ALL                          /* all statements, including nested ones */
-}      PGSSTrackLevel;
-
-static const struct config_enum_entry track_options[] =
-{
-       {"none", PGSS_TRACK_NONE, false},
-       {"top", PGSS_TRACK_TOP, false},
-       {"all", PGSS_TRACK_ALL, false},
-       {NULL, 0, false}
-};
-
-static int     pgss_max;                       /* max # statements to track */
-static int     pgss_track;                     /* tracking level */
-static bool pgss_track_utility; /* whether to track utility commands */
-static bool pgss_save;                 /* whether to save stats across shutdown */
-
-
-#define pgss_enabled() \
-       (pgss_track == PGSS_TRACK_ALL || \
-       (pgss_track == PGSS_TRACK_TOP && nested_level == 0))
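
The macro above is what gates every hook: with pg_stat_statements.track = top, statements executed at nesting depth greater than zero (for example, SQL run inside a PL/pgSQL function) are skipped. A tiny standalone illustration of that logic, using hypothetical names:

#include <stdio.h>

enum { TRACK_NONE, TRACK_TOP, TRACK_ALL };

static int track = TRACK_TOP;
static int nested_level = 0;

/* mirrors the pgss_enabled() macro above */
static int
enabled(void)
{
    return track == TRACK_ALL ||
        (track == TRACK_TOP && nested_level == 0);
}

int
main(void)
{
    printf("top level: %d\n", enabled());   /* 1: tracked */
    nested_level++;                         /* a statement run inside another */
    printf("nested:    %d\n", enabled());   /* 0: skipped with track = top */
    return 0;
}
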
-
-/*---- Function declarations ----*/
-
-void           _PG_init(void);
-void           _PG_fini(void);
-
-Datum          pg_stat_statements_reset(PG_FUNCTION_ARGS);
-Datum          pg_stat_statements(PG_FUNCTION_ARGS);
-
-PG_FUNCTION_INFO_V1(pg_stat_statements_reset);
-PG_FUNCTION_INFO_V1(pg_stat_statements);
-
-static void pgss_shmem_startup(void);
-static void pgss_shmem_shutdown(int code, Datum arg);
-static void pgss_post_parse_analyze(ParseState *pstate, Query *query);
-static void pgss_ExecutorStart(QueryDesc *queryDesc, int eflags);
-static void pgss_ExecutorRun(QueryDesc *queryDesc,
-                                ScanDirection direction,
-                                long count);
-static void pgss_ExecutorFinish(QueryDesc *queryDesc);
-static void pgss_ExecutorEnd(QueryDesc *queryDesc);
-static void pgss_ProcessUtility(Node *parsetree, const char *queryString,
-                                       ProcessUtilityContext context, ParamListInfo params,
-                                       DestReceiver *dest, char *completionTag);
-static uint32 pgss_hash_fn(const void *key, Size keysize);
-static int     pgss_match_fn(const void *key1, const void *key2, Size keysize);
-static uint32 pgss_hash_string(const char *str);
-static void pgss_store(const char *query, uint32 queryId,
-                  double total_time, uint64 rows,
-                  const BufferUsage *bufusage,
-                  pgssJumbleState *jstate);
-static Size pgss_memsize(void);
-static pgssEntry *entry_alloc(pgssHashKey *key, const char *query,
-                       int query_len, bool sticky);
-static void entry_dealloc(void);
-static void entry_reset(void);
-#endif
 static void AppendJumble(pgssJumbleState *jstate,
                         const unsigned char *item, Size size);
-#ifdef NOT_USED
 static void JumbleQuery(pgssJumbleState *jstate, Query *query);
-#endif
 static void JumbleRangeTable(pgssJumbleState *jstate, List *rtable);
 static void JumbleExpr(pgssJumbleState *jstate, Node *node);
 static void RecordConstLocation(pgssJumbleState *jstate, int location);
-#ifdef NOT_USED
 static char *generate_normalized_query(pgssJumbleState *jstate, const char *query,
-                                                 int *query_len_p, int encoding);
-#endif
-static void fill_in_constant_lengths(pgssJumbleState *jstate, const char *query);
+                                                 int query_loc, int *query_len_p, int encoding);
+static void fill_in_constant_lengths(pgssJumbleState *jstate, const char *query,
+                                                int query_loc);
 static int     comp_location(const void *a, const void *b);
-#ifdef NOT_USED
-
-
-/*
- * Module load callback
- */
-void
-_PG_init(void)
-{
-       /*
-        * In order to create our shared memory area, we have to be loaded via
-        * shared_preload_libraries.  If not, fall out without hooking into any of
-        * the main system.  (We don't throw error here because it seems useful to
-        * allow the pg_stat_statements functions to be created even when the
-        * module isn't active.  The functions must protect themselves against
-        * being called then, however.)
-        */
-       if (!process_shared_preload_libraries_in_progress)
-               return;
-
-       /*
-        * Define (or redefine) custom GUC variables.
-        */
-       DefineCustomIntVariable("pg_stat_statements.max",
-         "Sets the maximum number of statements tracked by pg_stat_statements.",
-                                                       NULL,
-                                                       &pgss_max,
-                                                       1000,
-                                                       100,
-                                                       INT_MAX,
-                                                       PGC_POSTMASTER,
-                                                       0,
-                                                       NULL,
-                                                       NULL,
-                                                       NULL);
-
-       DefineCustomEnumVariable("pg_stat_statements.track",
-                          "Selects which statements are tracked by pg_stat_statements.",
-                                                        NULL,
-                                                        &pgss_track,
-                                                        PGSS_TRACK_TOP,
-                                                        track_options,
-                                                        PGC_SUSET,
-                                                        0,
-                                                        NULL,
-                                                        NULL,
-                                                        NULL);
-
-       DefineCustomBoolVariable("pg_stat_statements.track_utility",
-          "Selects whether utility commands are tracked by pg_stat_statements.",
-                                                        NULL,
-                                                        &pgss_track_utility,
-                                                        true,
-                                                        PGC_SUSET,
-                                                        0,
-                                                        NULL,
-                                                        NULL,
-                                                        NULL);
-
-       DefineCustomBoolVariable("pg_stat_statements.save",
-                          "Save pg_stat_statements statistics across server shutdowns.",
-                                                        NULL,
-                                                        &pgss_save,
-                                                        true,
-                                                        PGC_SIGHUP,
-                                                        0,
-                                                        NULL,
-                                                        NULL,
-                                                        NULL);
-
-       EmitWarningsOnPlaceholders("pg_stat_statements");
-
-       /*
-        * Request additional shared resources.  (These are no-ops if we're not in
-        * the postmaster process.)  We'll allocate or attach to the shared
-        * resources in pgss_shmem_startup().
-        */
-       RequestAddinShmemSpace(pgss_memsize());
-       RequestAddinLWLocks(1);
-
-       /*
-        * Install hooks.
-        */
-       prev_shmem_startup_hook = shmem_startup_hook;
-       shmem_startup_hook = pgss_shmem_startup;
-       prev_post_parse_analyze_hook = post_parse_analyze_hook;
-       post_parse_analyze_hook = pgss_post_parse_analyze;
-       prev_ExecutorStart = ExecutorStart_hook;
-       ExecutorStart_hook = pgss_ExecutorStart;
-       prev_ExecutorRun = ExecutorRun_hook;
-       ExecutorRun_hook = pgss_ExecutorRun;
-       prev_ExecutorFinish = ExecutorFinish_hook;
-       ExecutorFinish_hook = pgss_ExecutorFinish;
-       prev_ExecutorEnd = ExecutorEnd_hook;
-       ExecutorEnd_hook = pgss_ExecutorEnd;
-       prev_ProcessUtility = ProcessUtility_hook;
-       ProcessUtility_hook = pgss_ProcessUtility;
-}
-
-/*
- * Module unload callback
- */
-void
-_PG_fini(void)
-{
-       /* Uninstall hooks. */
-       shmem_startup_hook = prev_shmem_startup_hook;
-       post_parse_analyze_hook = prev_post_parse_analyze_hook;
-       ExecutorStart_hook = prev_ExecutorStart;
-       ExecutorRun_hook = prev_ExecutorRun;
-       ExecutorFinish_hook = prev_ExecutorFinish;
-       ExecutorEnd_hook = prev_ExecutorEnd;
-       ProcessUtility_hook = prev_ProcessUtility;
-}
-
-/*
- * shmem_startup hook: allocate or attach to shared memory,
- * then load any pre-existing statistics from file.
- */
-static void
-pgss_shmem_startup(void)
-{
-       bool            found;
-       HASHCTL         info;
-       FILE       *file;
-       uint32          header;
-       int32           num;
-       int32           i;
-       int                     query_size;
-       int                     buffer_size;
-       char       *buffer = NULL;
-
-       if (prev_shmem_startup_hook)
-               prev_shmem_startup_hook();
-
-       /* reset in case this is a restart within the postmaster */
-       pgss = NULL;
-       pgss_hash = NULL;
-
-       /*
-        * Create or attach to the shared memory state, including hash table
-        */
-       LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);
-
-       pgss = ShmemInitStruct("pg_stat_statements",
-                                                  sizeof(pgssSharedState),
-                                                  &found);
-
-       if (!found)
-       {
-               /* First time through ... */
-               pgss->lock = LWLockAssign();
-               pgss->query_size = pgstat_track_activity_query_size;
-               pgss->cur_median_usage = ASSUMED_MEDIAN_INIT;
-       }
-
-       /* Be sure everyone agrees on the hash table entry size */
-       query_size = pgss->query_size;
-
-       memset(&info, 0, sizeof(info));
-       info.keysize = sizeof(pgssHashKey);
-       info.entrysize = offsetof(pgssEntry, query) +query_size;
-       info.hash = pgss_hash_fn;
-       info.match = pgss_match_fn;
-       pgss_hash = ShmemInitHash("pg_stat_statements hash",
-                                                         pgss_max, pgss_max,
-                                                         &info,
-                                                         HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);
-
-       LWLockRelease(AddinShmemInitLock);
-
-       /*
-        * If we're in the postmaster (or a standalone backend...), set up a shmem
-        * exit hook to dump the statistics to disk.
-        */
-       if (!IsUnderPostmaster)
-               on_shmem_exit(pgss_shmem_shutdown, (Datum) 0);
-
-       /*
-        * Attempt to load old statistics from the dump file, if this is the first
-        * time through and we weren't told not to.
-        */
-       if (found || !pgss_save)
-               return;
-
-       /*
-        * Note: we don't bother with locks here, because there should be no other
-        * processes running when this code is reached.
-        */
-       file = AllocateFile(PGSS_DUMP_FILE, PG_BINARY_R);
-       if (file == NULL)
-       {
-               if (errno == ENOENT)
-                       return;                         /* ignore not-found error */
-               goto error;
-       }
-
-       buffer_size = query_size;
-       buffer = (char *) palloc(buffer_size);
-
-       if (fread(&header, sizeof(uint32), 1, file) != 1 ||
-               header != PGSS_FILE_HEADER ||
-               fread(&num, sizeof(int32), 1, file) != 1)
-               goto error;
-
-       for (i = 0; i < num; i++)
-       {
-               pgssEntry       temp;
-               pgssEntry  *entry;
-
-               if (fread(&temp, offsetof(pgssEntry, mutex), 1, file) != 1)
-                       goto error;
-
-               /* Encoding is the only field we can easily sanity-check */
-               if (!PG_VALID_BE_ENCODING(temp.key.encoding))
-                       goto error;
-
-               /* Previous incarnation might have had a larger query_size */
-               if (temp.query_len >= buffer_size)
-               {
-                       buffer = (char *) repalloc(buffer, temp.query_len + 1);
-                       buffer_size = temp.query_len + 1;
-               }
-
-               if (fread(buffer, 1, temp.query_len, file) != temp.query_len)
-                       goto error;
-               buffer[temp.query_len] = '\0';
-
-               /* Skip loading "sticky" entries */
-               if (temp.counters.calls == 0)
-                       continue;
-
-               /* Clip to available length if needed */
-               if (temp.query_len >= query_size)
-                       temp.query_len = pg_encoding_mbcliplen(temp.key.encoding,
-                                                                                                  buffer,
-                                                                                                  temp.query_len,
-                                                                                                  query_size - 1);
-
-               /* make the hashtable entry (discards old entries if too many) */
-               entry = entry_alloc(&temp.key, buffer, temp.query_len, false);
-
-               /* copy in the actual stats */
-               entry->counters = temp.counters;
-       }
-
-       pfree(buffer);
-       FreeFile(file);
-
-       /*
-        * Remove the file so it's not included in backups/replication slaves,
-        * etc. A new file will be written on next shutdown.
-        */
-       unlink(PGSS_DUMP_FILE);
-
-       return;
-
-error:
-       ereport(LOG,
-                       (errcode_for_file_access(),
-                        errmsg("could not read pg_stat_statement file \"%s\": %m",
-                                       PGSS_DUMP_FILE)));
-       if (buffer)
-               pfree(buffer);
-       if (file)
-               FreeFile(file);
-       /* If possible, throw away the bogus file; ignore any error */
-       unlink(PGSS_DUMP_FILE);
-}
-
-/*
- * shmem_shutdown hook: Dump statistics into file.
- *
- * Note: we don't bother with acquiring lock, because there should be no
- * other processes running when this is called.
- */
-static void
-pgss_shmem_shutdown(int code, Datum arg)
-{
-       FILE       *file;
-       HASH_SEQ_STATUS hash_seq;
-       int32           num_entries;
-       pgssEntry  *entry;
-
-       /* Don't try to dump during a crash. */
-       if (code)
-               return;
-
-       /* Safety check ... shouldn't get here unless shmem is set up. */
-       if (!pgss || !pgss_hash)
-               return;
-
-       /* Don't dump if told not to. */
-       if (!pgss_save)
-               return;
-
-       file = AllocateFile(PGSS_DUMP_FILE ".tmp", PG_BINARY_W);
-       if (file == NULL)
-               goto error;
-
-       if (fwrite(&PGSS_FILE_HEADER, sizeof(uint32), 1, file) != 1)
-               goto error;
-       num_entries = hash_get_num_entries(pgss_hash);
-       if (fwrite(&num_entries, sizeof(int32), 1, file) != 1)
-               goto error;
-
-       hash_seq_init(&hash_seq, pgss_hash);
-       while ((entry = hash_seq_search(&hash_seq)) != NULL)
-       {
-               int                     len = entry->query_len;
-
-               if (fwrite(entry, offsetof(pgssEntry, mutex), 1, file) != 1 ||
-                       fwrite(entry->query, 1, len, file) != len)
-                       goto error;
-       }
-
-       if (FreeFile(file))
-       {
-               file = NULL;
-               goto error;
-       }
-
-       /*
-        * Rename file into place, so we atomically replace the old one.
-        */
-       if (rename(PGSS_DUMP_FILE ".tmp", PGSS_DUMP_FILE) != 0)
-               ereport(LOG,
-                               (errcode_for_file_access(),
-                                errmsg("could not rename pg_stat_statement file \"%s\": %m",
-                                               PGSS_DUMP_FILE ".tmp")));
-
-       return;
-
-error:
-       ereport(LOG,
-                       (errcode_for_file_access(),
-                        errmsg("could not write pg_stat_statement file \"%s\": %m",
-                                       PGSS_DUMP_FILE ".tmp")));
-       if (file)
-               FreeFile(file);
-       unlink(PGSS_DUMP_FILE ".tmp");
-}
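
The startup and shutdown hooks above agree on a simple dump-file layout: a magic header, an entry count, then for each entry a fixed-size prefix followed by the query text whose length is carried in that prefix. The sketch below reproduces only that layout with stdio and a toy record type; it is not the real on-disk format and omits encoding checks, buffer growth, and the atomic rename.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static const uint32_t FILE_HEADER = 0x20120328; /* magic, as in the code above */

typedef struct toyRec
{
    int64_t     calls;
    int32_t     query_len;
} toyRec;

static int
dump(const char *path, const char *query)
{
    FILE       *f = fopen(path, "wb");
    int32_t     num = 1;
    toyRec      rec = {5, (int32_t) strlen(query)};

    if (f == NULL)
        return -1;
    /* header, then entry count, then fixed prefix + variable-length text */
    if (fwrite(&FILE_HEADER, sizeof(uint32_t), 1, f) != 1 ||
        fwrite(&num, sizeof(int32_t), 1, f) != 1 ||
        fwrite(&rec, sizeof(rec), 1, f) != 1 ||
        fwrite(query, 1, rec.query_len, f) != (size_t) rec.query_len)
    {
        fclose(f);
        return -1;
    }
    return fclose(f);
}

static int
load(const char *path)
{
    FILE       *f = fopen(path, "rb");
    uint32_t    header;
    int32_t     num;

    if (f == NULL)
        return -1;
    if (fread(&header, sizeof(uint32_t), 1, f) != 1 ||
        header != FILE_HEADER ||
        fread(&num, sizeof(int32_t), 1, f) != 1)
        goto error;

    for (int32_t i = 0; i < num; i++)
    {
        toyRec      rec;
        char        buf[256];

        if (fread(&rec, sizeof(rec), 1, f) != 1 ||
            rec.query_len < 0 || rec.query_len >= (int32_t) sizeof(buf) ||
            fread(buf, 1, rec.query_len, f) != (size_t) rec.query_len)
            goto error;
        buf[rec.query_len] = '\0';
        printf("calls=%lld query=%s\n", (long long) rec.calls, buf);
    }
    fclose(f);
    return 0;

error:
    fclose(f);
    return -1;
}

int
main(void)
{
    const char *path = "toy_stats.tmp";

    if (dump(path, "SELECT 1") == 0)
        load(path);
    remove(path);
    return 0;
}
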
-
-/*
- * Post-parse-analysis hook: mark query with a queryId
- */
-static void
-pgss_post_parse_analyze(ParseState *pstate, Query *query)
-{
-       pgssJumbleState jstate;
-
-       /* Assert we didn't do this already */
-       Assert(query->queryId == 0);
-
-       /* Safety check... */
-       if (!pgss || !pgss_hash)
-               return;
-
-       /*
-        * Utility statements get queryId zero.  We do this even in cases where
-        * the statement contains an optimizable statement for which a queryId
-        * could be derived (such as EXPLAIN or DECLARE CURSOR).  For such cases,
-        * runtime control will first go through ProcessUtility and then the
-        * executor, and we don't want the executor hooks to do anything, since we
-        * are already measuring the statement's costs at the utility level.
-        */
-       if (query->utilityStmt)
-       {
-               query->queryId = 0;
-               return;
-       }
-
-       /* Set up workspace for query jumbling */
-       jstate.jumble = (unsigned char *) palloc(JUMBLE_SIZE);
-       jstate.jumble_len = 0;
-       jstate.clocations_buf_size = 32;
-       jstate.clocations = (pgssLocationLen *)
-               palloc(jstate.clocations_buf_size * sizeof(pgssLocationLen));
-       jstate.clocations_count = 0;
-
-       /* Compute query ID and mark the Query node with it */
-       JumbleQuery(&jstate, query);
-       query->queryId = hash_any(jstate.jumble, jstate.jumble_len);
-
-       /*
-        * If we are unlucky enough to get a hash of zero, use 1 instead, to
-        * prevent confusion with the utility-statement case.
-        */
-       if (query->queryId == 0)
-               query->queryId = 1;
-
-       /*
-        * If we were able to identify any ignorable constants, we immediately
-        * create a hash table entry for the query, so that we can record the
-        * normalized form of the query string.  If there were no such constants,
-        * the normalized string would be the same as the query text anyway, so
-        * there's no need for an early entry.
-        */
-       if (jstate.clocations_count > 0)
-               pgss_store(pstate->p_sourcetext,
-                                  query->queryId,
-                                  0,
-                                  0,
-                                  NULL,
-                                  &jstate);
-}
-
-/*
- * ExecutorStart hook: start up tracking if needed
- */
-static void
-pgss_ExecutorStart(QueryDesc *queryDesc, int eflags)
-{
-       if (prev_ExecutorStart)
-               prev_ExecutorStart(queryDesc, eflags);
-       else
-               standard_ExecutorStart(queryDesc, eflags);
-
-       /*
-        * If query has queryId zero, don't track it.  This prevents double
-        * counting of optimizable statements that are directly contained in
-        * utility statements.
-        */
-       if (pgss_enabled() && queryDesc->plannedstmt->queryId != 0)
-       {
-               /*
-                * Set up to track total elapsed time in ExecutorRun.  Make sure the
-                * space is allocated in the per-query context so it will go away at
-                * ExecutorEnd.
-                */
-               if (queryDesc->totaltime == NULL)
-               {
-                       MemoryContext oldcxt;
-
-                       oldcxt = MemoryContextSwitchTo(queryDesc->estate->es_query_cxt);
-                       queryDesc->totaltime = InstrAlloc(1, INSTRUMENT_ALL);
-                       MemoryContextSwitchTo(oldcxt);
-               }
-       }
-}
-
-/*
- * ExecutorRun hook: all we need do is track nesting depth
- */
-static void
-pgss_ExecutorRun(QueryDesc *queryDesc, ScanDirection direction, long count)
-{
-       nested_level++;
-       PG_TRY();
-       {
-               if (prev_ExecutorRun)
-                       prev_ExecutorRun(queryDesc, direction, count);
-               else
-                       standard_ExecutorRun(queryDesc, direction, count);
-               nested_level--;
-       }
-       PG_CATCH();
-       {
-               nested_level--;
-               PG_RE_THROW();
-       }
-       PG_END_TRY();
-}
-
-/*
- * ExecutorFinish hook: all we need do is track nesting depth
- */
-static void
-pgss_ExecutorFinish(QueryDesc *queryDesc)
-{
-       nested_level++;
-       PG_TRY();
-       {
-               if (prev_ExecutorFinish)
-                       prev_ExecutorFinish(queryDesc);
-               else
-                       standard_ExecutorFinish(queryDesc);
-               nested_level--;
-       }
-       PG_CATCH();
-       {
-               nested_level--;
-               PG_RE_THROW();
-       }
-       PG_END_TRY();
-}
-
-/*
- * ExecutorEnd hook: store results if needed
- */
-static void
-pgss_ExecutorEnd(QueryDesc *queryDesc)
-{
-       uint32          queryId = queryDesc->plannedstmt->queryId;
-
-       if (queryId != 0 && queryDesc->totaltime && pgss_enabled())
-       {
-               /*
-                * Make sure stats accumulation is done.  (Note: it's okay if several
-                * levels of hook all do this.)
-                */
-               InstrEndLoop(queryDesc->totaltime);
-
-               pgss_store(queryDesc->sourceText,
-                                  queryId,
-                                  queryDesc->totaltime->total * 1000.0,                /* convert to msec */
-                                  queryDesc->estate->es_processed,
-                                  &queryDesc->totaltime->bufusage,
-                                  NULL);
-       }
-
-       if (prev_ExecutorEnd)
-               prev_ExecutorEnd(queryDesc);
-       else
-               standard_ExecutorEnd(queryDesc);
-}
-
-/*
- * ProcessUtility hook
- */
-static void
-pgss_ProcessUtility(Node *parsetree, const char *queryString,
-                                       ProcessUtilityContext context, ParamListInfo params,
-                                       DestReceiver *dest, char *completionTag)
-{
-       /*
-        * If it's an EXECUTE statement, we don't track it and don't increment the
-        * nesting level.  This allows the cycles to be charged to the underlying
-        * PREPARE instead (by the Executor hooks), which is much more useful.
-        *
-        * We also don't track execution of PREPARE.  If we did, we would get one
-        * hash table entry for the PREPARE (with hash calculated from the query
-        * string), and then a different one with the same query string (but hash
-        * calculated from the query tree) would be used to accumulate costs of
-        * ensuing EXECUTEs.  This would be confusing, and inconsistent with other
-        * cases where planning time is not included at all.
-        */
-       if (pgss_track_utility && pgss_enabled() &&
-               !IsA(parsetree, ExecuteStmt) &&
-               !IsA(parsetree, PrepareStmt))
-       {
-               instr_time      start;
-               instr_time      duration;
-               uint64          rows = 0;
-               BufferUsage bufusage_start,
-                                       bufusage;
-               uint32          queryId;
-
-               bufusage_start = pgBufferUsage;
-               INSTR_TIME_SET_CURRENT(start);
-
-               nested_level++;
-               PG_TRY();
-               {
-                       if (prev_ProcessUtility)
-                               prev_ProcessUtility(parsetree, queryString,
-                                                                       context, params,
-                                                                       dest, completionTag);
-                       else
-                               standard_ProcessUtility(parsetree, queryString,
-                                                                               context, params,
-                                                                               dest, completionTag);
-                       nested_level--;
-               }
-               PG_CATCH();
-               {
-                       nested_level--;
-                       PG_RE_THROW();
-               }
-               PG_END_TRY();
-
-               INSTR_TIME_SET_CURRENT(duration);
-               INSTR_TIME_SUBTRACT(duration, start);
-
-               /* parse command tag to retrieve the number of affected rows. */
-               if (completionTag &&
-                       sscanf(completionTag, "COPY " UINT64_FORMAT, &rows) != 1)
-                       rows = 0;
-
-               /* calc differences of buffer counters. */
-               bufusage.shared_blks_hit =
-                       pgBufferUsage.shared_blks_hit - bufusage_start.shared_blks_hit;
-               bufusage.shared_blks_read =
-                       pgBufferUsage.shared_blks_read - bufusage_start.shared_blks_read;
-               bufusage.shared_blks_dirtied =
-                       pgBufferUsage.shared_blks_dirtied - bufusage_start.shared_blks_dirtied;
-               bufusage.shared_blks_written =
-                       pgBufferUsage.shared_blks_written - bufusage_start.shared_blks_written;
-               bufusage.local_blks_hit =
-                       pgBufferUsage.local_blks_hit - bufusage_start.local_blks_hit;
-               bufusage.local_blks_read =
-                       pgBufferUsage.local_blks_read - bufusage_start.local_blks_read;
-               bufusage.local_blks_dirtied =
-                       pgBufferUsage.local_blks_dirtied - bufusage_start.local_blks_dirtied;
-               bufusage.local_blks_written =
-                       pgBufferUsage.local_blks_written - bufusage_start.local_blks_written;
-               bufusage.temp_blks_read =
-                       pgBufferUsage.temp_blks_read - bufusage_start.temp_blks_read;
-               bufusage.temp_blks_written =
-                       pgBufferUsage.temp_blks_written - bufusage_start.temp_blks_written;
-               bufusage.blk_read_time = pgBufferUsage.blk_read_time;
-               INSTR_TIME_SUBTRACT(bufusage.blk_read_time, bufusage_start.blk_read_time);
-               bufusage.blk_write_time = pgBufferUsage.blk_write_time;
-               INSTR_TIME_SUBTRACT(bufusage.blk_write_time, bufusage_start.blk_write_time);
-
-               /* For utility statements, we just hash the query string directly */
-               queryId = pgss_hash_string(queryString);
-
-               pgss_store(queryString,
-                                  queryId,
-                                  INSTR_TIME_GET_MILLISEC(duration),
-                                  rows,
-                                  &bufusage,
-                                  NULL);
-       }
-       else
-       {
-               if (prev_ProcessUtility)
-                       prev_ProcessUtility(parsetree, queryString,
-                                                               context, params,
-                                                               dest, completionTag);
-               else
-                       standard_ProcessUtility(parsetree, queryString,
-                                                                       context, params,
-                                                                       dest, completionTag);
-       }
-}
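
One detail worth calling out in pgss_ProcessUtility: the row count for utility commands comes from parsing completionTag, and only a tag of the form "COPY <n>" yields one (UINT64_FORMAT is PostgreSQL's portability macro for the 64-bit format specifier). A standalone sketch of the same check using the standard %llu specifier:

#include <stdio.h>

/*
 * Toy version of the completionTag check: only "COPY <n>" yields a
 * row count; anything else counts as 0 rows.
 */
static unsigned long long
rows_from_tag(const char *tag)
{
    unsigned long long rows = 0;

    if (tag == NULL || sscanf(tag, "COPY %llu", &rows) != 1)
        rows = 0;
    return rows;
}

int
main(void)
{
    printf("%llu\n", rows_from_tag("COPY 42"));      /* 42 */
    printf("%llu\n", rows_from_tag("CREATE TABLE")); /* 0 */
    return 0;
}
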
-
-/*
- * Calculate hash value for a key
- */
-static uint32
-pgss_hash_fn(const void *key, Size keysize)
-{
-       const pgssHashKey *k = (const pgssHashKey *) key;
-
-       /* we don't bother to include encoding in the hash */
-       return hash_uint32((uint32) k->userid) ^
-               hash_uint32((uint32) k->dbid) ^
-               hash_uint32((uint32) k->queryid);
-}
-
-/*
- * Compare two keys - zero means match
- */
-static int
-pgss_match_fn(const void *key1, const void *key2, Size keysize)
-{
-       const pgssHashKey *k1 = (const pgssHashKey *) key1;
-       const pgssHashKey *k2 = (const pgssHashKey *) key2;
-
-       if (k1->userid == k2->userid &&
-               k1->dbid == k2->dbid &&
-               k1->encoding == k2->encoding &&
-               k1->queryid == k2->queryid)
-               return 0;
-       else
-               return 1;
-}
-
-/*
- * Given an arbitrarily long query string, produce a hash for the purposes of
- * identifying the query, without normalizing constants.  Used when hashing
- * utility statements.
- */
-static uint32
-pgss_hash_string(const char *str)
-{
-       return hash_any((const unsigned char *) str, strlen(str));
-}
-
-/*
- * Store some statistics for a statement.
- *
- * If jstate is not NULL then we're trying to create an entry for which
- * we have no statistics as yet; we just want to record the normalized
- * query string.  total_time, rows, bufusage are ignored in this case.
- */
-static void
-pgss_store(const char *query, uint32 queryId,
-                  double total_time, uint64 rows,
-                  const BufferUsage *bufusage,
-                  pgssJumbleState *jstate)
-{
-       pgssHashKey key;
-       pgssEntry  *entry;
-       char       *norm_query = NULL;
-
-       Assert(query != NULL);
-
-       /* Safety check... */
-       if (!pgss || !pgss_hash)
-               return;
-
-       /* Set up key for hashtable search */
-       key.userid = GetUserId();
-       key.dbid = MyDatabaseId;
-       key.encoding = GetDatabaseEncoding();
-       key.queryid = queryId;
-
-       /* Lookup the hash table entry with shared lock. */
-       LWLockAcquire(pgss->lock, LW_SHARED);
-
-       entry = (pgssEntry *) hash_search(pgss_hash, &key, HASH_FIND, NULL);
-
-       /* Create new entry, if not present */
-       if (!entry)
-       {
-               int                     query_len;
-
-               /*
-                * We'll need exclusive lock to make a new entry.  There is no point
-                * in holding shared lock while we normalize the string, though.
-                */
-               LWLockRelease(pgss->lock);
-
-               query_len = strlen(query);
-
-               if (jstate)
-               {
-                       /* Normalize the string if enabled */
-                       norm_query = generate_normalized_query(jstate, query,
-                                                                                                  &query_len,
-                                                                                                  key.encoding);
-
-                       /* Acquire exclusive lock as required by entry_alloc() */
-                       LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
-
-                       entry = entry_alloc(&key, norm_query, query_len, true);
-               }
-               else
-               {
-                       /*
-                        * We're just going to store the query string as-is; but we have
-                        * to truncate it if over-length.
-                        */
-                       if (query_len >= pgss->query_size)
-                               query_len = pg_encoding_mbcliplen(key.encoding,
-                                                                                                 query,
-                                                                                                 query_len,
-                                                                                                 pgss->query_size - 1);
-
-                       /* Acquire exclusive lock as required by entry_alloc() */
-                       LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
-
-                       entry = entry_alloc(&key, query, query_len, false);
-               }
-       }
-
-       /* Increment the counts, except when jstate is not NULL */
-       if (!jstate)
-       {
-               /*
-                * Grab the spinlock while updating the counters (see comment about
-                * locking rules at the head of the file)
-                */
-               volatile pgssEntry *e = (volatile pgssEntry *) entry;
-
-               SpinLockAcquire(&e->mutex);
-
-               /* "Unstick" entry if it was previously sticky */
-               if (e->counters.calls == 0)
-                       e->counters.usage = USAGE_INIT;
-
-               e->counters.calls += 1;
-               e->counters.total_time += total_time;
-               e->counters.rows += rows;
-               e->counters.shared_blks_hit += bufusage->shared_blks_hit;
-               e->counters.shared_blks_read += bufusage->shared_blks_read;
-               e->counters.shared_blks_dirtied += bufusage->shared_blks_dirtied;
-               e->counters.shared_blks_written += bufusage->shared_blks_written;
-               e->counters.local_blks_hit += bufusage->local_blks_hit;
-               e->counters.local_blks_read += bufusage->local_blks_read;
-               e->counters.local_blks_dirtied += bufusage->local_blks_dirtied;
-               e->counters.local_blks_written += bufusage->local_blks_written;
-               e->counters.temp_blks_read += bufusage->temp_blks_read;
-               e->counters.temp_blks_written += bufusage->temp_blks_written;
-               e->counters.blk_read_time += INSTR_TIME_GET_MILLISEC(bufusage->blk_read_time);
-               e->counters.blk_write_time += INSTR_TIME_GET_MILLISEC(bufusage->blk_write_time);
-               e->counters.usage += USAGE_EXEC(total_time);
-
-               SpinLockRelease(&e->mutex);
-       }
-
-       LWLockRelease(pgss->lock);
-
-       /* We postpone this pfree until we're out of the lock */
-       if (norm_query)
-               pfree(norm_query);
-}
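
pgss_store demonstrates a lock-upgrade idiom: look up under the shared lock; if the entry is missing, release the lock, do the expensive normalization unlocked, then take the lock exclusively and let entry_alloc tolerate the case where another backend created the entry in the meantime. A minimal pthread sketch of the same idiom — a single string slot stands in for the hash table, and the spinlock-protected counter update is omitted:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static char entry[64];          /* "" means no entry yet */

static void
store(const char *query)
{
    int         found;

    /* look up under the shared lock */
    pthread_rwlock_rdlock(&lock);
    found = (entry[0] != '\0');
    pthread_rwlock_unlock(&lock);

    if (!found)
    {
        char        norm[64];

        /* expensive work (normalization) done without holding the lock */
        snprintf(norm, sizeof(norm), "normalized: %s", query);

        /* reacquire exclusively; someone else may have won the race */
        pthread_rwlock_wrlock(&lock);
        if (entry[0] == '\0')
            strncpy(entry, norm, sizeof(entry) - 1);
        pthread_rwlock_unlock(&lock);
    }
}

int
main(void)
{
    store("SELECT 1");
    store("SELECT 1");          /* second call finds the existing entry */

    printf("%s\n", entry);
    return 0;
}
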
-
-/*
- * Reset all statement statistics.
- */
-Datum
-pg_stat_statements_reset(PG_FUNCTION_ARGS)
-{
-       if (!pgss || !pgss_hash)
-               ereport(ERROR,
-                               (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                                errmsg("pg_stat_statements must be loaded via shared_preload_libraries")));
-       entry_reset();
-       PG_RETURN_VOID();
-}
-
-#define PG_STAT_STATEMENTS_COLS_V1_0   14
-#define PG_STAT_STATEMENTS_COLS                        18
-
-/*
- * Retrieve statement statistics.
- */
-Datum
-pg_stat_statements(PG_FUNCTION_ARGS)
-{
-       ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-       TupleDesc       tupdesc;
-       Tuplestorestate *tupstore;
-       MemoryContext per_query_ctx;
-       MemoryContext oldcontext;
-       Oid                     userid = GetUserId();
-       bool            is_superuser = superuser();
-       HASH_SEQ_STATUS hash_seq;
-       pgssEntry  *entry;
-       bool            sql_supports_v1_1_counters = true;
-
-       if (!pgss || !pgss_hash)
-               ereport(ERROR,
-                               (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                                errmsg("pg_stat_statements must be loaded via shared_preload_libraries")));
-
-       /* check to see if caller supports us returning a tuplestore */
-       if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
-               ereport(ERROR,
-                               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                errmsg("set-valued function called in context that cannot accept a set")));
-       if (!(rsinfo->allowedModes & SFRM_Materialize))
-               ereport(ERROR,
-                               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                errmsg("materialize mode required, but it is not " \
-                                               "allowed in this context")));
-
-       /* Build a tuple descriptor for our result type */
-       if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
-               elog(ERROR, "return type must be a row type");
-       if (tupdesc->natts == PG_STAT_STATEMENTS_COLS_V1_0)
-               sql_supports_v1_1_counters = false;
-
-       per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
-       oldcontext = MemoryContextSwitchTo(per_query_ctx);
-
-       tupstore = tuplestore_begin_heap(true, false, work_mem);
-       rsinfo->returnMode = SFRM_Materialize;
-       rsinfo->setResult = tupstore;
-       rsinfo->setDesc = tupdesc;
-
-       MemoryContextSwitchTo(oldcontext);
-
-       LWLockAcquire(pgss->lock, LW_SHARED);
-
-       hash_seq_init(&hash_seq, pgss_hash);
-       while ((entry = hash_seq_search(&hash_seq)) != NULL)
-       {
-               Datum           values[PG_STAT_STATEMENTS_COLS];
-               bool            nulls[PG_STAT_STATEMENTS_COLS];
-               int                     i = 0;
-               Counters        tmp;
-
-               memset(values, 0, sizeof(values));
-               memset(nulls, 0, sizeof(nulls));
-
-               values[i++] = ObjectIdGetDatum(entry->key.userid);
-               values[i++] = ObjectIdGetDatum(entry->key.dbid);
-
-               if (is_superuser || entry->key.userid == userid)
-               {
-                       char       *qstr;
-
-                       qstr = (char *)
-                               pg_do_encoding_conversion((unsigned char *) entry->query,
-                                                                                 entry->query_len,
-                                                                                 entry->key.encoding,
-                                                                                 GetDatabaseEncoding());
-                       values[i++] = CStringGetTextDatum(qstr);
-                       if (qstr != entry->query)
-                               pfree(qstr);
-               }
-               else
-                       values[i++] = CStringGetTextDatum("<insufficient privilege>");
-
-               /* copy counters to a local variable to keep locking time short */
-               {
-                       volatile pgssEntry *e = (volatile pgssEntry *) entry;
-
-                       SpinLockAcquire(&e->mutex);
-                       tmp = e->counters;
-                       SpinLockRelease(&e->mutex);
-               }
-
-               /* Skip entry if unexecuted (ie, it's a pending "sticky" entry) */
-               if (tmp.calls == 0)
-                       continue;
-
-               values[i++] = Int64GetDatumFast(tmp.calls);
-               values[i++] = Float8GetDatumFast(tmp.total_time);
-               values[i++] = Int64GetDatumFast(tmp.rows);
-               values[i++] = Int64GetDatumFast(tmp.shared_blks_hit);
-               values[i++] = Int64GetDatumFast(tmp.shared_blks_read);
-               if (sql_supports_v1_1_counters)
-                       values[i++] = Int64GetDatumFast(tmp.shared_blks_dirtied);
-               values[i++] = Int64GetDatumFast(tmp.shared_blks_written);
-               values[i++] = Int64GetDatumFast(tmp.local_blks_hit);
-               values[i++] = Int64GetDatumFast(tmp.local_blks_read);
-               if (sql_supports_v1_1_counters)
-                       values[i++] = Int64GetDatumFast(tmp.local_blks_dirtied);
-               values[i++] = Int64GetDatumFast(tmp.local_blks_written);
-               values[i++] = Int64GetDatumFast(tmp.temp_blks_read);
-               values[i++] = Int64GetDatumFast(tmp.temp_blks_written);
-               if (sql_supports_v1_1_counters)
-               {
-                       values[i++] = Float8GetDatumFast(tmp.blk_read_time);
-                       values[i++] = Float8GetDatumFast(tmp.blk_write_time);
-               }
-
-               Assert(i == (sql_supports_v1_1_counters ?
-                                        PG_STAT_STATEMENTS_COLS : PG_STAT_STATEMENTS_COLS_V1_0));
-
-               tuplestore_putvalues(tupstore, tupdesc, values, nulls);
-       }
-
-       LWLockRelease(pgss->lock);
-
-       /* clean up and return the tuplestore */
-       tuplestore_donestoring(tupstore);
-
-       return (Datum) 0;
-}
-
-/*
- * Estimate shared memory space needed.
- */
-static Size
-pgss_memsize(void)
-{
-       Size            size;
-       Size            entrysize;
-
-       size = MAXALIGN(sizeof(pgssSharedState));
-       entrysize = offsetof(pgssEntry, query) +pgstat_track_activity_query_size;
-       size = add_size(size, hash_estimate_size(pgss_max, entrysize));
-
-       return size;
-}
-
-/*
- * Allocate a new hashtable entry.
- * caller must hold an exclusive lock on pgss->lock
- *
- * "query" need not be null-terminated; we rely on query_len instead
- *
- * If "sticky" is true, make the new entry artificially sticky so that it will
- * probably still be there when the query finishes execution.  We do this by
- * giving it a median usage value rather than the normal value.  (Strictly
- * speaking, query strings are normalized on a best effort basis, though it
- * would be difficult to demonstrate this even under artificial conditions.)
- *
- * Note: despite needing exclusive lock, it's not an error for the target
- * entry to already exist.     This is because pgss_store releases and
- * reacquires lock after failing to find a match; so someone else could
- * have made the entry while we waited to get exclusive lock.
- */
-static pgssEntry *
-entry_alloc(pgssHashKey *key, const char *query, int query_len, bool sticky)
-{
-       pgssEntry  *entry;
-       bool            found;
-
-       /* Make space if needed */
-       while (hash_get_num_entries(pgss_hash) >= pgss_max)
-               entry_dealloc();
-
-       /* Find or create an entry with desired hash code */
-       entry = (pgssEntry *) hash_search(pgss_hash, key, HASH_ENTER, &found);
-
-       if (!found)
-       {
-               /* New entry, initialize it */
-
-               /* reset the statistics */
-               memset(&entry->counters, 0, sizeof(Counters));
-               /* set the appropriate initial usage count */
-               entry->counters.usage = sticky ? pgss->cur_median_usage : USAGE_INIT;
-               /* re-initialize the mutex each time ... we assume no one using it */
-               SpinLockInit(&entry->mutex);
-               /* ... and don't forget the query text */
-               Assert(query_len >= 0 && query_len < pgss->query_size);
-               entry->query_len = query_len;
-               memcpy(entry->query, query, query_len);
-               entry->query[query_len] = '\0';
-       }
-
-       return entry;
-}
-
-/*
- * qsort comparator for sorting into increasing usage order
- */
-static int
-entry_cmp(const void *lhs, const void *rhs)
-{
-       double          l_usage = (*(pgssEntry *const *) lhs)->counters.usage;
-       double          r_usage = (*(pgssEntry *const *) rhs)->counters.usage;
-
-       if (l_usage < r_usage)
-               return -1;
-       else if (l_usage > r_usage)
-               return +1;
-       else
-               return 0;
-}
-
-/*
- * Deallocate least used entries.
- * Caller must hold an exclusive lock on pgss->lock.
- */
-static void
-entry_dealloc(void)
-{
-       HASH_SEQ_STATUS hash_seq;
-       pgssEntry **entries;
-       pgssEntry  *entry;
-       int                     nvictims;
-       int                     i;
-
-       /*
-        * Sort entries by usage and deallocate USAGE_DEALLOC_PERCENT of them.
-        * While we're scanning the table, apply the decay factor to the usage
-        * values.
-        */
-
-       entries = palloc(hash_get_num_entries(pgss_hash) * sizeof(pgssEntry *));
-
-       i = 0;
-       hash_seq_init(&hash_seq, pgss_hash);
-       while ((entry = hash_seq_search(&hash_seq)) != NULL)
-       {
-               entries[i++] = entry;
-               /* "Sticky" entries get a different usage decay rate. */
-               if (entry->counters.calls == 0)
-                       entry->counters.usage *= STICKY_DECREASE_FACTOR;
-               else
-                       entry->counters.usage *= USAGE_DECREASE_FACTOR;
-       }
-
-       qsort(entries, i, sizeof(pgssEntry *), entry_cmp);
-
-       /* Also, record the (approximate) median usage */
-       if (i > 0)
-               pgss->cur_median_usage = entries[i / 2]->counters.usage;
-
-       nvictims = Max(10, i * USAGE_DEALLOC_PERCENT / 100);
-       nvictims = Min(nvictims, i);
-
-       for (i = 0; i < nvictims; i++)
-       {
-               hash_search(pgss_hash, &entries[i]->key, HASH_REMOVE, NULL);
-       }
-
-       pfree(entries);
-}
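
entry_dealloc applies two policies at once: every entry's usage decays as the table is scanned (sticky, never-executed entries decay faster), then roughly USAGE_DEALLOC_PERCENT of the least-used entries are evicted, and the post-decay median is remembered to seed future sticky entries. A self-contained sketch of that policy over a plain array, reusing the constants defined earlier in this file (toyEntry and usage_cmp are hypothetical stand-ins):

#include <stdio.h>
#include <stdlib.h>

#define USAGE_DECREASE_FACTOR   0.99    /* decay for executed entries */
#define STICKY_DECREASE_FACTOR  0.50    /* faster decay for sticky entries */
#define USAGE_DEALLOC_PERCENT   5       /* evict this % of entries at once */

typedef struct toyEntry
{
    long        calls;
    double      usage;
    int         evicted;
} toyEntry;

/* qsort comparator: increasing usage order, as entry_cmp does */
static int
usage_cmp(const void *a, const void *b)
{
    double      l = (*(toyEntry *const *) a)->usage;
    double      r = (*(toyEntry *const *) b)->usage;

    return (l < r) ? -1 : (l > r) ? 1 : 0;
}

int
main(void)
{
    toyEntry    table[20];
    toyEntry   *sorted[20];
    int         n = 20;
    int         nvictims;

    /* fabricate some entries; every 5th one is "sticky" (never executed) */
    for (int i = 0; i < n; i++)
    {
        table[i].calls = (i % 5 == 0) ? 0 : i;
        table[i].usage = 1.0 + i;
        table[i].evicted = 0;
        sorted[i] = &table[i];
    }

    /* decay usage while scanning, as entry_dealloc does */
    for (int i = 0; i < n; i++)
        sorted[i]->usage *= (sorted[i]->calls == 0) ?
            STICKY_DECREASE_FACTOR : USAGE_DECREASE_FACTOR;

    qsort(sorted, n, sizeof(toyEntry *), usage_cmp);

    /* evict at least 10 entries, at most all of them */
    nvictims = n * USAGE_DEALLOC_PERCENT / 100;
    if (nvictims < 10)
        nvictims = 10;
    if (nvictims > n)
        nvictims = n;
    for (int i = 0; i < nvictims; i++)
        sorted[i]->evicted = 1;

    printf("median usage after decay: %.2f\n", sorted[n / 2]->usage);
    printf("evicted %d of %d entries\n", nvictims, n);
    return 0;
}
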
-
-/*
- * Release all entries.
- */
-static void
-entry_reset(void)
-{
-       HASH_SEQ_STATUS hash_seq;
-       pgssEntry  *entry;
-
-       LWLockAcquire(pgss->lock, LW_EXCLUSIVE);
-
-       hash_seq_init(&hash_seq, pgss_hash);
-       while ((entry = hash_seq_search(&hash_seq)) != NULL)
-       {
-               hash_search(pgss_hash, &entry->key, HASH_REMOVE, NULL);
-       }
-
-       LWLockRelease(pgss->lock);
-}
-#endif
 
 /*
  * AppendJumble: Append a value that is substantive in a given query to
@@ -1431,8 +92,10 @@ JumbleQuery(pgssJumbleState *jstate, Query *query)
        JumbleRangeTable(jstate, query->rtable);
        JumbleExpr(jstate, (Node *) query->jointree);
        JumbleExpr(jstate, (Node *) query->targetList);
+       JumbleExpr(jstate, (Node *) query->onConflict);
        JumbleExpr(jstate, (Node *) query->returningList);
        JumbleExpr(jstate, (Node *) query->groupClause);
+       JumbleExpr(jstate, (Node *) query->groupingSets);
        JumbleExpr(jstate, query->havingQual);
        JumbleExpr(jstate, (Node *) query->windowClause);
        JumbleExpr(jstate, (Node *) query->distinctClause);
@@ -1453,14 +116,14 @@ JumbleRangeTable(pgssJumbleState *jstate, List *rtable)
 
        foreach(lc, rtable)
        {
-               RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
+               RangeTblEntry *rte = lfirst_node(RangeTblEntry, lc);
 
-               Assert(IsA(rte, RangeTblEntry));
                APP_JUMB(rte->rtekind);
                switch (rte->rtekind)
                {
                        case RTE_RELATION:
                                APP_JUMB(rte->relid);
+                               JumbleExpr(jstate, (Node *) rte->tablesample);
                                break;
                        case RTE_SUBQUERY:
                                JumbleQuery(jstate, rte->subquery);
@@ -1469,7 +132,10 @@ JumbleRangeTable(pgssJumbleState *jstate, List *rtable)
                                APP_JUMB(rte->jointype);
                                break;
                        case RTE_FUNCTION:
-                               JumbleExpr(jstate, rte->funcexpr);
+                               JumbleExpr(jstate, (Node *) rte->functions);
+                               break;
+                       case RTE_TABLEFUNC:
+                               JumbleExpr(jstate, (Node *) rte->tablefunc);
                                break;
                        case RTE_VALUES:
                                JumbleExpr(jstate, (Node *) rte->values_lists);
@@ -1483,6 +149,9 @@ JumbleRangeTable(pgssJumbleState *jstate, List *rtable)
                                APP_JUMB_STRING(rte->ctename);
                                APP_JUMB(rte->ctelevelsup);
                                break;
+                       case RTE_NAMEDTUPLESTORE:
+                               APP_JUMB_STRING(rte->enrname);
+                               break;
                        default:
                                elog(ERROR, "unrecognized RTE kind: %d", (int) rte->rtekind);
                                break;
@@ -1501,7 +170,7 @@ JumbleRangeTable(pgssJumbleState *jstate, List *rtable)
  *
  * Note: the reason we don't simply use expression_tree_walker() is that the
  * point of that function is to support tree walkers that don't care about
- * most tree node types, but here we care about all types.     We should complain
+ * most tree node types, but here we care about all types.  We should complain
  * about any unrecognized node type.
  */
 static void
@@ -1549,6 +218,10 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
                                APP_JUMB(p->paramkind);
                                APP_JUMB(p->paramid);
                                APP_JUMB(p->paramtype);
+                               /* Also, track the highest external Param id */
+                               if (p->paramkind == PARAM_EXTERN &&
+                                       p->paramid > jstate->highest_extern_param_id)
+                                       jstate->highest_extern_param_id = p->paramid;
                        }
                        break;
                case T_Aggref:
@@ -1556,9 +229,18 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
                                Aggref     *expr = (Aggref *) node;
 
                                APP_JUMB(expr->aggfnoid);
+                               JumbleExpr(jstate, (Node *) expr->aggdirectargs);
                                JumbleExpr(jstate, (Node *) expr->args);
                                JumbleExpr(jstate, (Node *) expr->aggorder);
                                JumbleExpr(jstate, (Node *) expr->aggdistinct);
+                               JumbleExpr(jstate, (Node *) expr->aggfilter);
+                       }
+                       break;
+               case T_GroupingFunc:
+                       {
+                               GroupingFunc *grpnode = (GroupingFunc *) node;
+
+                               JumbleExpr(jstate, (Node *) grpnode->refs);
                        }
                        break;
                case T_WindowFunc:
@@ -1568,6 +250,7 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
                                APP_JUMB(expr->winfnoid);
                                APP_JUMB(expr->winref);
                                JumbleExpr(jstate, (Node *) expr->args);
+                               JumbleExpr(jstate, (Node *) expr->aggfilter);
                        }
                        break;
                case T_ArrayRef:
@@ -1628,8 +311,9 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
                                SubLink    *sublink = (SubLink *) node;
 
                                APP_JUMB(sublink->subLinkType);
+                               APP_JUMB(sublink->subLinkId);
                                JumbleExpr(jstate, (Node *) sublink->testexpr);
-                               JumbleQuery(jstate, (Query *) sublink->subselect);
+                               JumbleQuery(jstate, castNode(Query, sublink->subselect));
                        }
                        break;
                case T_FieldSelect:
@@ -1695,9 +379,8 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
                                JumbleExpr(jstate, (Node *) caseexpr->arg);
                                foreach(temp, caseexpr->args)
                                {
-                                       CaseWhen   *when = (CaseWhen *) lfirst(temp);
+                                       CaseWhen   *when = lfirst_node(CaseWhen, temp);
 
-                                       Assert(IsA(when, CaseWhen));
                                        JumbleExpr(jstate, (Node *) when->expr);
                                        JumbleExpr(jstate, (Node *) when->result);
                                }
@@ -1737,6 +420,15 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
                                JumbleExpr(jstate, (Node *) mmexpr->args);
                        }
                        break;
+               case T_SQLValueFunction:
+                       {
+                               SQLValueFunction *svf = (SQLValueFunction *) node;
+
+                               APP_JUMB(svf->op);
+                               /* type is fully determined by op */
+                               APP_JUMB(svf->typmod);
+                       }
+                       break;
                case T_XmlExpr:
                        {
                                XmlExpr    *xexpr = (XmlExpr *) node;
@@ -1794,6 +486,23 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
                                APP_JUMB(ce->cursor_param);
                        }
                        break;
+               case T_NextValueExpr:
+                       {
+                               NextValueExpr *nve = (NextValueExpr *) node;
+
+                               APP_JUMB(nve->seqid);
+                               APP_JUMB(nve->typeId);
+                       }
+                       break;
+               case T_InferenceElem:
+                       {
+                               InferenceElem *ie = (InferenceElem *) node;
+
+                               APP_JUMB(ie->infercollid);
+                               APP_JUMB(ie->inferopclass);
+                               JumbleExpr(jstate, ie->expr);
+                       }
+                       break;
                case T_TargetEntry:
                        {
                                TargetEntry *tle = (TargetEntry *) node;
@@ -1830,12 +539,32 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
                                JumbleExpr(jstate, from->quals);
                        }
                        break;
+               case T_OnConflictExpr:
+                       {
+                               OnConflictExpr *conf = (OnConflictExpr *) node;
+
+                               APP_JUMB(conf->action);
+                               JumbleExpr(jstate, (Node *) conf->arbiterElems);
+                               JumbleExpr(jstate, conf->arbiterWhere);
+                               JumbleExpr(jstate, (Node *) conf->onConflictSet);
+                               JumbleExpr(jstate, conf->onConflictWhere);
+                               APP_JUMB(conf->constraint);
+                               APP_JUMB(conf->exclRelIndex);
+                               JumbleExpr(jstate, (Node *) conf->exclRelTlist);
+                       }
+                       break;
                case T_List:
                        foreach(temp, (List *) node)
                        {
                                JumbleExpr(jstate, (Node *) lfirst(temp));
                        }
                        break;
+               case T_IntList:
+                       foreach(temp, (List *) node)
+                       {
+                               APP_JUMB(lfirst_int(temp));
+                       }
+                       break;
                case T_SortGroupClause:
                        {
                                SortGroupClause *sgc = (SortGroupClause *) node;
@@ -1846,6 +575,13 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
                                APP_JUMB(sgc->nulls_first);
                        }
                        break;
+               case T_GroupingSet:
+                       {
+                               GroupingSet *gsnode = (GroupingSet *) node;
+
+                               JumbleExpr(jstate, (Node *) gsnode->content);
+                       }
+                       break;
                case T_WindowClause:
                        {
                                WindowClause *wc = (WindowClause *) node;
@@ -1864,7 +600,7 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
 
                                /* we store the string name because RTE_CTE RTEs need it */
                                APP_JUMB_STRING(cte->ctename);
-                               JumbleQuery(jstate, (Query *) cte->ctequery);
+                               JumbleQuery(jstate, castNode(Query, cte->ctequery));
                        }
                        break;
                case T_SetOperationStmt:
@@ -1877,6 +613,31 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
                                JumbleExpr(jstate, setop->rarg);
                        }
                        break;
+               case T_RangeTblFunction:
+                       {
+                               RangeTblFunction *rtfunc = (RangeTblFunction *) node;
+
+                               JumbleExpr(jstate, rtfunc->funcexpr);
+                       }
+                       break;
+               case T_TableFunc:
+                       {
+                               TableFunc  *tablefunc = (TableFunc *) node;
+
+                               JumbleExpr(jstate, tablefunc->docexpr);
+                               JumbleExpr(jstate, tablefunc->rowexpr);
+                               JumbleExpr(jstate, (Node *) tablefunc->colexprs);
+                       }
+                       break;
+               case T_TableSampleClause:
+                       {
+                               TableSampleClause *tsc = (TableSampleClause *) node;
+
+                               APP_JUMB(tsc->tsmhandler);
+                               JumbleExpr(jstate, (Node *) tsc->args);
+                               JumbleExpr(jstate, (Node *) tsc->repeatable);
+                       }
+                       break;
                default:
                        /* Only a warning, since we can stumble along anyway */
                        elog(WARNING, "unrecognized node type: %d",
@@ -1919,38 +680,48 @@ RecordConstLocation(pgssJumbleState *jstate, int location)
  * just which "equivalent" query is used to create the hashtable entry.
  * We assume this is OK.
  *
+ * If query_loc > 0, then "query" has been advanced by that much compared to
+ * the original string start, so we need to translate the provided locations
+ * to compensate.  (This lets us avoid re-scanning statements before the one
+ * of interest, so it's worth doing.)
+ *
  * *query_len_p contains the input string length, and is updated with
- * the result string length (which cannot be longer) on exit.
+ * the result string length on exit.  The resulting string might be longer
+ * or shorter depending on what happens with replacement of constants.
  *
- * Returns a palloc'd string, which is not necessarily null-terminated.
+ * Returns a palloc'd string.
  */
 static char *
 generate_normalized_query(pgssJumbleState *jstate, const char *query,
-                                                 int *query_len_p, int encoding)
+                                                 int query_loc, int *query_len_p, int encoding)
 {
        char       *norm_query;
        int                     query_len = *query_len_p;
-       int                     max_output_len;
        int                     i,
+                               norm_query_buflen,      /* Space allowed for norm_query */
                                len_to_wrt,             /* Length (in bytes) to write */
                                quer_loc = 0,   /* Source query byte location */
                                n_quer_loc = 0, /* Normalized query byte location */
                                last_off = 0,   /* Offset from start for previous tok */
-                               last_tok_len = 0;               /* Length (in bytes) of that tok */
+                               last_tok_len = 0;       /* Length (in bytes) of that tok */
 
        /*
         * Get constants' lengths (core system only gives us locations).  Note
         * this also ensures the items are sorted by location.
         */
-       fill_in_constant_lengths(jstate, query);
+       fill_in_constant_lengths(jstate, query, query_loc);
 
-       /* Allocate result buffer, ensuring we limit result to allowed size */
-#ifdef NOT_USED
-       max_output_len = Min(query_len, pgss->query_size - 1);
-#endif
-       /* XXX: pg_hint_plan doesn't truncate query string. */
-       max_output_len = query_len;
-       norm_query = palloc(max_output_len);
+       /*
+        * Allow for $n symbols to be longer than the constants they replace.
+        * Constants must take at least one byte in text form, while a $n symbol
+        * certainly isn't more than 11 bytes, even if n reaches INT_MAX.  We
+        * could refine that limit based on the max value of n for the current
+        * query, but it hardly seems worth any extra effort to do so.
+        */
+       norm_query_buflen = query_len + jstate->clocations_count * 10;
+
+       /* Allocate result buffer */
+       norm_query = palloc(norm_query_buflen + 1);
 
        for (i = 0; i < jstate->clocations_count; i++)
        {
@@ -1958,60 +729,48 @@ generate_normalized_query(pgssJumbleState *jstate, const char *query,
                                        tok_len;        /* Length (in bytes) of that tok */
 
                off = jstate->clocations[i].location;
+               /* Adjust recorded location if we're dealing with partial string */
+               off -= query_loc;
+
                tok_len = jstate->clocations[i].length;
 
                if (tok_len < 0)
                        continue;                       /* ignore any duplicates */
 
-               /* Copy next chunk, or as much as will fit */
+               /* Copy next chunk (what precedes the next constant) */
                len_to_wrt = off - last_off;
                len_to_wrt -= last_tok_len;
-               len_to_wrt = Min(len_to_wrt, max_output_len - n_quer_loc);
 
                Assert(len_to_wrt >= 0);
                memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt);
                n_quer_loc += len_to_wrt;
 
-               if (n_quer_loc < max_output_len)
-                       norm_query[n_quer_loc++] = '?';
+               /*
+                * PG_HINT_PLAN: don't take in commit a6f22e8356, so that the
+                * established behavior of emitting '?' here is kept stable.
+                */
+               /* And insert a '?' in place of the constant token */
+               norm_query[n_quer_loc++] = '?';
 
                quer_loc = off + tok_len;
                last_off = off;
                last_tok_len = tok_len;
-
-               /* If we run out of space, might as well stop iterating */
-               if (n_quer_loc >= max_output_len)
-                       break;
        }
 
        /*
         * We've copied up until the last ignorable constant.  Copy over the
-        * remaining bytes of the original query string, or at least as much as
-        * will fit.
+        * remaining bytes of the original query string.
         */
        len_to_wrt = query_len - quer_loc;
-       len_to_wrt = Min(len_to_wrt, max_output_len - n_quer_loc);
 
        Assert(len_to_wrt >= 0);
        memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt);
        n_quer_loc += len_to_wrt;
 
-       /* XXX: pg_hint_plan doesn't truncate query string. */
-#ifdef NOT_USED
-       /*
-        * If we ran out of space, we need to do an encoding-aware truncation,
-        * just to make sure we don't have an incomplete character at the end.
-        */
-       if (n_quer_loc >= max_output_len)
-               query_len = pg_encoding_mbcliplen(encoding,
-                                                                                 norm_query,
-                                                                                 n_quer_loc,
-                                                                                 pgss->query_size - 1);
-       else
-#endif
-               query_len = n_quer_loc;
+       Assert(n_quer_loc <= norm_query_buflen);
+       norm_query[n_quer_loc] = '\0';
 
-       *query_len_p = query_len;
+       *query_len_p = n_quer_loc;
        return norm_query;
 }
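
For readers who want generate_normalized_query() above in isolation, here is a stripped-down, standalone sketch of the same loop: copy the text that precedes each constant, emit '?' in its place, and translate each recorded location by query_loc when the supplied string starts partway into the original statement text. ConstLoc and normalize_sketch are hypothetical names; unlike the real function, the sketch allocates only query_len + 1 bytes, which suffices because '?' is never longer than the constant it replaces, whereas the code above reserves clocations_count * 10 extra bytes for '$n' markers even though this patch keeps emitting '?'.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct ConstLoc
{
    int     location;   /* byte offset in the *original* statement text */
    int     length;     /* token length in bytes, or -1 for a duplicate */
} ConstLoc;

/*
 * Replace each recorded constant with '?'.  query may start query_loc bytes
 * into the original statement text, so recorded locations are translated.
 * Returns a malloc'd NUL-terminated string.
 */
static char *
normalize_sketch(const char *query, int query_len,
                 const ConstLoc *locs, int nlocs, int query_loc)
{
    char   *out = malloc(query_len + 1);
    int     in_pos = 0,
            out_pos = 0,
            last_off = 0,
            last_len = 0;

    for (int i = 0; i < nlocs; i++)
    {
        int     off = locs[i].location - query_loc; /* translate location */
        int     tok_len = locs[i].length;
        int     len_to_wrt;

        if (tok_len < 0)
            continue;           /* duplicate entry, ignore */

        len_to_wrt = off - last_off - last_len; /* text before the constant */
        memcpy(out + out_pos, query + in_pos, len_to_wrt);
        out_pos += len_to_wrt;
        out[out_pos++] = '?';   /* stand-in for the constant */

        in_pos = off + tok_len;
        last_off = off;
        last_len = tok_len;
    }
    /* copy whatever follows the last constant */
    memcpy(out + out_pos, query + in_pos, query_len - in_pos);
    out_pos += query_len - in_pos;
    out[out_pos] = '\0';
    return out;
}

int
main(void)
{
    /* second statement of "SELECT 1; SELECT a + 42 FROM t" starts at 10 */
    const char *stmt = "SELECT a + 42 FROM t";
    ConstLoc    locs[] = {{21, 2}}; /* "42" at offset 21 of the full text */
    char       *norm = normalize_sketch(stmt, (int) strlen(stmt), locs, 1, 10);

    puts(norm);                 /* SELECT a + ? FROM t */
    free(norm);
    return 0;
}
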
 
@@ -2030,15 +789,21 @@ generate_normalized_query(pgssJumbleState *jstate, const char *query,
  * a problem.
  *
  * Duplicate constant pointers are possible, and will have their lengths
- * marked as '-1', so that they are later ignored.     (Actually, we assume the
+ * marked as '-1', so that they are later ignored.  (Actually, we assume the
  * lengths were initialized as -1 to start with, and don't change them here.)
  *
+ * If query_loc > 0, then "query" has been advanced by that much compared to
+ * the original string start, so we need to translate the provided locations
+ * to compensate.  (This lets us avoid re-scanning statements before the one
+ * of interest, so it's worth doing.)
+ *
  * N.B. There is an assumption that a '-' character at a Const location begins
  * a negative numeric constant.  This precludes there ever being another
  * reason for a constant to start with a '-'.
  */
 static void
-fill_in_constant_lengths(pgssJumbleState *jstate, const char *query)
+fill_in_constant_lengths(pgssJumbleState *jstate, const char *query,
+                                                int query_loc)
 {
        pgssLocationLen *locs;
        core_yyscan_t yyscanner;
@@ -2063,12 +828,18 @@ fill_in_constant_lengths(pgssJumbleState *jstate, const char *query)
                                                         ScanKeywords,
                                                         NumScanKeywords);
 
+       /* we don't want to re-emit any escape string warnings */
+       yyextra.escape_string_warning = false;
+
        /* Search for each constant, in sequence */
        for (i = 0; i < jstate->clocations_count; i++)
        {
                int                     loc = locs[i].location;
                int                     tok;
 
+               /* Adjust recorded location if we're dealing with partial string */
+               loc -= query_loc;
+
                Assert(loc >= 0);
 
                if (loc <= last_loc)
@@ -2099,7 +870,7 @@ fill_in_constant_lengths(pgssJumbleState *jstate, const char *query)
                                         * adjustment of location to that of the leading '-'
                                         * operator in the event of a negative constant.  It is
                                         * also useful for our purposes to start from the minus
-                                        * symbol.      In this way, queries like "select * from foo
+                                        * symbol.  In this way, queries like "select * from foo
                                         * where bar = 1" and "select * from foo where bar = -2"
                                         * will have identical normalized query strings.
                                         */
@@ -2143,4 +914,3 @@ comp_location(const void *a, const void *b)
        else
                return 0;
 }
-
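
As a closing illustration of what fill_in_constant_lengths() above does with the recorded constant locations: sort them, translate by query_loc, mark duplicates with length -1 so normalization skips them, and fold a leading '-' into the constant, matching the note about negative numeric constants. The real code derives token lengths by re-running the core SQL lexer; the naive digit scan below is only a stand-in, and fill_lengths_sketch, ConstLoc and cmp_location are hypothetical names.

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct ConstLoc
{
    int     location;   /* byte offset in the *original* statement text */
    int     length;     /* filled in below; assumed initialized to -1 */
} ConstLoc;

static int
cmp_location(const void *a, const void *b)
{
    int     la = ((const ConstLoc *) a)->location;
    int     lb = ((const ConstLoc *) b)->location;

    return (la > lb) - (la < lb);
}

/*
 * Sort the recorded locations, translate them by query_loc, mark duplicates
 * as -1, and measure each constant; a leading '-' is folded into the token.
 * The digit/dot scan is a naive stand-in for the core SQL lexer.
 */
static void
fill_lengths_sketch(ConstLoc *locs, int nlocs, const char *query, int query_loc)
{
    int     last_loc = -1;

    qsort(locs, nlocs, sizeof(ConstLoc), cmp_location);

    for (int i = 0; i < nlocs; i++)
    {
        int     loc = locs[i].location - query_loc; /* translate location */
        int     end;

        if (loc <= last_loc)
        {
            locs[i].length = -1;    /* duplicate; normalization ignores it */
            continue;
        }

        end = loc;
        if (query[end] == '-')
            end++;                  /* negative numeric constant */
        while (isdigit((unsigned char) query[end]) || query[end] == '.')
            end++;

        locs[i].length = end - loc;
        last_loc = loc;
    }
}

int
main(void)
{
    const char *q = "SELECT * FROM foo WHERE bar = -2";
    ConstLoc    locs[] = {{30, -1}, {30, -1}};  /* same Const reported twice */

    fill_lengths_sketch(locs, 2, q, 0);
    printf("%d %d\n", locs[0].length, locs[1].length);  /* prints: 2 -1 */
    return 0;
}
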