
Add support for new node types
author  Kyotaro Horiguchi <horiguchi.kyotaro@lab.ntt.co.jp>
Tue, 6 Jun 2017 08:30:12 +0000 (17:30 +0900)
committer  Kyotaro Horiguchi <horiguchi.kyotaro@lab.ntt.co.jp>
Tue, 6 Jun 2017 08:35:02 +0000 (17:35 +0900)
PostgreSQL 10 has some new plan node types. This commit adds support for them.

Makefile
json2sql.pl
makeplanfile.sql
pgsp_json.c
pgsp_json_int.h
pgsp_json_text.c
pgsp_json_text.h
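
For reference, the nodes this commit registers are the PostgreSQL 10 executor node types (ProjectSet, Table Function Scan, Named Tuplestore Scan, Gather Merge), plus a conditional entry for Sample Scan. A minimal sketch of queries that can produce them on a PostgreSQL 10 server (plan shapes depend on settings and data; big_table is a hypothetical large table, and Named Tuplestore Scan has no standalone form because it only appears for trigger transition tables, which is why the test below goes through auto_explain):

-- ProjectSet: a set-returning function in the SELECT list
explain (verbose on, format json) SELECT generate_series(1, 3);

-- Table Function Scan: XMLTABLE in the FROM clause
explain (verbose on, format json)
   SELECT * FROM XMLTABLE('//towns/town'
    PASSING BY REF '<towns><town><name>Toronto</name></town></towns>'
        COLUMNS name text);

-- Gather Merge: a parallel scan feeding an ordered result
-- (chosen only when parallel workers are allowed and big_table is large enough)
explain (verbose on, format json) SELECT a FROM big_table ORDER BY a;
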

index 5bd8d61..dd9fa61 100644
--- a/Makefile
+++ b/Makefile
@@ -62,7 +62,7 @@ all.out: all.sql
        psql $(DBNAME) -a -q -f all.sql > all.out
 
 all.sql: makeplanfile.sql json2sql.pl
-       psql $(DBNAME) -f makeplanfile.sql | ./json2sql.pl > all.sql
+       psql $(DBNAME) -f makeplanfile.sql |& ./json2sql.pl > all.sql
 
 clean-testfiles:
        rm -f all.out all.sql
index 5eba1f5..e476321 100755
--- a/json2sql.pl
+++ b/json2sql.pl
@@ -28,6 +28,7 @@ print " $escape'$plan')";
 
 $plan_no = 1;
 $state = 0;
+$indent = "";
 while(<>) {
        chomp;
        if ($state == 0) {
@@ -35,15 +36,24 @@ while(<>) {
                $title = "###### Plan $plan_no: $1";
                $state = 1;
        } elsif ($state == 1) {
+               # edit auto_explain's result
+               next if (/^psql:makeplanfile.sql/);
+
                if (/[}\]:,]/) {
                        die("??? : $_");
                }
-               next if (!/^   { *\+$/);
+               next if (!/^( *){ *\+?$/);
+        $indent = $1;
                $plan = $_;
                $plan =~ s/^   (.*[^ ]) *\+$/$1\n/;
+               chomp($plan);
+               $plan .= "\n";
                $state = 2;
        } elsif ($state == 2) {
-               if (/^   } *\+$/) {
+               # edit auto_explain's result
+               next if (/^  "Query Text":/);
+
+               if (/^$indent} *\+?$/) {
                        $state = 3;
                }
                $l = $_;
index 248719e..bf25cfc 100644
--- a/makeplanfile.sql
+++ b/makeplanfile.sql
@@ -6,6 +6,7 @@ drop table if exists tt1;
 drop table if exists tt2;
 drop table if exists tt3;
 drop table if exists p cascade;
+drop table if exists ct1;
 create table p (a int, b int, c text);
 create table tt1 (a int, b int not null, c text) inherits (p);
 create table tt2 (a int, b int, c text) inherits (p);
@@ -182,14 +183,62 @@ explain (analyze on, buffers on, verbose on, format :format)
 explain (analyze on, buffers on, verbose on, format :format)
    SELECT * FROM tt1 TABLESAMPLE system(1) REPEATABLE (1);
 
+\echo ###### Project Set
+explain (analyze on, buffers on, verbose on, format :format)
+   SELECT * from XMLTABLE('//towns/town'
+    PASSING BY REF '<towns><town><name>Toronto</name></town><town><name>Ottawa</name></town></towns>'
+        COLUMNS name text);
+
+-- Named Tuplestore Scan -- requires auto_explain
+DROP TABLE IF EXISTS e1 CASCADE;
+CREATE TABLE e1 (a int, b int);
+CREATE OR REPLACE function e1_t1() RETURNS TRIGGER AS $$
+DECLARE
+  total int;
+BEGIN
+  SELECT sum(a) INTO total FROM post;
+  NEW.b := total;
+  RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+CREATE TRIGGER e1_t1 AFTER INSERT OR UPDATE ON e1
+ REFERENCING NEW TABLE AS post OLD TABLE AS pre
+ FOR EACH ROW EXECUTE PROCEDURE e1_t1();
+INSERT INTO e1 VALUES (1, 1);
+
+load 'auto_explain';
+set auto_explain.log_min_duration to 0;
+set auto_explain.log_analyze to true;
+set auto_explain.log_buffers to true;
+set auto_explain.log_buffers to true;
+set auto_explain.log_format to :format;
+set auto_explain.log_timing  to true;
+set auto_explain.log_nested_statements to true;
+set client_min_messages to LOG;
+set log_min_messages to FATAL; -- Inhibit LOG by auto_explain
+\echo ###### Named Tuplestore Scan
+UPDATE e1 SET a = a + 1;
+set client_min_messages to DEFAULT;
+set log_min_messages to DEFAULT;
+set auto_explain.log_min_duration to -1;
+
 \echo ###### Parallel
+drop table if exists lt1;
 create table lt1 (a int, b text);
 alter table lt1 alter column b set storage plain;
 insert into lt1 (select a, repeat('x', 1000) from generate_series(0, 99999) a);
-set max_parallel_worders_per_gather to 2;
+set max_parallel_workers_per_gather to 2;
 set parallel_tuple_cost to 0;
 set parallel_setup_cost to 0;
+set min_parallel_table_scan_size to 0;
+set min_parallel_index_scan_size to 0;
+
+\echo ###### Gather
 explain (analyze on, buffers on, verbose on, format :format)
    SELECT * FROM lt1;
 
+\echo ###### Gather Merge
+explain (analyze on, buffers on, verbose on, format :format)
+   SELECT a FROM tt1 ORDER BY a;
+
 -- BitmapAnd/Inner/Right/ForegnScan
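
Two of the new plan properties handled in pgsp_json.c below, Partial Mode and Inner Unique, have no dedicated statement in this hunk. A hedged sketch of queries that can surface them on PostgreSQL 10, assuming the lt1 table and parallel settings created above, plus a hypothetical table u1 with a primary key on the join column (whether the planner actually chooses these shapes depends on costs and statistics):

-- Partial Mode: parallel aggregation splits into Partial Aggregate nodes under
-- a Gather, with a Finalize Aggregate on top; JSON plans report this as
-- "Partial Mode": Partial/Finalize (Simple for ordinary aggregation)
explain (verbose on, format json)
   SELECT count(*) FROM lt1;

-- Inner Unique: may be reported on join nodes whose inner side is provably
-- unique, e.g. a join against u1's primary key (u1 is illustrative only)
create table u1 (a int primary key);
explain (verbose on, format json)
   SELECT * FROM tt1 JOIN u1 USING (a);
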
index 9de20b0..b126a18 100644
--- a/pgsp_json.c
+++ b/pgsp_json.c
@@ -97,9 +97,15 @@ word_table propfields[] =
        {P_GroupKey,            "-" ,"Group Key",                       NULL, true,  NULL,                              SETTER(group_key)},
        {P_GroupSets,           "=" ,"Grouping Sets",           NULL, true,  NULL,                              NULL},
        {P_GroupKeys,           "\\" ,"Group Keys",                     NULL, true,  NULL,                              SETTER(group_key)},
+
+       {P_HashKeys,            "~" ,"Hash Keys",                       NULL, true,  NULL,                              SETTER(hash_key)},
+       {P_HashKey,             "|" ,"Hash Key",                        NULL, true,  NULL,                              SETTER(hash_key)},
+
        {P_Parallel,            "`" ,"Parallel Aware",          NULL, true,  NULL,                              SETTER(parallel_aware)},
+       {P_PartialMode,         ">" ,"Partial Mode",            NULL, true,  conv_partialmode,SETTER(partial_mode)},
        {P_WorkersPlanned,      "{" ,"Workers Planned",         NULL, true,  NULL,                              SETTER(workers_planned)},
        {P_WorkersLaunched, "}" ,"Workers Launched",    NULL, true,  NULL,                              SETTER(workers_launched)},
+       {P_InnerUnique,         "?" ,"Inner Unique",            NULL, true,  NULL,                              SETTER(inner_unique)},
                                                                                                                  
        /* Values of these properties are ignored on normalization */
        {P_FunctionCall,        "y" ,"Function Call",           NULL, false, NULL,                              SETTER(func_call)},
@@ -145,11 +151,12 @@ word_table propfields[] =
        {P_ConfArbitIdx,    "@" ,"Conflict Arbiter Indexes",NULL, false,  NULL,                 SETTER(conflict_arbiter_indexes)},
        {P_TuplesInserted,  "^" ,"Tuples Inserted",             NULL, false,  NULL,                             SETTER(tuples_inserted)},
        {P_ConfTuples,          "+" ,"Conflicting Tuples",      NULL, false,  NULL,                             SETTER(conflicting_tuples)},
-       {P_SamplingMethod,  ""  ,"Sampling Method" ,    NULL, false,  NULL,                             SETTER(sampling_method)},
-       {P_SamplingParams,  ""  ,"Sampling Parameters" , NULL, false,  NULL,                    SETTER(sampling_params)},
-       {P_RepeatableSeed,  ""  ,"Repeatable Seed" ,    NULL, false,  NULL,                             SETTER(repeatable_seed)},
+       {P_SamplingMethod,  ":"  ,"Sampling Method" ,   NULL, false,  NULL,                             SETTER(sampling_method)},
+       {P_SamplingParams,  ";"  ,"Sampling Parameters" , NULL, false,  NULL,                   SETTER(sampling_params)},
+       {P_RepeatableSeed,  "<"  ,"Repeatable Seed" ,   NULL, false,  NULL,                             SETTER(repeatable_seed)},
        {P_Workers,             "[" ,"Workers",                         NULL, false,  NULL,                             NULL},
        {P_WorkerNumber,    "]" ,"Worker Number",               NULL, false,  NULL,                             SETTER(worker_number)},
+       {P_TableFuncName,    "aa" ,"Table Function Name",NULL, false,  NULL,                    SETTER(table_func_name)},
 
        {P_Invalid, NULL, NULL, NULL, false, NULL, NULL}
 };
@@ -190,9 +197,19 @@ word_table nodetypes[] =
        {T_SetOp,               "3" ,"SetOp",                   NULL, false, NULL, NULL},
        {T_LockRows,    "4" ,"LockRows",                NULL, false, NULL, NULL},
        {T_Limit,               "5" ,"Limit",                   NULL, false, NULL, NULL},
+#if PG_VERSION_NUM >= 90500
+       {T_SampleScan,  "B" ,"Sample Scan",             NULL, false, NULL, NULL},
 #if PG_VERSION_NUM >= 90600
        {T_Gather,              "6" ,"Gather",                  NULL, false, NULL, NULL},
+#if PG_VERSION_NUM >= 100000
+       {T_ProjectSet,  "7" ,"ProjectSet",              NULL, false, NULL, NULL},
+       {T_TableFuncScan,"8","Table Function Scan",     NULL, false, NULL, NULL},
+       {T_NamedTuplestoreScan,"9","Named Tuplestore Scan",     NULL, false, NULL, NULL},
+       {T_GatherMerge, "A" ,"Gather Merge",    NULL, false, NULL, NULL},
+#endif
 #endif
+#endif
+
        {T_Invalid,             NULL, NULL, NULL, false, NULL, NULL}
 };
 
@@ -220,6 +237,7 @@ word_table strategies[] =
        {S_Plain,       "p" ,"Plain", NULL, false, NULL, NULL},
        {S_Sorted,      "s" ,"Sorted", NULL, false, NULL, NULL},
        {S_Hashed,      "h" ,"Hashed", NULL, false, NULL, NULL},
+       {S_Mixed,       "m" ,"Mixed", NULL, false, NULL, NULL},
        {S_Invalid,     NULL, NULL, NULL, false, NULL, NULL}
 };
 
@@ -268,6 +286,14 @@ word_table sortspacetype[] =
        {T_Invalid, NULL, NULL, NULL, false, NULL, NULL}
 };
 
+word_table partialmode[] =
+{
+       {T_Invalid,  "p" ,"Partial",    NULL, false, NULL, NULL},
+       {T_Invalid,  "f" ,"Finalize",NULL, false, NULL, NULL},
+       {T_Invalid,  "s" ,"Simple",NULL, false, NULL, NULL},
+       {T_Invalid, NULL, NULL, NULL, false, NULL, NULL}
+};
+
 word_table *
 search_word_table(word_table *tbl, const char *word, int mode)
 {
@@ -372,6 +398,7 @@ conv_strategy(const char *src, pgsp_parser_mode mode)
                    tok == TRUE_P || tok == FALSE_P || \
                        tok == CURRENT_DATE || tok == CURRENT_TIME || \
                    tok == LOCALTIME || tok == LOCALTIMESTAMP)
+#define IS_INDENTED_ARRAY(v) ((v) == P_GroupKeys || (v) == P_HashKeys)
 
 /*
  * norm_yylex: core_yylex with replacing some tokens.
@@ -607,6 +634,12 @@ conv_sortspacetype(const char *src, pgsp_parser_mode mode)
        return converter_core(sortspacetype, src, mode);
 }
 
+const char *
+conv_partialmode(const char *src, pgsp_parser_mode mode)
+{
+       return converter_core(partialmode, src, mode);
+}
+
 /**** Parser callbacks ****/
 
 /* JSON */
@@ -657,7 +690,7 @@ json_arrstart(void *state)
 {
        pgspParserContext *ctx = (pgspParserContext *)state;
 
-       if (ctx->current_list == P_GroupKeys)
+       if (IS_INDENTED_ARRAY(ctx->current_list))
                ctx->wlist_level++;
 
        appendStringInfoChar(ctx->dest, '[');
@@ -672,11 +705,11 @@ json_arrend(void *state)
 {
        pgspParserContext *ctx = (pgspParserContext *)state;
 
-       if (ctx->current_list == P_GroupKeys)
+       if (IS_INDENTED_ARRAY(ctx->current_list))
                ctx->wlist_level--;
 
        if (ctx->mode == PGSP_JSON_INFLATE &&
-               (ctx->current_list == P_GroupKeys ?
+               (IS_INDENTED_ARRAY(ctx->current_list) ?
                 ctx->wlist_level == 0 : ctx->last_elem_is_object))
        {
                appendStringInfoChar(ctx->dest, '\n');
@@ -742,7 +775,7 @@ json_ofstart(void *state, char *fname, bool isnull)
        if (ctx->mode == PGSP_JSON_INFLATE)
                appendStringInfoChar(ctx->dest, ' ');
 
-       if (p && p->tag == P_GroupKeys)
+       if (p && IS_INDENTED_ARRAY(p->tag))
        {
                ctx->current_list = p->tag;
                ctx->list_fname = fname;
@@ -769,7 +802,7 @@ json_aestart(void *state, bool isnull)
        if (ctx->remove)
                return;
 
-       if (ctx->current_list == P_GroupKeys &&
+       if (IS_INDENTED_ARRAY(ctx->current_list) &&
                ctx->wlist_level == 1)
        {
                if (!bms_is_member(ctx->level, ctx->first))
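
The S_Mixed strategy and the Hash Keys / Hash Key properties registered above correspond to PostgreSQL 10's hashed grouping sets. A hypothetical example using the tt1 table from makeplanfile.sql (whether some sets end up hashed, and hence whether a MixedAggregate with Hash Keys appears, is up to the planner and work_mem):

-- Grouping sets where some sets are hashed and others sorted can produce a
-- MixedAggregate node; hashed sets are listed under "Hash Keys"/"Hash Key"
-- rather than "Group Keys"/"Group Key" in the JSON plan
explain (verbose on, format json)
   SELECT a, b, count(*) FROM tt1 GROUP BY GROUPING SETS ((a), (b), ());
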
index 51a3699..64dabc5 100644
--- a/pgsp_json_int.h
+++ b/pgsp_json_int.h
@@ -25,7 +25,8 @@ typedef enum
        S_Invalid,
        S_Plain,
        S_Sorted,
-       S_Hashed
+       S_Hashed,
+       S_Mixed
 } pgsp_strategies;
 
 typedef const char *(converter_t)(const char *src, pgsp_parser_mode mode);
@@ -55,6 +56,8 @@ typedef enum
        P_GroupKey,
        P_GroupKeys,
        P_GroupSets,
+       P_HashKeys,
+       P_HashKey,
        P_Filter,
        P_JoinFilter,
        P_HashCond,
@@ -69,6 +72,7 @@ typedef enum
        P_TrgRelation,
        P_ConstraintName,
        P_Parallel,
+       P_PartialMode,
        P_WorkersPlanned,
 
        P_FunctionCall,
@@ -119,7 +123,9 @@ typedef enum
        P_RepeatableSeed,
        P_Workers,
        P_WorkersLaunched,
-       P_WorkerNumber
+       P_WorkerNumber,
+       P_InnerUnique,
+       P_TableFuncName
 } pgsp_prop_tags;
 
 typedef struct
@@ -186,6 +192,7 @@ extern const char *conv_strategy(const char *src, pgsp_parser_mode mode);
 extern const char *conv_setsetopcommand(const char *src, pgsp_parser_mode mode);
 extern const char *conv_sortmethod(const char *src, pgsp_parser_mode mode);
 extern const char *conv_sortspacetype(const char *src, pgsp_parser_mode mode);
+extern const char *conv_partialmode(const char *src, pgsp_parser_mode mode);
 
 extern bool run_pg_parse_json(JsonLexContext *lex, JsonSemAction *sem);
 extern void init_parser_context(pgspParserContext *ctx, int mode,
index 1c640e0..59aadb6 100644
--- a/pgsp_json_text.c
+++ b/pgsp_json_text.c
@@ -96,6 +96,8 @@ SETTERDECL(strategy)
                                        vals->node_type = "HashAggregate"; break;
                                case S_Sorted:
                                        vals->node_type = "GroupAggregate"; break;
+                               case S_Mixed:
+                                       vals->node_type = "MixedAggregate"; break;
                                default:
                                        break;
                        }
@@ -121,7 +123,9 @@ CONVERSION_SETTER(setopcommand, conv_setsetopcommand);
 CONVERSION_SETTER(sort_method, conv_sortmethod);
 LIST_SETTER(sort_key);
 LIST_SETTER(group_key);
+LIST_SETTER(hash_key);
 BOOL_SETTER(parallel_aware);
+CONVERSION_SETTER(partial_mode, conv_partialmode);
 SQLQUOTE_SETTER(index_name);
 DEFAULT_SETTER(startup_cost);
 DEFAULT_SETTER(total_cost);
@@ -181,6 +185,8 @@ DEFAULT_SETTER(repeatable_seed);
 DEFAULT_SETTER(worker_number);
 DEFAULT_SETTER(workers_planned);
 DEFAULT_SETTER(workers_launched);
+BOOL_SETTER(inner_unique);
+DEFAULT_SETTER(table_func_name);
 
 #define ISZERO(s) (!s || strcmp(s, "0") == 0 || strcmp(s, "0.000") == 0 )
 #define HASSTRING(s) (s && strlen(s) > 0)
@@ -281,8 +287,9 @@ print_groupingsets_if_exists(StringInfo s, List *gss, int level, int exind)
                foreach (lcg, gs->group_keys)
                {
                        const char *gk = (const char *)lfirst (lcg);
-                       print_prop_if_exists(s, "Group Key: ", gk, level, exind);
+                       print_prop_if_exists(s, gs->key_type, gk, level, exind);
                }
+
        }
 }
 
@@ -741,8 +748,11 @@ json_text_objstart(void *state)
                        v->sort_key = makeStringInfo();
                if (!v->group_key)
                        v->group_key = makeStringInfo();
+               if (!v->hash_key)
+                       v->hash_key = makeStringInfo();
                resetStringInfo(v->sort_key);
                resetStringInfo(v->group_key);
+               resetStringInfo(v->hash_key);
        }
 }
 
@@ -831,11 +841,26 @@ json_text_arrend(void *state)
                         * go into individual "Group Key" lines. Empty innermost list is
                         * represented as "()" there. See explain.c of PostgreSQL.
                         */
-                       ctx->tmp_gset->group_keys =
-                               lappend(ctx->tmp_gset->group_keys,
-                                               (v->group_key->data[0] ?
-                                                pstrdup(v->group_key->data) : "()"));
+                       ctx->tmp_gset->key_type = "Group Key: ";
+                       if (v->group_key->data[0])
+                       {
+                               ctx->tmp_gset->group_keys =
+                                       lappend(ctx->tmp_gset->group_keys,
+                                                       pstrdup(v->group_key->data));
+                       }
+                       else if (v->hash_key->data[0])
+                       {
+                               ctx->tmp_gset->group_keys =
+                                       lappend(ctx->tmp_gset->group_keys,
+                                                       pstrdup(v->hash_key->data));
+                               ctx->tmp_gset->key_type = "Hash Key: ";
+                       }
+                       else
+                               ctx->tmp_gset->group_keys =
+                                       lappend(ctx->tmp_gset->group_keys, "()");
+
                        resetStringInfo(ctx->nodevals->group_key);
+                       resetStringInfo(ctx->nodevals->hash_key);
                }
                ctx->wlist_level--;
        }
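
The key_type label chosen above ("Group Key: " vs. "Hash Key: ") mirrors what PostgreSQL 10's own text-format EXPLAIN prints for sorted and hashed grouping sets, so the expanded text output keeps that distinction. A quick, illustrative way to see both labels on a PostgreSQL 10 server (which sets get hashed is the planner's choice):

explain
   SELECT a, b, count(*) FROM tt1 GROUP BY GROUPING SETS ((a), (b), ());
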
index 310fd0f..e4995b5 100644
--- a/pgsp_json_text.h
+++ b/pgsp_json_text.h
@@ -14,6 +14,7 @@ typedef struct
 {
        const char *sort_keys;
        List *group_keys;
+       char *key_type;
 } grouping_set;
 
 typedef struct
@@ -38,6 +39,7 @@ typedef struct
        const char *sort_method;
        StringInfo sort_key;
        StringInfo group_key;
+       StringInfo hash_key;
        List       *grouping_sets;
        const char *index_cond;
        const char *merge_cond;
@@ -93,9 +95,12 @@ typedef struct
        StringInfo sampling_params;
        const char *repeatable_seed;
        bool            parallel_aware;
+       const char *partial_mode;
        const char *worker_number;
        const char *workers_planned;
        const char *workers_launched;
+       bool            inner_unique;
+       const char *table_func_name;
 
        const char *tmp_obj_name;
        const char *tmp_schema_name;
@@ -150,7 +155,9 @@ SETTERDECL(sort_method);
 SETTERDECL(sort_key);
 SETTERDECL(group_key);
 SETTERDECL(group_keys);
+SETTERDECL(hash_key);
 SETTERDECL(parallel_aware);
+SETTERDECL(partial_mode);
 SETTERDECL(index_name);
 SETTERDECL(startup_cost);
 SETTERDECL(total_cost);
@@ -211,3 +218,5 @@ SETTERDECL(repeatable_seed);
 SETTERDECL(worker_number);
 SETTERDECL(workers_planned);
 SETTERDECL(workers_launched);
+SETTERDECL(inner_unique);
+SETTERDECL(table_func_name);