*
* pgsp_json.c: Plan handler for JSON/XML/YAML style plans
*
- * Copyright (c) 2012-2015, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
+ * Copyright (c) 2012-2021, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
*
* IDENTIFICATION
- * pg_store_plan/pgsp_json.c
+ * pg_store_plans/pgsp_json.c
*
*-------------------------------------------------------------------------
*/
#include "parser/gram.h"
#include "utils/xml.h"
#include "utils/json.h"
+#if PG_VERSION_NUM < 130000
#include "utils/jsonapi.h"
-
+#else
+#include "common/jsonapi.h"
+#endif
#include "pgsp_json.h"
#include "pgsp_json_int.h"
{P_ConstraintName, "x" ,"Constraint Name", NULL, true, NULL, NULL},
{P_Plans, "l" ,"Plans", NULL, true, NULL, NULL},
{P_Plan, "p" ,"Plan", NULL, true, NULL, NULL},
-
+ {P_GroupKey, "-" ,"Group Key", NULL, true, NULL, SETTER(group_key)},
+ {P_GroupSets, "=" ,"Grouping Sets", NULL, true, NULL, NULL},
+ {P_GroupKeys, "\\" ,"Group Keys", NULL, true, NULL, SETTER(group_key)},
+
+ {P_HashKeys, "~" ,"Hash Keys", NULL, true, NULL, SETTER(hash_key)},
+ {P_HashKey, "|" ,"Hash Key", NULL, true, NULL, SETTER(hash_key)},
+
+ {P_Parallel, "`" ,"Parallel Aware", NULL, true, NULL, SETTER(parallel_aware)},
+ {P_PartialMode, ">" ,"Partial Mode", NULL, true, conv_partialmode,SETTER(partial_mode)},
+ {P_WorkersPlanned, "{" ,"Workers Planned", NULL, true, NULL, SETTER(workers_planned)},
+ {P_WorkersLaunched, "}" ,"Workers Launched", NULL, true, NULL, SETTER(workers_launched)},
+ {P_InnerUnique, "?" ,"Inner Unique", NULL, true, NULL, SETTER(inner_unique)},
+ {P_AsyncCapable, "ac", "Async Capable", NULL, true, NULL, SETTER(async_capable)},
+
/* Values of these properties are ignored on normalization */
{P_FunctionCall, "y" ,"Function Call", NULL, false, NULL, SETTER(func_call)},
{P_StartupCost, "1" ,"Startup Cost", NULL, false, NULL, SETTER(startup_cost)},
{P_ExactHeapBlks, "&" ,"Exact Heap Blocks", NULL, false, NULL, SETTER(exact_heap_blks)},
{P_LossyHeapBlks, "(" ,"Lossy Heap Blocks", NULL, false, NULL, SETTER(lossy_heap_blks)},
{P_RowsJoinFltRemvd,")" ,"Rows Removed by Join Filter", NULL, false, NULL, SETTER(joinfilt_removed)},
+ {P_TargetTables, "_" ,"Target Tables", NULL, false, NULL, NULL},
+ {P_ConfRes, "%" ,"Conflict Resolution", NULL, false, NULL, SETTER(conflict_resolution)},
+ {P_ConfArbitIdx, "@" ,"Conflict Arbiter Indexes",NULL, false, NULL, SETTER(conflict_arbiter_indexes)},
+ {P_TuplesInserted, "^" ,"Tuples Inserted", NULL, false, NULL, SETTER(tuples_inserted)},
+ {P_ConfTuples, "+" ,"Conflicting Tuples", NULL, false, NULL, SETTER(conflicting_tuples)},
+ {P_SamplingMethod, ":" ,"Sampling Method" , NULL, false, NULL, SETTER(sampling_method)},
+ {P_SamplingParams, ";" ,"Sampling Parameters" , NULL, false, NULL, SETTER(sampling_params)},
+ {P_RepeatableSeed, "<" ,"Repeatable Seed" , NULL, false, NULL, SETTER(repeatable_seed)},
+ {P_Workers, "[" ,"Workers", NULL, false, NULL, NULL},
+ {P_WorkerNumber, "]" ,"Worker Number", NULL, false, NULL, SETTER(worker_number)},
+ {P_TableFuncName, "aa" ,"Table Function Name",NULL, false, NULL, SETTER(table_func_name)},
+
+ {P_PresortedKey, "pk" ,"Presorted Key" ,NULL, false, NULL, SETTER(presorted_key)},
+ {P_FullsortGroups, "fg" ,"Full-sort Groups" ,NULL, false, NULL, NULL},
+ {P_SortMethodsUsed, "su" ,"Sort Methods Used" ,NULL, false, NULL, SETTER(sortmethod_used)},
+ {P_SortSpaceMemory, "sm" ,"Sort Space Memory" ,NULL, false, NULL, SETTER(sortspace_mem)},
+ {P_GroupCount, "gc" ,"Group Count" ,NULL, false, NULL, SETTER(group_count)},
+ {P_AvgSortSpcUsed, "as" ,"Average Sort Space Used",NULL, false, NULL, SETTER(avg_sortspc_used)},
+ {P_PeakSortSpcUsed, "ps" ,"Peak Sort Space Used",NULL, false, NULL, SETTER(peak_sortspc_used)},
+ {P_PreSortedGroups, "pg" ,"Pre-sorted Groups" ,NULL, false, NULL, NULL},
+
{P_Invalid, NULL, NULL, NULL, false, NULL, NULL}
};
{T_NestLoop, "t" ,"Nested Loop", NULL, false, NULL, NULL},
{T_MergeJoin, "u" ,"Merge Join", "Merge", false, NULL, NULL},
{T_HashJoin, "v" ,"Hash Join", "Hash", false, NULL, NULL},
- {T_Material, "w" ,"Materialize", NULL, false, NULL, NULL},
+ {T_Material, "w" ,"Materialize", NULL, false, NULL, NULL},
{T_Sort, "x" ,"Sort", NULL, false, NULL, NULL},
{T_Group, "y" ,"Group", NULL, false, NULL, NULL},
{T_Agg, "z" ,"Aggregate", NULL, false, NULL, NULL},
{T_SetOp, "3" ,"SetOp", NULL, false, NULL, NULL},
{T_LockRows, "4" ,"LockRows", NULL, false, NULL, NULL},
{T_Limit, "5" ,"Limit", NULL, false, NULL, NULL},
+#if PG_VERSION_NUM >= 90500
+ {T_SampleScan, "B" ,"Sample Scan", NULL, false, NULL, NULL},
+#endif
+#if PG_VERSION_NUM >= 90600
+ {T_Gather, "6" ,"Gather", NULL, false, NULL, NULL},
+#endif
+#if PG_VERSION_NUM >= 100000
+ {T_ProjectSet, "7" ,"ProjectSet", NULL, false, NULL, NULL},
+ {T_TableFuncScan,"8","Table Function Scan", NULL, false, NULL, NULL},
+ {T_NamedTuplestoreScan,"9","Named Tuplestore Scan", NULL, false, NULL, NULL},
+ {T_GatherMerge, "A" ,"Gather Merge", NULL, false, NULL, NULL},
+#endif
+#if PG_VERSION_NUM >= 130000
+ {T_IncrementalSort, "C" ,"Incremental Sort", NULL, false, NULL, NULL},
+#endif
+#if PG_VERSION_NUM >= 140000
+ {T_TidRangeScan,"D", "Tid Range Scan", NULL, false, NULL, NULL},
+ {T_Memoize, "E", "Memoize", NULL, false, NULL, NULL},
+#endif
+
{T_Invalid, NULL, NULL, NULL, false, NULL, NULL}
};
{S_Plain, "p" ,"Plain", NULL, false, NULL, NULL},
{S_Sorted, "s" ,"Sorted", NULL, false, NULL, NULL},
{S_Hashed, "h" ,"Hashed", NULL, false, NULL, NULL},
+ {S_Mixed, "m" ,"Mixed", NULL, false, NULL, NULL},
{S_Invalid, NULL, NULL, NULL, false, NULL, NULL}
};
{T_Invalid, NULL, NULL, NULL, false, NULL, NULL}
};
+/*
+ * Conversion table for the "Partial Mode" property of Aggregate nodes
+ * (added in PG 9.6 parallel aggregation).  Entries map a one-letter short
+ * form to the long label; the tag field is unused here, hence T_Invalid.
+ */
+word_table partialmode[] =
+{
+	{T_Invalid, "p" ,"Partial", NULL, false, NULL, NULL},
+	{T_Invalid, "f" ,"Finalize",NULL, false, NULL, NULL},
+	{T_Invalid, "s" ,"Simple",NULL, false, NULL, NULL},
+	{T_Invalid, NULL, NULL, NULL, false, NULL, NULL}
+};
+
+
word_table *
search_word_table(word_table *tbl, const char *word, int mode)
{
word_table *p;
-
+
bool longname =
(mode == PGSP_JSON_SHORTEN || mode == PGSP_JSON_NORMALIZE);
tok == TRUE_P || tok == FALSE_P || \
tok == CURRENT_DATE || tok == CURRENT_TIME || \
tok == LOCALTIME || tok == LOCALTIMESTAMP)
+#define IS_INDENTED_ARRAY(v) ((v) == P_GroupKeys || (v) == P_HashKeys)
/*
* norm_yylex: core_yylex with replacing some tokens.
return -1;
}
PG_END_TRY();
-
+
/*
* '?' alone is assumed to be an IDENT. If there's a real
* operator '?', this should be confused but there's hardly be.
* uniqueness, preserve_space puts one space for one existent whitespace for
* more readability.
*/
+/* scanner interface is changed in PG12 */
+#if PG_VERSION_NUM < 120000
+#define ScanKeywords (*ScanKeywords)
+#define ScanKeywordTokens NumScanKeywords
+#endif
void
normalize_expr(char *expr, bool preserve_space)
{
wp = expr;
yyscanner = scanner_init(expr,
&yyextra,
- ScanKeywords,
- NumScanKeywords);
+ &ScanKeywords,
+ ScanKeywordTokens);
+ /*
+ * The warning about nonstandard escape strings is already emitted in the
+ * core. Just silence them here.
+ */
+#if PG_VERSION_NUM >= 90500
+ yyextra.escape_string_warning = false;
+#endif
lasttok = 0;
lastloc = -1;
if (lastloc >= 0)
{
int i, i2;
-
+
/* Skipping preceding whitespaces */
for(i = lastloc ; i < start && IS_WSCHAR(expr[i]) ; i++);
memcpy(wp, expr + i, i2 - i);
wp += i2 - i;
}
+#if PG_VERSION_NUM >= 100000
+ /*
+ * Since PG10 pg_stat_statements doesn't store trailing semicolon
+ * in the column "query". Normalization is basically useless in the
+ * version but still useful to match utility commands so follow
+ * the behavior change.
+ */
+ else if (lasttok == ';')
+ {
+ /* Just do nothing */
+ }
+#endif
else
{
/* Upcase keywords */
*/
if (tok > 0 &&
i2 < start &&
- (preserve_space ||
+ (preserve_space ||
(tok >= IDENT && lasttok >= IDENT &&
!IS_CONST(tok) && !IS_CONST(lasttok))))
*wp++ = ' ';
*/
if (tok == '-')
tok = norm_yylex(expr, &yylval, &yylloc, yyscanner);
-
+
/* Exit on parse error. */
if (tok < 0)
{
if (IS_CONST(tok))
{
YYLTYPE end;
-
+
tok = norm_yylex(expr, &yylval, &end, yyscanner);
/* Exit on parse error. */
end++;
}
- while (expr[end - 1] == ' ') end--;
+ while (expr[end - 1] == ' ')
+ end--;
*wp++ = '?';
yylloc = end;
return converter_core(sortspacetype, src, mode);
}
+/*
+ * conv_partialmode: convert a "Partial Mode" property value between its long
+ * and short representations, according to the parser mode, by delegating to
+ * converter_core() with the partialmode word table.
+ */
+const char *
+conv_partialmode(const char *src, pgsp_parser_mode mode)
+{
+	return converter_core(partialmode, src, mode);
+}
+
/**** Parser callbacks ****/
/* JSON */
{
pgspParserContext *ctx = (pgspParserContext *)state;
+ if (IS_INDENTED_ARRAY(ctx->current_list))
+ ctx->wlist_level++;
+
appendStringInfoChar(ctx->dest, '[');
ctx->fname = NULL;
ctx->level++;
json_arrend(void *state)
{
pgspParserContext *ctx = (pgspParserContext *)state;
+
+ if (IS_INDENTED_ARRAY(ctx->current_list))
+ ctx->wlist_level--;
+
if (ctx->mode == PGSP_JSON_INFLATE &&
- ctx->last_elem_is_object)
+ (IS_INDENTED_ARRAY(ctx->current_list) ?
+ ctx->wlist_level == 0 : ctx->last_elem_is_object))
{
appendStringInfoChar(ctx->dest, '\n');
appendStringInfoSpaces(ctx->dest, (ctx->level - 1) * INDENT_STEP);
ereport(DEBUG1,
(errmsg("JSON parser encoutered unknown field name: \"%s\".", fname),
errdetail_log("INPUT: \"%s\"", ctx->org_string)));
- }
+ }
ctx->remove = (ctx->mode == PGSP_JSON_NORMALIZE &&
(!p || !p->normalize_use));
if (ctx->mode == PGSP_JSON_INFLATE)
appendStringInfoSpaces(ctx->dest, ctx->level * INDENT_STEP);
+ /*
+ * We intentionally let some property names not have a short name. Use long
+ * name for the cases.
+ */
if (!p || !p->longname)
fn = fname;
- else if (ctx->mode == PGSP_JSON_INFLATE)
+ else if (ctx->mode == PGSP_JSON_INFLATE ||
+ !(p->shortname && p->shortname[0]))
fn = p->longname;
else
fn = p->shortname;
if (ctx->mode == PGSP_JSON_INFLATE)
appendStringInfoChar(ctx->dest, ' ');
+
+ if (p && IS_INDENTED_ARRAY(p->tag))
+ {
+ ctx->current_list = p->tag;
+ ctx->list_fname = fname;
+ ctx->wlist_level = 0;
+ }
+}
+
+/*
+ * json_ofend: JSON parser callback fired at the end of an object field.
+ * If we are leaving the field that opened the currently tracked indented
+ * array (list_fname was recorded by json_ofstart), reset the tracking state
+ * so that later fields are no longer treated as part of that list.
+ * isnull is unused here.
+ */
+static void
+json_ofend(void *state, char *fname, bool isnull)
+{
+	pgspParserContext *ctx = (pgspParserContext *)state;
+
+	if (ctx->list_fname && strcmp(fname, ctx->list_fname) == 0)
+	{
+		ctx->list_fname = NULL;
+		ctx->current_list = P_Invalid;
+	}
+}
static void
if (ctx->remove)
return;
- if (!bms_is_member(ctx->level, ctx->first))
+ if (IS_INDENTED_ARRAY(ctx->current_list) &&
+ ctx->wlist_level == 1)
{
- appendStringInfoChar(ctx->dest, ',');
- if (ctx->mode == PGSP_JSON_INFLATE &&
- !ctx->last_elem_is_object)
- appendStringInfoChar(ctx->dest, ' ');
+ if (!bms_is_member(ctx->level, ctx->first))
+ appendStringInfoChar(ctx->dest, ',');
+
+ if (ctx->mode == PGSP_JSON_INFLATE)
+ {
+ appendStringInfoChar(ctx->dest, '\n');
+ appendStringInfoSpaces(ctx->dest, (ctx->level) * INDENT_STEP);
+ }
}
else
- ctx->first = bms_del_member(ctx->first, ctx->level);
+ {
+ if (!bms_is_member(ctx->level, ctx->first))
+ {
+ appendStringInfoChar(ctx->dest, ',');
+
+ if (ctx->mode == PGSP_JSON_INFLATE &&
+ !ctx->last_elem_is_object)
+ appendStringInfoChar(ctx->dest, ' ');
+ }
+ }
+
+ ctx->first = bms_del_member(ctx->first, ctx->level);
}
static void
ereport(DEBUG1,
(errmsg("Short JSON parser encoutered unknown field name: \"%s\".", fname),
errdetail_log("INPUT: \"%s\"", ctx->org_string)));
- }
+ }
s = (p ? p->longname : fname);
if (!bms_is_member(ctx->level, ctx->first))
ereport(DEBUG1,
(errmsg("Short JSON parser encoutered unknown field name: \"%s\".", fname),
errdetail_log("INPUT: \"%s\"", ctx->org_string)));
- }
+ }
s = (p ? p->longname : fname);
/*
* There's no problem if P_Plan appears recursively.
*/
if (p && (p->tag == P_Plan || p->tag == P_Triggers))
- ctx->processing = p->tag;
+ ctx->section = p->tag;
appendStringInfoChar(ctx->dest, '\n');
appendStringInfoSpaces(ctx->dest, (ctx->level + 1) * INDENT_STEP);
p = search_word_table(propfields, fname, ctx->mode);
s = (p ? p->longname : fname);
-
+
appendStringInfoString(ctx->dest, "</");
appendStringInfoString(ctx->dest, escape_xml(hyphenate_words(ctx, s)));
appendStringInfoChar(ctx->dest, '>');
ctx->level++;
if (bms_is_member(ctx->level, ctx->not_item))
{
- if (ctx->processing == P_Plan)
+ if (ctx->section == P_Plan)
tag = "<Plan>";
else
tag = "<Trigger>";
if (bms_is_member(ctx->level, ctx->not_item))
{
- if (ctx->processing == P_Plan)
+ if (ctx->section == P_Plan)
tag = "</Plan>";
else
tag = "</Trigger>";
bool
run_pg_parse_json(JsonLexContext *lex, JsonSemAction *sem)
{
+#if PG_VERSION_NUM >= 130000
+ return pg_parse_json(lex, sem) == JSON_SUCCESS;
+#else
MemoryContext ccxt = CurrentMemoryContext;
uint32 saved_IntrHoldoffCount;
ecxt = MemoryContextSwitchTo(ccxt);
errdata = CopyErrorData();
-
+
if (errdata->sqlerrcode == ERRCODE_INVALID_TEXT_REPRESENTATION)
{
FlushErrorState();
PG_END_TRY();
return true;
+#endif
}
void
sem->array_start = json_arrstart;
sem->array_end = json_arrend;
sem->object_field_start = json_ofstart;
- sem->object_field_end = NULL;
+ sem->object_field_end = json_ofend;
sem->array_element_start= json_aestart;
sem->array_element_end = NULL;
sem->scalar = json_scalar;
if (ctx.dest->len > 0 &&
ctx.dest->data[ctx.dest->len - 1] != '\n')
appendStringInfoChar(ctx.dest, '\n');
-
+
if (ctx.dest->len == 0)
appendStringInfoString(ctx.dest, "<Input was not JSON>");
else
if (ctx.dest->len > 0 &&
ctx.dest->data[ctx.dest->len - 1] != '\n')
appendStringInfoChar(ctx.dest, '\n');
-
+
if (ctx.dest->len == 0)
appendStringInfoString(ctx.dest, "<Input was not JSON>");
else
if (ctx.dest->len > start_len &&
ctx.dest->data[ctx.dest->len - 1] != '\n')
appendStringInfoChar(ctx.dest, '\n');
-
+
if (ctx.dest->len == start_len)
{
resetStringInfo(ctx.dest);