/*-------------------------------------------------------------------------
*
- * pg_store_plans
+ * pg_store_plans.c
+ * Take statistics of plan selection across a whole database cluster.
+ *
+ * Execution costs are totaled for each distinct plan for each query,
+ * and the plan and queryid are kept in a shared hashtable; each
+ * record is associated with a record in pg_stat_statements, if any,
+ * by the queryid.
+ *
+ * Postgres 9.3 and earlier do not expose the query id, so
+ * pg_store_plans needs to calculate it based on the given query
+ * string using a different algorithm from pg_stat_statements; the
+ * id is later matched against one made from the query string
+ * stored in pg_stat_statements. For this reason, queryid matching
+ * done this way will fail if the query string kept in
+ * pg_stat_statements is truncated in the middle.
+ *
+ * Plans are identified by fingerprinting plan representations in
+ * "shortened" JSON format, with constants and unstable values such as
+ * rows, width, and loops ignored. Nevertheless, stored plan entries
+ * hold those values from the latest execution. Entry eviction is done
+ * in the same way as in pg_stat_statements.
*
* Copyright (c) 2008-2013, PostgreSQL Global Development Group
+ * Copyright (c) 2012-2015, NIPPON TELEGRAPH AND TELEPHONE CORPORATION
*
* IDENTIFICATION
- * contrib/pg_store_plan/pg_store_plan.c
+ * pg_store_plan/pg_store_plan.c
*
*-------------------------------------------------------------------------
*/
queryDesc->totaltime->total >=
(double)min_duration / 1000.0)
{
- ExplainState es;
-
- ExplainInitState(&es);
- es.analyze = queryDesc->instrument_options;
- es.costs = true;
- es.verbose = log_verbose;
- es.buffers = (es.analyze && log_buffers);
- es.timing = (es.analyze && log_timing);
- es.format = EXPLAIN_FORMAT_JSON;
+ ExplainState *es = NewExplainState();
+ StringInfo es_str = es->str;
+
+ es->analyze = queryDesc->instrument_options;
+ es->verbose = log_verbose;
+ es->buffers = (es->analyze && log_buffers);
+ es->timing = (es->analyze && log_timing);
+ es->format = EXPLAIN_FORMAT_JSON;
- ExplainBeginOutput(&es);
- ExplainPrintPlan(&es, queryDesc);
+ ExplainBeginOutput(es);
+ ExplainPrintPlan(es, queryDesc);
if (log_triggers)
- pgspExplainTriggers(&es, queryDesc);
- ExplainEndOutput(&es);
+ pgspExplainTriggers(es, queryDesc);
+ ExplainEndOutput(es);
/* Remove last line break */
- if (es.str->len > 0 && es.str->data[es.str->len - 1] == '\n')
- es.str->data[--es.str->len] = '\0';
+ if (es_str->len > 0 && es_str->data[es_str->len - 1] == '\n')
+ es_str->data[--es_str->len] = '\0';
/* JSON outmost braces. */
- es.str->data[0] = '{';
- es.str->data[es.str->len - 1] = '}';
+ es_str->data[0] = '{';
+ es_str->data[es_str->len - 1] = '}';
/*
* Make sure stats accumulation is done. (Note: it's okay if several
* levels of hook all do this.)
*/
- store_entry(es.str->data,
+ store_entry(es_str->data,
hash_query(queryDesc->sourceText),
queryDesc->plannedstmt->queryId,
queryDesc->totaltime->total * 1000.0, /* convert to msec */
queryDesc->estate->es_processed,
&queryDesc->totaltime->bufusage);
- pfree(es.str->data);
+ pfree(es_str->data);
}
}
/*
- * Store some statistics for a statement.
- *
- * If jstate is not NULL then we're trying to create an entry for which
- * we have no statistics as yet; we just want to record the normalized
- * query string. total_time, rows, bufusage are ignored in this case.
+ * Store some statistics for a plan.
*/
static void
store_entry(char *plan, uint32 queryId, uint32 queryId2,
normalized_plan = pgsp_json_normalize(plan);
shorten_plan = pgsp_json_shorten(plan);
- //elog(LOG, "Normalized: %s", normalized_plan);
- //elog(LOG, "Shorten: %s", shorten_plan);
- //elog(LOG, "Original: %s", plan);
+ elog(DEBUG3, "pg_store_plans: Normalized plan: %s", normalized_plan);
+ elog(DEBUG3, "pg_store_plans: Shorten plan: %s", shorten_plan);
+ elog(DEBUG3, "pg_store_plans: Original plan: %s", plan);
plan_len = strlen(shorten_plan);
key.planid = hash_any((const unsigned char *)normalized_plan,
shared_state->plan_size - 1);
- /* Lookup the hash table entry with shared lock. */
+ /* Look up the hash table entry with shared lock. */
LWLockAcquire(shared_state->lock, LW_SHARED);
entry = (StatEntry *) hash_search(hash_table, &key, HASH_FIND, NULL);
}
/* Increment the counts, except when jstate is not NULL */
+
/*
* Grab the spinlock while updating the counters (see comment about
* locking rules at the head of the file)
values[i++] = ObjectIdGetDatum(entry->key.userid);
values[i++] = ObjectIdGetDatum(entry->key.dbid);
- values[i++] = Int64GetDatumFast(queryid);
- values[i++] = Int64GetDatumFast(planid);
+ if (is_superuser || entry->key.userid == userid)
+ {
+ values[i++] = Int64GetDatumFast(queryid);
+ values[i++] = Int64GetDatumFast(planid);
+ values[i++] = Int64GetDatumFast(queryid_stmt);
+ }
+ else
+ {
+ values[i++] = Int64GetDatumFast(0);
+ values[i++] = Int64GetDatumFast(0);
+ values[i++] = Int64GetDatumFast(0);
+ }
- values[i++] = Int64GetDatumFast(queryid_stmt);
if (is_superuser || entry->key.userid == userid)
{
switch (plan_format)
{
- case PLAN_FORMAT_TEXT:
- pstr = pgsp_json_textize(entry->plan);
- break;
- case PLAN_FORMAT_JSON:
- pstr = pgsp_json_inflate(entry->plan);
- break;
- case PLAN_FORMAT_YAML:
- pstr = pgsp_json_yamlize(entry->plan);
- break;
- case PLAN_FORMAT_XML:
- pstr = pgsp_json_xmlize(entry->plan);
- break;
- default:
- break;
+ case PLAN_FORMAT_TEXT:
+ pstr = pgsp_json_textize(entry->plan);
+ break;
+ case PLAN_FORMAT_JSON:
+ pstr = pgsp_json_inflate(entry->plan);
+ break;
+ case PLAN_FORMAT_YAML:
+ pstr = pgsp_json_yamlize(entry->plan);
+ break;
+ case PLAN_FORMAT_XML:
+ pstr = pgsp_json_xmlize(entry->plan);
+ break;
+ default:
+ break;
}
estr = (char *)
* Allocate a new hashtable entry.
* caller must hold an exclusive lock on shared_state->lock
*
- * "query" need not be null-terminated; we rely on plan_len instead
+ * "plan" need not be null-terminated; we rely on plan_len instead
*
* If "sticky" is true, make the new entry artificially sticky so that it will
* probably still be there when the query finishes execution. We do this by