From 5479312e35a0a6fa9b11318e97a626b15d98794c Mon Sep 17 00:00:00 2001
From: Antonin Houska <ah@cybertec.at>
Date: Fri, 12 Jul 2019 10:04:31 +0200
Subject: [PATCH 1/3] Introduce RelInfoList structure.

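The planner has so far kept the list of join relations and the optional
hash table for faster lookup by relids in two separate PlannerInfo
fields, join_rel_list and join_rel_hash.  Wrap the pair in a new node
type, RelInfoList, so that the "list plus lazily-built hash table"
lookup machinery can be reused for other kinds of relation-specific
planner data by follow-up patches in this series:

    typedef struct RelInfoList
    {
        NodeTag     type;

        List       *items;
        struct HTAB *hash;
    } RelInfoList;

The lookup and insertion logic of find_join_rel() and add_join_rel()
moves into generic find_rel_info() and add_rel_info() routines in
relnode.c; the join-specific functions become thin wrappers around
them.  GEQO keeps working as before: geqo_eval() now saves and
restores the items list and hash of join_rel_list instead of the two
PlannerInfo fields.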
---
 contrib/postgres_fdw/postgres_fdw.c           |   3 +-
 doc/src/sgml/catalogs.sgml                    |   5 +
 doc/src/sgml/ddl.sgml                         |   9 --
 doc/src/sgml/func.sgml                        |  81 ++++++-------
 doc/src/sgml/json.sgml                        |  44 ++++----
 src/backend/access/gist/gistbuildbuffers.c    |   5 +-
 src/backend/commands/copy.c                   |  19 ++--
 src/backend/commands/extension.c              |  16 ---
 src/backend/commands/tablecmds.c              |  23 +---
 src/backend/commands/trigger.c                |   1 +
 src/backend/executor/execMain.c               |   1 -
 src/backend/nodes/outfuncs.c                  |  11 ++
 src/backend/optimizer/geqo/geqo_eval.c        |  12 +-
 src/backend/optimizer/plan/planmain.c         |   3 +-
 src/backend/optimizer/util/relnode.c          | 157 ++++++++++++++++----------
 src/backend/partitioning/partprune.c          |  50 +++-----
 src/backend/tcop/postgres.c                   |  30 +----
 src/bin/initdb/initdb.c                       |   2 +-
 src/bin/pg_basebackup/pg_recvlogical.c        |   5 +-
 src/bin/pg_checksums/pg_checksums.c           |   4 +-
 src/bin/pg_dump/pg_backup_db.c                |   4 +-
 src/bin/pg_dump/pg_dumpall.c                  |   6 +-
 src/bin/pg_upgrade/option.c                   |   2 +-
 src/include/access/tableam.h                  |  12 +-
 src/include/nodes/nodes.h                     |   1 +
 src/include/nodes/pathnodes.h                 |  28 +++--
 src/test/regress/expected/partition_prune.out |  15 +--
 src/test/regress/expected/triggers.out        |  24 ----
 src/test/regress/sql/partition_prune.sql      |  11 +-
 src/test/regress/sql/triggers.sql             |  23 ----
 30 files changed, 248 insertions(+), 359 deletions(-)

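Notes (for reviewers, not part of the commit message):

Below is a minimal standalone C sketch of the lookup pattern that
RelInfoList generalizes: items live in an append-only list that is
scanned linearly while short, and a hash table is built lazily once the
list grows past a threshold (32 in the real code), after which both
structures are kept in sync.  All names here (InfoList, Item,
find_item, and so on) are illustrative only; the real code uses List,
HTAB, bitmap_hash, and Relids keys.

    #include <assert.h>
    #include <stdlib.h>

    #define LIST_THRESHOLD 32   /* same cutoff as find_rel_info() */
    #define NBUCKETS 64

    typedef struct Item
    {
        int relids;             /* stand-in for the Relids hash key */
        struct Item *bucket_next;   /* chaining within one hash bucket */
    } Item;

    typedef struct InfoList
    {
        Item **items;           /* append-only; plays the role of "items" */
        int nitems;
        int capacity;
        Item **hash;            /* NULL until the list grows "too long" */
    } InfoList;

    static void
    add_to_hash(InfoList *list, Item *item)
    {
        int b = item->relids % NBUCKETS;

        item->bucket_next = list->hash[b];
        list->hash[b] = item;
    }

    static Item *
    find_item(InfoList *list, int relids)
    {
        /* Switch to hash lookup once the list grows past the threshold. */
        if (list->hash == NULL && list->nitems > LIST_THRESHOLD)
        {
            list->hash = calloc(NBUCKETS, sizeof(Item *));
            for (int i = 0; i < list->nitems; i++)
                add_to_hash(list, list->items[i]);
        }

        if (list->hash != NULL)
        {
            for (Item *it = list->hash[relids % NBUCKETS]; it; it = it->bucket_next)
                if (it->relids == relids)
                    return it;
        }
        else
        {
            for (int i = 0; i < list->nitems; i++)
                if (list->items[i]->relids == relids)
                    return list->items[i];
        }
        return NULL;
    }

    static void
    add_item(InfoList *list, Item *item)
    {
        if (list->nitems == list->capacity)
        {
            list->capacity = list->capacity ? 2 * list->capacity : 8;
            list->items = realloc(list->items, list->capacity * sizeof(Item *));
        }
        /* Keep appending to the list even once the hash exists. */
        list->items[list->nitems++] = item;
        if (list->hash != NULL)
            add_to_hash(list, item);
    }

    int
    main(void)
    {
        InfoList list = {0};

        for (int i = 0; i < 100; i++)
        {
            Item *it = calloc(1, sizeof(Item));

            it->relids = i;
            add_item(&list, it);
        }
        assert(find_item(&list, 42) != NULL);
        assert(find_item(&list, 1000) == NULL);
        return 0;
    }
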
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index 033aeb2556..90414f1168 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -5205,7 +5205,8 @@ foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, JoinType jointype,
 	 */
 	Assert(fpinfo->relation_index == 0);	/* shouldn't be set yet */
 	fpinfo->relation_index =
-		list_length(root->parse->rtable) + list_length(root->join_rel_list);
+		list_length(root->parse->rtable) +
+		list_length(root->join_rel_list->items);
 
 	return true;
 }
diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml
index 68ad5071ca..3428a7c0fa 100644
--- a/doc/src/sgml/catalogs.sgml
+++ b/doc/src/sgml/catalogs.sgml
@@ -9995,6 +9995,11 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
    that blanks out the password field.
   </para>
 
+  <para>
+   This view explicitly exposes the OID column of the underlying table,
+   since that is needed to do joins to other catalogs.
+  </para>
+
   <table>
    <title><structname>pg_roles</structname> Columns</title>
 
diff --git a/doc/src/sgml/ddl.sgml b/doc/src/sgml/ddl.sgml
index 9301f0227d..ed2d9c60d5 100644
--- a/doc/src/sgml/ddl.sgml
+++ b/doc/src/sgml/ddl.sgml
@@ -4003,15 +4003,6 @@ ALTER INDEX measurement_city_id_logdate_key
       </para>
      </listitem>
 
-     <listitem>
-      <para>
-       Unique constraints on partitioned tables must include all the
-       partition key columns.  This limitation exists because
-       <productname>PostgreSQL</productname> can only enforce
-       uniqueness in each partition individually.
-      </para>
-     </listitem>
-
      <listitem>
       <para>
        <literal>BEFORE ROW</literal> triggers, if necessary, must be defined
diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml
index a25c122ac8..185a184daa 100644
--- a/doc/src/sgml/func.sgml
+++ b/doc/src/sgml/func.sgml
@@ -11514,8 +11514,7 @@ table2-mapping
    from the JSON data, similar to XPath expressions used
    for SQL access to XML. In <productname>PostgreSQL</productname>,
    path expressions are implemented as the <type>jsonpath</type>
-   data type and can use any elements described in
-   <xref linkend="datatype-jsonpath"/>.
+   data type, described in <xref linkend="datatype-jsonpath"/>.
   </para>
 
   <para>JSON query functions and operators
@@ -11562,7 +11561,7 @@ table2-mapping
       },
       { "location":   [ 47.706, 13.2635 ],
         "start time": "2018-10-14 10:39:21",
-        "HR": 135
+        "HR": 130
       } ]
   }
 }
@@ -11614,33 +11613,23 @@ table2-mapping
 
   <para>
    When defining the path, you can also use one or more
-   <firstterm>filter expressions</firstterm> that work similar to the
-   <literal>WHERE</literal> clause in SQL. A filter expression begins with
-   a question mark and provides a condition in parentheses:
-
-    <programlisting>
-? (<replaceable>condition</replaceable>)
-    </programlisting>
-  </para>
-
-  <para>
-   Filter expressions must be specified right after the path evaluation step
-   to which they are applied. The result of this step is filtered to include
-   only those items that satisfy the provided condition. SQL/JSON defines
-   three-valued logic, so the condition can be <literal>true</literal>, <literal>false</literal>,
+   <firstterm>filter expressions</firstterm>, which work similar to
+   the <literal>WHERE</literal> clause in SQL. Each filter expression
+   can provide one or more filtering conditions that are applied
+   to the result of the path evaluation. Each filter expression must
+   be enclosed in parentheses and preceded by a question mark.
+   Filter expressions are evaluated from left to right and can be nested.
+   The <literal>@</literal> variable denotes the current path evaluation
+   result to be filtered, and can be followed by one or more accessor
+   operators to define the JSON element by which to filter the result.
+   Functions and operators that can be used in the filtering condition
+   are listed in <xref linkend="functions-sqljson-filter-ex-table"/>.
+   SQL/JSON defines three-valued logic, so the result of the filter
+   expression may be <literal>true</literal>, <literal>false</literal>,
    or <literal>unknown</literal>. The <literal>unknown</literal> value
-   plays the same role as SQL <literal>NULL</literal> and can be tested
-   for with the <literal>is unknown</literal> predicate. Further path
+   plays the same role as SQL <literal>NULL</literal>. Further path
    evaluation steps use only those items for which filter expressions
-   return <literal>true</literal>.
-  </para>
-
-  <para>
-   Functions and operators that can be used in filter expressions are listed
-   in <xref linkend="functions-sqljson-filter-ex-table"/>. The path
-   evaluation result to be filtered is denoted by the <literal>@</literal>
-   variable. To refer to a JSON element stored at a lower nesting level,
-   add one or more accessor operators after <literal>@</literal>.
+   return <literal>true</literal>.
   </para>
 
   <para>
@@ -11654,8 +11643,8 @@ table2-mapping
   <para>
    To get the start time of segments with such values instead, you have to
    filter out irrelevant segments before returning the start time, so the
-   filter expression is applied to the previous step, and the path used
-   in the condition is different:
+   filter is applied to the previous step and the path in the filtering
+   condition is different:
 <programlisting>
 '$.track.segments[*] ? (@.HR &gt; 130)."start time"'
 </programlisting>
@@ -11680,9 +11669,9 @@ table2-mapping
   </para>
 
   <para>
-   You can also nest filters within each other:
+   You can also nest filter expressions within each other:
 <programlisting>
-'$.track ? (@.segments[*] ? (@.HR &gt; 130)).segments.size()'
+'$.track ? (exists(@.segments[*] ? (@.HR &gt; 130))).segments.size()'
 </programlisting>
    This expression returns the size of the track if it contains any
    segments with high heart rate values, or an empty sequence otherwise.
@@ -11965,14 +11954,14 @@ table2-mapping
         <entry>Less-than operator</entry>
         <entry><literal>[1, 2, 3]</literal></entry>
         <entry><literal>$[*] ? (@ &lt; 2)</literal></entry>
-        <entry><literal>1, 2</literal></entry>
+        <entry><literal>1</literal></entry>
        </row>
        <row>
         <entry><literal>&lt;=</literal></entry>
         <entry>Less-than-or-equal-to operator</entry>
         <entry><literal>[1, 2, 3]</literal></entry>
-        <entry><literal>$[*] ? (@ &lt; 2)</literal></entry>
-        <entry><literal>1</literal></entry>
+        <entry><literal>$[*] ? (@ &lt;= 2)</literal></entry>
+        <entry><literal>1, 2</literal></entry>
        </row>
        <row>
         <entry><literal>&gt;</literal></entry>
@@ -11982,7 +11971,7 @@ table2-mapping
         <entry><literal>3</literal></entry>
        </row>
        <row>
-        <entry><literal>&gt;</literal></entry>
+        <entry><literal>&gt;=</literal></entry>
         <entry>Greater-than-or-equal-to operator</entry>
         <entry><literal>[1, 2, 3]</literal></entry>
         <entry><literal>$[*] ? (@ &gt;= 2)</literal></entry>
@@ -12272,7 +12261,7 @@ table2-mapping
        <row>
         <entry><literal>@?</literal></entry>
         <entry><type>jsonpath</type></entry>
-        <entry>Does JSON path returns any item for the specified JSON value?</entry>
+        <entry>Does JSON path return any item for the specified JSON value?</entry>
         <entry><literal>'{"a":[1,2,3,4,5]}'::jsonb @? '$.a[*] ? (@ > 2)'</literal></entry>
        </row>
        <row>
@@ -12300,8 +12289,8 @@ table2-mapping
   <note>
    <para>
     The <literal>@?</literal> and <literal>@@</literal> operators suppress
-    errors including: lacking object field or array element, unexpected JSON
-    item type and numeric errors.
+    the following errors: lacking object field or array element, unexpected
+    JSON item type, and numeric errors.
     This behavior might be helpful while searching over JSON document
     collections of varying structure.
    </para>
@@ -13157,17 +13146,17 @@ table2-mapping
     <literal>jsonb_path_query</literal>, <literal>jsonb_path_query_array</literal> and
     <literal>jsonb_path_query_first</literal>
     functions have optional <literal>vars</literal> and <literal>silent</literal>
-    argument.
+    arguments.
    </para>
    <para>
-    When <literal>vars</literal> argument is specified, it constitutes an object
-    contained variables to be substituted into <literal>jsonpath</literal>
-    expression.
+    If the <literal>vars</literal> argument is specified, it provides an
+    object containing named variables to be substituted into a
+    <literal>jsonpath</literal> expression.
    </para>
    <para>
-    When <literal>silent</literal> argument is specified and has
-    <literal>true</literal> value, the same errors are suppressed as it is in
-    the <literal>@?</literal> and <literal>@@</literal> operators.
+    If the <literal>silent</literal> argument is specified and has the
+    <literal>true</literal> value, these functions suppress the same errors
+    as the <literal>@?</literal> and <literal>@@</literal> operators.
    </para>
   </note>
 
diff --git a/doc/src/sgml/json.sgml b/doc/src/sgml/json.sgml
index 0d8e2c6de4..2aa98024ae 100644
--- a/doc/src/sgml/json.sgml
+++ b/doc/src/sgml/json.sgml
@@ -815,18 +815,21 @@ SELECT jdoc-&gt;'guid', jdoc-&gt;'name' FROM api WHERE jdoc @&gt; '{"tags": ["qu
         <literal>.**{<replaceable>level</replaceable>}</literal>
        </para>
        <para>
-        <literal>.**{<replaceable>start_level</replaceable> to
-        <replaceable>end_level</replaceable>}</literal>
+        <literal>.**{<replaceable>lower_level</replaceable> to
+        <replaceable>upper_level</replaceable>}</literal>
+       </para>
+       <para>
+        <literal>.**{<replaceable>lower_level</replaceable> to
+        last}</literal>
        </para>
       </entry>
       <entry>
        <para>
-        Same as <literal>.**</literal>, but with a filter over nesting
-        levels of JSON hierarchy. Nesting levels are specified as integers.
-        Zero level corresponds to the current object. To access the lowest
-        nesting level, you can use the <literal>last</literal> keyword.
-        This is a <productname>PostgreSQL</productname> extension of
-        the SQL/JSON standard.
+        Same as <literal>.**</literal>, but with a filter over nesting
+        levels of the JSON hierarchy.  Levels are specified as integers;
+        zero level corresponds to the current object, and the lowest level
+        can be denoted by the <literal>last</literal> keyword.  This is a
+        <productname>PostgreSQL</productname> extension of the SQL/JSON standard.
        </para>
       </entry>
      </row>
@@ -838,22 +841,19 @@ SELECT jdoc-&gt;'guid', jdoc-&gt;'name' FROM api WHERE jdoc @&gt; '{"tags": ["qu
       </entry>
       <entry>
        <para>
-        Array element accessor.
-        <literal><replaceable>subscript</replaceable></literal> can be
-        given in two forms: <literal><replaceable>index</replaceable></literal>
-        or <literal><replaceable>start_index</replaceable> to <replaceable>end_index</replaceable></literal>.
-        The first form returns a single array element by its index. The second
-        form returns an array slice by the range of indexes, including the
-        elements that correspond to the provided
-        <replaceable>start_index</replaceable> and <replaceable>end_index</replaceable>.
+        Array element accessor.  <literal><replaceable>subscript</replaceable></literal>
+        might be given in two forms: <literal><replaceable>expr</replaceable></literal>
+        or <literal><replaceable>lower_expr</replaceable> to <replaceable>upper_expr</replaceable></literal>.
+        The first form specifies a single array element by its index.  The
+        second form specifies an array slice by the range of indexes.  Zero
+        index corresponds to the first array element.
        </para>
        <para>
-        The specified <replaceable>index</replaceable> can be an integer, as
-        well as an expression returning a single numeric value, which is
-        automatically cast to integer. Zero index corresponds to the first
-        array element. You can also use the <literal>last</literal> keyword
-        to denote the last array element, which is useful for handling arrays
-        of unknown length.
+        An expression in the subscript may be an integer, a numeric
+        expression, or any other <literal>jsonpath</literal> expression
+        returning a single numeric value.  The <literal>last</literal> keyword
+        can be used in the expression to denote the last subscript in an array.
+        That's helpful for handling arrays of unknown length.
        </para>
       </entry>
      </row>
diff --git a/src/backend/access/gist/gistbuildbuffers.c b/src/backend/access/gist/gistbuildbuffers.c
index 38f786848d..d71354140e 100644
--- a/src/backend/access/gist/gistbuildbuffers.c
+++ b/src/backend/access/gist/gistbuildbuffers.c
@@ -138,7 +138,6 @@ gistGetNodeBuffer(GISTBuildBuffers *gfbb, GISTSTATE *giststate,
 		nodeBuffer->pageBlocknum = InvalidBlockNumber;
 		nodeBuffer->pageBuffer = NULL;
 		nodeBuffer->queuedForEmptying = false;
-		nodeBuffer->isTemp = false;
 		nodeBuffer->level = level;
 
 		/*
@@ -187,8 +186,8 @@ gistAllocateNewPageBuffer(GISTBuildBuffers *gfbb)
 {
 	GISTNodeBufferPage *pageBuffer;
 
-	pageBuffer = (GISTNodeBufferPage *) MemoryContextAlloc(gfbb->context,
-														   BLCKSZ);
+	pageBuffer = (GISTNodeBufferPage *) MemoryContextAllocZero(gfbb->context,
+															   BLCKSZ);
 	pageBuffer->prev = InvalidBlockNumber;
 
 	/* Set page free space */
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 4f04d122c3..f1161f0fee 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -2518,8 +2518,7 @@ CopyMultiInsertBufferFlush(CopyMultiInsertInfo *miinfo,
  * The buffer must be flushed before cleanup.
  */
 static inline void
-CopyMultiInsertBufferCleanup(CopyMultiInsertInfo *miinfo,
-							 CopyMultiInsertBuffer *buffer)
+CopyMultiInsertBufferCleanup(CopyMultiInsertBuffer *buffer)
 {
 	int			i;
 
@@ -2535,9 +2534,6 @@ CopyMultiInsertBufferCleanup(CopyMultiInsertInfo *miinfo,
 	for (i = 0; i < MAX_BUFFERED_TUPLES && buffer->slots[i] != NULL; i++)
 		ExecDropSingleTupleTableSlot(buffer->slots[i]);
 
-	table_finish_bulk_insert(buffer->resultRelInfo->ri_RelationDesc,
-							 miinfo->ti_options);
-
 	pfree(buffer);
 }
 
@@ -2589,7 +2585,7 @@ CopyMultiInsertInfoFlush(CopyMultiInsertInfo *miinfo, ResultRelInfo *curr_rri)
 			buffer = (CopyMultiInsertBuffer *) linitial(miinfo->multiInsertBuffers);
 		}
 
-		CopyMultiInsertBufferCleanup(miinfo, buffer);
+		CopyMultiInsertBufferCleanup(buffer);
 		miinfo->multiInsertBuffers = list_delete_first(miinfo->multiInsertBuffers);
 	}
 }
@@ -2603,7 +2599,7 @@ CopyMultiInsertInfoCleanup(CopyMultiInsertInfo *miinfo)
 	ListCell   *lc;
 
 	foreach(lc, miinfo->multiInsertBuffers)
-		CopyMultiInsertBufferCleanup(miinfo, lfirst(lc));
+		CopyMultiInsertBufferCleanup(lfirst(lc));
 
 	list_free(miinfo->multiInsertBuffers);
 }
@@ -3325,6 +3321,9 @@ CopyFrom(CopyState cstate)
 	{
 		if (!CopyMultiInsertInfoIsEmpty(&multiInsertInfo))
 			CopyMultiInsertInfoFlush(&multiInsertInfo, NULL);
+
+		/* Tear down the multi-insert buffer data */
+		CopyMultiInsertInfoCleanup(&multiInsertInfo);
 	}
 
 	/* Done, clean up */
@@ -3356,10 +3355,6 @@ CopyFrom(CopyState cstate)
 		target_resultRelInfo->ri_FdwRoutine->EndForeignInsert(estate,
 															  target_resultRelInfo);
 
-	/* Tear down the multi-insert buffer data */
-	if (insertMethod != CIM_SINGLE)
-		CopyMultiInsertInfoCleanup(&multiInsertInfo);
-
 	ExecCloseIndices(target_resultRelInfo);
 
 	/* Close all the partitioned tables, leaf partitions, and their indices */
@@ -3371,6 +3366,8 @@ CopyFrom(CopyState cstate)
 
 	FreeExecutorState(estate);
 
+	table_finish_bulk_insert(cstate->rel, ti_options);
+
 	return processed;
 }
 
diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c
index f7202cc9e7..59ca5cd5a9 100644
--- a/src/backend/commands/extension.c
+++ b/src/backend/commands/extension.c
@@ -717,21 +717,9 @@ execute_sql_string(const char *sql)
 	foreach(lc1, raw_parsetree_list)
 	{
 		RawStmt    *parsetree = lfirst_node(RawStmt, lc1);
-		MemoryContext per_parsetree_context,
-					oldcontext;
 		List	   *stmt_list;
 		ListCell   *lc2;
 
-		/*
-		 * We do the work for each parsetree in a short-lived context, to
-		 * limit the memory used when there are many commands in the string.
-		 */
-		per_parsetree_context =
-			AllocSetContextCreate(CurrentMemoryContext,
-								  "execute_sql_string per-statement context",
-								  ALLOCSET_DEFAULT_SIZES);
-		oldcontext = MemoryContextSwitchTo(per_parsetree_context);
-
 		/* Be sure parser can see any DDL done so far */
 		CommandCounterIncrement();
 
@@ -784,10 +772,6 @@ execute_sql_string(const char *sql)
 
 			PopActiveSnapshot();
 		}
-
-		/* Clean up per-parsetree context. */
-		MemoryContextSwitchTo(oldcontext);
-		MemoryContextDelete(per_parsetree_context);
 	}
 
 	/* Be sure to advance the command counter after the last script command */
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 0f1a9f0e54..3aee2d82ce 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -15915,7 +15915,6 @@ CloneRowTriggersToPartition(Relation parent, Relation partition)
 		Datum		value;
 		bool		isnull;
 		List	   *cols = NIL;
-		List	   *trigargs = NIL;
 		MemoryContext oldcxt;
 
 		/*
@@ -15980,31 +15979,11 @@ CloneRowTriggersToPartition(Relation parent, Relation partition)
 			}
 		}
 
-		/* Reconstruct trigger arguments list. */
-		if (trigForm->tgnargs > 0)
-		{
-			char	   *p;
-
-			value = heap_getattr(tuple, Anum_pg_trigger_tgargs,
-								 RelationGetDescr(pg_trigger), &isnull);
-			if (isnull)
-				elog(ERROR, "tgargs is null for trigger \"%s\" in partition \"%s\"",
-					 NameStr(trigForm->tgname), RelationGetRelationName(partition));
-
-			p = (char *) VARDATA_ANY(DatumGetByteaPP(value));
-
-			for (int i = 0; i < trigForm->tgnargs; i++)
-			{
-				trigargs = lappend(trigargs, makeString(pstrdup(p)));
-				p += strlen(p) + 1;
-			}
-		}
-
 		trigStmt = makeNode(CreateTrigStmt);
 		trigStmt->trigname = NameStr(trigForm->tgname);
 		trigStmt->relation = NULL;
 		trigStmt->funcname = NULL;	/* passed separately */
-		trigStmt->args = trigargs;
+		trigStmt->args = NULL;	/* passed separately */
 		trigStmt->row = true;
 		trigStmt->timing = trigForm->tgtype & TRIGGER_TYPE_TIMING_MASK;
 		trigStmt->events = trigForm->tgtype & TRIGGER_TYPE_EVENT_MASK;
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index ee878d70a9..316692b7c2 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -1172,6 +1172,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
 			 */
 			childStmt = (CreateTrigStmt *) copyObject(stmt);
 			childStmt->funcname = NIL;
+			childStmt->args = NIL;
 			childStmt->whenClause = NULL;
 
 			/* If there is a WHEN clause, create a modified copy of it */
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 29e2681484..27f0345515 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -2793,7 +2793,6 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
 	estate->es_range_table_array = parentestate->es_range_table_array;
 	estate->es_range_table_size = parentestate->es_range_table_size;
 	estate->es_relations = parentestate->es_relations;
-	estate->es_queryEnv = parentestate->es_queryEnv;
 	estate->es_rowmarks = parentestate->es_rowmarks;
 	estate->es_plannedstmt = parentestate->es_plannedstmt;
 	estate->es_junkFilter = parentestate->es_junkFilter;
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index 8400dd319e..4529b5c63b 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -2278,6 +2278,14 @@ _outRelOptInfo(StringInfo str, const RelOptInfo *node)
 	WRITE_NODE_FIELD(partitioned_child_rels);
 }
 
+static void
+_outRelInfoList(StringInfo str, const RelInfoList *node)
+{
+	WRITE_NODE_TYPE("RELOPTINFOLIST");
+
+	WRITE_NODE_FIELD(items);
+}
+
 static void
 _outIndexOptInfo(StringInfo str, const IndexOptInfo *node)
 {
@@ -4052,6 +4060,9 @@ outNode(StringInfo str, const void *obj)
 			case T_RelOptInfo:
 				_outRelOptInfo(str, obj);
 				break;
+			case T_RelInfoList:
+				_outRelInfoList(str, obj);
+				break;
 			case T_IndexOptInfo:
 				_outIndexOptInfo(str, obj);
 				break;
diff --git a/src/backend/optimizer/geqo/geqo_eval.c b/src/backend/optimizer/geqo/geqo_eval.c
index 6c69c1c147..c69f3469ba 100644
--- a/src/backend/optimizer/geqo/geqo_eval.c
+++ b/src/backend/optimizer/geqo/geqo_eval.c
@@ -92,11 +92,11 @@ geqo_eval(PlannerInfo *root, Gene *tour, int num_gene)
 	 *
 	 * join_rel_level[] shouldn't be in use, so just Assert it isn't.
 	 */
-	savelength = list_length(root->join_rel_list);
-	savehash = root->join_rel_hash;
+	savelength = list_length(root->join_rel_list->items);
+	savehash = root->join_rel_list->hash;
 	Assert(root->join_rel_level == NULL);
 
-	root->join_rel_hash = NULL;
+	root->join_rel_list->hash = NULL;
 
 	/* construct the best path for the given combination of relations */
 	joinrel = gimme_tree(root, tour, num_gene);
@@ -121,9 +121,9 @@ geqo_eval(PlannerInfo *root, Gene *tour, int num_gene)
 	 * Restore join_rel_list to its former state, and put back original
 	 * hashtable if any.
 	 */
-	root->join_rel_list = list_truncate(root->join_rel_list,
-										savelength);
-	root->join_rel_hash = savehash;
+	root->join_rel_list->items = list_truncate(root->join_rel_list->items,
+											   savelength);
+	root->join_rel_list->hash = savehash;
 
 	/* release all the memory acquired within gimme_tree */
 	MemoryContextSwitchTo(oldcxt);
diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c
index 2dbf1db844..0b9999c8a6 100644
--- a/src/backend/optimizer/plan/planmain.c
+++ b/src/backend/optimizer/plan/planmain.c
@@ -65,8 +65,7 @@ query_planner(PlannerInfo *root,
 	 * NOTE: append_rel_list was set up by subquery_planner, so do not touch
 	 * here.
 	 */
-	root->join_rel_list = NIL;
-	root->join_rel_hash = NULL;
+	root->join_rel_list = makeNode(RelInfoList);
 	root->join_rel_level = NULL;
 	root->join_cur_level = 0;
 	root->canon_pathkeys = NIL;
diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c
index 6054bd2b53..c238dd6538 100644
--- a/src/backend/optimizer/util/relnode.c
+++ b/src/backend/optimizer/util/relnode.c
@@ -31,11 +31,11 @@
 #include "utils/hsearch.h"
 
 
-typedef struct JoinHashEntry
+typedef struct RelInfoEntry
 {
-	Relids		join_relids;	/* hash key --- MUST BE FIRST */
-	RelOptInfo *join_rel;
-} JoinHashEntry;
+	Relids		relids;			/* hash key --- MUST BE FIRST */
+	void	   *data;
+} RelInfoEntry;
 
 static void build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel,
 								RelOptInfo *input_rel);
@@ -375,11 +375,11 @@ find_base_rel(PlannerInfo *root, int relid)
 }
 
 /*
- * build_join_rel_hash
- *	  Construct the auxiliary hash table for join relations.
+ * build_rel_hash
+ *	  Construct the auxiliary hash table for relation-specific data.
  */
 static void
-build_join_rel_hash(PlannerInfo *root)
+build_rel_hash(RelInfoList *list)
 {
 	HTAB	   *hashtab;
 	HASHCTL		hash_ctl;
@@ -388,47 +388,50 @@ build_join_rel_hash(PlannerInfo *root)
 	/* Create the hash table */
 	MemSet(&hash_ctl, 0, sizeof(hash_ctl));
 	hash_ctl.keysize = sizeof(Relids);
-	hash_ctl.entrysize = sizeof(JoinHashEntry);
+	hash_ctl.entrysize = sizeof(RelInfoEntry);
 	hash_ctl.hash = bitmap_hash;
 	hash_ctl.match = bitmap_match;
 	hash_ctl.hcxt = CurrentMemoryContext;
-	hashtab = hash_create("JoinRelHashTable",
+	hashtab = hash_create("RelHashTable",
 						  256L,
 						  &hash_ctl,
 						  HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
 
 	/* Insert all the already-existing joinrels */
-	foreach(l, root->join_rel_list)
+	foreach(l, list->items)
 	{
-		RelOptInfo *rel = (RelOptInfo *) lfirst(l);
-		JoinHashEntry *hentry;
+		void	   *item = lfirst(l);
+		RelInfoEntry *hentry;
 		bool		found;
+		Relids		relids;
 
-		hentry = (JoinHashEntry *) hash_search(hashtab,
-											   &(rel->relids),
-											   HASH_ENTER,
-											   &found);
+		Assert(IsA(item, RelOptInfo));
+		relids = ((RelOptInfo *) item)->relids;
+
+		hentry = (RelInfoEntry *) hash_search(hashtab,
+											  &relids,
+											  HASH_ENTER,
+											  &found);
 		Assert(!found);
-		hentry->join_rel = rel;
+		hentry->data = item;
 	}
 
-	root->join_rel_hash = hashtab;
+	list->hash = hashtab;
 }
 
 /*
- * find_join_rel
- *	  Returns relation entry corresponding to 'relids' (a set of RT indexes),
- *	  or NULL if none exists.  This is for join relations.
+ * find_rel_info
+ *	  Find a base or join relation entry.
  */
-RelOptInfo *
-find_join_rel(PlannerInfo *root, Relids relids)
+static void *
+find_rel_info(RelInfoList *list, Relids relids)
 {
 	/*
 	 * Switch to using hash lookup when list grows "too long".  The threshold
 	 * is arbitrary and is known only here.
 	 */
-	if (!root->join_rel_hash && list_length(root->join_rel_list) > 32)
-		build_join_rel_hash(root);
+	if (!list->hash && list_length(list->items) > 32)
+		build_rel_hash(list);
 
 	/*
 	 * Use either hashtable lookup or linear search, as appropriate.
@@ -438,34 +441,90 @@ find_join_rel(PlannerInfo *root, Relids relids)
 	 * so would force relids out of a register and thus probably slow down the
 	 * list-search case.
 	 */
-	if (root->join_rel_hash)
+	if (list->hash)
 	{
 		Relids		hashkey = relids;
-		JoinHashEntry *hentry;
+		RelInfoEntry *hentry;
 
-		hentry = (JoinHashEntry *) hash_search(root->join_rel_hash,
-											   &hashkey,
-											   HASH_FIND,
-											   NULL);
+		hentry = (RelInfoEntry *) hash_search(list->hash,
+											  &hashkey,
+											  HASH_FIND,
+											  NULL);
 		if (hentry)
-			return hentry->join_rel;
+			return hentry->data;
 	}
 	else
 	{
 		ListCell   *l;
 
-		foreach(l, root->join_rel_list)
+		foreach(l, list->items)
 		{
-			RelOptInfo *rel = (RelOptInfo *) lfirst(l);
+			void	   *item = lfirst(l);
+			Relids		item_relids;
 
-			if (bms_equal(rel->relids, relids))
-				return rel;
+			Assert(IsA(item, RelOptInfo));
+			item_relids = ((RelOptInfo *) item)->relids;
+
+			if (bms_equal(item_relids, relids))
+				return item;
 		}
 	}
 
 	return NULL;
 }
 
+/*
+ * find_join_rel
+ *	  Returns relation entry corresponding to 'relids' (a set of RT indexes),
+ *	  or NULL if none exists.  This is for join relations.
+ */
+RelOptInfo *
+find_join_rel(PlannerInfo *root, Relids relids)
+{
+	return (RelOptInfo *) find_rel_info(root->join_rel_list, relids);
+}
+
+/*
+ * add_rel_info
+ *		Add relation-specific info to a list, and also add it to the auxiliary
+ *		hashtable if there is one.
+ */
+static void
+add_rel_info(RelInfoList *list, void *data)
+{
+	Assert(IsA(data, RelOptInfo));
+
+	/* GEQO requires us to append the new joinrel to the end of the list! */
+	list->items = lappend(list->items, data);
+
+	/* store it into the auxiliary hashtable if there is one. */
+	if (list->hash)
+	{
+		Relids		relids;
+		RelInfoEntry *hentry;
+		bool		found;
+
+		relids = ((RelOptInfo *) data)->relids;
+		hentry = (RelInfoEntry *) hash_search(list->hash,
+											  &relids,
+											  HASH_ENTER,
+											  &found);
+		Assert(!found);
+		hentry->data = data;
+	}
+}
+
+/*
+ * add_join_rel
+ *		Add given join relation to the list of join relations in the given
+ *		PlannerInfo.
+ */
+static void
+add_join_rel(PlannerInfo *root, RelOptInfo *joinrel)
+{
+	add_rel_info(root->join_rel_list, joinrel);
+}
+
 /*
  * set_foreign_rel_properties
  *		Set up foreign-join fields if outer and inner relation are foreign
@@ -516,32 +575,6 @@ set_foreign_rel_properties(RelOptInfo *joinrel, RelOptInfo *outer_rel,
 	}
 }
 
-/*
- * add_join_rel
- *		Add given join relation to the list of join relations in the given
- *		PlannerInfo. Also add it to the auxiliary hashtable if there is one.
- */
-static void
-add_join_rel(PlannerInfo *root, RelOptInfo *joinrel)
-{
-	/* GEQO requires us to append the new joinrel to the end of the list! */
-	root->join_rel_list = lappend(root->join_rel_list, joinrel);
-
-	/* store it into the auxiliary hashtable if there is one. */
-	if (root->join_rel_hash)
-	{
-		JoinHashEntry *hentry;
-		bool		found;
-
-		hentry = (JoinHashEntry *) hash_search(root->join_rel_hash,
-											   &(joinrel->relids),
-											   HASH_ENTER,
-											   &found);
-		Assert(!found);
-		hentry->join_rel = joinrel;
-	}
-}
-
 /*
  * build_join_rel
  *	  Returns relation entry corresponding to the union of two given rels,
diff --git a/src/backend/partitioning/partprune.c b/src/backend/partitioning/partprune.c
index e71a21c0a7..5982af4de1 100644
--- a/src/backend/partitioning/partprune.c
+++ b/src/backend/partitioning/partprune.c
@@ -194,10 +194,8 @@ static PruneStepResult *perform_pruning_base_step(PartitionPruneContext *context
 static PruneStepResult *perform_pruning_combine_step(PartitionPruneContext *context,
 													 PartitionPruneStepCombine *cstep,
 													 PruneStepResult **step_results);
-static PartClauseMatchStatus match_boolean_partition_clause(Oid partopfamily,
-															Expr *clause,
-															Expr *partkey,
-															Expr **outconst);
+static bool match_boolean_partition_clause(Oid partopfamily, Expr *clause,
+										   Expr *partkey, Expr **outconst);
 static void partkey_datum_from_expr(PartitionPruneContext *context,
 									Expr *expr, int stateidx,
 									Datum *value, bool *isnull);
@@ -1625,7 +1623,6 @@ match_clause_to_partition_key(GeneratePruningStepsContext *context,
 							  bool *clause_is_not_null, PartClauseInfo **pc,
 							  List **clause_steps)
 {
-	PartClauseMatchStatus boolmatchstatus;
 	PartitionScheme part_scheme = context->rel->part_scheme;
 	Oid			partopfamily = part_scheme->partopfamily[partkeyidx],
 				partcoll = part_scheme->partcollation[partkeyidx];
@@ -1634,10 +1631,7 @@ match_clause_to_partition_key(GeneratePruningStepsContext *context,
 	/*
 	 * Recognize specially shaped clauses that match a Boolean partition key.
 	 */
-	boolmatchstatus = match_boolean_partition_clause(partopfamily, clause,
-													 partkey, &expr);
-
-	if (boolmatchstatus == PARTCLAUSE_MATCH_CLAUSE)
+	if (match_boolean_partition_clause(partopfamily, clause, partkey, &expr))
 	{
 		PartClauseInfo *partclause;
 
@@ -2153,21 +2147,7 @@ match_clause_to_partition_key(GeneratePruningStepsContext *context,
 		return PARTCLAUSE_MATCH_NULLNESS;
 	}
 
-	/*
-	 * If we get here then the return value depends on the result of the
-	 * match_boolean_partition_clause call above.  If the call returned
-	 * PARTCLAUSE_UNSUPPORTED then we're either not dealing with a bool qual
-	 * or the bool qual is not suitable for pruning.  Since the qual didn't
-	 * match up to any of the other qual types supported here, then trying to
-	 * match it against any other partition key is a waste of time, so just
-	 * return PARTCLAUSE_UNSUPPORTED.  If the qual just couldn't be matched to
-	 * this partition key, then it may match another, so return
-	 * PARTCLAUSE_NOMATCH.  The only other value that
-	 * match_boolean_partition_clause can return is PARTCLAUSE_MATCH_CLAUSE,
-	 * and since that value was already dealt with above, then we can just
-	 * return boolmatchstatus.
-	 */
-	return boolmatchstatus;
+	return PARTCLAUSE_UNSUPPORTED;
 }
 
 /*
@@ -3415,15 +3395,11 @@ perform_pruning_combine_step(PartitionPruneContext *context,
 /*
  * match_boolean_partition_clause
  *
- * If we're able to match the clause to the partition key as specially-shaped
- * boolean clause, set *outconst to a Const containing a true or false value
- * and return PARTCLAUSE_MATCH_CLAUSE.  Returns PARTCLAUSE_UNSUPPORTED if the
- * clause is not a boolean clause or if the boolean clause is unsuitable for
- * partition pruning.  Returns PARTCLAUSE_NOMATCH if it's a bool quals but
- * just does not match this partition key.  *outconst is set to NULL in the
- * latter two cases.
+ * Sets *outconst to a Const containing a true or false value and returns true
+ * if we're able to match the clause to the partition key as a specially-shaped
+ * Boolean clause.  Returns false otherwise, with *outconst set to NULL.
  */
-static PartClauseMatchStatus
+static bool
 match_boolean_partition_clause(Oid partopfamily, Expr *clause, Expr *partkey,
 							   Expr **outconst)
 {
@@ -3432,7 +3408,7 @@ match_boolean_partition_clause(Oid partopfamily, Expr *clause, Expr *partkey,
 	*outconst = NULL;
 
 	if (!IsBooleanOpfamily(partopfamily))
-		return PARTCLAUSE_UNSUPPORTED;
+		return false;
 
 	if (IsA(clause, BooleanTest))
 	{
@@ -3441,7 +3417,7 @@ match_boolean_partition_clause(Oid partopfamily, Expr *clause, Expr *partkey,
 		/* Only IS [NOT] TRUE/FALSE are any good to us */
 		if (btest->booltesttype == IS_UNKNOWN ||
 			btest->booltesttype == IS_NOT_UNKNOWN)
-			return PARTCLAUSE_UNSUPPORTED;
+			return false;
 
 		leftop = btest->arg;
 		if (IsA(leftop, RelabelType))
@@ -3454,7 +3430,7 @@ match_boolean_partition_clause(Oid partopfamily, Expr *clause, Expr *partkey,
 				: (Expr *) makeBoolConst(false, false);
 
 		if (*outconst)
-			return PARTCLAUSE_MATCH_CLAUSE;
+			return true;
 	}
 	else
 	{
@@ -3474,10 +3450,10 @@ match_boolean_partition_clause(Oid partopfamily, Expr *clause, Expr *partkey,
 			*outconst = (Expr *) makeBoolConst(false, false);
 
 		if (*outconst)
-			return PARTCLAUSE_MATCH_CLAUSE;
+			return true;
 	}
 
-	return PARTCLAUSE_NOMATCH;
+	return false;
 }
 
 /*
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index ffd84d877c..44a59e1d4f 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -1070,7 +1070,6 @@ exec_simple_query(const char *query_string)
 		bool		snapshot_set = false;
 		const char *commandTag;
 		char		completionTag[COMPLETION_TAG_BUFSIZE];
-		MemoryContext per_parsetree_context = NULL;
 		List	   *querytree_list,
 				   *plantree_list;
 		Portal		portal;
@@ -1133,25 +1132,10 @@ exec_simple_query(const char *query_string)
 		/*
 		 * OK to analyze, rewrite, and plan this query.
 		 *
-		 * Switch to appropriate context for constructing query and plan trees
-		 * (these can't be in the transaction context, as that will get reset
-		 * when the command is COMMIT/ROLLBACK).  If we have multiple
-		 * parsetrees, we use a separate context for each one, so that we can
-		 * free that memory before moving on to the next one.  But for the
-		 * last (or only) parsetree, just use MessageContext, which will be
-		 * reset shortly after completion anyway.  In event of an error, the
-		 * per_parsetree_context will be deleted when MessageContext is reset.
+		 * Switch to appropriate context for constructing querytrees (again,
+		 * these must outlive the execution context).
 		 */
-		if (lnext(parsetree_item) != NULL)
-		{
-			per_parsetree_context =
-				AllocSetContextCreate(MessageContext,
-									  "per-parsetree message context",
-									  ALLOCSET_DEFAULT_SIZES);
-			oldcontext = MemoryContextSwitchTo(per_parsetree_context);
-		}
-		else
-			oldcontext = MemoryContextSwitchTo(MessageContext);
+		oldcontext = MemoryContextSwitchTo(MessageContext);
 
 		querytree_list = pg_analyze_and_rewrite(parsetree, query_string,
 												NULL, 0, NULL);
@@ -1176,8 +1160,8 @@ exec_simple_query(const char *query_string)
 
 		/*
 		 * We don't have to copy anything into the portal, because everything
-		 * we are passing here is in MessageContext or the
-		 * per_parsetree_context, and so will outlive the portal anyway.
+		 * we are passing here is in MessageContext, which will outlive the
+		 * portal anyway.
 		 */
 		PortalDefineQuery(portal,
 						  NULL,
@@ -1279,10 +1263,6 @@ exec_simple_query(const char *query_string)
 		 * aborted by error will not send an EndCommand report at all.)
 		 */
 		EndCommand(completionTag, dest);
-
-		/* Now we may drop the per-parsetree context, if one was created. */
-		if (per_parsetree_context)
-			MemoryContextDelete(per_parsetree_context);
 	}							/* end loop over parsetrees */
 
 	/*
diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c
index 04d77ad700..f1acbdfcf2 100644
--- a/src/bin/initdb/initdb.c
+++ b/src/bin/initdb/initdb.c
@@ -1051,7 +1051,7 @@ test_config_settings(void)
 	else
 		printf("%dkB\n", n_buffers * (BLCKSZ / 1024));
 
-	printf(_("selecting default time zone ... "));
+	printf(_("selecting default timezone ... "));
 	fflush(stdout);
 	default_timezone = select_default_timezone(share_path);
 	printf("%s\n", default_timezone ? default_timezone : "GMT");
diff --git a/src/bin/pg_basebackup/pg_recvlogical.c b/src/bin/pg_basebackup/pg_recvlogical.c
index 90a3f41bbb..b029118bf6 100644
--- a/src/bin/pg_basebackup/pg_recvlogical.c
+++ b/src/bin/pg_basebackup/pg_recvlogical.c
@@ -1020,11 +1020,11 @@ prepareToTerminate(PGconn *conn, XLogRecPtr endpos, bool keepalive, XLogRecPtr l
 	if (verbose)
 	{
 		if (keepalive)
-			pg_log_info("end position %X/%X reached by keepalive",
+			pg_log_info("endpos %X/%X reached by keepalive",
 						(uint32) (endpos >> 32), (uint32) endpos);
 		else
-			pg_log_info("end position %X/%X reached by WAL record at %X/%X",
+			pg_log_info("endpos %X/%X reached by record at %X/%X",
 						(uint32) (endpos >> 32), (uint32) (endpos),
 						(uint32) (lsn >> 32), (uint32) lsn);
+
 	}
 }
diff --git a/src/bin/pg_checksums/pg_checksums.c b/src/bin/pg_checksums/pg_checksums.c
index 8c00ec9a3b..b591fcc864 100644
--- a/src/bin/pg_checksums/pg_checksums.c
+++ b/src/bin/pg_checksums/pg_checksums.c
@@ -235,7 +235,7 @@ scan_file(const char *fn, BlockNumber segmentno)
 			/* Write block with checksum */
 			if (write(f, buf.data, BLCKSZ) != BLCKSZ)
 			{
-				pg_log_error("could not write block %u in file \"%s\": %m",
+				pg_log_error("could not update checksum of block %u in file \"%s\": %m",
 							 blockno, fn);
 				exit(1);
 			}
@@ -469,7 +469,7 @@ main(int argc, char *argv[])
 	/* filenode checking only works in --check mode */
 	if (mode != PG_MODE_CHECK && only_filenode)
 	{
-		pg_log_error("option -f/--filenode can only be used with --check");
+		pg_log_error("--filenode option only possible with --check");
 		fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
 				progname);
 		exit(1);
diff --git a/src/bin/pg_dump/pg_backup_db.c b/src/bin/pg_dump/pg_backup_db.c
index ee822c5249..401e0c8883 100644
--- a/src/bin/pg_dump/pg_backup_db.c
+++ b/src/bin/pg_dump/pg_backup_db.c
@@ -176,7 +176,7 @@ _connectDB(ArchiveHandle *AH, const char *reqdb, const char *requser)
 		newConn = PQconnectdbParams(keywords, values, true);
 
 		if (!newConn)
-			fatal("could not reconnect to database");
+			fatal("failed to reconnect to database");
 
 		if (PQstatus(newConn) == CONNECTION_BAD)
 		{
@@ -287,7 +287,7 @@ ConnectDatabase(Archive *AHX,
 		AH->connection = PQconnectdbParams(keywords, values, true);
 
 		if (!AH->connection)
-			fatal("could not connect to database");
+			fatal("failed to connect to database");
 
 		if (PQstatus(AH->connection) == CONNECTION_BAD &&
 			PQconnectionNeedsPassword(AH->connection) &&
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 0981efcf5d..158c0c74b2 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -482,7 +482,7 @@ main(int argc, char *argv[])
 		OPF = fopen(filename, PG_BINARY_W);
 		if (!OPF)
 		{
-			pg_log_error("could not open output file \"%s\": %m",
+			pg_log_error("could not open the output file \"%s\": %m",
 						 filename);
 			exit_nicely(1);
 		}
@@ -1492,11 +1492,11 @@ dumpDatabases(PGconn *conn)
 		/* Skip any explicitly excluded database */
 		if (simple_string_list_member(&database_exclude_names, dbname))
 		{
-			pg_log_info("excluding database \"%s\"", dbname);
+			pg_log_info("excluding database \"%s\"...", dbname);
 			continue;
 		}
 
-		pg_log_info("dumping database \"%s\"", dbname);
+		pg_log_info("dumping database \"%s\"...", dbname);
 
 		fprintf(OPF, "--\n-- Database \"%s\" dump\n--\n\n", dbname);
 
diff --git a/src/bin/pg_upgrade/option.c b/src/bin/pg_upgrade/option.c
index d76f27c9e8..73f395f2a3 100644
--- a/src/bin/pg_upgrade/option.c
+++ b/src/bin/pg_upgrade/option.c
@@ -304,7 +304,7 @@ usage(void)
 	printf(_("  -p, --old-port=PORT           old cluster port number (default %d)\n"), old_cluster.port);
 	printf(_("  -P, --new-port=PORT           new cluster port number (default %d)\n"), new_cluster.port);
 	printf(_("  -r, --retain                  retain SQL and log files after success\n"));
-	printf(_("  -s, --socketdir=DIR           socket directory to use (default current dir.)\n"));
+	printf(_("  -s, --socketdir=DIR           socket directory to use (default CWD)\n"));
 	printf(_("  -U, --username=NAME           cluster superuser (default \"%s\")\n"), os_info.user);
 	printf(_("  -v, --verbose                 enable verbose internal logging\n"));
 	printf(_("  -V, --version                 display version information, then exit\n"));
diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h
index c2b0481e7e..7edfcf3ef9 100644
--- a/src/include/access/tableam.h
+++ b/src/include/access/tableam.h
@@ -434,8 +434,8 @@ typedef struct TableAmRoutine
 	 *
 	 * Note that only the subset of the relcache filled by
 	 * RelationBuildLocalRelation() can be relied upon and that the relation's
-	 * catalog entries either will either not yet exist (new relation), or
-	 * will still reference the old relfilenode.
+	 * catalog entries will either not yet exist (new relation), or will still
+	 * reference the old relfilenode.
 	 *
 	 * As output *freezeXid, *minmulti must be set to the values appropriate
 	 * for pg_class.{relfrozenxid, relminmxid}. For AMs that don't need those
@@ -591,7 +591,7 @@ typedef struct TableAmRoutine
 	 * See table_relation_estimate_size().
 	 *
 	 * While block oriented, it shouldn't be too hard for an AM that doesn't
-	 * doesn't internally use blocks to convert into a usable representation.
+	 * internally use blocks to convert into a usable representation.
 	 *
 	 * This differs from the relation_size callback by returning size
 	 * estimates (both relation size and tuple count) for planning purposes,
@@ -967,7 +967,7 @@ table_index_fetch_end(struct IndexFetchTableData *scan)
  *
  * *all_dead, if all_dead is not NULL, will be set to true by
  * table_index_fetch_tuple() iff it is guaranteed that no backend needs to see
- * that tuple. Index AMs can use that do avoid returning that tid in future
+ * that tuple. Index AMs can use that to avoid returning that tid in future
  * searches.
  *
  * The difference between this function and table_fetch_row_version is that
@@ -1014,8 +1014,8 @@ extern bool table_index_fetch_tuple_check(Relation rel,
  * true, false otherwise.
  *
  * See table_index_fetch_tuple's comment about what the difference between
- * these functions is. This function is the correct to use outside of
- * index entry->table tuple lookups.
+ * these functions is. It is correct to use this function outside of index
+ * entry->table tuple lookups.
  */
 static inline bool
 table_tuple_fetch_row_version(Relation rel,
diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h
index 4e2fb39105..11027cdb10 100644
--- a/src/include/nodes/nodes.h
+++ b/src/include/nodes/nodes.h
@@ -220,6 +220,7 @@ typedef enum NodeTag
 	T_PlannerInfo,
 	T_PlannerGlobal,
 	T_RelOptInfo,
+	T_RelInfoList,
 	T_IndexOptInfo,
 	T_ForeignKeyOptInfo,
 	T_ParamPathInfo,
diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h
index 441e64eca9..38dc186623 100644
--- a/src/include/nodes/pathnodes.h
+++ b/src/include/nodes/pathnodes.h
@@ -236,15 +236,9 @@ struct PlannerInfo
 
 	/*
 	 * join_rel_list is a list of all join-relation RelOptInfos we have
-	 * considered in this planning run.  For small problems we just scan the
-	 * list to do lookups, but when there are many join relations we build a
-	 * hash table for faster lookups.  The hash table is present and valid
-	 * when join_rel_hash is not NULL.  Note that we still maintain the list
-	 * even when using the hash table for lookups; this simplifies life for
-	 * GEQO.
+	 * considered in this planning run.
 	 */
-	List	   *join_rel_list;	/* list of join-relation RelOptInfos */
-	struct HTAB *join_rel_hash; /* optional hashtable for join relations */
+	struct RelInfoList *join_rel_list;	/* list of join-relation RelOptInfos */
 
 	/*
 	 * When doing a dynamic-programming-style join search, join_rel_level[k]
@@ -742,6 +736,24 @@ typedef struct RelOptInfo
 	((rel)->part_scheme && (rel)->boundinfo && (rel)->nparts > 0 && \
 	 (rel)->part_rels && (rel)->partexprs && (rel)->nullable_partexprs)
 
+/*
+ * RelInfoList
+ *		A list to store relation-specific info and to retrieve it by relids.
+ *
+ * For small problems we just scan the list to do lookups, but when there are
+ * many relations we build a hash table for faster lookups. The hash table is
+ * present and valid when hash is not NULL.  Note that we still maintain
+ * the list even when using the hash table for lookups; this simplifies life
+ * for GEQO.
+ */
+typedef struct RelInfoList
+{
+	NodeTag		type;
+
+	List	   *items;
+	struct HTAB *hash;
+} RelInfoList;
+
 /*
  * IndexOptInfo
  *		Per-index information for planning/optimization
diff --git a/src/test/regress/expected/partition_prune.out b/src/test/regress/expected/partition_prune.out
index 2eecb1744b..841bd8bc67 100644
--- a/src/test/regress/expected/partition_prune.out
+++ b/src/test/regress/expected/partition_prune.out
@@ -1086,19 +1086,6 @@ explain (costs off) select * from boolpart where a is not unknown;
          Filter: (a IS NOT UNKNOWN)
 (7 rows)
 
-create table boolrangep (a bool, b bool, c int) partition by range (a,b,c);
-create table boolrangep_tf partition of boolrangep for values from ('true', 'false', 0) to ('true', 'false', 100);
-create table boolrangep_ft partition of boolrangep for values from ('false', 'true', 0) to ('false', 'true', 100);
-create table boolrangep_ff1 partition of boolrangep for values from ('false', 'false', 0) to ('false', 'false', 50);
-create table boolrangep_ff2 partition of boolrangep for values from ('false', 'false', 50) to ('false', 'false', 100);
--- try a more complex case that's been known to trip up pruning in the past
-explain (costs off)  select * from boolrangep where not a and not b and c = 25;
-                  QUERY PLAN                  
-----------------------------------------------
- Seq Scan on boolrangep_ff1
-   Filter: ((NOT a) AND (NOT b) AND (c = 25))
-(2 rows)
-
 -- test scalar-to-array operators
 create table coercepart (a varchar) partition by list (a);
 create table coercepart_ab partition of coercepart for values in ('ab');
@@ -1433,7 +1420,7 @@ explain (costs off) select * from rparted_by_int2 where a > 100000000000000;
    Filter: (a > '100000000000000'::bigint)
 (2 rows)
 
-drop table lp, coll_pruning, rlp, mc3p, mc2p, boolpart, boolrangep, rp, coll_pruning_multi, like_op_noprune, lparted_by_int2, rparted_by_int2;
+drop table lp, coll_pruning, rlp, mc3p, mc2p, boolpart, rp, coll_pruning_multi, like_op_noprune, lparted_by_int2, rparted_by_int2;
 --
 -- Test Partition pruning for HASH partitioning
 --
diff --git a/src/test/regress/expected/triggers.out b/src/test/regress/expected/triggers.out
index c64151ba09..cd2b550c14 100644
--- a/src/test/regress/expected/triggers.out
+++ b/src/test/regress/expected/triggers.out
@@ -2094,30 +2094,6 @@ NOTICE:  trigger zzz on parted_trig_1_1 AFTER INSERT for ROW
 NOTICE:  trigger bbb on parted_trig_2 AFTER INSERT for ROW
 NOTICE:  trigger zzz on parted_trig_2 AFTER INSERT for ROW
 drop table parted_trig;
--- Verify propagation of trigger arguments to partitions
-create table parted_trig (a int) partition by list (a);
-create table parted_trig1 partition of parted_trig for values in (1);
-create or replace function trigger_notice() returns trigger as $$
-  declare
-    arg1 text = TG_ARGV[0];
-    arg2 integer = TG_ARGV[1];
-  begin
-    raise notice 'trigger % on % % % for % args % %',
-		TG_NAME, TG_TABLE_NAME, TG_WHEN, TG_OP, TG_LEVEL, arg1, arg2;
-    return null;
-  end;
-  $$ language plpgsql;
-create trigger aaa after insert on parted_trig
-   for each row execute procedure trigger_notice('quirky', 1);
--- Verify propagation of trigger arguments to partitions attached after creating trigger
-create table parted_trig2 partition of parted_trig for values in (2);
-create table parted_trig3 (like parted_trig);
-alter table parted_trig attach partition parted_trig3 for values in (3);
-insert into parted_trig values (1), (2), (3);
-NOTICE:  trigger aaa on parted_trig1 AFTER INSERT for ROW args quirky 1
-NOTICE:  trigger aaa on parted_trig2 AFTER INSERT for ROW args quirky 1
-NOTICE:  trigger aaa on parted_trig3 AFTER INSERT for ROW args quirky 1
-drop table parted_trig;
 -- test irregular partitions (i.e., different column definitions),
 -- including that the WHEN clause works
 create function bark(text) returns bool language plpgsql immutable
diff --git a/src/test/regress/sql/partition_prune.sql b/src/test/regress/sql/partition_prune.sql
index 7bb4e2fffc..071e28dce8 100644
--- a/src/test/regress/sql/partition_prune.sql
+++ b/src/test/regress/sql/partition_prune.sql
@@ -159,15 +159,6 @@ explain (costs off) select * from boolpart where a is not true and a is not fals
 explain (costs off) select * from boolpart where a is unknown;
 explain (costs off) select * from boolpart where a is not unknown;
 
-create table boolrangep (a bool, b bool, c int) partition by range (a,b,c);
-create table boolrangep_tf partition of boolrangep for values from ('true', 'false', 0) to ('true', 'false', 100);
-create table boolrangep_ft partition of boolrangep for values from ('false', 'true', 0) to ('false', 'true', 100);
-create table boolrangep_ff1 partition of boolrangep for values from ('false', 'false', 0) to ('false', 'false', 50);
-create table boolrangep_ff2 partition of boolrangep for values from ('false', 'false', 50) to ('false', 'false', 100);
-
--- try a more complex case that's been known to trip up pruning in the past
-explain (costs off)  select * from boolrangep where not a and not b and c = 25;
-
 -- test scalar-to-array operators
 create table coercepart (a varchar) partition by list (a);
 create table coercepart_ab partition of coercepart for values in ('ab');
@@ -273,7 +264,7 @@ create table rparted_by_int2_maxvalue partition of rparted_by_int2 for values fr
 -- all partitions but rparted_by_int2_maxvalue pruned
 explain (costs off) select * from rparted_by_int2 where a > 100000000000000;
 
-drop table lp, coll_pruning, rlp, mc3p, mc2p, boolpart, boolrangep, rp, coll_pruning_multi, like_op_noprune, lparted_by_int2, rparted_by_int2;
+drop table lp, coll_pruning, rlp, mc3p, mc2p, boolpart, rp, coll_pruning_multi, like_op_noprune, lparted_by_int2, rparted_by_int2;
 
 --
 -- Test Partition pruning for HASH partitioning
diff --git a/src/test/regress/sql/triggers.sql b/src/test/regress/sql/triggers.sql
index 4534dc9ebe..8f833b7d10 100644
--- a/src/test/regress/sql/triggers.sql
+++ b/src/test/regress/sql/triggers.sql
@@ -1460,29 +1460,6 @@ create trigger qqq after insert on parted_trig_1_1 for each row execute procedur
 insert into parted_trig values (50), (1500);
 drop table parted_trig;
 
--- Verify propagation of trigger arguments to partitions
-create table parted_trig (a int) partition by list (a);
-create table parted_trig1 partition of parted_trig for values in (1);
-create or replace function trigger_notice() returns trigger as $$
-  declare
-    arg1 text = TG_ARGV[0];
-    arg2 integer = TG_ARGV[1];
-  begin
-    raise notice 'trigger % on % % % for % args % %',
-		TG_NAME, TG_TABLE_NAME, TG_WHEN, TG_OP, TG_LEVEL, arg1, arg2;
-    return null;
-  end;
-  $$ language plpgsql;
-create trigger aaa after insert on parted_trig
-   for each row execute procedure trigger_notice('quirky', 1);
-
--- Verify propagation of trigger arguments to partitions attached after creating trigger
-create table parted_trig2 partition of parted_trig for values in (2);
-create table parted_trig3 (like parted_trig);
-alter table parted_trig attach partition parted_trig3 for values in (3);
-insert into parted_trig values (1), (2), (3);
-drop table parted_trig;
-
 -- test irregular partitions (i.e., different column definitions),
 -- including that the WHEN clause works
 create function bark(text) returns bool language plpgsql immutable
-- 
2.16.4

