From 41429921736879dde027225d3d2814424f51ec14 Mon Sep 17 00:00:00 2001
From: "dgrowley@gmail.com" <dgrowley@gmail.com>
Date: Fri, 4 Dec 2020 00:39:48 +1300
Subject: [PATCH 5/5] Use a Result Cache node to cache results from subplans

---
 .../postgres_fdw/expected/postgres_fdw.out    |  49 +++++----
 src/backend/optimizer/plan/subselect.c        | 103 ++++++++++++++++++
 src/test/regress/expected/aggregates.out      |   6 +-
 src/test/regress/expected/groupingsets.out    |  20 ++--
 .../regress/expected/incremental_sort.out     |  16 ++-
 src/test/regress/expected/join.out            |  16 +--
 src/test/regress/expected/join_hash.out       |  58 +++++++---
 src/test/regress/expected/resultcache.out     |  37 +++++++
 src/test/regress/expected/rowsecurity.out     |  20 ++--
 src/test/regress/expected/select_parallel.out |  28 +++--
 src/test/regress/expected/subselect.out       |  20 ++--
 src/test/regress/sql/resultcache.sql          |   9 ++
 12 files changed, 297 insertions(+), 85 deletions(-)

diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out
index ee2582cf65..f07b3f0194 100644
--- a/contrib/postgres_fdw/expected/postgres_fdw.out
+++ b/contrib/postgres_fdw/expected/postgres_fdw.out
@@ -2134,22 +2134,25 @@ SELECT t1c1, avg(t1c1 + t2c1) FROM (SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2
 -- join with lateral reference
 EXPLAIN (VERBOSE, COSTS OFF)
 SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10;
-                                                                             QUERY PLAN                                                                             
---------------------------------------------------------------------------------------------------------------------------------------------------------------------
+                                                                                   QUERY PLAN                                                                                   
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  Limit
    Output: t1."C 1"
    ->  Nested Loop
          Output: t1."C 1"
          ->  Index Scan using t1_pkey on "S 1"."T 1" t1
                Output: t1."C 1", t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8
-         ->  HashAggregate
-               Output: t2.c1, t3.c1
-               Group Key: t2.c1, t3.c1
-               ->  Foreign Scan
-                     Output: t2.c1, t3.c1
-                     Relations: (public.ft1 t2) INNER JOIN (public.ft2 t3)
-                     Remote SQL: SELECT r1."C 1", r2."C 1" FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")) AND ((r1.c2 = $1::integer))))
-(13 rows)
+         ->  Result Cache
+               Cache Key: t1.c2
+               ->  Subquery Scan on q
+                     ->  HashAggregate
+                           Output: t2.c1, t3.c1
+                           Group Key: t2.c1, t3.c1
+                           ->  Foreign Scan
+                                 Output: t2.c1, t3.c1
+                                 Relations: (public.ft1 t2) INNER JOIN (public.ft2 t3)
+                                 Remote SQL: SELECT r1."C 1", r2."C 1" FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1")) AND ((r1.c2 = $1::integer))))
+(16 rows)
 
 SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10;
  C 1 
@@ -2930,10 +2933,13 @@ select distinct (select count(*) filter (where t2.c2 = 6 and t2.c1 < 10) from ft
                Relations: Aggregate on (public.ft2 t2)
                Remote SQL: SELECT count(*) FILTER (WHERE ((c2 = 6) AND ("C 1" < 10))) FROM "S 1"."T 1" WHERE (((c2 % 6) = 0))
                SubPlan 1
-                 ->  Foreign Scan on public.ft1 t1
-                       Output: (count(*) FILTER (WHERE ((t2.c2 = 6) AND (t2.c1 < 10))))
-                       Remote SQL: SELECT NULL FROM "S 1"."T 1" WHERE (("C 1" = 6))
-(13 rows)
+                 ->  Result Cache
+                       Output: ((count(*) FILTER (WHERE ((t2.c2 = 6) AND (t2.c1 < 10)))))
+                       Cache Key: (count(*) FILTER (WHERE ((t2.c2 = 6) AND (t2.c1 < 10))))
+                       ->  Foreign Scan on public.ft1 t1
+                             Output: (count(*) FILTER (WHERE ((t2.c2 = 6) AND (t2.c1 < 10))))
+                             Remote SQL: SELECT NULL FROM "S 1"."T 1" WHERE (("C 1" = 6))
+(16 rows)
 
 select distinct (select count(*) filter (where t2.c2 = 6 and t2.c1 < 10) from ft1 t1 where t1.c1 = 6) from ft2 t2 where t2.c2 % 6 = 0 order by 1;
  count 
@@ -2944,8 +2950,8 @@ select distinct (select count(*) filter (where t2.c2 = 6 and t2.c1 < 10) from ft
 -- Inner query is aggregation query
 explain (verbose, costs off)
 select distinct (select count(t1.c1) filter (where t2.c2 = 6 and t2.c1 < 10) from ft1 t1 where t1.c1 = 6) from ft2 t2 where t2.c2 % 6 = 0 order by 1;
-                                                                      QUERY PLAN                                                                      
-------------------------------------------------------------------------------------------------------------------------------------------------------
+                                                                         QUERY PLAN                                                                         
+------------------------------------------------------------------------------------------------------------------------------------------------------------
  Unique
    Output: ((SubPlan 1))
    ->  Sort
@@ -2955,11 +2961,14 @@ select distinct (select count(t1.c1) filter (where t2.c2 = 6 and t2.c1 < 10) fro
                Output: (SubPlan 1)
                Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" WHERE (((c2 % 6) = 0))
                SubPlan 1
-                 ->  Foreign Scan
+                 ->  Result Cache
                        Output: (count(t1.c1) FILTER (WHERE ((t2.c2 = 6) AND (t2.c1 < 10))))
-                       Relations: Aggregate on (public.ft1 t1)
-                       Remote SQL: SELECT count("C 1") FILTER (WHERE (($1::integer = 6) AND ($2::integer < 10))) FROM "S 1"."T 1" WHERE (("C 1" = 6))
-(13 rows)
+                       Cache Key: t2.c2, t2.c1
+                       ->  Foreign Scan
+                             Output: (count(t1.c1) FILTER (WHERE ((t2.c2 = 6) AND (t2.c1 < 10))))
+                             Relations: Aggregate on (public.ft1 t1)
+                             Remote SQL: SELECT count("C 1") FILTER (WHERE (($1::integer = 6) AND ($2::integer < 10))) FROM "S 1"."T 1" WHERE (("C 1" = 6))
+(16 rows)
 
 select distinct (select count(t1.c1) filter (where t2.c2 = 6 and t2.c1 < 10) from ft1 t1 where t1.c1 = 6) from ft2 t2 where t2.c2 % 6 = 0 order by 1;
  count 
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index 92ad54e41e..bd648f66b3 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -37,6 +37,7 @@
 #include "utils/builtins.h"
 #include "utils/lsyscache.h"
 #include "utils/syscache.h"
+#include "utils/typcache.h"
 
 
 typedef struct convert_testexpr_context
@@ -137,6 +138,74 @@ get_first_col_type(Plan *plan, Oid *coltype, int32 *coltypmod,
 	*colcollation = InvalidOid;
 }
 
+
+/*
+ * outer_params_hashable
+ *		Determine if it's valid to use a ResultCache node to cache already
+ *		seen rows matching a given set of parameters instead of performing a
+ *		rescan of the subplan pointed to by 'subroot'.  If it's valid, check
+ *		if all parameters required by this query level can be hashed.  If so,
+ *		return true and set 'operators' to the list of hash equality operators
+ * for the given parameters, then populate 'param_exprs' with each
+ *		PARAM_EXEC parameter that the subplan requires the outer query to pass
+ *		it.  When hashing is not possible, false is returned and the two
+ *		output lists are unchanged.
+ */
+static bool
+outer_params_hashable(PlannerInfo *subroot, List *plan_params, List **operators,
+					  List **param_exprs)
+{
+	List	   *oplist = NIL;
+	List	   *exprlist = NIL;
+	ListCell   *lc;
+
+	/* Ensure we're not given a top-level query. */
+	Assert(subroot->parent_root != NULL);
+
+	/*
+	 * It's not valid to use a Result Cache node if there are any volatile
+	 * functions in the subquery.  Caching could cause fewer evaluations of
+	 * volatile functions that have side-effects.
+	 */
+	if (contain_volatile_functions((Node *) subroot->parse))
+		return false;
+
+	foreach(lc, plan_params)
+	{
+		PlannerParamItem *ppi = (PlannerParamItem *) lfirst(lc);
+		TypeCacheEntry *typentry;
+		Node	   *expr = ppi->item;
+		Param	   *param;
+
+		param = makeNode(Param);
+		param->paramkind = PARAM_EXEC;
+		param->paramid = ppi->paramId;
+		param->paramtype = exprType(expr);
+		param->paramtypmod = exprTypmod(expr);
+		param->paramcollid = exprCollation(expr);
+		param->location = -1;
+
+		typentry = lookup_type_cache(param->paramtype,
+									 TYPECACHE_HASH_PROC | TYPECACHE_EQ_OPR);
+
+		/* XXX will eq_opr ever be invalid if hash_proc isn't? */
+		if (!OidIsValid(typentry->hash_proc) || !OidIsValid(typentry->eq_opr))
+		{
+			list_free(oplist);
+			list_free(exprlist);
+			return false;
+		}
+
+		oplist = lappend_oid(oplist, typentry->eq_opr);
+		exprlist = lappend(exprlist, param);
+	}
+
+	*operators = oplist;
+	*param_exprs = exprlist;
+
+	return true;				/* all params can be hashed */
+}
+
 /*
  * Convert a SubLink (as created by the parser) into a SubPlan.
  *
@@ -234,6 +303,40 @@ make_subplan(PlannerInfo *root, Query *orig_subquery,
 	final_rel = fetch_upper_rel(subroot, UPPERREL_FINAL, NULL);
 	best_path = get_cheapest_fractional_path(final_rel, tuple_fraction);
 
+	/*
+	 * When enabled, for parameterized EXPR_SUBLINKS, we add a ResultCache to
+	 * the top of the subplan in order to cache previously looked up results
+	 * in the hope that they'll be needed again by a subsequent call.  At this
+	 * stage we don't have any details of how often we'll be called or with
+	 * which values we'll be called, so for now, we add the Result Cache
+	 * regardless. It may be better to only do this when it seems
+	 * likely that we'll get some repeat lookups, i.e. cache hits.
+	 */
+	if (enable_resultcache && plan_params != NIL && subLinkType == EXPR_SUBLINK)
+	{
+		List	   *operators;
+		List	   *param_exprs;
+
+		/* Determine if all the subplan parameters can be hashed */
+		if (outer_params_hashable(subroot, plan_params, &operators, &param_exprs))
+		{
+			ResultCachePath *rcpath;
+
+			/*
+			 * Pass -1 for the number of calls since we don't have any idea
+			 * what that'll be.
+			 */
+			rcpath = create_resultcache_path(root,
+											 best_path->parent,
+											 best_path,
+											 param_exprs,
+											 operators,
+											 false,
+											 -1);
+			best_path = (Path *) rcpath;
+		}
+	}
+
 	plan = create_plan(subroot, best_path);
 
 	/* And convert to SubPlan or InitPlan format. */
diff --git a/src/test/regress/expected/aggregates.out b/src/test/regress/expected/aggregates.out
index 1eb0f7346b..cc4cac7bf8 100644
--- a/src/test/regress/expected/aggregates.out
+++ b/src/test/regress/expected/aggregates.out
@@ -1004,12 +1004,14 @@ explain (costs off)
 -----------------------------------------------------------------------------------------
  Seq Scan on int4_tbl
    SubPlan 2
-     ->  Result
+     ->  Result Cache
+           Cache Key: int4_tbl.f1
            InitPlan 1 (returns $1)
              ->  Limit
                    ->  Index Only Scan using tenk1_unique1 on tenk1
                          Index Cond: ((unique1 IS NOT NULL) AND (unique1 > int4_tbl.f1))
-(7 rows)
+           ->  Result
+(9 rows)
 
 select f1, (select min(unique1) from tenk1 where unique1 > f1) AS gt
   from int4_tbl;
diff --git a/src/test/regress/expected/groupingsets.out b/src/test/regress/expected/groupingsets.out
index 7c844c6e09..33befe0e7b 100644
--- a/src/test/regress/expected/groupingsets.out
+++ b/src/test/regress/expected/groupingsets.out
@@ -774,19 +774,21 @@ select v.c, (select count(*) from gstest2 group by () having v.c)
 explain (costs off)
   select v.c, (select count(*) from gstest2 group by () having v.c)
     from (values (false),(true)) v(c) order by v.c;
-                        QUERY PLAN                         
------------------------------------------------------------
+                           QUERY PLAN                            
+-----------------------------------------------------------------
  Sort
    Sort Key: "*VALUES*".column1
    ->  Values Scan on "*VALUES*"
          SubPlan 1
-           ->  Aggregate
-                 Group Key: ()
-                 Filter: "*VALUES*".column1
-                 ->  Result
-                       One-Time Filter: "*VALUES*".column1
-                       ->  Seq Scan on gstest2
-(10 rows)
+           ->  Result Cache
+                 Cache Key: "*VALUES*".column1
+                 ->  Aggregate
+                       Group Key: ()
+                       Filter: "*VALUES*".column1
+                       ->  Result
+                             One-Time Filter: "*VALUES*".column1
+                             ->  Seq Scan on gstest2
+(12 rows)
 
 -- HAVING with GROUPING queries
 select ten, grouping(ten) from onek
diff --git a/src/test/regress/expected/incremental_sort.out b/src/test/regress/expected/incremental_sort.out
index a8cbfd9f5f..b3cf302af7 100644
--- a/src/test/regress/expected/incremental_sort.out
+++ b/src/test/regress/expected/incremental_sort.out
@@ -1568,9 +1568,11 @@ from tenk1 t, generate_series(1, 1000);
                      ->  Parallel Index Only Scan using tenk1_unique1 on tenk1 t
                      ->  Function Scan on generate_series
                SubPlan 1
-                 ->  Index Only Scan using tenk1_unique1 on tenk1
-                       Index Cond: (unique1 = t.unique1)
-(11 rows)
+                 ->  Result Cache
+                       Cache Key: t.unique1
+                       ->  Index Only Scan using tenk1_unique1 on tenk1
+                             Index Cond: (unique1 = t.unique1)
+(13 rows)
 
 explain (costs off) select
   unique1,
@@ -1587,9 +1589,11 @@ order by 1, 2;
                ->  Parallel Index Only Scan using tenk1_unique1 on tenk1 t
                ->  Function Scan on generate_series
          SubPlan 1
-           ->  Index Only Scan using tenk1_unique1 on tenk1
-                 Index Cond: (unique1 = t.unique1)
-(10 rows)
+           ->  Result Cache
+                 Cache Key: t.unique1
+                 ->  Index Only Scan using tenk1_unique1 on tenk1
+                       Index Cond: (unique1 = t.unique1)
+(12 rows)
 
 -- Parallel sort but with expression not available until the upper rel.
 explain (costs off) select distinct sub.unique1, stringu1 || random()::text
diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out
index 5e6b02cdd7..0cde696292 100644
--- a/src/test/regress/expected/join.out
+++ b/src/test/regress/expected/join.out
@@ -2976,8 +2976,8 @@ select * from
 where
   1 = (select 1 from int8_tbl t3 where ss.y is not null limit 1)
 order by 1,2;
-                        QUERY PLAN                         
------------------------------------------------------------
+                           QUERY PLAN                            
+-----------------------------------------------------------------
  Sort
    Sort Key: t1.q1, t1.q2
    ->  Hash Left Join
@@ -2987,11 +2987,13 @@ order by 1,2;
          ->  Hash
                ->  Seq Scan on int8_tbl t2
          SubPlan 1
-           ->  Limit
-                 ->  Result
-                       One-Time Filter: ((42) IS NOT NULL)
-                       ->  Seq Scan on int8_tbl t3
-(13 rows)
+           ->  Result Cache
+                 Cache Key: (42)
+                 ->  Limit
+                       ->  Result
+                             One-Time Filter: ((42) IS NOT NULL)
+                             ->  Seq Scan on int8_tbl t3
+(15 rows)
 
 select * from
   int8_tbl t1 left join
diff --git a/src/test/regress/expected/join_hash.out b/src/test/regress/expected/join_hash.out
index 3a91c144a2..9f04684fcd 100644
--- a/src/test/regress/expected/join_hash.out
+++ b/src/test/regress/expected/join_hash.out
@@ -923,27 +923,42 @@ WHERE
          Output: hjtest_1.a, hjtest_1.tableoid, hjtest_1.id, hjtest_1.b
          Filter: ((SubPlan 4) < 50)
          SubPlan 4
-           ->  Result
+           ->  Result Cache
                  Output: (hjtest_1.b * 5)
+                 Cache Key: hjtest_1.b
+                 ->  Result
+                       Output: (hjtest_1.b * 5)
    ->  Hash
          Output: hjtest_2.a, hjtest_2.tableoid, hjtest_2.id, hjtest_2.c, hjtest_2.b
          ->  Seq Scan on public.hjtest_2
                Output: hjtest_2.a, hjtest_2.tableoid, hjtest_2.id, hjtest_2.c, hjtest_2.b
                Filter: ((SubPlan 5) < 55)
                SubPlan 5
-                 ->  Result
+                 ->  Result Cache
                        Output: (hjtest_2.c * 5)
+                       Cache Key: hjtest_2.c
+                       ->  Result
+                             Output: (hjtest_2.c * 5)
          SubPlan 1
-           ->  Result
+           ->  Result Cache
                  Output: 1
-                 One-Time Filter: (hjtest_2.id = 1)
+                 Cache Key: hjtest_2.id
+                 ->  Result
+                       Output: 1
+                       One-Time Filter: (hjtest_2.id = 1)
          SubPlan 3
-           ->  Result
+           ->  Result Cache
                  Output: (hjtest_2.c * 5)
+                 Cache Key: hjtest_2.c
+                 ->  Result
+                       Output: (hjtest_2.c * 5)
    SubPlan 2
-     ->  Result
+     ->  Result Cache
            Output: (hjtest_1.b * 5)
-(28 rows)
+           Cache Key: hjtest_1.b
+           ->  Result
+                 Output: (hjtest_1.b * 5)
+(43 rows)
 
 SELECT hjtest_1.a a1, hjtest_2.a a2,hjtest_1.tableoid::regclass t1, hjtest_2.tableoid::regclass t2
 FROM hjtest_1, hjtest_2
@@ -977,27 +992,42 @@ WHERE
          Output: hjtest_2.a, hjtest_2.tableoid, hjtest_2.id, hjtest_2.c, hjtest_2.b
          Filter: ((SubPlan 5) < 55)
          SubPlan 5
-           ->  Result
+           ->  Result Cache
                  Output: (hjtest_2.c * 5)
+                 Cache Key: hjtest_2.c
+                 ->  Result
+                       Output: (hjtest_2.c * 5)
    ->  Hash
          Output: hjtest_1.a, hjtest_1.tableoid, hjtest_1.id, hjtest_1.b
          ->  Seq Scan on public.hjtest_1
                Output: hjtest_1.a, hjtest_1.tableoid, hjtest_1.id, hjtest_1.b
                Filter: ((SubPlan 4) < 50)
                SubPlan 4
-                 ->  Result
+                 ->  Result Cache
                        Output: (hjtest_1.b * 5)
+                       Cache Key: hjtest_1.b
+                       ->  Result
+                             Output: (hjtest_1.b * 5)
          SubPlan 2
-           ->  Result
+           ->  Result Cache
                  Output: (hjtest_1.b * 5)
+                 Cache Key: hjtest_1.b
+                 ->  Result
+                       Output: (hjtest_1.b * 5)
    SubPlan 1
-     ->  Result
+     ->  Result Cache
            Output: 1
-           One-Time Filter: (hjtest_2.id = 1)
+           Cache Key: hjtest_2.id
+           ->  Result
+                 Output: 1
+                 One-Time Filter: (hjtest_2.id = 1)
    SubPlan 3
-     ->  Result
+     ->  Result Cache
            Output: (hjtest_2.c * 5)
-(28 rows)
+           Cache Key: hjtest_2.c
+           ->  Result
+                 Output: (hjtest_2.c * 5)
+(43 rows)
 
 SELECT hjtest_1.a a1, hjtest_2.a a2,hjtest_1.tableoid::regclass t1, hjtest_2.tableoid::regclass t2
 FROM hjtest_2, hjtest_1
diff --git a/src/test/regress/expected/resultcache.out b/src/test/regress/expected/resultcache.out
index 205cbb82ab..7870102f0a 100644
--- a/src/test/regress/expected/resultcache.out
+++ b/src/test/regress/expected/resultcache.out
@@ -151,3 +151,40 @@ WHERE t1.unique1 < 1000;', false);
 RESET min_parallel_table_scan_size;
 RESET parallel_setup_cost;
 RESET parallel_tuple_cost;
+-- Ensure we get the expected plan with sub plans.
+SELECT explain_resultcache('
+SELECT unique1, (SELECT count(*) FROM tenk1 t2 WHERE t2.twenty = t1.twenty)
+FROM tenk1 t1 WHERE t1.unique1 < 1000;', false);
+                                explain_resultcache                                
+-----------------------------------------------------------------------------------
+ Bitmap Heap Scan on tenk1 t1 (actual rows=1000 loops=1)
+   Recheck Cond: (unique1 < 1000)
+   Heap Blocks: exact=333
+   ->  Bitmap Index Scan on tenk1_unique1 (actual rows=1000 loops=1)
+         Index Cond: (unique1 < 1000)
+   SubPlan 1
+     ->  Result Cache (actual rows=1 loops=1000)
+           Cache Key: t1.twenty
+           Hits: 980  Misses: 20  Evictions: Zero  Overflows: 0  Memory Usage: NkB
+           ->  Aggregate (actual rows=1 loops=20)
+                 ->  Seq Scan on tenk1 t2 (actual rows=500 loops=20)
+                       Filter: (twenty = t1.twenty)
+                       Rows Removed by Filter: 9500
+(13 rows)
+
+SELECT explain_resultcache('
+SELECT unique1, (SELECT count(*) FROM tenk1 t2 WHERE t2.thousand = t1.thousand)
+FROM tenk1 t1;', false);
+                                          explain_resultcache                                           
+--------------------------------------------------------------------------------------------------------
+ Seq Scan on tenk1 t1 (actual rows=10000 loops=1)
+   SubPlan 1
+     ->  Result Cache (actual rows=1 loops=10000)
+           Cache Key: t1.thousand
+           Hits: 9000  Misses: 1000  Evictions: Zero  Overflows: 0  Memory Usage: NkB
+           ->  Aggregate (actual rows=1 loops=1000)
+                 ->  Index Only Scan using tenk1_thous_tenthous on tenk1 t2 (actual rows=10 loops=1000)
+                       Index Cond: (thousand = t1.thousand)
+                       Heap Fetches: 0
+(9 rows)
+
diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out
index 9506aaef82..b9a58be7ad 100644
--- a/src/test/regress/expected/rowsecurity.out
+++ b/src/test/regress/expected/rowsecurity.out
@@ -1477,18 +1477,20 @@ SELECT (SELECT x FROM s1 LIMIT 1) xx, * FROM s2 WHERE y like '%28%';
 (3 rows)
 
 EXPLAIN (COSTS OFF) SELECT (SELECT x FROM s1 LIMIT 1) xx, * FROM s2 WHERE y like '%28%';
-                               QUERY PLAN                                
--------------------------------------------------------------------------
+                                  QUERY PLAN                                   
+-------------------------------------------------------------------------------
  Seq Scan on s2
    Filter: (((x % 2) = 0) AND (y ~~ '%28%'::text))
    SubPlan 2
-     ->  Limit
-           ->  Seq Scan on s1
-                 Filter: (hashed SubPlan 1)
-                 SubPlan 1
-                   ->  Seq Scan on s2 s2_1
-                         Filter: (((x % 2) = 0) AND (y ~~ '%af%'::text))
-(9 rows)
+     ->  Result Cache
+           Cache Key: s2.x
+           ->  Limit
+                 ->  Seq Scan on s1
+                       Filter: (hashed SubPlan 1)
+                       SubPlan 1
+                         ->  Seq Scan on s2 s2_1
+                               Filter: (((x % 2) = 0) AND (y ~~ '%af%'::text))
+(11 rows)
 
 SET SESSION AUTHORIZATION regress_rls_alice;
 ALTER POLICY p2 ON s2 USING (x in (select a from s1 where b like '%d2%'));
diff --git a/src/test/regress/expected/select_parallel.out b/src/test/regress/expected/select_parallel.out
index 9b0c418db7..a3caf95c8d 100644
--- a/src/test/regress/expected/select_parallel.out
+++ b/src/test/regress/expected/select_parallel.out
@@ -148,14 +148,18 @@ explain (costs off)
                ->  Parallel Seq Scan on part_pa_test_p1 pa2_1
                ->  Parallel Seq Scan on part_pa_test_p2 pa2_2
    SubPlan 2
-     ->  Result
+     ->  Result Cache
+           Cache Key: max((SubPlan 1))
+           ->  Result
    SubPlan 1
-     ->  Append
-           ->  Seq Scan on part_pa_test_p1 pa1_1
-                 Filter: (a = pa2.a)
-           ->  Seq Scan on part_pa_test_p2 pa1_2
-                 Filter: (a = pa2.a)
-(14 rows)
+     ->  Result Cache
+           Cache Key: pa2.a
+           ->  Append
+                 ->  Seq Scan on part_pa_test_p1 pa1_1
+                       Filter: (a = pa2.a)
+                 ->  Seq Scan on part_pa_test_p2 pa1_2
+                       Filter: (a = pa2.a)
+(18 rows)
 
 drop table part_pa_test;
 -- test with leader participation disabled
@@ -1168,9 +1172,11 @@ SELECT 1 FROM tenk1_vw_sec
          Workers Planned: 4
          ->  Parallel Index Only Scan using tenk1_unique1 on tenk1
    SubPlan 1
-     ->  Aggregate
-           ->  Seq Scan on int4_tbl
-                 Filter: (f1 < tenk1_vw_sec.unique1)
-(9 rows)
+     ->  Result Cache
+           Cache Key: tenk1_vw_sec.unique1
+           ->  Aggregate
+                 ->  Seq Scan on int4_tbl
+                       Filter: (f1 < tenk1_vw_sec.unique1)
+(11 rows)
 
 rollback;
diff --git a/src/test/regress/expected/subselect.out b/src/test/regress/expected/subselect.out
index c7986fb7fc..249f76cacc 100644
--- a/src/test/regress/expected/subselect.out
+++ b/src/test/regress/expected/subselect.out
@@ -968,19 +968,25 @@ explain (verbose, costs off)
 explain (verbose, costs off)
   select x, x from
     (select (select now() where y=y) as x from (values(1),(2)) v(y)) ss;
-                              QUERY PLAN                              
-----------------------------------------------------------------------
+                                 QUERY PLAN                                 
+----------------------------------------------------------------------------
  Values Scan on "*VALUES*"
    Output: (SubPlan 1), (SubPlan 2)
    SubPlan 1
-     ->  Result
+     ->  Result Cache
            Output: now()
-           One-Time Filter: ("*VALUES*".column1 = "*VALUES*".column1)
+           Cache Key: "*VALUES*".column1
+           ->  Result
+                 Output: now()
+                 One-Time Filter: ("*VALUES*".column1 = "*VALUES*".column1)
    SubPlan 2
-     ->  Result
+     ->  Result Cache
            Output: now()
-           One-Time Filter: ("*VALUES*".column1 = "*VALUES*".column1)
-(10 rows)
+           Cache Key: "*VALUES*".column1
+           ->  Result
+                 Output: now()
+                 One-Time Filter: ("*VALUES*".column1 = "*VALUES*".column1)
+(16 rows)
 
 explain (verbose, costs off)
   select x, x from
diff --git a/src/test/regress/sql/resultcache.sql b/src/test/regress/sql/resultcache.sql
index 2a84cf3845..bbd1bcd013 100644
--- a/src/test/regress/sql/resultcache.sql
+++ b/src/test/regress/sql/resultcache.sql
@@ -76,3 +76,12 @@ WHERE t1.unique1 < 1000;', false);
 RESET min_parallel_table_scan_size;
 RESET parallel_setup_cost;
 RESET parallel_tuple_cost;
+
+-- Ensure we get the expected plan with sub plans.
+SELECT explain_resultcache('
+SELECT unique1, (SELECT count(*) FROM tenk1 t2 WHERE t2.twenty = t1.twenty)
+FROM tenk1 t1 WHERE t1.unique1 < 1000;', false);
+
+SELECT explain_resultcache('
+SELECT unique1, (SELECT count(*) FROM tenk1 t2 WHERE t2.thousand = t1.thousand)
+FROM tenk1 t1;', false);
-- 
2.17.0

