diff --git a/src/test/regress/expected/aggregates.out b/src/test/regress/expected/aggregates.out
index be5fa5727d..0a23a39aa2 100644
--- a/src/test/regress/expected/aggregates.out
+++ b/src/test/regress/expected/aggregates.out
@@ -1,8 +1,18 @@
 --
 -- AGGREGATES
 --
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
 -- avoid bit-exact output here because operations may not be bit-exact.
 SET extra_float_digits = 0;
+-- prepare some test data
+CREATE TABLE aggtest (
+	a 			int2,
+	b			float4
+);
+\set filename :abs_srcdir '/data/agg.data'
+COPY aggtest FROM :'filename';
+ANALYZE aggtest;
 SELECT avg(four) AS avg_1 FROM onek;
        avg_1        
 --------------------
diff --git a/src/test/regress/expected/arrays.out b/src/test/regress/expected/arrays.out
index 3e3a1beaab..da3d2022c0 100644
--- a/src/test/regress/expected/arrays.out
+++ b/src/test/regress/expected/arrays.out
@@ -1,6 +1,8 @@
 --
 -- ARRAYS
 --
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
 CREATE TABLE arrtest (
 	a 			int2[],
 	b 			int4[][][],
@@ -10,6 +12,14 @@ CREATE TABLE arrtest (
 	f			char(5)[],
 	g			varchar(5)[]
 );
+CREATE TABLE array_op_test (
+	seqno		int4,
+	i			int4[],
+	t			text[]
+);
+\set filename :abs_srcdir '/data/array.data'
+COPY array_op_test FROM :'filename';
+ANALYZE array_op_test;
 --
 -- only the 'e' array is 0-based, the others are 1-based.
 --
@@ -307,6 +317,9 @@ INSERT INTO arrtest_s VALUES(NULL, NULL);
 UPDATE arrtest_s SET a[:] = '{11, 12, 13, 14, 15}';  -- fail, no good with null
 ERROR:  array slice subscript must provide both boundaries
 DETAIL:  When assigning to a slice of an empty array value, slice boundaries must be fully specified.
+-- we want to work with a point_tbl that includes a null
+CREATE TEMP TABLE point_tbl AS SELECT * FROM public.point_tbl;
+INSERT INTO POINT_TBL(f1) VALUES (NULL);
 -- check with fixed-length-array type, such as point
 SELECT f1[0:1] FROM POINT_TBL;
 ERROR:  slices of fixed-length arrays not implemented
diff --git a/src/test/regress/expected/btree_index.out b/src/test/regress/expected/btree_index.out
index c43e023716..93ed5e8cc0 100644
--- a/src/test/regress/expected/btree_index.out
+++ b/src/test/regress/expected/btree_index.out
@@ -1,5 +1,49 @@
 --
 -- BTREE_INDEX
+--
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
+CREATE TABLE bt_i4_heap (
+	seqno 		int4,
+	random 		int4
+);
+CREATE TABLE bt_name_heap (
+	seqno 		name,
+	random 		int4
+);
+CREATE TABLE bt_txt_heap (
+	seqno 		text,
+	random 		int4
+);
+CREATE TABLE bt_f8_heap (
+	seqno 		float8,
+	random 		int4
+);
+\set filename :abs_srcdir '/data/desc.data'
+COPY bt_i4_heap FROM :'filename';
+\set filename :abs_srcdir '/data/hash.data'
+COPY bt_name_heap FROM :'filename';
+\set filename :abs_srcdir '/data/desc.data'
+COPY bt_txt_heap FROM :'filename';
+\set filename :abs_srcdir '/data/hash.data'
+COPY bt_f8_heap FROM :'filename';
+ANALYZE bt_i4_heap;
+ANALYZE bt_name_heap;
+ANALYZE bt_txt_heap;
+ANALYZE bt_f8_heap;
+--
+-- BTREE ascending/descending cases
+--
+-- we load int4/text from pure descending data (each key is a new
+-- low key) and name/f8 from pure ascending data (each key is a new
+-- high key).  we had a bug where new low keys would sometimes be
+-- "lost".
+--
+CREATE INDEX bt_i4_index ON bt_i4_heap USING btree (seqno int4_ops);
+CREATE INDEX bt_name_index ON bt_name_heap USING btree (seqno name_ops);
+CREATE INDEX bt_txt_index ON bt_txt_heap USING btree (seqno text_ops);
+CREATE INDEX bt_f8_index ON bt_f8_heap USING btree (seqno float8_ops);
+--
 -- test retrieval of min/max keys for each index
 --
 SELECT b.*
diff --git a/src/test/regress/expected/char.out b/src/test/regress/expected/char.out
index d515b3ce34..2d78f90f3b 100644
--- a/src/test/regress/expected/char.out
+++ b/src/test/regress/expected/char.out
@@ -11,8 +11,9 @@ SELECT char 'c' = char 'c' AS true;
 
 --
 -- Build a table for testing
+-- (This temporarily hides the table created in test_setup.sql)
 --
-CREATE TABLE CHAR_TBL(f1 char);
+CREATE TEMP TABLE CHAR_TBL(f1 char);
 INSERT INTO CHAR_TBL (f1) VALUES ('a');
 INSERT INTO CHAR_TBL (f1) VALUES ('A');
 -- any of the following three input formats are acceptable
@@ -104,13 +105,11 @@ DROP TABLE CHAR_TBL;
 --
 -- Now test longer arrays of char
 --
-CREATE TABLE CHAR_TBL(f1 char(4));
-INSERT INTO CHAR_TBL (f1) VALUES ('a');
-INSERT INTO CHAR_TBL (f1) VALUES ('ab');
-INSERT INTO CHAR_TBL (f1) VALUES ('abcd');
+-- This char_tbl was already created and filled in test_setup.sql.
+-- Here we just try to insert bad values.
+--
 INSERT INTO CHAR_TBL (f1) VALUES ('abcde');
 ERROR:  value too long for type character(4)
-INSERT INTO CHAR_TBL (f1) VALUES ('abcd    ');
 SELECT * FROM CHAR_TBL;
   f1  
 ------
diff --git a/src/test/regress/expected/char_1.out b/src/test/regress/expected/char_1.out
index 6c917c0b68..fa6644d692 100644
--- a/src/test/regress/expected/char_1.out
+++ b/src/test/regress/expected/char_1.out
@@ -11,8 +11,9 @@ SELECT char 'c' = char 'c' AS true;
 
 --
 -- Build a table for testing
+-- (This temporarily hides the table created in test_setup.sql)
 --
-CREATE TABLE CHAR_TBL(f1 char);
+CREATE TEMP TABLE CHAR_TBL(f1 char);
 INSERT INTO CHAR_TBL (f1) VALUES ('a');
 INSERT INTO CHAR_TBL (f1) VALUES ('A');
 -- any of the following three input formats are acceptable
@@ -104,13 +105,11 @@ DROP TABLE CHAR_TBL;
 --
 -- Now test longer arrays of char
 --
-CREATE TABLE CHAR_TBL(f1 char(4));
-INSERT INTO CHAR_TBL (f1) VALUES ('a');
-INSERT INTO CHAR_TBL (f1) VALUES ('ab');
-INSERT INTO CHAR_TBL (f1) VALUES ('abcd');
+-- This char_tbl was already created and filled in test_setup.sql.
+-- Here we just try to insert bad values.
+--
 INSERT INTO CHAR_TBL (f1) VALUES ('abcde');
 ERROR:  value too long for type character(4)
-INSERT INTO CHAR_TBL (f1) VALUES ('abcd    ');
 SELECT * FROM CHAR_TBL;
   f1  
 ------
diff --git a/src/test/regress/expected/char_2.out b/src/test/regress/expected/char_2.out
index 9a54658632..09434a44cd 100644
--- a/src/test/regress/expected/char_2.out
+++ b/src/test/regress/expected/char_2.out
@@ -11,8 +11,9 @@ SELECT char 'c' = char 'c' AS true;
 
 --
 -- Build a table for testing
+-- (This temporarily hides the table created in test_setup.sql)
 --
-CREATE TABLE CHAR_TBL(f1 char);
+CREATE TEMP TABLE CHAR_TBL(f1 char);
 INSERT INTO CHAR_TBL (f1) VALUES ('a');
 INSERT INTO CHAR_TBL (f1) VALUES ('A');
 -- any of the following three input formats are acceptable
@@ -104,13 +105,11 @@ DROP TABLE CHAR_TBL;
 --
 -- Now test longer arrays of char
 --
-CREATE TABLE CHAR_TBL(f1 char(4));
-INSERT INTO CHAR_TBL (f1) VALUES ('a');
-INSERT INTO CHAR_TBL (f1) VALUES ('ab');
-INSERT INTO CHAR_TBL (f1) VALUES ('abcd');
+-- This char_tbl was already created and filled in test_setup.sql.
+-- Here we just try to insert bad values.
+--
 INSERT INTO CHAR_TBL (f1) VALUES ('abcde');
 ERROR:  value too long for type character(4)
-INSERT INTO CHAR_TBL (f1) VALUES ('abcd    ');
 SELECT * FROM CHAR_TBL;
   f1  
 ------
diff --git a/src/test/regress/expected/copy.out b/src/test/regress/expected/copy.out
index 931e7b2e69..5d1088ee0b 100644
--- a/src/test/regress/expected/copy.out
+++ b/src/test/regress/expected/copy.out
@@ -4,84 +4,6 @@
 -- directory paths are passed to us in environment variables
 \getenv abs_srcdir PG_ABS_SRCDIR
 \getenv abs_builddir PG_ABS_BUILDDIR
--- CLASS POPULATION
---	(any resemblance to real life is purely coincidental)
---
-\set filename :abs_srcdir '/data/agg.data'
-COPY aggtest FROM :'filename';
-\set filename :abs_srcdir '/data/onek.data'
-COPY onek FROM :'filename';
-\set filename :abs_builddir '/results/onek.data'
-COPY onek TO :'filename';
-DELETE FROM onek;
-COPY onek FROM :'filename';
-\set filename :abs_srcdir '/data/tenk.data'
-COPY tenk1 FROM :'filename';
-\set filename :abs_srcdir '/data/rect.data'
-COPY slow_emp4000 FROM :'filename';
-\set filename :abs_srcdir '/data/person.data'
-COPY person FROM :'filename';
-\set filename :abs_srcdir '/data/emp.data'
-COPY emp FROM :'filename';
-\set filename :abs_srcdir '/data/student.data'
-COPY student FROM :'filename';
-\set filename :abs_srcdir '/data/stud_emp.data'
-COPY stud_emp FROM :'filename';
-\set filename :abs_srcdir '/data/streets.data'
-COPY road FROM :'filename';
-\set filename :abs_srcdir '/data/real_city.data'
-COPY real_city FROM :'filename';
-\set filename :abs_srcdir '/data/hash.data'
-COPY hash_i4_heap FROM :'filename';
-COPY hash_name_heap FROM :'filename';
-COPY hash_txt_heap FROM :'filename';
-COPY hash_f8_heap FROM :'filename';
-\set filename :abs_srcdir '/data/tsearch.data'
-COPY test_tsvector FROM :'filename';
-\set filename :abs_srcdir '/data/jsonb.data'
-COPY testjsonb FROM :'filename';
--- the data in this file has a lot of duplicates in the index key
--- fields, leading to long bucket chains and lots of table expansion.
--- this is therefore a stress test of the bucket overflow code (unlike
--- the data in hash.data, which has unique index keys).
---
--- \set filename :abs_srcdir '/data/hashovfl.data'
--- COPY hash_ovfl_heap FROM :'filename';
-\set filename :abs_srcdir '/data/desc.data'
-COPY bt_i4_heap FROM :'filename';
-\set filename :abs_srcdir '/data/hash.data'
-COPY bt_name_heap FROM :'filename';
-\set filename :abs_srcdir '/data/desc.data'
-COPY bt_txt_heap FROM :'filename';
-\set filename :abs_srcdir '/data/hash.data'
-COPY bt_f8_heap FROM :'filename';
-\set filename :abs_srcdir '/data/array.data'
-COPY array_op_test FROM :'filename';
-\set filename :abs_srcdir '/data/array.data'
-COPY array_index_op_test FROM :'filename';
--- analyze all the data we just loaded, to ensure plan consistency
--- in later tests
-ANALYZE aggtest;
-ANALYZE onek;
-ANALYZE tenk1;
-ANALYZE slow_emp4000;
-ANALYZE person;
-ANALYZE emp;
-ANALYZE student;
-ANALYZE stud_emp;
-ANALYZE road;
-ANALYZE real_city;
-ANALYZE hash_i4_heap;
-ANALYZE hash_name_heap;
-ANALYZE hash_txt_heap;
-ANALYZE hash_f8_heap;
-ANALYZE test_tsvector;
-ANALYZE bt_i4_heap;
-ANALYZE bt_name_heap;
-ANALYZE bt_txt_heap;
-ANALYZE bt_f8_heap;
-ANALYZE array_op_test;
-ANALYZE array_index_op_test;
 --- test copying in CSV mode with various styles
 --- of embedded line ending characters
 create temp table copytest (
diff --git a/src/test/regress/expected/create_index.out b/src/test/regress/expected/create_index.out
index daf75dd5c4..098df529f2 100644
--- a/src/test/regress/expected/create_index.out
+++ b/src/test/regress/expected/create_index.out
@@ -2,6 +2,8 @@
 -- CREATE_INDEX
 -- Create ancillary data structures (i.e. indices)
 --
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
 --
 -- BTREE
 --
@@ -31,18 +33,6 @@ ERROR:  relation "six_wrong" does not exist
 COMMENT ON INDEX six IS 'good index';
 COMMENT ON INDEX six IS NULL;
 --
--- BTREE ascending/descending cases
---
--- we load int4/text from pure descending data (each key is a new
--- low key) and name/f8 from pure ascending data (each key is a new
--- high key).  we had a bug where new low keys would sometimes be
--- "lost".
---
-CREATE INDEX bt_i4_index ON bt_i4_heap USING btree (seqno int4_ops);
-CREATE INDEX bt_name_index ON bt_name_heap USING btree (seqno name_ops);
-CREATE INDEX bt_txt_index ON bt_txt_heap USING btree (seqno text_ops);
-CREATE INDEX bt_f8_index ON bt_f8_heap USING btree (seqno float8_ops);
---
 -- BTREE partial indices
 --
 CREATE INDEX onek2_u1_prtl ON onek2 USING btree(unique1 int4_ops)
@@ -54,9 +44,20 @@ CREATE INDEX onek2_stu1_prtl ON onek2 USING btree(stringu1 name_ops)
 --
 -- GiST (rtree-equivalent opclasses only)
 --
+CREATE TABLE slow_emp4000 (
+	home_base	 box
+);
+CREATE TABLE fast_emp4000 (
+	home_base	 box
+);
+\set filename :abs_srcdir '/data/rect.data'
+COPY slow_emp4000 FROM :'filename';
+INSERT INTO fast_emp4000 SELECT * FROM slow_emp4000;
+ANALYZE slow_emp4000;
+ANALYZE fast_emp4000;
 CREATE INDEX grect2ind ON fast_emp4000 USING gist (home_base);
-CREATE INDEX gpolygonind ON polygon_tbl USING gist (f1);
-CREATE INDEX gcircleind ON circle_tbl USING gist (f1);
+-- we want to work with a point_tbl that includes a null
+CREATE TEMP TABLE point_tbl AS SELECT * FROM public.point_tbl;
 INSERT INTO POINT_TBL(f1) VALUES (NULL);
 CREATE INDEX gpointind ON point_tbl USING gist (f1);
 CREATE TEMP TABLE gpolygon_tbl AS
@@ -95,23 +96,6 @@ SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL;
    278
 (1 row)
 
-SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon
-    ORDER BY (poly_center(f1))[0];
-         f1          
----------------------
- ((2,0),(2,4),(0,0))
-(1 row)
-
-SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
-    ORDER BY area(f1);
-      f1       
----------------
- <(1,2),3>
- <(1,3),5>
- <(1,2),100>
- <(100,1),115>
-(4 rows)
-
 SELECT count(*) FROM gpolygon_tbl WHERE f1 && '(1000,1000,0,0)'::polygon;
  count 
 -------
@@ -310,45 +294,6 @@ SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL;
    278
 (1 row)
 
-EXPLAIN (COSTS OFF)
-SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon
-    ORDER BY (poly_center(f1))[0];
-                         QUERY PLAN                         
-------------------------------------------------------------
- Sort
-   Sort Key: ((poly_center(f1))[0])
-   ->  Index Scan using gpolygonind on polygon_tbl
-         Index Cond: (f1 @> '((1,1),(2,2),(2,1))'::polygon)
-(4 rows)
-
-SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon
-    ORDER BY (poly_center(f1))[0];
-         f1          
----------------------
- ((2,0),(2,4),(0,0))
-(1 row)
-
-EXPLAIN (COSTS OFF)
-SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
-    ORDER BY area(f1);
-                    QUERY PLAN                    
---------------------------------------------------
- Sort
-   Sort Key: (area(f1))
-   ->  Index Scan using gcircleind on circle_tbl
-         Index Cond: (f1 && '<(1,-2),1>'::circle)
-(4 rows)
-
-SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
-    ORDER BY area(f1);
-      f1       
----------------
- <(1,2),3>
- <(1,3),5>
- <(1,2),100>
- <(100,1),115>
-(4 rows)
-
 EXPLAIN (COSTS OFF)
 SELECT count(*) FROM gpolygon_tbl WHERE f1 && '(1000,1000,0,0)'::polygon;
                          QUERY PLAN                         
@@ -677,6 +622,36 @@ RESET enable_bitmapscan;
 --
 -- Note: GIN currently supports only bitmap scans, not plain indexscans
 --
+CREATE TABLE array_index_op_test (
+	seqno		int4,
+	i			int4[],
+	t			text[]
+);
+\set filename :abs_srcdir '/data/array.data'
+COPY array_index_op_test FROM :'filename';
+ANALYZE array_index_op_test;
+SELECT * FROM array_index_op_test WHERE i = '{NULL}' ORDER BY seqno;
+ seqno |   i    |   t    
+-------+--------+--------
+   102 | {NULL} | {NULL}
+(1 row)
+
+SELECT * FROM array_index_op_test WHERE i @> '{NULL}' ORDER BY seqno;
+ seqno | i | t 
+-------+---+---
+(0 rows)
+
+SELECT * FROM array_index_op_test WHERE i && '{NULL}' ORDER BY seqno;
+ seqno | i | t 
+-------+---+---
+(0 rows)
+
+SELECT * FROM array_index_op_test WHERE i <@ '{NULL}' ORDER BY seqno;
+ seqno | i  | t  
+-------+----+----
+   101 | {} | {}
+(1 row)
+
 SET enable_seqscan = OFF;
 SET enable_indexscan = OFF;
 SET enable_bitmapscan = ON;
@@ -904,28 +879,6 @@ SELECT * FROM array_index_op_test WHERE i <@ '{}' ORDER BY seqno;
    101 | {} | {}
 (1 row)
 
-SELECT * FROM array_op_test WHERE i = '{NULL}' ORDER BY seqno;
- seqno |   i    |   t    
--------+--------+--------
-   102 | {NULL} | {NULL}
-(1 row)
-
-SELECT * FROM array_op_test WHERE i @> '{NULL}' ORDER BY seqno;
- seqno | i | t 
--------+---+---
-(0 rows)
-
-SELECT * FROM array_op_test WHERE i && '{NULL}' ORDER BY seqno;
- seqno | i | t 
--------+---+---
-(0 rows)
-
-SELECT * FROM array_op_test WHERE i <@ '{NULL}' ORDER BY seqno;
- seqno | i  | t  
--------+----+----
-   101 | {} | {}
-(1 row)
-
 CREATE INDEX textarrayidx ON array_index_op_test USING gin (t);
 explain (costs off)
 SELECT * FROM array_index_op_test WHERE t @> '{AAAAAAAA72908}' ORDER BY seqno;
@@ -1195,18 +1148,6 @@ SELECT * FROM array_index_op_test WHERE t = '{}' ORDER BY seqno;
    101 | {} | {}
 (1 row)
 
-SELECT * FROM array_op_test WHERE i = '{NULL}' ORDER BY seqno;
- seqno |   i    |   t    
--------+--------+--------
-   102 | {NULL} | {NULL}
-(1 row)
-
-SELECT * FROM array_op_test WHERE i <@ '{NULL}' ORDER BY seqno;
- seqno | i  | t  
--------+----+----
-   101 | {} | {}
-(1 row)
-
 RESET enable_seqscan;
 RESET enable_indexscan;
 RESET enable_bitmapscan;
@@ -1240,10 +1181,6 @@ Options: fastupdate=on, gin_pending_list_limit=128
 --
 -- HASH
 --
-CREATE INDEX hash_i4_index ON hash_i4_heap USING hash (random int4_ops);
-CREATE INDEX hash_name_index ON hash_name_heap USING hash (random name_ops);
-CREATE INDEX hash_txt_index ON hash_txt_heap USING hash (random text_ops);
-CREATE INDEX hash_f8_index ON hash_f8_heap USING hash (random float8_ops) WITH (fillfactor=60);
 CREATE UNLOGGED TABLE unlogged_hash_table (id int4);
 CREATE INDEX unlogged_hash_index ON unlogged_hash_table USING hash (id int4_ops);
 DROP TABLE unlogged_hash_table;
@@ -1357,13 +1294,6 @@ ALTER TABLE covering_index_heap ADD CONSTRAINT covering_pkey PRIMARY KEY USING I
 covering_pkey;
 DROP TABLE covering_index_heap;
 --
--- Also try building functional, expressional, and partial indexes on
--- tables that already contain data.
---
-create unique index hash_f8_index_1 on hash_f8_heap(abs(random));
-create unique index hash_f8_index_2 on hash_f8_heap((seqno + 1), random);
-create unique index hash_f8_index_3 on hash_f8_heap(random) where seqno > 1000;
---
 -- Try some concurrent index builds
 --
 -- Unfortunately this only tests about half the code paths because there are
@@ -1888,7 +1818,6 @@ SELECT count(*) FROM dupindexcols
 --
 -- Check ordering of =ANY indexqual results (bug in 9.2.0)
 --
-vacuum tenk1;		-- ensure we get consistent plans here
 explain (costs off)
 SELECT unique1 FROM tenk1
 WHERE unique1 IN (1,42,7)
diff --git a/src/test/regress/expected/create_misc.out b/src/test/regress/expected/create_misc.out
index 41bc4d7750..5b46ee5f1c 100644
--- a/src/test/regress/expected/create_misc.out
+++ b/src/test/regress/expected/create_misc.out
@@ -1,53 +1,34 @@
 --
 -- CREATE_MISC
 --
--- CLASS POPULATION
---	(any resemblance to real life is purely coincidental)
 --
-INSERT INTO tenk2 SELECT * FROM tenk1;
-CREATE TABLE onek2 AS SELECT * FROM onek;
-INSERT INTO fast_emp4000 SELECT * FROM slow_emp4000;
-SELECT *
-   INTO TABLE Bprime
-   FROM tenk1
-   WHERE unique2 < 1000;
-INSERT INTO hobbies_r (name, person)
-   SELECT 'posthacking', p.name
-   FROM person* p
-   WHERE p.name = 'mike' or p.name = 'jeff';
-INSERT INTO hobbies_r (name, person)
-   SELECT 'basketball', p.name
-   FROM person p
-   WHERE p.name = 'joe' or p.name = 'sally';
-INSERT INTO hobbies_r (name) VALUES ('skywalking');
-INSERT INTO equipment_r (name, hobby) VALUES ('advil', 'posthacking');
-INSERT INTO equipment_r (name, hobby) VALUES ('peet''s coffee', 'posthacking');
-INSERT INTO equipment_r (name, hobby) VALUES ('hightops', 'basketball');
-INSERT INTO equipment_r (name, hobby) VALUES ('guts', 'skywalking');
-INSERT INTO city VALUES
-('Podunk', '(1,2),(3,4)', '100,127,1000'),
-('Gotham', '(1000,34),(1100,334)', '123456,127,-1000,6789');
-TABLE city;
-  name  |       location       |        budget         
---------+----------------------+-----------------------
- Podunk | (3,4),(1,2)          | 100,127,1000,0
- Gotham | (1100,334),(1000,34) | 123456,127,-1000,6789
-(2 rows)
-
-SELECT *
-   INTO TABLE ramp
-   FROM road
-   WHERE name ~ '.*Ramp';
-INSERT INTO ihighway
-   SELECT *
-   FROM road
-   WHERE name ~ 'I- .*';
-INSERT INTO shighway
-   SELECT *
-   FROM road
-   WHERE name ~ 'State Hwy.*';
-UPDATE shighway
-   SET surface = 'asphalt';
+-- a is the type root
+-- b and c inherit from a (one-level single inheritance)
+-- d inherits from b and c (two-level multiple inheritance)
+-- e inherits from c (two-level single inheritance)
+-- f inherits from e (three-level single inheritance)
+--
+CREATE TABLE a_star (
+	class		char,
+	a 			int4
+);
+CREATE TABLE b_star (
+	b 			text
+) INHERITS (a_star);
+CREATE TABLE c_star (
+	c 			name
+) INHERITS (a_star);
+CREATE TABLE d_star (
+	d 			float8
+) INHERITS (b_star, c_star);
+NOTICE:  merging multiple inherited definitions of column "class"
+NOTICE:  merging multiple inherited definitions of column "a"
+CREATE TABLE e_star (
+	e 			int2
+) INHERITS (c_star);
+CREATE TABLE f_star (
+	f 			polygon
+) INHERITS (e_star);
 INSERT INTO a_star (class, a) VALUES ('a', 1);
 INSERT INTO a_star (class, a) VALUES ('a', 2);
 INSERT INTO a_star (class) VALUES ('a');
@@ -138,14 +119,369 @@ ANALYZE d_star;
 ANALYZE e_star;
 ANALYZE f_star;
 --
--- for internal portal (cursor) tests
+-- inheritance stress test
 --
-CREATE TABLE iportaltest (
-	i		int4,
-	d		float4,
-	p		polygon
-);
-INSERT INTO iportaltest (i, d, p)
-   VALUES (1, 3.567, '(3.0,1.0),(4.0,2.0)'::polygon);
-INSERT INTO iportaltest (i, d, p)
-   VALUES (2, 89.05, '(4.0,2.0),(3.0,1.0)'::polygon);
+SELECT * FROM a_star*;
+ class | a  
+-------+----
+ a     |  1
+ a     |  2
+ a     |   
+ b     |  3
+ b     |  4
+ b     |   
+ b     |   
+ c     |  5
+ c     |  6
+ c     |   
+ c     |   
+ d     |  7
+ d     |  8
+ d     |  9
+ d     | 10
+ d     |   
+ d     | 11
+ d     | 12
+ d     | 13
+ d     |   
+ d     |   
+ d     |   
+ d     | 14
+ d     |   
+ d     |   
+ d     |   
+ d     |   
+ e     | 15
+ e     | 16
+ e     | 17
+ e     |   
+ e     | 18
+ e     |   
+ e     |   
+ f     | 19
+ f     | 20
+ f     | 21
+ f     | 22
+ f     |   
+ f     | 24
+ f     | 25
+ f     | 26
+ f     |   
+ f     |   
+ f     |   
+ f     | 27
+ f     |   
+ f     |   
+ f     |   
+ f     |   
+(50 rows)
+
+SELECT *
+   FROM b_star* x
+   WHERE x.b = text 'bumble' or x.a < 3;
+ class | a |   b    
+-------+---+--------
+ b     |   | bumble
+(1 row)
+
+SELECT class, a
+   FROM c_star* x
+   WHERE x.c ~ text 'hi';
+ class | a  
+-------+----
+ c     |  5
+ c     |   
+ d     |  7
+ d     |  8
+ d     | 10
+ d     |   
+ d     | 12
+ d     |   
+ d     |   
+ d     |   
+ e     | 15
+ e     | 16
+ e     |   
+ e     |   
+ f     | 19
+ f     | 20
+ f     | 21
+ f     |   
+ f     | 24
+ f     |   
+ f     |   
+ f     |   
+(22 rows)
+
+SELECT class, b, c
+   FROM d_star* x
+   WHERE x.a < 100;
+ class |    b    |     c      
+-------+---------+------------
+ d     | grumble | hi sunita
+ d     | stumble | hi koko
+ d     | rumble  | 
+ d     |         | hi kristin
+ d     | fumble  | 
+ d     |         | hi avi
+ d     |         | 
+ d     |         | 
+(8 rows)
+
+SELECT class, c FROM e_star* x WHERE x.c NOTNULL;
+ class |      c      
+-------+-------------
+ e     | hi carol
+ e     | hi bob
+ e     | hi michelle
+ e     | hi elisa
+ f     | hi claire
+ f     | hi mike
+ f     | hi marcel
+ f     | hi keith
+ f     | hi marc
+ f     | hi allison
+ f     | hi jeff
+ f     | hi carl
+(12 rows)
+
+SELECT * FROM f_star* x WHERE x.c ISNULL;
+ class | a  | c |  e  |                     f                     
+-------+----+---+-----+-------------------------------------------
+ f     | 22 |   |  -7 | ((111,555),(222,666),(333,777),(444,888))
+ f     | 25 |   |  -9 | 
+ f     | 26 |   |     | ((11111,33333),(22222,44444))
+ f     |    |   | -11 | ((1111111,3333333),(2222222,4444444))
+ f     | 27 |   |     | 
+ f     |    |   | -12 | 
+ f     |    |   |     | ((11111111,33333333),(22222222,44444444))
+ f     |    |   |     | 
+(8 rows)
+
+-- grouping and aggregation on inherited sets have been busted in the past...
+SELECT sum(a) FROM a_star*;
+ sum 
+-----
+ 355
+(1 row)
+
+SELECT class, sum(a) FROM a_star* GROUP BY class ORDER BY class;
+ class | sum 
+-------+-----
+ a     |   3
+ b     |   7
+ c     |  11
+ d     |  84
+ e     |  66
+ f     | 184
+(6 rows)
+
+ALTER TABLE f_star RENAME COLUMN f TO ff;
+ALTER TABLE e_star* RENAME COLUMN e TO ee;
+ALTER TABLE d_star* RENAME COLUMN d TO dd;
+ALTER TABLE c_star* RENAME COLUMN c TO cc;
+ALTER TABLE b_star* RENAME COLUMN b TO bb;
+ALTER TABLE a_star* RENAME COLUMN a TO aa;
+SELECT class, aa
+   FROM a_star* x
+   WHERE aa ISNULL;
+ class | aa 
+-------+----
+ a     |   
+ b     |   
+ b     |   
+ c     |   
+ c     |   
+ d     |   
+ d     |   
+ d     |   
+ d     |   
+ d     |   
+ d     |   
+ d     |   
+ d     |   
+ e     |   
+ e     |   
+ e     |   
+ f     |   
+ f     |   
+ f     |   
+ f     |   
+ f     |   
+ f     |   
+ f     |   
+ f     |   
+(24 rows)
+
+-- As of Postgres 7.1, ALTER implicitly recurses,
+-- so this should be same as ALTER a_star*
+ALTER TABLE a_star RENAME COLUMN aa TO foo;
+SELECT class, foo
+   FROM a_star* x
+   WHERE x.foo >= 2;
+ class | foo 
+-------+-----
+ a     |   2
+ b     |   3
+ b     |   4
+ c     |   5
+ c     |   6
+ d     |   7
+ d     |   8
+ d     |   9
+ d     |  10
+ d     |  11
+ d     |  12
+ d     |  13
+ d     |  14
+ e     |  15
+ e     |  16
+ e     |  17
+ e     |  18
+ f     |  19
+ f     |  20
+ f     |  21
+ f     |  22
+ f     |  24
+ f     |  25
+ f     |  26
+ f     |  27
+(25 rows)
+
+ALTER TABLE a_star RENAME COLUMN foo TO aa;
+SELECT *
+   from a_star*
+   WHERE aa < 1000;
+ class | aa 
+-------+----
+ a     |  1
+ a     |  2
+ b     |  3
+ b     |  4
+ c     |  5
+ c     |  6
+ d     |  7
+ d     |  8
+ d     |  9
+ d     | 10
+ d     | 11
+ d     | 12
+ d     | 13
+ d     | 14
+ e     | 15
+ e     | 16
+ e     | 17
+ e     | 18
+ f     | 19
+ f     | 20
+ f     | 21
+ f     | 22
+ f     | 24
+ f     | 25
+ f     | 26
+ f     | 27
+(26 rows)
+
+ALTER TABLE f_star ADD COLUMN f int4;
+UPDATE f_star SET f = 10;
+ALTER TABLE e_star* ADD COLUMN e int4;
+--UPDATE e_star* SET e = 42;
+SELECT * FROM e_star*;
+ class | aa |     cc      | ee  | e 
+-------+----+-------------+-----+---
+ e     | 15 | hi carol    |  -1 |  
+ e     | 16 | hi bob      |     |  
+ e     | 17 |             |  -2 |  
+ e     |    | hi michelle |  -3 |  
+ e     | 18 |             |     |  
+ e     |    | hi elisa    |     |  
+ e     |    |             |  -4 |  
+ f     | 19 | hi claire   |  -5 |  
+ f     | 20 | hi mike     |  -6 |  
+ f     | 21 | hi marcel   |     |  
+ f     | 22 |             |  -7 |  
+ f     |    | hi keith    |  -8 |  
+ f     | 24 | hi marc     |     |  
+ f     | 25 |             |  -9 |  
+ f     | 26 |             |     |  
+ f     |    | hi allison  | -10 |  
+ f     |    | hi jeff     |     |  
+ f     |    |             | -11 |  
+ f     | 27 |             |     |  
+ f     |    | hi carl     |     |  
+ f     |    |             | -12 |  
+ f     |    |             |     |  
+ f     |    |             |     |  
+(23 rows)
+
+ALTER TABLE a_star* ADD COLUMN a text;
+NOTICE:  merging definition of column "a" for child "d_star"
+-- That ALTER TABLE should have added TOAST tables.
+SELECT relname, reltoastrelid <> 0 AS has_toast_table
+   FROM pg_class
+   WHERE oid::regclass IN ('a_star', 'c_star')
+   ORDER BY 1;
+ relname | has_toast_table 
+---------+-----------------
+ a_star  | t
+ c_star  | t
+(2 rows)
+
+--UPDATE b_star*
+--   SET a = text 'gazpacho'
+--   WHERE aa > 4;
+SELECT class, aa, a FROM a_star*;
+ class | aa | a 
+-------+----+---
+ a     |  1 | 
+ a     |  2 | 
+ a     |    | 
+ b     |  3 | 
+ b     |  4 | 
+ b     |    | 
+ b     |    | 
+ c     |  5 | 
+ c     |  6 | 
+ c     |    | 
+ c     |    | 
+ d     |  7 | 
+ d     |  8 | 
+ d     |  9 | 
+ d     | 10 | 
+ d     |    | 
+ d     | 11 | 
+ d     | 12 | 
+ d     | 13 | 
+ d     |    | 
+ d     |    | 
+ d     |    | 
+ d     | 14 | 
+ d     |    | 
+ d     |    | 
+ d     |    | 
+ d     |    | 
+ e     | 15 | 
+ e     | 16 | 
+ e     | 17 | 
+ e     |    | 
+ e     | 18 | 
+ e     |    | 
+ e     |    | 
+ f     | 19 | 
+ f     | 20 | 
+ f     | 21 | 
+ f     | 22 | 
+ f     |    | 
+ f     | 24 | 
+ f     | 25 | 
+ f     | 26 | 
+ f     |    | 
+ f     |    | 
+ f     |    | 
+ f     | 27 | 
+ f     |    | 
+ f     |    | 
+ f     |    | 
+ f     |    | 
+(50 rows)
+
diff --git a/src/test/regress/expected/create_table.out b/src/test/regress/expected/create_table.out
index a958b84979..02d0999580 100644
--- a/src/test/regress/expected/create_table.out
+++ b/src/test/regress/expected/create_table.out
@@ -1,204 +1,7 @@
 --
 -- CREATE_TABLE
 --
---
--- CLASS DEFINITIONS
---
-CREATE TABLE hobbies_r (
-	name		text,
-	person 		text
-);
-CREATE TABLE equipment_r (
-	name 		text,
-	hobby		text
-);
-CREATE TABLE onek (
-	unique1		int4,
-	unique2		int4,
-	two			int4,
-	four		int4,
-	ten			int4,
-	twenty		int4,
-	hundred		int4,
-	thousand	int4,
-	twothousand	int4,
-	fivethous	int4,
-	tenthous	int4,
-	odd			int4,
-	even		int4,
-	stringu1	name,
-	stringu2	name,
-	string4		name
-);
-CREATE TABLE tenk1 (
-	unique1		int4,
-	unique2		int4,
-	two			int4,
-	four		int4,
-	ten			int4,
-	twenty		int4,
-	hundred		int4,
-	thousand	int4,
-	twothousand	int4,
-	fivethous	int4,
-	tenthous	int4,
-	odd			int4,
-	even		int4,
-	stringu1	name,
-	stringu2	name,
-	string4		name
-);
-CREATE TABLE tenk2 (
-	unique1 	int4,
-	unique2 	int4,
-	two 	 	int4,
-	four 		int4,
-	ten			int4,
-	twenty 		int4,
-	hundred 	int4,
-	thousand 	int4,
-	twothousand int4,
-	fivethous 	int4,
-	tenthous	int4,
-	odd			int4,
-	even		int4,
-	stringu1	name,
-	stringu2	name,
-	string4		name
-);
-CREATE TABLE person (
-	name 		text,
-	age			int4,
-	location 	point
-);
-CREATE TABLE emp (
-	salary 		int4,
-	manager 	name
-) INHERITS (person);
-CREATE TABLE student (
-	gpa 		float8
-) INHERITS (person);
-CREATE TABLE stud_emp (
-	percent 	int4
-) INHERITS (emp, student);
-NOTICE:  merging multiple inherited definitions of column "name"
-NOTICE:  merging multiple inherited definitions of column "age"
-NOTICE:  merging multiple inherited definitions of column "location"
-CREATE TABLE city (
-	name		name,
-	location 	box,
-	budget 		city_budget
-);
-CREATE TABLE dept (
-	dname		name,
-	mgrname 	text
-);
-CREATE TABLE slow_emp4000 (
-	home_base	 box
-);
-CREATE TABLE fast_emp4000 (
-	home_base	 box
-);
-CREATE TABLE road (
-	name		text,
-	thepath 	path
-);
-CREATE TABLE ihighway () INHERITS (road);
-CREATE TABLE shighway (
-	surface		text
-) INHERITS (road);
-CREATE TABLE real_city (
-	pop			int4,
-	cname		text,
-	outline 	path
-);
---
--- test the "star" operators a bit more thoroughly -- this time,
--- throw in lots of NULL fields...
---
--- a is the type root
--- b and c inherit from a (one-level single inheritance)
--- d inherits from b and c (two-level multiple inheritance)
--- e inherits from c (two-level single inheritance)
--- f inherits from e (three-level single inheritance)
---
-CREATE TABLE a_star (
-	class		char,
-	a 			int4
-);
-CREATE TABLE b_star (
-	b 			text
-) INHERITS (a_star);
-CREATE TABLE c_star (
-	c 			name
-) INHERITS (a_star);
-CREATE TABLE d_star (
-	d 			float8
-) INHERITS (b_star, c_star);
-NOTICE:  merging multiple inherited definitions of column "class"
-NOTICE:  merging multiple inherited definitions of column "a"
-CREATE TABLE e_star (
-	e 			int2
-) INHERITS (c_star);
-CREATE TABLE f_star (
-	f 			polygon
-) INHERITS (e_star);
-CREATE TABLE aggtest (
-	a 			int2,
-	b			float4
-);
-CREATE TABLE hash_i4_heap (
-	seqno 		int4,
-	random 		int4
-);
-CREATE TABLE hash_name_heap (
-	seqno 		int4,
-	random 		name
-);
-CREATE TABLE hash_txt_heap (
-	seqno 		int4,
-	random 		text
-);
-CREATE TABLE hash_f8_heap (
-	seqno		int4,
-	random 		float8
-);
--- don't include the hash_ovfl_heap stuff in the distribution
--- the data set is too large for what it's worth
---
--- CREATE TABLE hash_ovfl_heap (
---	x			int4,
---	y			int4
--- );
-CREATE TABLE bt_i4_heap (
-	seqno 		int4,
-	random 		int4
-);
-CREATE TABLE bt_name_heap (
-	seqno 		name,
-	random 		int4
-);
-CREATE TABLE bt_txt_heap (
-	seqno 		text,
-	random 		int4
-);
-CREATE TABLE bt_f8_heap (
-	seqno 		float8,
-	random 		int4
-);
-CREATE TABLE array_op_test (
-	seqno		int4,
-	i			int4[],
-	t			text[]
-);
-CREATE TABLE array_index_op_test (
-	seqno		int4,
-	i			int4[],
-	t			text[]
-);
-CREATE TABLE testjsonb (
-       j jsonb
-);
+-- Error cases
 CREATE TABLE unknowntab (
 	u unknown    -- fail
 );
@@ -207,14 +10,6 @@ CREATE TYPE unknown_comptype AS (
 	u unknown    -- fail
 );
 ERROR:  column "u" has pseudo-type unknown
-CREATE TABLE IF NOT EXISTS test_tsvector(
-	t text,
-	a tsvector
-);
-CREATE TABLE IF NOT EXISTS test_tsvector(
-	t text
-);
-NOTICE:  relation "test_tsvector" already exists, skipping
 -- invalid: non-lowercase quoted reloptions identifiers
 CREATE TABLE tas_case WITH ("Fillfactor" = 10) AS SELECT 1 a;
 ERROR:  unrecognized parameter "Fillfactor"
diff --git a/src/test/regress/expected/create_type.out b/src/test/regress/expected/create_type.out
index 8e105ad8f4..0dfc88c1c8 100644
--- a/src/test/regress/expected/create_type.out
+++ b/src/test/regress/expected/create_type.out
@@ -268,6 +268,22 @@ SELECT point '(1,2)' <% widget '(0,0,3)' AS t,
  t | f
 (1 row)
 
+-- exercise city_budget type
+CREATE TABLE city (
+	name		name,
+	location 	box,
+	budget 		city_budget
+);
+INSERT INTO city VALUES
+('Podunk', '(1,2),(3,4)', '100,127,1000'),
+('Gotham', '(1000,34),(1100,334)', '123456,127,-1000,6789');
+TABLE city;
+  name  |       location       |        budget         
+--------+----------------------+-----------------------
+ Podunk | (3,4),(1,2)          | 100,127,1000,0
+ Gotham | (1100,334),(1000,34) | 123456,127,-1000,6789
+(2 rows)
+
 --
 -- Test CREATE/ALTER TYPE using a type that's compatible with varchar,
 -- so we can re-use those support functions
diff --git a/src/test/regress/expected/create_view.out b/src/test/regress/expected/create_view.out
index d546457943..f89910838b 100644
--- a/src/test/regress/expected/create_view.out
+++ b/src/test/regress/expected/create_view.out
@@ -12,6 +12,18 @@ CREATE FUNCTION interpt_pp(path, path)
     RETURNS point
     AS :'regresslib'
     LANGUAGE C STRICT;
+CREATE TABLE real_city (
+	pop			int4,
+	cname		text,
+	outline 	path
+);
+\set filename :abs_srcdir '/data/real_city.data'
+COPY real_city FROM :'filename';
+ANALYZE real_city;
+SELECT *
+   INTO TABLE ramp
+   FROM ONLY road
+   WHERE name ~ '.*Ramp';
 CREATE VIEW street AS
    SELECT r.name, r.thepath, c.cname AS cname
    FROM ONLY road r, real_city c
diff --git a/src/test/regress/expected/errors.out b/src/test/regress/expected/errors.out
index 15862d4475..8c527474da 100644
--- a/src/test/regress/expected/errors.out
+++ b/src/test/regress/expected/errors.out
@@ -92,8 +92,8 @@ ERROR:  relation "nonesuch" does not exist
 alter table nonesuch rename to stud_emp;
 ERROR:  relation "nonesuch" does not exist
 -- conflict
-alter table stud_emp rename to aggtest;
-ERROR:  relation "aggtest" already exists
+alter table stud_emp rename to student;
+ERROR:  relation "student" already exists
 -- self-conflict
 alter table stud_emp rename to stud_emp;
 ERROR:  relation "stud_emp" already exists
diff --git a/src/test/regress/expected/expressions.out b/src/test/regress/expected/expressions.out
index 6406fb3a76..701715e9c2 100644
--- a/src/test/regress/expected/expressions.out
+++ b/src/test/regress/expected/expressions.out
@@ -2,7 +2,7 @@
 -- expression evaluation tests that don't fit into a more specific file
 --
 --
--- Tests for SQLVAlueFunction
+-- Tests for SQLValueFunction
 --
 -- current_date  (always matches because of transactional behaviour)
 SELECT date(now())::text = current_date::text;
@@ -87,77 +87,6 @@ SELECT current_schema;
 (1 row)
 
 RESET search_path;
---
--- Tests for BETWEEN
---
-explain (costs off)
-select count(*) from date_tbl
-  where f1 between '1997-01-01' and '1998-01-01';
-                                 QUERY PLAN                                  
------------------------------------------------------------------------------
- Aggregate
-   ->  Seq Scan on date_tbl
-         Filter: ((f1 >= '01-01-1997'::date) AND (f1 <= '01-01-1998'::date))
-(3 rows)
-
-select count(*) from date_tbl
-  where f1 between '1997-01-01' and '1998-01-01';
- count 
--------
-     3
-(1 row)
-
-explain (costs off)
-select count(*) from date_tbl
-  where f1 not between '1997-01-01' and '1998-01-01';
-                                QUERY PLAN                                
---------------------------------------------------------------------------
- Aggregate
-   ->  Seq Scan on date_tbl
-         Filter: ((f1 < '01-01-1997'::date) OR (f1 > '01-01-1998'::date))
-(3 rows)
-
-select count(*) from date_tbl
-  where f1 not between '1997-01-01' and '1998-01-01';
- count 
--------
-    13
-(1 row)
-
-explain (costs off)
-select count(*) from date_tbl
-  where f1 between symmetric '1997-01-01' and '1998-01-01';
-                                                                  QUERY PLAN                                                                  
-----------------------------------------------------------------------------------------------------------------------------------------------
- Aggregate
-   ->  Seq Scan on date_tbl
-         Filter: (((f1 >= '01-01-1997'::date) AND (f1 <= '01-01-1998'::date)) OR ((f1 >= '01-01-1998'::date) AND (f1 <= '01-01-1997'::date)))
-(3 rows)
-
-select count(*) from date_tbl
-  where f1 between symmetric '1997-01-01' and '1998-01-01';
- count 
--------
-     3
-(1 row)
-
-explain (costs off)
-select count(*) from date_tbl
-  where f1 not between symmetric '1997-01-01' and '1998-01-01';
-                                                               QUERY PLAN                                                                
------------------------------------------------------------------------------------------------------------------------------------------
- Aggregate
-   ->  Seq Scan on date_tbl
-         Filter: (((f1 < '01-01-1997'::date) OR (f1 > '01-01-1998'::date)) AND ((f1 < '01-01-1998'::date) OR (f1 > '01-01-1997'::date)))
-(3 rows)
-
-select count(*) from date_tbl
-  where f1 not between symmetric '1997-01-01' and '1998-01-01';
- count 
--------
-    13
-(1 row)
-
 --
 -- Test parsing of a no-op cast to a type with unspecified typmod
 --
diff --git a/src/test/regress/expected/float8.out b/src/test/regress/expected/float8.out
index fc4212b32d..de4d57ec9f 100644
--- a/src/test/regress/expected/float8.out
+++ b/src/test/regress/expected/float8.out
@@ -1,7 +1,11 @@
 --
 -- FLOAT8
 --
-CREATE TABLE FLOAT8_TBL(f1 float8);
+--
+-- Build a table for testing
+-- (This temporarily hides the table created in test_setup.sql)
+--
+CREATE TEMP TABLE FLOAT8_TBL(f1 float8);
 INSERT INTO FLOAT8_TBL(f1) VALUES ('    0.0   ');
 INSERT INTO FLOAT8_TBL(f1) VALUES ('1004.30  ');
 INSERT INTO FLOAT8_TBL(f1) VALUES ('   -34.84');
@@ -779,14 +783,8 @@ INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e-400');
 ERROR:  "-10e-400" is out of range for type double precision
 LINE 1: INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e-400');
                                            ^
--- maintain external table consistency across platforms
--- delete all values and reinsert well-behaved ones
-DELETE FROM FLOAT8_TBL;
-INSERT INTO FLOAT8_TBL(f1) VALUES ('0.0');
-INSERT INTO FLOAT8_TBL(f1) VALUES ('-34.84');
-INSERT INTO FLOAT8_TBL(f1) VALUES ('-1004.30');
-INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e+200');
-INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e-200');
+DROP TABLE FLOAT8_TBL;
+-- Check the float8 values exported for use by other tests
 SELECT * FROM FLOAT8_TBL;
           f1           
 -----------------------
diff --git a/src/test/regress/expected/geometry.out b/src/test/regress/expected/geometry.out
index d91d829f0f..3b364d1e3b 100644
--- a/src/test/regress/expected/geometry.out
+++ b/src/test/regress/expected/geometry.out
@@ -5236,3 +5236,63 @@ SELECT c.f1, p.f1, c.f1 <-> p.f1 FROM CIRCLE_TBL c, POLYGON_TBL p;
  <(3,5),NaN>    | ((0,1),(0,1))              |            NaN
 (56 rows)
 
+-- Check index behavior for circles
+CREATE INDEX gcircleind ON circle_tbl USING gist (f1);
+SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
+    ORDER BY area(f1);
+      f1       
+---------------
+ <(1,2),3>
+ <(1,3),5>
+ <(1,2),100>
+ <(100,1),115>
+(4 rows)
+
+EXPLAIN (COSTS OFF)
+SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
+    ORDER BY area(f1);
+                  QUERY PLAN                  
+----------------------------------------------
+ Sort
+   Sort Key: (area(f1))
+   ->  Seq Scan on circle_tbl
+         Filter: (f1 && '<(1,-2),1>'::circle)
+(4 rows)
+
+SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
+    ORDER BY area(f1);
+      f1       
+---------------
+ <(1,2),3>
+ <(1,3),5>
+ <(1,2),100>
+ <(100,1),115>
+(4 rows)
+
+-- Check index behavior for polygons
+CREATE INDEX gpolygonind ON polygon_tbl USING gist (f1);
+SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon
+    ORDER BY (poly_center(f1))[0];
+         f1          
+---------------------
+ ((2,0),(2,4),(0,0))
+(1 row)
+
+EXPLAIN (COSTS OFF)
+SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon
+    ORDER BY (poly_center(f1))[0];
+                       QUERY PLAN                       
+--------------------------------------------------------
+ Sort
+   Sort Key: ((poly_center(f1))[0])
+   ->  Seq Scan on polygon_tbl
+         Filter: (f1 @> '((1,1),(2,2),(2,1))'::polygon)
+(4 rows)
+
+SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon
+    ORDER BY (poly_center(f1))[0];
+         f1          
+---------------------
+ ((2,0),(2,4),(0,0))
+(1 row)
+
diff --git a/src/test/regress/expected/hash_index.out b/src/test/regress/expected/hash_index.out
index e23de21b41..a2036a1597 100644
--- a/src/test/regress/expected/hash_index.out
+++ b/src/test/regress/expected/hash_index.out
@@ -1,5 +1,54 @@
 --
 -- HASH_INDEX
+--
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
+CREATE TABLE hash_i4_heap (
+	seqno 		int4,
+	random 		int4
+);
+CREATE TABLE hash_name_heap (
+	seqno 		int4,
+	random 		name
+);
+CREATE TABLE hash_txt_heap (
+	seqno 		int4,
+	random 		text
+);
+CREATE TABLE hash_f8_heap (
+	seqno		int4,
+	random 		float8
+);
+\set filename :abs_srcdir '/data/hash.data'
+COPY hash_i4_heap FROM :'filename';
+COPY hash_name_heap FROM :'filename';
+COPY hash_txt_heap FROM :'filename';
+COPY hash_f8_heap FROM :'filename';
+-- the data in this file has a lot of duplicates in the index key
+-- fields, leading to long bucket chains and lots of table expansion.
+-- this is therefore a stress test of the bucket overflow code (unlike
+-- the data in hash.data, which has unique index keys).
+--
+-- \set filename :abs_srcdir '/data/hashovfl.data'
+-- COPY hash_ovfl_heap FROM :'filename';
+ANALYZE hash_i4_heap;
+ANALYZE hash_name_heap;
+ANALYZE hash_txt_heap;
+ANALYZE hash_f8_heap;
+CREATE INDEX hash_i4_index ON hash_i4_heap USING hash (random int4_ops);
+CREATE INDEX hash_name_index ON hash_name_heap USING hash (random name_ops);
+CREATE INDEX hash_txt_index ON hash_txt_heap USING hash (random text_ops);
+CREATE INDEX hash_f8_index ON hash_f8_heap USING hash (random float8_ops)
+  WITH (fillfactor=60);
+--
+-- Also try building functional, expressional, and partial indexes on
+-- tables that already contain data.
+--
+create unique index hash_f8_index_1 on hash_f8_heap(abs(random));
+create unique index hash_f8_index_2 on hash_f8_heap((seqno + 1), random);
+create unique index hash_f8_index_3 on hash_f8_heap(random) where seqno > 1000;
+--
+-- hash index
 -- grep 843938989 hash.data
 --
 SELECT * FROM hash_i4_heap
diff --git a/src/test/regress/expected/horology.out b/src/test/regress/expected/horology.out
index 4b865e73f3..de73683690 100644
--- a/src/test/regress/expected/horology.out
+++ b/src/test/regress/expected/horology.out
@@ -2147,6 +2147,77 @@ SELECT '2020-10-05'::timestamptz >= '4714-11-24 BC'::timestamp as t;
 (1 row)
 
 RESET TimeZone;
+--
+-- Tests for BETWEEN
+--
+explain (costs off)
+select count(*) from date_tbl
+  where f1 between '1997-01-01' and '1998-01-01';
+                                 QUERY PLAN                                  
+-----------------------------------------------------------------------------
+ Aggregate
+   ->  Seq Scan on date_tbl
+         Filter: ((f1 >= '01-01-1997'::date) AND (f1 <= '01-01-1998'::date))
+(3 rows)
+
+select count(*) from date_tbl
+  where f1 between '1997-01-01' and '1998-01-01';
+ count 
+-------
+     3
+(1 row)
+
+explain (costs off)
+select count(*) from date_tbl
+  where f1 not between '1997-01-01' and '1998-01-01';
+                                QUERY PLAN                                
+--------------------------------------------------------------------------
+ Aggregate
+   ->  Seq Scan on date_tbl
+         Filter: ((f1 < '01-01-1997'::date) OR (f1 > '01-01-1998'::date))
+(3 rows)
+
+select count(*) from date_tbl
+  where f1 not between '1997-01-01' and '1998-01-01';
+ count 
+-------
+    13
+(1 row)
+
+explain (costs off)
+select count(*) from date_tbl
+  where f1 between symmetric '1997-01-01' and '1998-01-01';
+                                                                  QUERY PLAN                                                                  
+----------------------------------------------------------------------------------------------------------------------------------------------
+ Aggregate
+   ->  Seq Scan on date_tbl
+         Filter: (((f1 >= '01-01-1997'::date) AND (f1 <= '01-01-1998'::date)) OR ((f1 >= '01-01-1998'::date) AND (f1 <= '01-01-1997'::date)))
+(3 rows)
+
+select count(*) from date_tbl
+  where f1 between symmetric '1997-01-01' and '1998-01-01';
+ count 
+-------
+     3
+(1 row)
+
+explain (costs off)
+select count(*) from date_tbl
+  where f1 not between symmetric '1997-01-01' and '1998-01-01';
+                                                               QUERY PLAN                                                                
+-----------------------------------------------------------------------------------------------------------------------------------------
+ Aggregate
+   ->  Seq Scan on date_tbl
+         Filter: (((f1 < '01-01-1997'::date) OR (f1 > '01-01-1998'::date)) AND ((f1 < '01-01-1998'::date) OR (f1 > '01-01-1997'::date)))
+(3 rows)
+
+select count(*) from date_tbl
+  where f1 not between symmetric '1997-01-01' and '1998-01-01';
+ count 
+-------
+    13
+(1 row)
+
 --
 -- Formats
 --
diff --git a/src/test/regress/expected/int2.out b/src/test/regress/expected/int2.out
index 55ea7202cd..0306648fa2 100644
--- a/src/test/regress/expected/int2.out
+++ b/src/test/regress/expected/int2.out
@@ -1,18 +1,12 @@
 --
 -- INT2
 --
-CREATE TABLE INT2_TBL(f1 int2);
-INSERT INTO INT2_TBL(f1) VALUES ('0   ');
-INSERT INTO INT2_TBL(f1) VALUES ('  1234 ');
-INSERT INTO INT2_TBL(f1) VALUES ('    -1234');
+-- int2_tbl was already created and filled in test_setup.sql.
+-- Here we just try to insert bad values.
 INSERT INTO INT2_TBL(f1) VALUES ('34.5');
 ERROR:  invalid input syntax for type smallint: "34.5"
 LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('34.5');
                                          ^
--- largest and smallest values
-INSERT INTO INT2_TBL(f1) VALUES ('32767');
-INSERT INTO INT2_TBL(f1) VALUES ('-32767');
--- bad input values -- should give errors
 INSERT INTO INT2_TBL(f1) VALUES ('100000');
 ERROR:  value "100000" is out of range for type smallint
 LINE 1: INSERT INTO INT2_TBL(f1) VALUES ('100000');
diff --git a/src/test/regress/expected/int4.out b/src/test/regress/expected/int4.out
index 9d20b3380f..fbcc0e8d9e 100644
--- a/src/test/regress/expected/int4.out
+++ b/src/test/regress/expected/int4.out
@@ -1,18 +1,12 @@
 --
 -- INT4
 --
-CREATE TABLE INT4_TBL(f1 int4);
-INSERT INTO INT4_TBL(f1) VALUES ('   0  ');
-INSERT INTO INT4_TBL(f1) VALUES ('123456     ');
-INSERT INTO INT4_TBL(f1) VALUES ('    -123456');
+-- int4_tbl was already created and filled in test_setup.sql.
+-- Here we just try to insert bad values.
 INSERT INTO INT4_TBL(f1) VALUES ('34.5');
 ERROR:  invalid input syntax for type integer: "34.5"
 LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('34.5');
                                          ^
--- largest and smallest values
-INSERT INTO INT4_TBL(f1) VALUES ('2147483647');
-INSERT INTO INT4_TBL(f1) VALUES ('-2147483647');
--- bad input values -- should give errors
 INSERT INTO INT4_TBL(f1) VALUES ('1000000000000');
 ERROR:  value "1000000000000" is out of range for type integer
 LINE 1: INSERT INTO INT4_TBL(f1) VALUES ('1000000000000');
diff --git a/src/test/regress/expected/int8.out b/src/test/regress/expected/int8.out
index 36540ec456..1ae23cf3f9 100644
--- a/src/test/regress/expected/int8.out
+++ b/src/test/regress/expected/int8.out
@@ -2,13 +2,8 @@
 -- INT8
 -- Test int8 64-bit integers.
 --
-CREATE TABLE INT8_TBL(q1 int8, q2 int8);
-INSERT INTO INT8_TBL VALUES('  123   ','  456');
-INSERT INTO INT8_TBL VALUES('123   ','4567890123456789');
-INSERT INTO INT8_TBL VALUES('4567890123456789','123');
-INSERT INTO INT8_TBL VALUES(+4567890123456789,'4567890123456789');
-INSERT INTO INT8_TBL VALUES('+4567890123456789','-4567890123456789');
--- bad inputs
+-- int8_tbl was already created and filled in test_setup.sql.
+-- Here we just try to insert bad values.
 INSERT INTO INT8_TBL(q1) VALUES ('      ');
 ERROR:  invalid input syntax for type bigint: "      "
 LINE 1: INSERT INTO INT8_TBL(q1) VALUES ('      ');
diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out
index d5b5b775fd..19caebabd0 100644
--- a/src/test/regress/expected/join.out
+++ b/src/test/regress/expected/join.out
@@ -2261,7 +2261,7 @@ rollback;
 --
 explain (costs off)
 select aa, bb, unique1, unique1
-  from tenk1 right join b on aa = unique1
+  from tenk1 right join b_star on aa = unique1
   where bb < bb and bb is null;
         QUERY PLAN        
 --------------------------
@@ -2270,7 +2270,7 @@ select aa, bb, unique1, unique1
 (2 rows)
 
 select aa, bb, unique1, unique1
-  from tenk1 right join b on aa = unique1
+  from tenk1 right join b_star on aa = unique1
   where bb < bb and bb is null;
  aa | bb | unique1 | unique1 
 ----+----+---------+---------
diff --git a/src/test/regress/expected/jsonb.out b/src/test/regress/expected/jsonb.out
index a9cd145aec..b2b3677482 100644
--- a/src/test/regress/expected/jsonb.out
+++ b/src/test/regress/expected/jsonb.out
@@ -1,3 +1,10 @@
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
+CREATE TABLE testjsonb (
+       j jsonb
+);
+\set filename :abs_srcdir '/data/jsonb.data'
+COPY testjsonb FROM :'filename';
 -- Strings.
 SELECT '""'::jsonb;				-- OK.
  jsonb 
diff --git a/src/test/regress/expected/misc.out b/src/test/regress/expected/misc.out
index c317f1484e..6e816c57f1 100644
--- a/src/test/regress/expected/misc.out
+++ b/src/test/regress/expected/misc.out
@@ -36,6 +36,9 @@ UPDATE onek
 -- systems.    This non-func update stuff needs to be examined
 -- more closely.  			- jolly (2/22/96)
 --
+SELECT two, stringu1, ten, string4
+   INTO TABLE tmp
+   FROM onek;
 UPDATE tmp
    SET stringu1 = reverse_name(onek.stringu1)
    FROM onek
@@ -57,29 +60,23 @@ DROP TABLE tmp;
 --
 \set filename :abs_builddir '/results/onek.data'
 COPY onek TO :'filename';
-DELETE FROM onek;
-COPY onek FROM :'filename';
-SELECT unique1 FROM onek WHERE unique1 < 2 ORDER BY unique1;
- unique1 
----------
-       0
-       1
-(2 rows)
-
-DELETE FROM onek2;
-COPY onek2 FROM :'filename';
-SELECT unique1 FROM onek2 WHERE unique1 < 2 ORDER BY unique1;
- unique1 
----------
-       0
-       1
-(2 rows)
+CREATE TEMP TABLE onek_copy (LIKE onek);
+COPY onek_copy FROM :'filename';
+SELECT * FROM onek EXCEPT ALL SELECT * FROM onek_copy;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+(0 rows)
+
+SELECT * FROM onek_copy EXCEPT ALL SELECT * FROM onek;
+ unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 
+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------
+(0 rows)
 
 \set filename :abs_builddir '/results/stud_emp.data'
 COPY BINARY stud_emp TO :'filename';
-DELETE FROM stud_emp;
-COPY BINARY stud_emp FROM :'filename';
-SELECT * FROM stud_emp;
+CREATE TEMP TABLE stud_emp_copy (LIKE stud_emp);
+COPY BINARY stud_emp_copy FROM :'filename';
+SELECT * FROM stud_emp_copy;
  name  | age |  location  | salary | manager | gpa | percent 
 -------+-----+------------+--------+---------+-----+---------
  jeff  |  23 | (8,7.7)    |    600 | sharon  | 3.5 |        
@@ -87,383 +84,30 @@ SELECT * FROM stud_emp;
  linda |  19 | (0.9,6.1)  |    100 |         | 2.9 |        
 (3 rows)
 
--- COPY aggtest FROM stdin;
--- 56	7.8
--- 100	99.097
--- 0	0.09561
--- 42	324.78
--- .
--- COPY aggtest TO stdout;
---
--- inheritance stress test
---
-SELECT * FROM a_star*;
- class | a  
--------+----
- a     |  1
- a     |  2
- a     |   
- b     |  3
- b     |  4
- b     |   
- b     |   
- c     |  5
- c     |  6
- c     |   
- c     |   
- d     |  7
- d     |  8
- d     |  9
- d     | 10
- d     |   
- d     | 11
- d     | 12
- d     | 13
- d     |   
- d     |   
- d     |   
- d     | 14
- d     |   
- d     |   
- d     |   
- d     |   
- e     | 15
- e     | 16
- e     | 17
- e     |   
- e     | 18
- e     |   
- e     |   
- f     | 19
- f     | 20
- f     | 21
- f     | 22
- f     |   
- f     | 24
- f     | 25
- f     | 26
- f     |   
- f     |   
- f     |   
- f     | 27
- f     |   
- f     |   
- f     |   
- f     |   
-(50 rows)
-
-SELECT *
-   FROM b_star* x
-   WHERE x.b = text 'bumble' or x.a < 3;
- class | a |   b    
--------+---+--------
- b     |   | bumble
-(1 row)
-
-SELECT class, a
-   FROM c_star* x
-   WHERE x.c ~ text 'hi';
- class | a  
--------+----
- c     |  5
- c     |   
- d     |  7
- d     |  8
- d     | 10
- d     |   
- d     | 12
- d     |   
- d     |   
- d     |   
- e     | 15
- e     | 16
- e     |   
- e     |   
- f     | 19
- f     | 20
- f     | 21
- f     |   
- f     | 24
- f     |   
- f     |   
- f     |   
-(22 rows)
-
-SELECT class, b, c
-   FROM d_star* x
-   WHERE x.a < 100;
- class |    b    |     c      
--------+---------+------------
- d     | grumble | hi sunita
- d     | stumble | hi koko
- d     | rumble  | 
- d     |         | hi kristin
- d     | fumble  | 
- d     |         | hi avi
- d     |         | 
- d     |         | 
-(8 rows)
-
-SELECT class, c FROM e_star* x WHERE x.c NOTNULL;
- class |      c      
--------+-------------
- e     | hi carol
- e     | hi bob
- e     | hi michelle
- e     | hi elisa
- f     | hi claire
- f     | hi mike
- f     | hi marcel
- f     | hi keith
- f     | hi marc
- f     | hi allison
- f     | hi jeff
- f     | hi carl
-(12 rows)
-
-SELECT * FROM f_star* x WHERE x.c ISNULL;
- class | a  | c |  e  |                     f                     
--------+----+---+-----+-------------------------------------------
- f     | 22 |   |  -7 | ((111,555),(222,666),(333,777),(444,888))
- f     | 25 |   |  -9 | 
- f     | 26 |   |     | ((11111,33333),(22222,44444))
- f     |    |   | -11 | ((1111111,3333333),(2222222,4444444))
- f     | 27 |   |     | 
- f     |    |   | -12 | 
- f     |    |   |     | ((11111111,33333333),(22222222,44444444))
- f     |    |   |     | 
-(8 rows)
-
--- grouping and aggregation on inherited sets have been busted in the past...
-SELECT sum(a) FROM a_star*;
- sum 
------
- 355
-(1 row)
-
-SELECT class, sum(a) FROM a_star* GROUP BY class ORDER BY class;
- class | sum 
--------+-----
- a     |   3
- b     |   7
- c     |  11
- d     |  84
- e     |  66
- f     | 184
-(6 rows)
-
-ALTER TABLE f_star RENAME COLUMN f TO ff;
-ALTER TABLE e_star* RENAME COLUMN e TO ee;
-ALTER TABLE d_star* RENAME COLUMN d TO dd;
-ALTER TABLE c_star* RENAME COLUMN c TO cc;
-ALTER TABLE b_star* RENAME COLUMN b TO bb;
-ALTER TABLE a_star* RENAME COLUMN a TO aa;
-SELECT class, aa
-   FROM a_star* x
-   WHERE aa ISNULL;
- class | aa 
--------+----
- a     |   
- b     |   
- b     |   
- c     |   
- c     |   
- d     |   
- d     |   
- d     |   
- d     |   
- d     |   
- d     |   
- d     |   
- d     |   
- e     |   
- e     |   
- e     |   
- f     |   
- f     |   
- f     |   
- f     |   
- f     |   
- f     |   
- f     |   
- f     |   
-(24 rows)
-
--- As of Postgres 7.1, ALTER implicitly recurses,
--- so this should be same as ALTER a_star*
-ALTER TABLE a_star RENAME COLUMN aa TO foo;
-SELECT class, foo
-   FROM a_star* x
-   WHERE x.foo >= 2;
- class | foo 
--------+-----
- a     |   2
- b     |   3
- b     |   4
- c     |   5
- c     |   6
- d     |   7
- d     |   8
- d     |   9
- d     |  10
- d     |  11
- d     |  12
- d     |  13
- d     |  14
- e     |  15
- e     |  16
- e     |  17
- e     |  18
- f     |  19
- f     |  20
- f     |  21
- f     |  22
- f     |  24
- f     |  25
- f     |  26
- f     |  27
-(25 rows)
-
-ALTER TABLE a_star RENAME COLUMN foo TO aa;
-SELECT *
-   from a_star*
-   WHERE aa < 1000;
- class | aa 
--------+----
- a     |  1
- a     |  2
- b     |  3
- b     |  4
- c     |  5
- c     |  6
- d     |  7
- d     |  8
- d     |  9
- d     | 10
- d     | 11
- d     | 12
- d     | 13
- d     | 14
- e     | 15
- e     | 16
- e     | 17
- e     | 18
- f     | 19
- f     | 20
- f     | 21
- f     | 22
- f     | 24
- f     | 25
- f     | 26
- f     | 27
-(26 rows)
-
-ALTER TABLE f_star ADD COLUMN f int4;
-UPDATE f_star SET f = 10;
-ALTER TABLE e_star* ADD COLUMN e int4;
---UPDATE e_star* SET e = 42;
-SELECT * FROM e_star*;
- class | aa |     cc      | ee  | e 
--------+----+-------------+-----+---
- e     | 15 | hi carol    |  -1 |  
- e     | 16 | hi bob      |     |  
- e     | 17 |             |  -2 |  
- e     |    | hi michelle |  -3 |  
- e     | 18 |             |     |  
- e     |    | hi elisa    |     |  
- e     |    |             |  -4 |  
- f     | 19 | hi claire   |  -5 |  
- f     | 20 | hi mike     |  -6 |  
- f     | 21 | hi marcel   |     |  
- f     | 22 |             |  -7 |  
- f     |    | hi keith    |  -8 |  
- f     | 24 | hi marc     |     |  
- f     | 25 |             |  -9 |  
- f     | 26 |             |     |  
- f     |    | hi allison  | -10 |  
- f     |    | hi jeff     |     |  
- f     |    |             | -11 |  
- f     | 27 |             |     |  
- f     |    | hi carl     |     |  
- f     |    |             | -12 |  
- f     |    |             |     |  
- f     |    |             |     |  
-(23 rows)
-
-ALTER TABLE a_star* ADD COLUMN a text;
-NOTICE:  merging definition of column "a" for child "d_star"
--- That ALTER TABLE should have added TOAST tables.
-SELECT relname, reltoastrelid <> 0 AS has_toast_table
-   FROM pg_class
-   WHERE oid::regclass IN ('a_star', 'c_star')
-   ORDER BY 1;
- relname | has_toast_table 
----------+-----------------
- a_star  | t
- c_star  | t
-(2 rows)
-
---UPDATE b_star*
---   SET a = text 'gazpacho'
---   WHERE aa > 4;
-SELECT class, aa, a FROM a_star*;
- class | aa | a 
--------+----+---
- a     |  1 | 
- a     |  2 | 
- a     |    | 
- b     |  3 | 
- b     |  4 | 
- b     |    | 
- b     |    | 
- c     |  5 | 
- c     |  6 | 
- c     |    | 
- c     |    | 
- d     |  7 | 
- d     |  8 | 
- d     |  9 | 
- d     | 10 | 
- d     |    | 
- d     | 11 | 
- d     | 12 | 
- d     | 13 | 
- d     |    | 
- d     |    | 
- d     |    | 
- d     | 14 | 
- d     |    | 
- d     |    | 
- d     |    | 
- d     |    | 
- e     | 15 | 
- e     | 16 | 
- e     | 17 | 
- e     |    | 
- e     | 18 | 
- e     |    | 
- e     |    | 
- f     | 19 | 
- f     | 20 | 
- f     | 21 | 
- f     | 22 | 
- f     |    | 
- f     | 24 | 
- f     | 25 | 
- f     | 26 | 
- f     |    | 
- f     |    | 
- f     |    | 
- f     | 27 | 
- f     |    | 
- f     |    | 
- f     |    | 
- f     |    | 
-(50 rows)
-
---
--- versions
 --
+-- test data for postquel functions
+--
+CREATE TABLE hobbies_r (
+	name		text,
+	person 		text
+);
+CREATE TABLE equipment_r (
+	name 		text,
+	hobby		text
+);
+INSERT INTO hobbies_r (name, person)
+   SELECT 'posthacking', p.name
+   FROM person* p
+   WHERE p.name = 'mike' or p.name = 'jeff';
+INSERT INTO hobbies_r (name, person)
+   SELECT 'basketball', p.name
+   FROM person p
+   WHERE p.name = 'joe' or p.name = 'sally';
+INSERT INTO hobbies_r (name) VALUES ('skywalking');
+INSERT INTO equipment_r (name, hobby) VALUES ('advil', 'posthacking');
+INSERT INTO equipment_r (name, hobby) VALUES ('peet''s coffee', 'posthacking');
+INSERT INTO equipment_r (name, hobby) VALUES ('hightops', 'basketball');
+INSERT INTO equipment_r (name, hobby) VALUES ('guts', 'skywalking');
 --
 -- postquel functions
 --
diff --git a/src/test/regress/expected/point.out b/src/test/regress/expected/point.out
index 1dc535d1b3..3bde8ac7d0 100644
--- a/src/test/regress/expected/point.out
+++ b/src/test/regress/expected/point.out
@@ -3,22 +3,12 @@
 --
 -- avoid bit-exact output here because operations may not be bit-exact.
 SET extra_float_digits = 0;
-CREATE TABLE POINT_TBL(f1 point);
-INSERT INTO POINT_TBL(f1) VALUES ('(0.0,0.0)');
-INSERT INTO POINT_TBL(f1) VALUES ('(-10.0,0.0)');
-INSERT INTO POINT_TBL(f1) VALUES ('(-3.0,4.0)');
-INSERT INTO POINT_TBL(f1) VALUES ('(5.1, 34.5)');
-INSERT INTO POINT_TBL(f1) VALUES ('(-5.0,-12.0)');
-INSERT INTO POINT_TBL(f1) VALUES ('(1e-300,-1e-300)');	-- To underflow
-INSERT INTO POINT_TBL(f1) VALUES ('(1e+300,Inf)');		-- To overflow
-INSERT INTO POINT_TBL(f1) VALUES ('(Inf,1e+300)');		-- Transposed
-INSERT INTO POINT_TBL(f1) VALUES (' ( Nan , NaN ) ');
--- bad format points
+-- point_tbl was already created and filled in test_setup.sql.
+-- Here we just try to insert bad values.
 INSERT INTO POINT_TBL(f1) VALUES ('asdfasdf');
 ERROR:  invalid input syntax for type point: "asdfasdf"
 LINE 1: INSERT INTO POINT_TBL(f1) VALUES ('asdfasdf');
                                           ^
-INSERT INTO POINT_TBL(f1) VALUES ('10.0,10.0');
 INSERT INTO POINT_TBL(f1) VALUES ('(10.0 10.0)');
 ERROR:  invalid input syntax for type point: "(10.0 10.0)"
 LINE 1: INSERT INTO POINT_TBL(f1) VALUES ('(10.0 10.0)');
diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out
index 51d5bb2e49..b9b3dce040 100644
--- a/src/test/regress/expected/rules.out
+++ b/src/test/regress/expected/rules.out
@@ -1283,30 +1283,8 @@ drop table cchild;
 -- temporarily disable fancy output, so view changes create less diff noise
 \a\t
 SELECT viewname, definition FROM pg_views
-WHERE schemaname IN ('pg_catalog', 'public')
+WHERE schemaname = 'pg_catalog'
 ORDER BY viewname;
-iexit| SELECT ih.name,
-    ih.thepath,
-    interpt_pp(ih.thepath, r.thepath) AS exit
-   FROM ihighway ih,
-    ramp r
-  WHERE (ih.thepath ?# r.thepath);
-key_dependent_view| SELECT view_base_table.key,
-    view_base_table.data
-   FROM view_base_table
-  GROUP BY view_base_table.key;
-key_dependent_view_no_cols| SELECT
-   FROM view_base_table
-  GROUP BY view_base_table.key
- HAVING (length((view_base_table.data)::text) > 0);
-mvtest_tv| SELECT mvtest_t.type,
-    sum(mvtest_t.amt) AS totamt
-   FROM mvtest_t
-  GROUP BY mvtest_t.type;
-mvtest_tvv| SELECT sum(mvtest_tv.totamt) AS grandtot
-   FROM mvtest_tv;
-mvtest_tvvmv| SELECT mvtest_tvvm.grandtot
-   FROM mvtest_tvvm;
 pg_available_extension_versions| SELECT e.name,
     e.version,
     (x.extname IS NOT NULL) AS installed,
@@ -2590,204 +2568,14 @@ pg_views| SELECT n.nspname AS schemaname,
    FROM (pg_class c
      LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace)))
   WHERE (c.relkind = 'v'::"char");
-rtest_v1| SELECT rtest_t1.a,
-    rtest_t1.b
-   FROM rtest_t1;
-rtest_vcomp| SELECT x.part,
-    (x.size * y.factor) AS size_in_cm
-   FROM rtest_comp x,
-    rtest_unitfact y
-  WHERE (x.unit = y.unit);
-rtest_vview1| SELECT x.a,
-    x.b
-   FROM rtest_view1 x
-  WHERE (0 < ( SELECT count(*) AS count
-           FROM rtest_view2 y
-          WHERE (y.a = x.a)));
-rtest_vview2| SELECT rtest_view1.a,
-    rtest_view1.b
-   FROM rtest_view1
-  WHERE rtest_view1.v;
-rtest_vview3| SELECT x.a,
-    x.b
-   FROM rtest_vview2 x
-  WHERE (0 < ( SELECT count(*) AS count
-           FROM rtest_view2 y
-          WHERE (y.a = x.a)));
-rtest_vview4| SELECT x.a,
-    x.b,
-    count(y.a) AS refcount
-   FROM rtest_view1 x,
-    rtest_view2 y
-  WHERE (x.a = y.a)
-  GROUP BY x.a, x.b;
-rtest_vview5| SELECT rtest_view1.a,
-    rtest_view1.b,
-    rtest_viewfunc1(rtest_view1.a) AS refcount
-   FROM rtest_view1;
-shoe| SELECT sh.shoename,
-    sh.sh_avail,
-    sh.slcolor,
-    sh.slminlen,
-    (sh.slminlen * un.un_fact) AS slminlen_cm,
-    sh.slmaxlen,
-    (sh.slmaxlen * un.un_fact) AS slmaxlen_cm,
-    sh.slunit
-   FROM shoe_data sh,
-    unit un
-  WHERE (sh.slunit = un.un_name);
-shoe_ready| SELECT rsh.shoename,
-    rsh.sh_avail,
-    rsl.sl_name,
-    rsl.sl_avail,
-    int4smaller(rsh.sh_avail, rsl.sl_avail) AS total_avail
-   FROM shoe rsh,
-    shoelace rsl
-  WHERE ((rsl.sl_color = rsh.slcolor) AND (rsl.sl_len_cm >= rsh.slminlen_cm) AND (rsl.sl_len_cm <= rsh.slmaxlen_cm));
-shoelace| SELECT s.sl_name,
-    s.sl_avail,
-    s.sl_color,
-    s.sl_len,
-    s.sl_unit,
-    (s.sl_len * u.un_fact) AS sl_len_cm
-   FROM shoelace_data s,
-    unit u
-  WHERE (s.sl_unit = u.un_name);
-shoelace_candelete| SELECT shoelace_obsolete.sl_name,
-    shoelace_obsolete.sl_avail,
-    shoelace_obsolete.sl_color,
-    shoelace_obsolete.sl_len,
-    shoelace_obsolete.sl_unit,
-    shoelace_obsolete.sl_len_cm
-   FROM shoelace_obsolete
-  WHERE (shoelace_obsolete.sl_avail = 0);
-shoelace_obsolete| SELECT shoelace.sl_name,
-    shoelace.sl_avail,
-    shoelace.sl_color,
-    shoelace.sl_len,
-    shoelace.sl_unit,
-    shoelace.sl_len_cm
-   FROM shoelace
-  WHERE (NOT (EXISTS ( SELECT shoe.shoename
-           FROM shoe
-          WHERE (shoe.slcolor = shoelace.sl_color))));
-street| SELECT r.name,
-    r.thepath,
-    c.cname
-   FROM ONLY road r,
-    real_city c
-  WHERE (c.outline ?# r.thepath);
-test_tablesample_v1| SELECT test_tablesample.id
-   FROM test_tablesample TABLESAMPLE system ((10 * 2)) REPEATABLE (2);
-test_tablesample_v2| SELECT test_tablesample.id
-   FROM test_tablesample TABLESAMPLE system (99);
-toyemp| SELECT emp.name,
-    emp.age,
-    emp.location,
-    (12 * emp.salary) AS annualsal
-   FROM emp;
 SELECT tablename, rulename, definition FROM pg_rules
-WHERE schemaname IN ('pg_catalog', 'public')
+WHERE schemaname = 'pg_catalog'
 ORDER BY tablename, rulename;
 pg_settings|pg_settings_n|CREATE RULE pg_settings_n AS
     ON UPDATE TO pg_catalog.pg_settings DO INSTEAD NOTHING;
 pg_settings|pg_settings_u|CREATE RULE pg_settings_u AS
     ON UPDATE TO pg_catalog.pg_settings
    WHERE (new.name = old.name) DO  SELECT set_config(old.name, new.setting, false) AS set_config;
-rtest_emp|rtest_emp_del|CREATE RULE rtest_emp_del AS
-    ON DELETE TO public.rtest_emp DO  INSERT INTO rtest_emplog (ename, who, action, newsal, oldsal)
-  VALUES (old.ename, CURRENT_USER, 'fired'::bpchar, '$0.00'::money, old.salary);
-rtest_emp|rtest_emp_ins|CREATE RULE rtest_emp_ins AS
-    ON INSERT TO public.rtest_emp DO  INSERT INTO rtest_emplog (ename, who, action, newsal, oldsal)
-  VALUES (new.ename, CURRENT_USER, 'hired'::bpchar, new.salary, '$0.00'::money);
-rtest_emp|rtest_emp_upd|CREATE RULE rtest_emp_upd AS
-    ON UPDATE TO public.rtest_emp
-   WHERE (new.salary <> old.salary) DO  INSERT INTO rtest_emplog (ename, who, action, newsal, oldsal)
-  VALUES (new.ename, CURRENT_USER, 'honored'::bpchar, new.salary, old.salary);
-rtest_nothn1|rtest_nothn_r1|CREATE RULE rtest_nothn_r1 AS
-    ON INSERT TO public.rtest_nothn1
-   WHERE ((new.a >= 10) AND (new.a < 20)) DO INSTEAD NOTHING;
-rtest_nothn1|rtest_nothn_r2|CREATE RULE rtest_nothn_r2 AS
-    ON INSERT TO public.rtest_nothn1
-   WHERE ((new.a >= 30) AND (new.a < 40)) DO INSTEAD NOTHING;
-rtest_nothn2|rtest_nothn_r3|CREATE RULE rtest_nothn_r3 AS
-    ON INSERT TO public.rtest_nothn2
-   WHERE (new.a >= 100) DO INSTEAD  INSERT INTO rtest_nothn3 (a, b)
-  VALUES (new.a, new.b);
-rtest_nothn2|rtest_nothn_r4|CREATE RULE rtest_nothn_r4 AS
-    ON INSERT TO public.rtest_nothn2 DO INSTEAD NOTHING;
-rtest_order1|rtest_order_r1|CREATE RULE rtest_order_r1 AS
-    ON INSERT TO public.rtest_order1 DO INSTEAD  INSERT INTO rtest_order2 (a, b, c)
-  VALUES (new.a, nextval('rtest_seq'::regclass), 'rule 1 - this should run 1st'::text);
-rtest_order1|rtest_order_r2|CREATE RULE rtest_order_r2 AS
-    ON INSERT TO public.rtest_order1 DO  INSERT INTO rtest_order2 (a, b, c)
-  VALUES (new.a, nextval('rtest_seq'::regclass), 'rule 2 - this should run 2nd'::text);
-rtest_order1|rtest_order_r3|CREATE RULE rtest_order_r3 AS
-    ON INSERT TO public.rtest_order1 DO INSTEAD  INSERT INTO rtest_order2 (a, b, c)
-  VALUES (new.a, nextval('rtest_seq'::regclass), 'rule 3 - this should run 3rd'::text);
-rtest_order1|rtest_order_r4|CREATE RULE rtest_order_r4 AS
-    ON INSERT TO public.rtest_order1
-   WHERE (new.a < 100) DO INSTEAD  INSERT INTO rtest_order2 (a, b, c)
-  VALUES (new.a, nextval('rtest_seq'::regclass), 'rule 4 - this should run 4th'::text);
-rtest_person|rtest_pers_del|CREATE RULE rtest_pers_del AS
-    ON DELETE TO public.rtest_person DO  DELETE FROM rtest_admin
-  WHERE (rtest_admin.pname = old.pname);
-rtest_person|rtest_pers_upd|CREATE RULE rtest_pers_upd AS
-    ON UPDATE TO public.rtest_person DO  UPDATE rtest_admin SET pname = new.pname
-  WHERE (rtest_admin.pname = old.pname);
-rtest_system|rtest_sys_del|CREATE RULE rtest_sys_del AS
-    ON DELETE TO public.rtest_system DO ( DELETE FROM rtest_interface
-  WHERE (rtest_interface.sysname = old.sysname);
- DELETE FROM rtest_admin
-  WHERE (rtest_admin.sysname = old.sysname);
-);
-rtest_system|rtest_sys_upd|CREATE RULE rtest_sys_upd AS
-    ON UPDATE TO public.rtest_system DO ( UPDATE rtest_interface SET sysname = new.sysname
-  WHERE (rtest_interface.sysname = old.sysname);
- UPDATE rtest_admin SET sysname = new.sysname
-  WHERE (rtest_admin.sysname = old.sysname);
-);
-rtest_t4|rtest_t4_ins1|CREATE RULE rtest_t4_ins1 AS
-    ON INSERT TO public.rtest_t4
-   WHERE ((new.a >= 10) AND (new.a < 20)) DO INSTEAD  INSERT INTO rtest_t5 (a, b)
-  VALUES (new.a, new.b);
-rtest_t4|rtest_t4_ins2|CREATE RULE rtest_t4_ins2 AS
-    ON INSERT TO public.rtest_t4
-   WHERE ((new.a >= 20) AND (new.a < 30)) DO  INSERT INTO rtest_t6 (a, b)
-  VALUES (new.a, new.b);
-rtest_t5|rtest_t5_ins|CREATE RULE rtest_t5_ins AS
-    ON INSERT TO public.rtest_t5
-   WHERE (new.a > 15) DO  INSERT INTO rtest_t7 (a, b)
-  VALUES (new.a, new.b);
-rtest_t6|rtest_t6_ins|CREATE RULE rtest_t6_ins AS
-    ON INSERT TO public.rtest_t6
-   WHERE (new.a > 25) DO INSTEAD  INSERT INTO rtest_t8 (a, b)
-  VALUES (new.a, new.b);
-rtest_v1|rtest_v1_del|CREATE RULE rtest_v1_del AS
-    ON DELETE TO public.rtest_v1 DO INSTEAD  DELETE FROM rtest_t1
-  WHERE (rtest_t1.a = old.a);
-rtest_v1|rtest_v1_ins|CREATE RULE rtest_v1_ins AS
-    ON INSERT TO public.rtest_v1 DO INSTEAD  INSERT INTO rtest_t1 (a, b)
-  VALUES (new.a, new.b);
-rtest_v1|rtest_v1_upd|CREATE RULE rtest_v1_upd AS
-    ON UPDATE TO public.rtest_v1 DO INSTEAD  UPDATE rtest_t1 SET a = new.a, b = new.b
-  WHERE (rtest_t1.a = old.a);
-shoelace|shoelace_del|CREATE RULE shoelace_del AS
-    ON DELETE TO public.shoelace DO INSTEAD  DELETE FROM shoelace_data
-  WHERE (shoelace_data.sl_name = old.sl_name);
-shoelace|shoelace_ins|CREATE RULE shoelace_ins AS
-    ON INSERT TO public.shoelace DO INSTEAD  INSERT INTO shoelace_data (sl_name, sl_avail, sl_color, sl_len, sl_unit)
-  VALUES (new.sl_name, new.sl_avail, new.sl_color, new.sl_len, new.sl_unit);
-shoelace|shoelace_upd|CREATE RULE shoelace_upd AS
-    ON UPDATE TO public.shoelace DO INSTEAD  UPDATE shoelace_data SET sl_name = new.sl_name, sl_avail = new.sl_avail, sl_color = new.sl_color, sl_len = new.sl_len, sl_unit = new.sl_unit
-  WHERE (shoelace_data.sl_name = old.sl_name);
-shoelace_data|log_shoelace|CREATE RULE log_shoelace AS
-    ON UPDATE TO public.shoelace_data
-   WHERE (new.sl_avail <> old.sl_avail) DO  INSERT INTO shoelace_log (sl_name, sl_avail, log_who, log_when)
-  VALUES (new.sl_name, new.sl_avail, 'Al Bundy'::name, 'Thu Jan 01 00:00:00 1970'::timestamp without time zone);
-shoelace_ok|shoelace_ok_ins|CREATE RULE shoelace_ok_ins AS
-    ON INSERT TO public.shoelace_ok DO INSTEAD  UPDATE shoelace SET sl_avail = (shoelace.sl_avail + new.ok_quant)
-  WHERE (shoelace.sl_name = new.ok_name);
 -- restore normal output mode
 \a\t
 --
diff --git a/src/test/regress/expected/sanity_check.out b/src/test/regress/expected/sanity_check.out
index 63706a28cc..8370c1561c 100644
--- a/src/test/regress/expected/sanity_check.out
+++ b/src/test/regress/expected/sanity_check.out
@@ -1,229 +1,6 @@
 VACUUM;
 --
--- sanity check, if we don't have indices the test will take years to
--- complete.  But skip TOAST relations (since they will have varying
--- names depending on the current OID counter) as well as temp tables
--- of other backends (to avoid timing-dependent behavior).
---
--- temporarily disable fancy output, so catalog changes create less diff noise
-\a\t
-SELECT relname, relhasindex
-   FROM pg_class c LEFT JOIN pg_namespace n ON n.oid = relnamespace
-   WHERE relkind IN ('r', 'p') AND (nspname ~ '^pg_temp_') IS NOT TRUE
-   ORDER BY relname;
-a|f
-a_star|f
-aggtest|f
-array_index_op_test|t
-array_op_test|f
-b|f
-b_star|f
-bit_defaults|f
-box_tbl|f
-bprime|f
-bt_f8_heap|t
-bt_i4_heap|t
-bt_name_heap|t
-bt_txt_heap|t
-c|f
-c_star|f
-char_tbl|f
-check2_tbl|f
-check_tbl|f
-circle_tbl|t
-city|f
-copy_tbl|f
-d|f
-d_star|f
-date_tbl|f
-default_tbl|f
-defaultexpr_tbl|f
-dept|f
-dupindexcols|t
-e_star|f
-emp|f
-equipment_r|f
-extra_wide_table|f
-f_star|f
-fast_emp4000|t
-float4_tbl|f
-float8_tbl|f
-func_index_heap|t
-hash_f8_heap|t
-hash_i4_heap|t
-hash_name_heap|t
-hash_txt_heap|t
-hobbies_r|f
-ihighway|t
-inet_tbl|f
-insert_tbl|f
-int2_tbl|f
-int4_tbl|f
-int8_tbl|f
-interval_tbl|f
-invalid_check_con|f
-invalid_check_con_child|f
-iportaltest|f
-kd_point_tbl|t
-line_tbl|f
-log_table|f
-lseg_tbl|f
-main_table|f
-mlparted|f
-mlparted1|f
-mlparted11|f
-mlparted12|f
-mlparted2|f
-mlparted3|f
-mlparted4|f
-mlparted_def|f
-mlparted_def1|f
-mlparted_def2|f
-mlparted_defd|f
-money_data|f
-num_data|f
-num_exp_add|t
-num_exp_div|t
-num_exp_ln|t
-num_exp_log10|t
-num_exp_mul|t
-num_exp_power_10_ln|t
-num_exp_sqrt|t
-num_exp_sub|t
-num_input_test|f
-num_result|f
-num_typemod_test|f
-nummultirange_test|t
-numrange_test|t
-onek|t
-onek2|t
-path_tbl|f
-person|f
-persons|f
-persons2|t
-persons3|t
-pg_aggregate|t
-pg_am|t
-pg_amop|t
-pg_amproc|t
-pg_attrdef|t
-pg_attribute|t
-pg_auth_members|t
-pg_authid|t
-pg_cast|t
-pg_class|t
-pg_collation|t
-pg_constraint|t
-pg_conversion|t
-pg_database|t
-pg_db_role_setting|t
-pg_default_acl|t
-pg_depend|t
-pg_description|t
-pg_enum|t
-pg_event_trigger|t
-pg_extension|t
-pg_foreign_data_wrapper|t
-pg_foreign_server|t
-pg_foreign_table|t
-pg_index|t
-pg_inherits|t
-pg_init_privs|t
-pg_language|t
-pg_largeobject|t
-pg_largeobject_metadata|t
-pg_namespace|t
-pg_opclass|t
-pg_operator|t
-pg_opfamily|t
-pg_partitioned_table|t
-pg_policy|t
-pg_proc|t
-pg_publication|t
-pg_publication_namespace|t
-pg_publication_rel|t
-pg_range|t
-pg_replication_origin|t
-pg_rewrite|t
-pg_seclabel|t
-pg_sequence|t
-pg_shdepend|t
-pg_shdescription|t
-pg_shseclabel|t
-pg_statistic|t
-pg_statistic_ext|t
-pg_statistic_ext_data|t
-pg_subscription|t
-pg_subscription_rel|t
-pg_tablespace|t
-pg_transform|t
-pg_trigger|t
-pg_ts_config|t
-pg_ts_config_map|t
-pg_ts_dict|t
-pg_ts_parser|t
-pg_ts_template|t
-pg_type|t
-pg_user_mapping|t
-point_tbl|t
-polygon_tbl|t
-quad_box_tbl|t
-quad_box_tbl_ord_seq1|f
-quad_box_tbl_ord_seq2|f
-quad_point_tbl|t
-quad_poly_tbl|t
-radix_text_tbl|t
-ramp|f
-real_city|f
-reservations|f
-road|t
-shighway|t
-slow_emp4000|f
-sql_features|f
-sql_implementation_info|f
-sql_parts|f
-sql_sizing|f
-stud_emp|f
-student|f
-tab_core_types|f
-tableam_parted_a_heap2|f
-tableam_parted_b_heap2|f
-tableam_parted_c_heap2|f
-tableam_parted_d_heap2|f
-tableam_parted_heap2|f
-tableam_tbl_heap2|f
-tableam_tblas_heap2|f
-tbl_include_box|t
-tbl_include_box_pk|f
-tbl_include_pk|t
-tbl_include_reg|t
-tbl_include_unique1|t
-tbl_include_unique2|f
-tenk1|t
-tenk2|t
-test_range_excl|t
-test_range_gist|t
-test_range_spgist|t
-test_tsvector|f
-testjsonb|f
-text_tbl|f
-textrange_test|t
-time_tbl|f
-timestamp_tbl|f
-timestamptz_tbl|f
-timetz_tbl|f
-tmp|f
-trigger_parted|t
-trigger_parted_p1|t
-trigger_parted_p1_1|t
-trigger_parted_p2|t
-trigger_parted_p2_2|t
-varchar_tbl|f
-view_base_table|t
--- restore normal output mode
-\a\t
---
--- another sanity check: every system catalog that has OIDs should have
+-- Sanity check: every system catalog that has OIDs should have
 -- a unique index on OID.  This ensures that the OIDs will be unique,
 -- even after the OID counter wraps around.
 -- We exclude non-system tables from the check by looking at nspname.
diff --git a/src/test/regress/expected/select.out b/src/test/regress/expected/select.out
index c441049f41..d7888e44eb 100644
--- a/src/test/regress/expected/select.out
+++ b/src/test/regress/expected/select.out
@@ -292,9 +292,6 @@ SELECT onek2.unique1, onek2.stringu1 FROM onek2
 RESET enable_seqscan;
 RESET enable_bitmapscan;
 RESET enable_sort;
-SELECT two, stringu1, ten, string4
-   INTO TABLE tmp
-   FROM onek;
 --
 -- awk '{print $1,$2;}' person.data |
 -- awk '{if(NF!=2){print $3,$2;}else{print;}}' - emp.data |
diff --git a/src/test/regress/expected/select_distinct.out b/src/test/regress/expected/select_distinct.out
index 58122c6f88..748419cee0 100644
--- a/src/test/regress/expected/select_distinct.out
+++ b/src/test/regress/expected/select_distinct.out
@@ -4,7 +4,7 @@
 --
 -- awk '{print $3;}' onek.data | sort -n | uniq
 --
-SELECT DISTINCT two FROM tmp ORDER BY 1;
+SELECT DISTINCT two FROM onek ORDER BY 1;
  two 
 -----
    0
@@ -14,7 +14,7 @@ SELECT DISTINCT two FROM tmp ORDER BY 1;
 --
 -- awk '{print $5;}' onek.data | sort -n | uniq
 --
-SELECT DISTINCT ten FROM tmp ORDER BY 1;
+SELECT DISTINCT ten FROM onek ORDER BY 1;
  ten 
 -----
    0
@@ -32,7 +32,7 @@ SELECT DISTINCT ten FROM tmp ORDER BY 1;
 --
 -- awk '{print $16;}' onek.data | sort -d | uniq
 --
-SELECT DISTINCT string4 FROM tmp ORDER BY 1;
+SELECT DISTINCT string4 FROM onek ORDER BY 1;
  string4 
 ---------
  AAAAxx
@@ -46,7 +46,7 @@ SELECT DISTINCT string4 FROM tmp ORDER BY 1;
 -- sort +0n -1 +1d -2 +2n -3
 --
 SELECT DISTINCT two, string4, ten
-   FROM tmp
+   FROM onek
    ORDER BY two using <, string4 using <, ten using <;
  two | string4 | ten 
 -----+---------+-----
diff --git a/src/test/regress/expected/select_distinct_on.out b/src/test/regress/expected/select_distinct_on.out
index b787b307f6..3d98990991 100644
--- a/src/test/regress/expected/select_distinct_on.out
+++ b/src/test/regress/expected/select_distinct_on.out
@@ -2,7 +2,7 @@
 -- SELECT_DISTINCT_ON
 --
 SELECT DISTINCT ON (string4) string4, two, ten
-   FROM tmp
+   FROM onek
    ORDER BY string4 using <, two using >, ten using <;
  string4 | two | ten 
 ---------+-----+-----
@@ -14,13 +14,13 @@ SELECT DISTINCT ON (string4) string4, two, ten
 
 -- this will fail due to conflict of ordering requirements
 SELECT DISTINCT ON (string4, ten) string4, two, ten
-   FROM tmp
+   FROM onek
    ORDER BY string4 using <, two using <, ten using <;
 ERROR:  SELECT DISTINCT ON expressions must match initial ORDER BY expressions
 LINE 1: SELECT DISTINCT ON (string4, ten) string4, two, ten
                                      ^
 SELECT DISTINCT ON (string4, ten) string4, ten, two
-   FROM tmp
+   FROM onek
    ORDER BY string4 using <, ten using >, two using <;
  string4 | ten | two 
 ---------+-----+-----
diff --git a/src/test/regress/expected/select_into.out b/src/test/regress/expected/select_into.out
index 43b8209d22..b79fe9a1c0 100644
--- a/src/test/regress/expected/select_into.out
+++ b/src/test/regress/expected/select_into.out
@@ -162,9 +162,9 @@ DROP TABLE easi, easi2;
 --
 -- Disallowed uses of SELECT ... INTO.  All should fail
 --
-DECLARE foo CURSOR FOR SELECT 1 INTO b;
+DECLARE foo CURSOR FOR SELECT 1 INTO int4_tbl;
 ERROR:  SELECT ... INTO is not allowed here
-LINE 1: DECLARE foo CURSOR FOR SELECT 1 INTO b;
+LINE 1: DECLARE foo CURSOR FOR SELECT 1 INTO int4_tbl;
                                              ^
 COPY (SELECT 1 INTO frak UNION SELECT 2) TO 'blob';
 ERROR:  COPY (SELECT INTO) is not supported
@@ -172,12 +172,12 @@ SELECT * FROM (SELECT 1 INTO f) bar;
 ERROR:  SELECT ... INTO is not allowed here
 LINE 1: SELECT * FROM (SELECT 1 INTO f) bar;
                                      ^
-CREATE VIEW foo AS SELECT 1 INTO b;
+CREATE VIEW foo AS SELECT 1 INTO int4_tbl;
 ERROR:  views must not contain SELECT INTO
-INSERT INTO b SELECT 1 INTO f;
+INSERT INTO int4_tbl SELECT 1 INTO f;
 ERROR:  SELECT ... INTO is not allowed here
-LINE 1: INSERT INTO b SELECT 1 INTO f;
-                                    ^
+LINE 1: INSERT INTO int4_tbl SELECT 1 INTO f;
+                                           ^
 -- Test CREATE TABLE AS ... IF NOT EXISTS
 CREATE TABLE ctas_ine_tbl AS SELECT 1;
 CREATE TABLE ctas_ine_tbl AS SELECT 1 / 0; -- error
diff --git a/src/test/regress/expected/test_setup.out b/src/test/regress/expected/test_setup.out
index 34e0aa70b5..98e08cb6eb 100644
--- a/src/test/regress/expected/test_setup.out
+++ b/src/test/regress/expected/test_setup.out
@@ -12,6 +12,174 @@
 --
 GRANT ALL ON SCHEMA public TO public;
 --
+-- These tables have traditionally been referenced by many tests,
+-- so create and populate them.  Insert only non-error values here.
+-- (Some subsequent tests try to insert erroneous values.  That's okay
+-- because the table won't actually change.  Do not change the contents
+-- of these tables in later tests, as it may affect other tests.)
+--
+CREATE TABLE CHAR_TBL(f1 char(4));
+INSERT INTO CHAR_TBL (f1) VALUES
+  ('a'),
+  ('ab'),
+  ('abcd'),
+  ('abcd    ');
+VACUUM CHAR_TBL;
+CREATE TABLE FLOAT8_TBL(f1 float8);
+INSERT INTO FLOAT8_TBL(f1) VALUES
+  ('0.0'),
+  ('-34.84'),
+  ('-1004.30'),
+  ('-1.2345678901234e+200'),
+  ('-1.2345678901234e-200');
+VACUUM FLOAT8_TBL;
+CREATE TABLE INT2_TBL(f1 int2);
+INSERT INTO INT2_TBL(f1) VALUES
+  ('0   '),
+  ('  1234 '),
+  ('    -1234'),
+  ('32767'),  -- largest and smallest values
+  ('-32767');
+VACUUM INT2_TBL;
+CREATE TABLE INT4_TBL(f1 int4);
+INSERT INTO INT4_TBL(f1) VALUES
+  ('   0  '),
+  ('123456     '),
+  ('    -123456'),
+  ('2147483647'),  -- largest and smallest values
+  ('-2147483647');
+VACUUM INT4_TBL;
+CREATE TABLE INT8_TBL(q1 int8, q2 int8);
+INSERT INTO INT8_TBL VALUES
+  ('  123   ','  456'),
+  ('123   ','4567890123456789'),
+  ('4567890123456789','123'),
+  (+4567890123456789,'4567890123456789'),
+  ('+4567890123456789','-4567890123456789');
+VACUUM INT8_TBL;
+CREATE TABLE POINT_TBL(f1 point);
+INSERT INTO POINT_TBL(f1) VALUES
+  ('(0.0,0.0)'),
+  ('(-10.0,0.0)'),
+  ('(-3.0,4.0)'),
+  ('(5.1, 34.5)'),
+  ('(-5.0,-12.0)'),
+  ('(1e-300,-1e-300)'),  -- To underflow
+  ('(1e+300,Inf)'),  -- To overflow
+  ('(Inf,1e+300)'),  -- Transposed
+  (' ( Nan , NaN ) '),
+  ('10.0,10.0');
+-- We intentionally don't vacuum point_tbl here; geometry depends on that
+CREATE TABLE TEXT_TBL (f1 text);
+INSERT INTO TEXT_TBL VALUES
+  ('doh!'),
+  ('hi de ho neighbor');
+VACUUM TEXT_TBL;
+CREATE TABLE VARCHAR_TBL(f1 varchar(4));
+INSERT INTO VARCHAR_TBL (f1) VALUES
+  ('a'),
+  ('ab'),
+  ('abcd'),
+  ('abcd    ');
+VACUUM VARCHAR_TBL;
+CREATE TABLE onek (
+	unique1		int4,
+	unique2		int4,
+	two			int4,
+	four		int4,
+	ten			int4,
+	twenty		int4,
+	hundred		int4,
+	thousand	int4,
+	twothousand	int4,
+	fivethous	int4,
+	tenthous	int4,
+	odd			int4,
+	even		int4,
+	stringu1	name,
+	stringu2	name,
+	string4		name
+);
+\set filename :abs_srcdir '/data/onek.data'
+COPY onek FROM :'filename';
+VACUUM ANALYZE onek;
+CREATE TABLE onek2 AS SELECT * FROM onek;
+VACUUM ANALYZE onek2;
+CREATE TABLE tenk1 (
+	unique1		int4,
+	unique2		int4,
+	two			int4,
+	four		int4,
+	ten			int4,
+	twenty		int4,
+	hundred		int4,
+	thousand	int4,
+	twothousand	int4,
+	fivethous	int4,
+	tenthous	int4,
+	odd			int4,
+	even		int4,
+	stringu1	name,
+	stringu2	name,
+	string4		name
+);
+\set filename :abs_srcdir '/data/tenk.data'
+COPY tenk1 FROM :'filename';
+VACUUM ANALYZE tenk1;
+CREATE TABLE tenk2 AS SELECT * FROM tenk1;
+VACUUM ANALYZE tenk2;
+CREATE TABLE person (
+	name 		text,
+	age			int4,
+	location 	point
+);
+\set filename :abs_srcdir '/data/person.data'
+COPY person FROM :'filename';
+VACUUM ANALYZE person;
+CREATE TABLE emp (
+	salary 		int4,
+	manager 	name
+) INHERITS (person);
+\set filename :abs_srcdir '/data/emp.data'
+COPY emp FROM :'filename';
+VACUUM ANALYZE emp;
+CREATE TABLE student (
+	gpa 		float8
+) INHERITS (person);
+\set filename :abs_srcdir '/data/student.data'
+COPY student FROM :'filename';
+VACUUM ANALYZE student;
+CREATE TABLE stud_emp (
+	percent 	int4
+) INHERITS (emp, student);
+NOTICE:  merging multiple inherited definitions of column "name"
+NOTICE:  merging multiple inherited definitions of column "age"
+NOTICE:  merging multiple inherited definitions of column "location"
+\set filename :abs_srcdir '/data/stud_emp.data'
+COPY stud_emp FROM :'filename';
+VACUUM ANALYZE stud_emp;
+CREATE TABLE road (
+	name		text,
+	thepath 	path
+);
+\set filename :abs_srcdir '/data/streets.data'
+COPY road FROM :'filename';
+VACUUM ANALYZE road;
+CREATE TABLE ihighway () INHERITS (road);
+INSERT INTO ihighway
+   SELECT *
+   FROM ONLY road
+   WHERE name ~ 'I- .*';
+VACUUM ANALYZE ihighway;
+CREATE TABLE shighway (
+	surface		text
+) INHERITS (road);
+INSERT INTO shighway
+   SELECT *, 'asphalt'
+   FROM ONLY road
+   WHERE name ~ 'State Hwy.*';
+VACUUM ANALYZE shighway;
+--
 -- We must have some enum type in the database for opr_sanity and type_sanity.
 --
 create type stoplight as enum ('red', 'yellow', 'green');
diff --git a/src/test/regress/expected/text.out b/src/test/regress/expected/text.out
index b625b09f32..4c65b238e7 100644
--- a/src/test/regress/expected/text.out
+++ b/src/test/regress/expected/text.out
@@ -13,9 +13,7 @@ SELECT text 'this is a text string' = text 'this is a text strin' AS false;
  f
 (1 row)
 
-CREATE TABLE TEXT_TBL (f1 text);
-INSERT INTO TEXT_TBL VALUES ('doh!');
-INSERT INTO TEXT_TBL VALUES ('hi de ho neighbor');
+-- text_tbl was already created and filled in test_setup.sql.
 SELECT * FROM TEXT_TBL;
         f1         
 -------------------
diff --git a/src/test/regress/expected/transactions.out b/src/test/regress/expected/transactions.out
index 61862d595d..599d511a67 100644
--- a/src/test/regress/expected/transactions.out
+++ b/src/test/regress/expected/transactions.out
@@ -2,9 +2,12 @@
 -- TRANSACTIONS
 --
 BEGIN;
-SELECT *
-   INTO TABLE xacttest
-   FROM aggtest;
+CREATE TABLE xacttest (a smallint, b real);
+INSERT INTO xacttest VALUES
+  (56, 7.8),
+  (100, 99.097),
+  (0, 0.09561),
+  (42, 324.78);
 INSERT INTO xacttest (a, b) VALUES (777, 777.777);
 END;
 -- should retrieve one value--
@@ -16,9 +19,9 @@ SELECT a FROM xacttest WHERE a > 100;
 
 BEGIN;
 CREATE TABLE disappear (a int4);
-DELETE FROM aggtest;
+DELETE FROM xacttest;
 -- should be empty
-SELECT * FROM aggtest;
+SELECT * FROM xacttest;
  a | b 
 ---+---
 (0 rows)
@@ -31,14 +34,15 @@ SELECT oid FROM pg_class WHERE relname = 'disappear';
 (0 rows)
 
 -- should have members again
-SELECT * FROM aggtest;
+SELECT * FROM xacttest;
   a  |    b    
 -----+---------
   56 |     7.8
  100 |  99.097
    0 | 0.09561
   42 |  324.78
-(4 rows)
+ 777 | 777.777
+(5 rows)
 
 -- Read-only tests
 CREATE TABLE writetest (a int);
diff --git a/src/test/regress/expected/tsearch.out b/src/test/regress/expected/tsearch.out
index 45b92a6338..dc03f15499 100644
--- a/src/test/regress/expected/tsearch.out
+++ b/src/test/regress/expected/tsearch.out
@@ -1,3 +1,5 @@
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
 --
 -- Sanity checks for text search catalogs
 --
@@ -55,6 +57,14 @@ WHERE
 -------+-------+--------+--------------+----------+---------
 (0 rows)
 
+-- Load some test data
+CREATE TABLE test_tsvector(
+	t text,
+	a tsvector
+);
+\set filename :abs_srcdir '/data/tsearch.data'
+COPY test_tsvector FROM :'filename';
+ANALYZE test_tsvector;
 -- test basic text search behavior without indexes, then with
 SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh';
  count 
diff --git a/src/test/regress/expected/varchar.out b/src/test/regress/expected/varchar.out
index da23ae810b..f1a8202d9f 100644
--- a/src/test/regress/expected/varchar.out
+++ b/src/test/regress/expected/varchar.out
@@ -1,7 +1,11 @@
 --
 -- VARCHAR
 --
-CREATE TABLE VARCHAR_TBL(f1 varchar(1));
+--
+-- Build a table for testing
+-- (This temporarily hides the table created in test_setup.sql)
+--
+CREATE TEMP TABLE VARCHAR_TBL(f1 varchar(1));
 INSERT INTO VARCHAR_TBL (f1) VALUES ('a');
 INSERT INTO VARCHAR_TBL (f1) VALUES ('A');
 -- any of the following three input formats are acceptable
@@ -93,13 +97,11 @@ DROP TABLE VARCHAR_TBL;
 --
 -- Now test longer arrays of char
 --
-CREATE TABLE VARCHAR_TBL(f1 varchar(4));
-INSERT INTO VARCHAR_TBL (f1) VALUES ('a');
-INSERT INTO VARCHAR_TBL (f1) VALUES ('ab');
-INSERT INTO VARCHAR_TBL (f1) VALUES ('abcd');
+-- This varchar_tbl was already created and filled in test_setup.sql.
+-- Here we just try to insert bad values.
+--
 INSERT INTO VARCHAR_TBL (f1) VALUES ('abcde');
 ERROR:  value too long for type character varying(4)
-INSERT INTO VARCHAR_TBL (f1) VALUES ('abcd    ');
 SELECT * FROM VARCHAR_TBL;
   f1  
 ------
diff --git a/src/test/regress/expected/varchar_1.out b/src/test/regress/expected/varchar_1.out
index 958f9c07e0..6f01ef969e 100644
--- a/src/test/regress/expected/varchar_1.out
+++ b/src/test/regress/expected/varchar_1.out
@@ -1,7 +1,11 @@
 --
 -- VARCHAR
 --
-CREATE TABLE VARCHAR_TBL(f1 varchar(1));
+--
+-- Build a table for testing
+-- (This temporarily hides the table created in test_setup.sql)
+--
+CREATE TEMP TABLE VARCHAR_TBL(f1 varchar(1));
 INSERT INTO VARCHAR_TBL (f1) VALUES ('a');
 INSERT INTO VARCHAR_TBL (f1) VALUES ('A');
 -- any of the following three input formats are acceptable
@@ -93,13 +97,11 @@ DROP TABLE VARCHAR_TBL;
 --
 -- Now test longer arrays of char
 --
-CREATE TABLE VARCHAR_TBL(f1 varchar(4));
-INSERT INTO VARCHAR_TBL (f1) VALUES ('a');
-INSERT INTO VARCHAR_TBL (f1) VALUES ('ab');
-INSERT INTO VARCHAR_TBL (f1) VALUES ('abcd');
+-- This varchar_tbl was already created and filled in test_setup.sql.
+-- Here we just try to insert bad values.
+--
 INSERT INTO VARCHAR_TBL (f1) VALUES ('abcde');
 ERROR:  value too long for type character varying(4)
-INSERT INTO VARCHAR_TBL (f1) VALUES ('abcd    ');
 SELECT * FROM VARCHAR_TBL;
   f1  
 ------
diff --git a/src/test/regress/expected/varchar_2.out b/src/test/regress/expected/varchar_2.out
index b1d412ca00..72e57050ea 100644
--- a/src/test/regress/expected/varchar_2.out
+++ b/src/test/regress/expected/varchar_2.out
@@ -1,7 +1,11 @@
 --
 -- VARCHAR
 --
-CREATE TABLE VARCHAR_TBL(f1 varchar(1));
+--
+-- Build a table for testing
+-- (This temporarily hides the table created in test_setup.sql)
+--
+CREATE TEMP TABLE VARCHAR_TBL(f1 varchar(1));
 INSERT INTO VARCHAR_TBL (f1) VALUES ('a');
 INSERT INTO VARCHAR_TBL (f1) VALUES ('A');
 -- any of the following three input formats are acceptable
@@ -93,13 +97,11 @@ DROP TABLE VARCHAR_TBL;
 --
 -- Now test longer arrays of char
 --
-CREATE TABLE VARCHAR_TBL(f1 varchar(4));
-INSERT INTO VARCHAR_TBL (f1) VALUES ('a');
-INSERT INTO VARCHAR_TBL (f1) VALUES ('ab');
-INSERT INTO VARCHAR_TBL (f1) VALUES ('abcd');
+-- This varchar_tbl was already created and filled in test_setup.sql.
+-- Here we just try to insert bad values.
+--
 INSERT INTO VARCHAR_TBL (f1) VALUES ('abcde');
 ERROR:  value too long for type character varying(4)
-INSERT INTO VARCHAR_TBL (f1) VALUES ('abcd    ');
 SELECT * FROM VARCHAR_TBL;
   f1  
 ------
diff --git a/src/test/regress/expected/with.out b/src/test/regress/expected/with.out
index 75e61460d9..1594b426b7 100644
--- a/src/test/regress/expected/with.out
+++ b/src/test/regress/expected/with.out
@@ -3043,36 +3043,42 @@ SELECT * FROM parent;
 -- check EXPLAIN VERBOSE for a wCTE with RETURNING
 EXPLAIN (VERBOSE, COSTS OFF)
 WITH wcte AS ( INSERT INTO int8_tbl VALUES ( 42, 47 ) RETURNING q2 )
-DELETE FROM a USING wcte WHERE aa = q2;
-                         QUERY PLAN                         
-------------------------------------------------------------
- Delete on public.a
-   Delete on public.a a_1
-   Delete on public.b a_2
-   Delete on public.c a_3
-   Delete on public.d a_4
+DELETE FROM a_star USING wcte WHERE aa = q2;
+                                QUERY PLAN                                 
+---------------------------------------------------------------------------
+ Delete on public.a_star
+   Delete on public.a_star a_star_1
+   Delete on public.b_star a_star_2
+   Delete on public.c_star a_star_3
+   Delete on public.d_star a_star_4
+   Delete on public.e_star a_star_5
+   Delete on public.f_star a_star_6
    CTE wcte
      ->  Insert on public.int8_tbl
            Output: int8_tbl.q2
            ->  Result
                  Output: '42'::bigint, '47'::bigint
    ->  Hash Join
-         Output: wcte.*, a.tableoid, a.ctid
-         Hash Cond: (a.aa = wcte.q2)
+         Output: wcte.*, a_star.tableoid, a_star.ctid
+         Hash Cond: (a_star.aa = wcte.q2)
          ->  Append
-               ->  Seq Scan on public.a a_1
-                     Output: a_1.aa, a_1.tableoid, a_1.ctid
-               ->  Seq Scan on public.b a_2
-                     Output: a_2.aa, a_2.tableoid, a_2.ctid
-               ->  Seq Scan on public.c a_3
-                     Output: a_3.aa, a_3.tableoid, a_3.ctid
-               ->  Seq Scan on public.d a_4
-                     Output: a_4.aa, a_4.tableoid, a_4.ctid
+               ->  Seq Scan on public.a_star a_star_1
+                     Output: a_star_1.aa, a_star_1.tableoid, a_star_1.ctid
+               ->  Seq Scan on public.b_star a_star_2
+                     Output: a_star_2.aa, a_star_2.tableoid, a_star_2.ctid
+               ->  Seq Scan on public.c_star a_star_3
+                     Output: a_star_3.aa, a_star_3.tableoid, a_star_3.ctid
+               ->  Seq Scan on public.d_star a_star_4
+                     Output: a_star_4.aa, a_star_4.tableoid, a_star_4.ctid
+               ->  Seq Scan on public.e_star a_star_5
+                     Output: a_star_5.aa, a_star_5.tableoid, a_star_5.ctid
+               ->  Seq Scan on public.f_star a_star_6
+                     Output: a_star_6.aa, a_star_6.tableoid, a_star_6.ctid
          ->  Hash
                Output: wcte.*, wcte.q2
                ->  CTE Scan on wcte
                      Output: wcte.*, wcte.q2
-(26 rows)
+(32 rows)
 
 -- error cases
 -- data-modifying WITH tries to use its own output
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index f571214243..ce5d3ec4a8 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -1,6 +1,9 @@
 # ----------
 # src/test/regress/parallel_schedule
 #
+# Most test scripts can be run after running just test_setup and possibly
+# create_index.  Exceptions to this rule are documented below.
+#
 # By convention, we put no more than twenty tests in any one parallel group;
 # this limits the number of connections needed to run the tests.
 # ----------
@@ -20,40 +23,31 @@ test: boolean char name varchar text int2 int4 int8 oid float4 float8 bit numeri
 
 # ----------
 # The second group of parallel tests
-# strings depends on char, varchar and text
-# numerology depends on int2, int4, int8, float4, float8
 # multirangetypes depends on rangetypes
 # multirangetypes shouldn't run concurrently with type_sanity
 # ----------
-test: strings numerology point lseg line box path polygon circle date time timetz timestamp timestamptz interval inet macaddr macaddr8 multirangetypes create_function_0
+test: strings numerology point lseg line box path polygon circle date time timetz timestamp timestamptz interval inet macaddr macaddr8 multirangetypes
 
 # ----------
 # Another group of parallel tests
-# geometry depends on point, lseg, box, path, polygon and circle
-# horology depends on interval, timetz, timestamp, timestamptz
-# opr_sanity depends on create_function_0
+# geometry depends on point, lseg, line, box, path, polygon, circle
+# horology depends on date, time, timetz, timestamp, timestamptz, interval
 # ----------
 test: geometry horology tstypes regex type_sanity opr_sanity misc_sanity comments expressions unicode xid mvcc
 
-# ----------
-# These each depend on the previous one
-# ----------
-test: create_type
-test: create_table
-
 # ----------
 # Load huge amounts of data
 # We should split the data files into single files and then
-# execute two copy tests parallel, to check that copy itself
+# execute two copy tests in parallel, to check that copy itself
 # is concurrent safe.
 # ----------
 test: copy copyselect copydml insert insert_conflict
 
 # ----------
 # More groups of parallel tests
+# Note: many of the tests in later groups depend on create_index
 # ----------
-test: create_misc create_operator create_procedure
-# These depend on create_misc and create_operator
+test: create_function_0 create_misc create_operator create_procedure create_table create_type
 test: create_index create_index_spgist create_view index_including index_including_gist
 
 # ----------
@@ -67,11 +61,14 @@ test: create_aggregate create_function_3 create_cast constraints triggers select
 # ----------
 test: sanity_check
 
+# Note: the ignore: line does not skip random, it just marks it as ignorable
+ignore: random
+
 # ----------
 # Another group of parallel tests
-# Note: the ignore: line does not run random, just mark it as ignorable
+# aggregates depends on create_aggregate
+# join depends on create_misc
 # ----------
-ignore: random
 test: select_into select_distinct select_distinct_on select_implicit select_having subselect union case join aggregates transactions random portals arrays btree_index hash_index update delete namespace prepared_xacts
 
 # ----------
@@ -86,15 +83,18 @@ test: brin_bloom brin_multi
 
 # ----------
 # Another group of parallel tests
+# psql depends on create_am
+# amutils depends on geometry, create_index_spgist, hash_index, brin
 # ----------
 test: create_table_like alter_generic alter_operator misc async dbsize misc_functions sysviews tsrf tid tidscan tidrangescan collate.icu.utf8 incremental_sort
 
-# rules cannot run concurrently with any test that creates
-# a view or rule in the public schema
 # collate.*.utf8 tests cannot be run in parallel with each other
 test: rules psql psql_crosstab amutils stats_ext collate.linux.utf8
 
-# run by itself so it can run parallel workers
+# ----------
+# Run these alone so they don't run out of parallel workers
+# select_parallel depends on create_misc
+# ----------
 test: select_parallel
 test: write_parallel
 test: vacuum_parallel
@@ -104,6 +104,7 @@ test: publication subscription
 
 # ----------
 # Another group of parallel tests
+# select_views depends on create_view
 # ----------
 test: select_views portals_p2 foreign_key cluster dependency guc bitmapops combocid tsearch tsdicts foreign_data window xmlmap functional_deps advisory_lock indirect_toast equivclass
 
@@ -114,6 +115,7 @@ test: json jsonb json_encoding jsonpath jsonpath_encoding jsonb_jsonpath
 
 # ----------
 # Another group of parallel tests
+# with depends on create_misc
 # NB: temp.sql does a reconnect which transiently uses 2 connections,
 # so keep this parallel group to at most 19 tests
 # ----------
@@ -124,9 +126,10 @@ test: plancache limit plpgsql copy2 temp domain rangefuncs prepare conversion tr
 # ----------
 test: partition_join partition_prune reloptions hash_part indexing partition_aggregate partition_info tuplesort explain compression memoize
 
-# event triggers cannot run concurrently with any test that runs DDL
+# event_trigger cannot run concurrently with any test that runs DDL
 # oidjoins is read-only, though, and should run late for best coverage
 test: event_trigger oidjoins
+
 # this test also uses event triggers, so likewise run it by itself
 test: fast_default
 
diff --git a/src/test/regress/sql/aggregates.sql b/src/test/regress/sql/aggregates.sql
index 7cf86465e9..2f5d0e00f3 100644
--- a/src/test/regress/sql/aggregates.sql
+++ b/src/test/regress/sql/aggregates.sql
@@ -2,9 +2,24 @@
 -- AGGREGATES
 --
 
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
+
 -- avoid bit-exact output here because operations may not be bit-exact.
 SET extra_float_digits = 0;
 
+-- prepare some test data
+CREATE TABLE aggtest (
+	a 			int2,
+	b			float4
+);
+
+\set filename :abs_srcdir '/data/agg.data'
+COPY aggtest FROM :'filename';
+
+ANALYZE aggtest;
+
+
 SELECT avg(four) AS avg_1 FROM onek;
 
 SELECT avg(a) AS avg_32 FROM aggtest WHERE a < 100;
diff --git a/src/test/regress/sql/arrays.sql b/src/test/regress/sql/arrays.sql
index 912233ef96..c88a3fbae9 100644
--- a/src/test/regress/sql/arrays.sql
+++ b/src/test/regress/sql/arrays.sql
@@ -2,6 +2,9 @@
 -- ARRAYS
 --
 
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
+
 CREATE TABLE arrtest (
 	a 			int2[],
 	b 			int4[][][],
@@ -12,6 +15,16 @@ CREATE TABLE arrtest (
 	g			varchar(5)[]
 );
 
+CREATE TABLE array_op_test (
+	seqno		int4,
+	i			int4[],
+	t			text[]
+);
+
+\set filename :abs_srcdir '/data/array.data'
+COPY array_op_test FROM :'filename';
+ANALYZE array_op_test;
+
 --
 -- only the 'e' array is 0-based, the others are 1-based.
 --
@@ -153,6 +166,10 @@ UPDATE arrtest_s SET a[:] = '{23, 24, 25}';  -- fail, too small
 INSERT INTO arrtest_s VALUES(NULL, NULL);
 UPDATE arrtest_s SET a[:] = '{11, 12, 13, 14, 15}';  -- fail, no good with null
 
+-- we want to work with a point_tbl that includes a null
+CREATE TEMP TABLE point_tbl AS SELECT * FROM public.point_tbl;
+INSERT INTO POINT_TBL(f1) VALUES (NULL);
+
 -- check with fixed-length-array type, such as point
 SELECT f1[0:1] FROM POINT_TBL;
 SELECT f1[0:] FROM POINT_TBL;
diff --git a/src/test/regress/sql/btree_index.sql b/src/test/regress/sql/btree_index.sql
index c34502249f..239f4a4755 100644
--- a/src/test/regress/sql/btree_index.sql
+++ b/src/test/regress/sql/btree_index.sql
@@ -1,5 +1,64 @@
 --
 -- BTREE_INDEX
+--
+
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
+
+CREATE TABLE bt_i4_heap (
+	seqno 		int4,
+	random 		int4
+);
+
+CREATE TABLE bt_name_heap (
+	seqno 		name,
+	random 		int4
+);
+
+CREATE TABLE bt_txt_heap (
+	seqno 		text,
+	random 		int4
+);
+
+CREATE TABLE bt_f8_heap (
+	seqno 		float8,
+	random 		int4
+);
+
+\set filename :abs_srcdir '/data/desc.data'
+COPY bt_i4_heap FROM :'filename';
+
+\set filename :abs_srcdir '/data/hash.data'
+COPY bt_name_heap FROM :'filename';
+
+\set filename :abs_srcdir '/data/desc.data'
+COPY bt_txt_heap FROM :'filename';
+
+\set filename :abs_srcdir '/data/hash.data'
+COPY bt_f8_heap FROM :'filename';
+
+ANALYZE bt_i4_heap;
+ANALYZE bt_name_heap;
+ANALYZE bt_txt_heap;
+ANALYZE bt_f8_heap;
+
+--
+-- BTREE ascending/descending cases
+--
+-- we load int4/text from pure descending data (each key is a new
+-- low key) and name/f8 from pure ascending data (each key is a new
+-- high key).  we had a bug where new low keys would sometimes be
+-- "lost".
+--
+CREATE INDEX bt_i4_index ON bt_i4_heap USING btree (seqno int4_ops);
+
+CREATE INDEX bt_name_index ON bt_name_heap USING btree (seqno name_ops);
+
+CREATE INDEX bt_txt_index ON bt_txt_heap USING btree (seqno text_ops);
+
+CREATE INDEX bt_f8_index ON bt_f8_heap USING btree (seqno float8_ops);
+
+--
 -- test retrieval of min/max keys for each index
 --
 
diff --git a/src/test/regress/sql/char.sql b/src/test/regress/sql/char.sql
index 79e6e565ba..9c83c45e34 100644
--- a/src/test/regress/sql/char.sql
+++ b/src/test/regress/sql/char.sql
@@ -9,9 +9,10 @@ SELECT char 'c' = char 'c' AS true;
 
 --
 -- Build a table for testing
+-- (This temporarily hides the table created in test_setup.sql)
 --
 
-CREATE TABLE CHAR_TBL(f1 char);
+CREATE TEMP TABLE CHAR_TBL(f1 char);
 
 INSERT INTO CHAR_TBL (f1) VALUES ('a');
 
@@ -63,13 +64,10 @@ DROP TABLE CHAR_TBL;
 --
 -- Now test longer arrays of char
 --
+-- This char_tbl was already created and filled in test_setup.sql.
+-- Here we just try to insert bad values.
+--
 
-CREATE TABLE CHAR_TBL(f1 char(4));
-
-INSERT INTO CHAR_TBL (f1) VALUES ('a');
-INSERT INTO CHAR_TBL (f1) VALUES ('ab');
-INSERT INTO CHAR_TBL (f1) VALUES ('abcd');
 INSERT INTO CHAR_TBL (f1) VALUES ('abcde');
-INSERT INTO CHAR_TBL (f1) VALUES ('abcd    ');
 
 SELECT * FROM CHAR_TBL;
diff --git a/src/test/regress/sql/copy.sql b/src/test/regress/sql/copy.sql
index 15e26517ec..b01e68c78f 100644
--- a/src/test/regress/sql/copy.sql
+++ b/src/test/regress/sql/copy.sql
@@ -6,112 +6,6 @@
 \getenv abs_srcdir PG_ABS_SRCDIR
 \getenv abs_builddir PG_ABS_BUILDDIR
 
--- CLASS POPULATION
---	(any resemblance to real life is purely coincidental)
---
-\set filename :abs_srcdir '/data/agg.data'
-COPY aggtest FROM :'filename';
-
-\set filename :abs_srcdir '/data/onek.data'
-COPY onek FROM :'filename';
-
-\set filename :abs_builddir '/results/onek.data'
-COPY onek TO :'filename';
-
-DELETE FROM onek;
-
-COPY onek FROM :'filename';
-
-\set filename :abs_srcdir '/data/tenk.data'
-COPY tenk1 FROM :'filename';
-
-\set filename :abs_srcdir '/data/rect.data'
-COPY slow_emp4000 FROM :'filename';
-
-\set filename :abs_srcdir '/data/person.data'
-COPY person FROM :'filename';
-
-\set filename :abs_srcdir '/data/emp.data'
-COPY emp FROM :'filename';
-
-\set filename :abs_srcdir '/data/student.data'
-COPY student FROM :'filename';
-
-\set filename :abs_srcdir '/data/stud_emp.data'
-COPY stud_emp FROM :'filename';
-
-\set filename :abs_srcdir '/data/streets.data'
-COPY road FROM :'filename';
-
-\set filename :abs_srcdir '/data/real_city.data'
-COPY real_city FROM :'filename';
-
-\set filename :abs_srcdir '/data/hash.data'
-COPY hash_i4_heap FROM :'filename';
-
-COPY hash_name_heap FROM :'filename';
-
-COPY hash_txt_heap FROM :'filename';
-
-COPY hash_f8_heap FROM :'filename';
-
-\set filename :abs_srcdir '/data/tsearch.data'
-COPY test_tsvector FROM :'filename';
-
-\set filename :abs_srcdir '/data/jsonb.data'
-COPY testjsonb FROM :'filename';
-
--- the data in this file has a lot of duplicates in the index key
--- fields, leading to long bucket chains and lots of table expansion.
--- this is therefore a stress test of the bucket overflow code (unlike
--- the data in hash.data, which has unique index keys).
---
--- \set filename :abs_srcdir '/data/hashovfl.data'
--- COPY hash_ovfl_heap FROM :'filename';
-
-\set filename :abs_srcdir '/data/desc.data'
-COPY bt_i4_heap FROM :'filename';
-
-\set filename :abs_srcdir '/data/hash.data'
-COPY bt_name_heap FROM :'filename';
-
-\set filename :abs_srcdir '/data/desc.data'
-COPY bt_txt_heap FROM :'filename';
-
-\set filename :abs_srcdir '/data/hash.data'
-COPY bt_f8_heap FROM :'filename';
-
-\set filename :abs_srcdir '/data/array.data'
-COPY array_op_test FROM :'filename';
-
-\set filename :abs_srcdir '/data/array.data'
-COPY array_index_op_test FROM :'filename';
-
--- analyze all the data we just loaded, to ensure plan consistency
--- in later tests
-
-ANALYZE aggtest;
-ANALYZE onek;
-ANALYZE tenk1;
-ANALYZE slow_emp4000;
-ANALYZE person;
-ANALYZE emp;
-ANALYZE student;
-ANALYZE stud_emp;
-ANALYZE road;
-ANALYZE real_city;
-ANALYZE hash_i4_heap;
-ANALYZE hash_name_heap;
-ANALYZE hash_txt_heap;
-ANALYZE hash_f8_heap;
-ANALYZE test_tsvector;
-ANALYZE bt_i4_heap;
-ANALYZE bt_name_heap;
-ANALYZE bt_txt_heap;
-ANALYZE bt_f8_heap;
-ANALYZE array_op_test;
-ANALYZE array_index_op_test;
-
 --- test copying in CSV mode with various styles
 --- of embedded line ending characters
 
diff --git a/src/test/regress/sql/create_index.sql b/src/test/regress/sql/create_index.sql
index 8b353be16e..089dd26bd1 100644
--- a/src/test/regress/sql/create_index.sql
+++ b/src/test/regress/sql/create_index.sql
@@ -3,6 +3,9 @@
 -- Create ancillary data structures (i.e. indices)
 --
 
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
+
 --
 -- BTREE
 --
@@ -43,22 +46,6 @@ COMMENT ON INDEX six_wrong IS 'bad index';
 COMMENT ON INDEX six IS 'good index';
 COMMENT ON INDEX six IS NULL;
 
---
--- BTREE ascending/descending cases
---
--- we load int4/text from pure descending data (each key is a new
--- low key) and name/f8 from pure ascending data (each key is a new
--- high key).  we had a bug where new low keys would sometimes be
--- "lost".
---
-CREATE INDEX bt_i4_index ON bt_i4_heap USING btree (seqno int4_ops);
-
-CREATE INDEX bt_name_index ON bt_name_heap USING btree (seqno name_ops);
-
-CREATE INDEX bt_txt_index ON bt_txt_heap USING btree (seqno text_ops);
-
-CREATE INDEX bt_f8_index ON bt_f8_heap USING btree (seqno float8_ops);
-
 --
 -- BTREE partial indices
 --
@@ -74,12 +61,27 @@ CREATE INDEX onek2_stu1_prtl ON onek2 USING btree(stringu1 name_ops)
 --
 -- GiST (rtree-equivalent opclasses only)
 --
-CREATE INDEX grect2ind ON fast_emp4000 USING gist (home_base);
 
-CREATE INDEX gpolygonind ON polygon_tbl USING gist (f1);
+CREATE TABLE slow_emp4000 (
+	home_base	 box
+);
+
+CREATE TABLE fast_emp4000 (
+	home_base	 box
+);
+
+\set filename :abs_srcdir '/data/rect.data'
+COPY slow_emp4000 FROM :'filename';
+
+INSERT INTO fast_emp4000 SELECT * FROM slow_emp4000;
 
-CREATE INDEX gcircleind ON circle_tbl USING gist (f1);
+ANALYZE slow_emp4000;
+ANALYZE fast_emp4000;
 
+CREATE INDEX grect2ind ON fast_emp4000 USING gist (home_base);
+
+-- we want to work with a point_tbl that includes a null
+CREATE TEMP TABLE point_tbl AS SELECT * FROM public.point_tbl;
 INSERT INTO POINT_TBL(f1) VALUES (NULL);
 
 CREATE INDEX gpointind ON point_tbl USING gist (f1);
@@ -114,12 +116,6 @@ SELECT count(*) FROM fast_emp4000 WHERE home_base && '(1000,1000,0,0)'::box;
 
 SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL;
 
-SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon
-    ORDER BY (poly_center(f1))[0];
-
-SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
-    ORDER BY area(f1);
-
 SELECT count(*) FROM gpolygon_tbl WHERE f1 && '(1000,1000,0,0)'::polygon;
 
 SELECT count(*) FROM gcircle_tbl WHERE f1 && '<(500,500),500>'::circle;
@@ -175,18 +171,6 @@ EXPLAIN (COSTS OFF)
 SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL;
 SELECT count(*) FROM fast_emp4000 WHERE home_base IS NULL;
 
-EXPLAIN (COSTS OFF)
-SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon
-    ORDER BY (poly_center(f1))[0];
-SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon
-    ORDER BY (poly_center(f1))[0];
-
-EXPLAIN (COSTS OFF)
-SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
-    ORDER BY area(f1);
-SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
-    ORDER BY area(f1);
-
 EXPLAIN (COSTS OFF)
 SELECT count(*) FROM gpolygon_tbl WHERE f1 && '(1000,1000,0,0)'::polygon;
 SELECT count(*) FROM gpolygon_tbl WHERE f1 && '(1000,1000,0,0)'::polygon;
@@ -274,6 +258,21 @@ RESET enable_bitmapscan;
 -- Note: GIN currently supports only bitmap scans, not plain indexscans
 --
 
+CREATE TABLE array_index_op_test (
+	seqno		int4,
+	i			int4[],
+	t			text[]
+);
+
+\set filename :abs_srcdir '/data/array.data'
+COPY array_index_op_test FROM :'filename';
+ANALYZE array_index_op_test;
+
+SELECT * FROM array_index_op_test WHERE i = '{NULL}' ORDER BY seqno;
+SELECT * FROM array_index_op_test WHERE i @> '{NULL}' ORDER BY seqno;
+SELECT * FROM array_index_op_test WHERE i && '{NULL}' ORDER BY seqno;
+SELECT * FROM array_index_op_test WHERE i <@ '{NULL}' ORDER BY seqno;
+
 SET enable_seqscan = OFF;
 SET enable_indexscan = OFF;
 SET enable_bitmapscan = ON;
@@ -295,10 +294,6 @@ SELECT * FROM array_index_op_test WHERE i = '{}' ORDER BY seqno;
 SELECT * FROM array_index_op_test WHERE i @> '{}' ORDER BY seqno;
 SELECT * FROM array_index_op_test WHERE i && '{}' ORDER BY seqno;
 SELECT * FROM array_index_op_test WHERE i <@ '{}' ORDER BY seqno;
-SELECT * FROM array_op_test WHERE i = '{NULL}' ORDER BY seqno;
-SELECT * FROM array_op_test WHERE i @> '{NULL}' ORDER BY seqno;
-SELECT * FROM array_op_test WHERE i && '{NULL}' ORDER BY seqno;
-SELECT * FROM array_op_test WHERE i <@ '{NULL}' ORDER BY seqno;
 
 CREATE INDEX textarrayidx ON array_index_op_test USING gin (t);
 
@@ -331,8 +326,6 @@ SELECT * FROM array_index_op_test WHERE t && '{AAAAAAA80240}' ORDER BY seqno;
 SELECT * FROM array_index_op_test WHERE i @> '{32}' AND t && '{AAAAAAA80240}' ORDER BY seqno;
 SELECT * FROM array_index_op_test WHERE i && '{32}' AND t @> '{AAAAAAA80240}' ORDER BY seqno;
 SELECT * FROM array_index_op_test WHERE t = '{}' ORDER BY seqno;
-SELECT * FROM array_op_test WHERE i = '{NULL}' ORDER BY seqno;
-SELECT * FROM array_op_test WHERE i <@ '{NULL}' ORDER BY seqno;
 
 RESET enable_seqscan;
 RESET enable_indexscan;
@@ -362,14 +355,6 @@ CREATE INDEX gin_relopts_test ON array_index_op_test USING gin (i)
 --
 -- HASH
 --
-CREATE INDEX hash_i4_index ON hash_i4_heap USING hash (random int4_ops);
-
-CREATE INDEX hash_name_index ON hash_name_heap USING hash (random name_ops);
-
-CREATE INDEX hash_txt_index ON hash_txt_heap USING hash (random text_ops);
-
-CREATE INDEX hash_f8_index ON hash_f8_heap USING hash (random float8_ops) WITH (fillfactor=60);
-
 CREATE UNLOGGED TABLE unlogged_hash_table (id int4);
 CREATE INDEX unlogged_hash_index ON unlogged_hash_table USING hash (id int4_ops);
 DROP TABLE unlogged_hash_table;
@@ -448,15 +433,6 @@ ALTER TABLE covering_index_heap ADD CONSTRAINT covering_pkey PRIMARY KEY USING I
 covering_pkey;
 DROP TABLE covering_index_heap;
 
-
---
--- Also try building functional, expressional, and partial indexes on
--- tables that already contain data.
---
-create unique index hash_f8_index_1 on hash_f8_heap(abs(random));
-create unique index hash_f8_index_2 on hash_f8_heap((seqno + 1), random);
-create unique index hash_f8_index_3 on hash_f8_heap(random) where seqno > 1000;
-
 --
 -- Try some concurrent index builds
 --
@@ -733,8 +709,6 @@ SELECT count(*) FROM dupindexcols
 -- Check ordering of =ANY indexqual results (bug in 9.2.0)
 --
 
-vacuum tenk1;		-- ensure we get consistent plans here
-
 explain (costs off)
 SELECT unique1 FROM tenk1
 WHERE unique1 IN (1,42,7)
diff --git a/src/test/regress/sql/create_misc.sql b/src/test/regress/sql/create_misc.sql
index c7d0d064c3..6fb9fdab4c 100644
--- a/src/test/regress/sql/create_misc.sql
+++ b/src/test/regress/sql/create_misc.sql
@@ -2,63 +2,37 @@
 -- CREATE_MISC
 --
 
--- CLASS POPULATION
---	(any resemblance to real life is purely coincidental)
 --
+-- a is the type root
+-- b and c inherit from a (one-level single inheritance)
+-- d inherits from b and c (two-level multiple inheritance)
+-- e inherits from c (two-level single inheritance)
+-- f inherits from e (three-level single inheritance)
+--
+CREATE TABLE a_star (
+	class		char,
+	a 			int4
+);
 
-INSERT INTO tenk2 SELECT * FROM tenk1;
-
-CREATE TABLE onek2 AS SELECT * FROM onek;
-
-INSERT INTO fast_emp4000 SELECT * FROM slow_emp4000;
-
-SELECT *
-   INTO TABLE Bprime
-   FROM tenk1
-   WHERE unique2 < 1000;
-
-INSERT INTO hobbies_r (name, person)
-   SELECT 'posthacking', p.name
-   FROM person* p
-   WHERE p.name = 'mike' or p.name = 'jeff';
-
-INSERT INTO hobbies_r (name, person)
-   SELECT 'basketball', p.name
-   FROM person p
-   WHERE p.name = 'joe' or p.name = 'sally';
-
-INSERT INTO hobbies_r (name) VALUES ('skywalking');
-
-INSERT INTO equipment_r (name, hobby) VALUES ('advil', 'posthacking');
-
-INSERT INTO equipment_r (name, hobby) VALUES ('peet''s coffee', 'posthacking');
-
-INSERT INTO equipment_r (name, hobby) VALUES ('hightops', 'basketball');
-
-INSERT INTO equipment_r (name, hobby) VALUES ('guts', 'skywalking');
-
-INSERT INTO city VALUES
-('Podunk', '(1,2),(3,4)', '100,127,1000'),
-('Gotham', '(1000,34),(1100,334)', '123456,127,-1000,6789');
-TABLE city;
+CREATE TABLE b_star (
+	b 			text
+) INHERITS (a_star);
 
-SELECT *
-   INTO TABLE ramp
-   FROM road
-   WHERE name ~ '.*Ramp';
+CREATE TABLE c_star (
+	c 			name
+) INHERITS (a_star);
 
-INSERT INTO ihighway
-   SELECT *
-   FROM road
-   WHERE name ~ 'I- .*';
+CREATE TABLE d_star (
+	d 			float8
+) INHERITS (b_star, c_star);
 
-INSERT INTO shighway
-   SELECT *
-   FROM road
-   WHERE name ~ 'State Hwy.*';
+CREATE TABLE e_star (
+	e 			int2
+) INHERITS (c_star);
 
-UPDATE shighway
-   SET surface = 'asphalt';
+CREATE TABLE f_star (
+	f 			polygon
+) INHERITS (e_star);
 
 INSERT INTO a_star (class, a) VALUES ('a', 1);
 
@@ -200,18 +174,85 @@ ANALYZE d_star;
 ANALYZE e_star;
 ANALYZE f_star;
 
-
 --
--- for internal portal (cursor) tests
+-- inheritance stress test
 --
-CREATE TABLE iportaltest (
-	i		int4,
-	d		float4,
-	p		polygon
-);
+SELECT * FROM a_star*;
+
+SELECT *
+   FROM b_star* x
+   WHERE x.b = text 'bumble' or x.a < 3;
+
+SELECT class, a
+   FROM c_star* x
+   WHERE x.c ~ text 'hi';
+
+SELECT class, b, c
+   FROM d_star* x
+   WHERE x.a < 100;
+
+SELECT class, c FROM e_star* x WHERE x.c NOTNULL;
+
+SELECT * FROM f_star* x WHERE x.c ISNULL;
+
+-- grouping and aggregation on inherited sets have been busted in the past...
+
+SELECT sum(a) FROM a_star*;
+
+SELECT class, sum(a) FROM a_star* GROUP BY class ORDER BY class;
+
+
+ALTER TABLE f_star RENAME COLUMN f TO ff;
+
+ALTER TABLE e_star* RENAME COLUMN e TO ee;
+
+ALTER TABLE d_star* RENAME COLUMN d TO dd;
+
+ALTER TABLE c_star* RENAME COLUMN c TO cc;
+
+ALTER TABLE b_star* RENAME COLUMN b TO bb;
+
+ALTER TABLE a_star* RENAME COLUMN a TO aa;
+
+SELECT class, aa
+   FROM a_star* x
+   WHERE aa ISNULL;
+
+-- As of Postgres 7.1, ALTER implicitly recurses,
+-- so this should be the same as ALTER a_star*
+
+ALTER TABLE a_star RENAME COLUMN aa TO foo;
+
+SELECT class, foo
+   FROM a_star* x
+   WHERE x.foo >= 2;
+
+ALTER TABLE a_star RENAME COLUMN foo TO aa;
+
+SELECT *
+   from a_star*
+   WHERE aa < 1000;
+
+ALTER TABLE f_star ADD COLUMN f int4;
+
+UPDATE f_star SET f = 10;
+
+ALTER TABLE e_star* ADD COLUMN e int4;
+
+--UPDATE e_star* SET e = 42;
+
+SELECT * FROM e_star*;
+
+ALTER TABLE a_star* ADD COLUMN a text;
+
+-- That ALTER TABLE should have added TOAST tables.
+SELECT relname, reltoastrelid <> 0 AS has_toast_table
+   FROM pg_class
+   WHERE oid::regclass IN ('a_star', 'c_star')
+   ORDER BY 1;
 
-INSERT INTO iportaltest (i, d, p)
-   VALUES (1, 3.567, '(3.0,1.0),(4.0,2.0)'::polygon);
+--UPDATE b_star*
+--   SET a = text 'gazpacho'
+--   WHERE aa > 4;
 
-INSERT INTO iportaltest (i, d, p)
-   VALUES (2, 89.05, '(4.0,2.0),(3.0,1.0)'::polygon);
+SELECT class, aa, a FROM a_star*;
diff --git a/src/test/regress/sql/create_table.sql b/src/test/regress/sql/create_table.sql
index cc41f58ba2..37dac6b5fb 100644
--- a/src/test/regress/sql/create_table.sql
+++ b/src/test/regress/sql/create_table.sql
@@ -2,240 +2,7 @@
 -- CREATE_TABLE
 --
 
---
--- CLASS DEFINITIONS
---
-CREATE TABLE hobbies_r (
-	name		text,
-	person 		text
-);
-
-CREATE TABLE equipment_r (
-	name 		text,
-	hobby		text
-);
-
-CREATE TABLE onek (
-	unique1		int4,
-	unique2		int4,
-	two			int4,
-	four		int4,
-	ten			int4,
-	twenty		int4,
-	hundred		int4,
-	thousand	int4,
-	twothousand	int4,
-	fivethous	int4,
-	tenthous	int4,
-	odd			int4,
-	even		int4,
-	stringu1	name,
-	stringu2	name,
-	string4		name
-);
-
-CREATE TABLE tenk1 (
-	unique1		int4,
-	unique2		int4,
-	two			int4,
-	four		int4,
-	ten			int4,
-	twenty		int4,
-	hundred		int4,
-	thousand	int4,
-	twothousand	int4,
-	fivethous	int4,
-	tenthous	int4,
-	odd			int4,
-	even		int4,
-	stringu1	name,
-	stringu2	name,
-	string4		name
-);
-
-CREATE TABLE tenk2 (
-	unique1 	int4,
-	unique2 	int4,
-	two 	 	int4,
-	four 		int4,
-	ten			int4,
-	twenty 		int4,
-	hundred 	int4,
-	thousand 	int4,
-	twothousand int4,
-	fivethous 	int4,
-	tenthous	int4,
-	odd			int4,
-	even		int4,
-	stringu1	name,
-	stringu2	name,
-	string4		name
-);
-
-
-CREATE TABLE person (
-	name 		text,
-	age			int4,
-	location 	point
-);
-
-
-CREATE TABLE emp (
-	salary 		int4,
-	manager 	name
-) INHERITS (person);
-
-
-CREATE TABLE student (
-	gpa 		float8
-) INHERITS (person);
-
-
-CREATE TABLE stud_emp (
-	percent 	int4
-) INHERITS (emp, student);
-
-
-CREATE TABLE city (
-	name		name,
-	location 	box,
-	budget 		city_budget
-);
-
-CREATE TABLE dept (
-	dname		name,
-	mgrname 	text
-);
-
-CREATE TABLE slow_emp4000 (
-	home_base	 box
-);
-
-CREATE TABLE fast_emp4000 (
-	home_base	 box
-);
-
-CREATE TABLE road (
-	name		text,
-	thepath 	path
-);
-
-CREATE TABLE ihighway () INHERITS (road);
-
-CREATE TABLE shighway (
-	surface		text
-) INHERITS (road);
-
-CREATE TABLE real_city (
-	pop			int4,
-	cname		text,
-	outline 	path
-);
-
---
--- test the "star" operators a bit more thoroughly -- this time,
--- throw in lots of NULL fields...
---
--- a is the type root
--- b and c inherit from a (one-level single inheritance)
--- d inherits from b and c (two-level multiple inheritance)
--- e inherits from c (two-level single inheritance)
--- f inherits from e (three-level single inheritance)
---
-CREATE TABLE a_star (
-	class		char,
-	a 			int4
-);
-
-CREATE TABLE b_star (
-	b 			text
-) INHERITS (a_star);
-
-CREATE TABLE c_star (
-	c 			name
-) INHERITS (a_star);
-
-CREATE TABLE d_star (
-	d 			float8
-) INHERITS (b_star, c_star);
-
-CREATE TABLE e_star (
-	e 			int2
-) INHERITS (c_star);
-
-CREATE TABLE f_star (
-	f 			polygon
-) INHERITS (e_star);
-
-CREATE TABLE aggtest (
-	a 			int2,
-	b			float4
-);
-
-CREATE TABLE hash_i4_heap (
-	seqno 		int4,
-	random 		int4
-);
-
-CREATE TABLE hash_name_heap (
-	seqno 		int4,
-	random 		name
-);
-
-CREATE TABLE hash_txt_heap (
-	seqno 		int4,
-	random 		text
-);
-
-CREATE TABLE hash_f8_heap (
-	seqno		int4,
-	random 		float8
-);
-
--- don't include the hash_ovfl_heap stuff in the distribution
--- the data set is too large for what it's worth
---
--- CREATE TABLE hash_ovfl_heap (
---	x			int4,
---	y			int4
--- );
-
-CREATE TABLE bt_i4_heap (
-	seqno 		int4,
-	random 		int4
-);
-
-CREATE TABLE bt_name_heap (
-	seqno 		name,
-	random 		int4
-);
-
-CREATE TABLE bt_txt_heap (
-	seqno 		text,
-	random 		int4
-);
-
-CREATE TABLE bt_f8_heap (
-	seqno 		float8,
-	random 		int4
-);
-
-CREATE TABLE array_op_test (
-	seqno		int4,
-	i			int4[],
-	t			text[]
-);
-
-CREATE TABLE array_index_op_test (
-	seqno		int4,
-	i			int4[],
-	t			text[]
-);
-
-CREATE TABLE testjsonb (
-       j jsonb
-);
-
+-- Error cases
 CREATE TABLE unknowntab (
 	u unknown    -- fail
 );
@@ -244,15 +11,6 @@ CREATE TYPE unknown_comptype AS (
 	u unknown    -- fail
 );
 
-CREATE TABLE IF NOT EXISTS test_tsvector(
-	t text,
-	a tsvector
-);
-
-CREATE TABLE IF NOT EXISTS test_tsvector(
-	t text
-);
-
 -- invalid: non-lowercase quoted reloptions identifiers
 CREATE TABLE tas_case WITH ("Fillfactor" = 10) AS SELECT 1 a;
 
diff --git a/src/test/regress/sql/create_type.sql b/src/test/regress/sql/create_type.sql
index f75272468f..c6fc4f9029 100644
--- a/src/test/regress/sql/create_type.sql
+++ b/src/test/regress/sql/create_type.sql
@@ -210,6 +210,19 @@ CREATE OPERATOR <% (
 SELECT point '(1,2)' <% widget '(0,0,3)' AS t,
        point '(1,2)' <% widget '(0,0,1)' AS f;
 
+-- exercise city_budget type
+CREATE TABLE city (
+	name		name,
+	location 	box,
+	budget 		city_budget
+);
+
+INSERT INTO city VALUES
+('Podunk', '(1,2),(3,4)', '100,127,1000'),
+('Gotham', '(1000,34),(1100,334)', '123456,127,-1000,6789');
+
+TABLE city;
+
 --
 -- Test CREATE/ALTER TYPE using a type that's compatible with varchar,
 -- so we can re-use those support functions
diff --git a/src/test/regress/sql/create_view.sql b/src/test/regress/sql/create_view.sql
index 51997d7a0a..c099517428 100644
--- a/src/test/regress/sql/create_view.sql
+++ b/src/test/regress/sql/create_view.sql
@@ -16,6 +16,21 @@ CREATE FUNCTION interpt_pp(path, path)
     AS :'regresslib'
     LANGUAGE C STRICT;
 
+CREATE TABLE real_city (
+	pop			int4,
+	cname		text,
+	outline 	path
+);
+
+\set filename :abs_srcdir '/data/real_city.data'
+COPY real_city FROM :'filename';
+ANALYZE real_city;
+
+SELECT *
+   INTO TABLE ramp
+   FROM ONLY road
+   WHERE name ~ '.*Ramp';
+
 CREATE VIEW street AS
    SELECT r.name, r.thepath, c.cname AS cname
    FROM ONLY road r, real_city c
diff --git a/src/test/regress/sql/errors.sql b/src/test/regress/sql/errors.sql
index a7fcf72dd4..52474e858d 100644
--- a/src/test/regress/sql/errors.sql
+++ b/src/test/regress/sql/errors.sql
@@ -77,7 +77,7 @@ alter table nonesuch rename to newnonesuch;
 alter table nonesuch rename to stud_emp;
 
 -- conflict
-alter table stud_emp rename to aggtest;
+alter table stud_emp rename to student;
 
 -- self-conflict
 alter table stud_emp rename to stud_emp;
diff --git a/src/test/regress/sql/expressions.sql b/src/test/regress/sql/expressions.sql
index 03b88bde7a..a8643c457d 100644
--- a/src/test/regress/sql/expressions.sql
+++ b/src/test/regress/sql/expressions.sql
@@ -3,7 +3,7 @@
 --
 
 --
--- Tests for SQLVAlueFunction
+-- Tests for SQLValueFunction
 --
 
 
@@ -38,35 +38,6 @@ SELECT current_schema;
 RESET search_path;
 
 
---
--- Tests for BETWEEN
---
-
-explain (costs off)
-select count(*) from date_tbl
-  where f1 between '1997-01-01' and '1998-01-01';
-select count(*) from date_tbl
-  where f1 between '1997-01-01' and '1998-01-01';
-
-explain (costs off)
-select count(*) from date_tbl
-  where f1 not between '1997-01-01' and '1998-01-01';
-select count(*) from date_tbl
-  where f1 not between '1997-01-01' and '1998-01-01';
-
-explain (costs off)
-select count(*) from date_tbl
-  where f1 between symmetric '1997-01-01' and '1998-01-01';
-select count(*) from date_tbl
-  where f1 between symmetric '1997-01-01' and '1998-01-01';
-
-explain (costs off)
-select count(*) from date_tbl
-  where f1 not between symmetric '1997-01-01' and '1998-01-01';
-select count(*) from date_tbl
-  where f1 not between symmetric '1997-01-01' and '1998-01-01';
-
-
 --
 -- Test parsing of a no-op cast to a type with unspecified typmod
 --
diff --git a/src/test/regress/sql/float8.sql b/src/test/regress/sql/float8.sql
index 97f0c3bb2f..03c134b078 100644
--- a/src/test/regress/sql/float8.sql
+++ b/src/test/regress/sql/float8.sql
@@ -2,7 +2,12 @@
 -- FLOAT8
 --
 
-CREATE TABLE FLOAT8_TBL(f1 float8);
+--
+-- Build a table for testing
+-- (This temporarily hides the table created in test_setup.sql)
+--
+
+CREATE TEMP TABLE FLOAT8_TBL(f1 float8);
 
 INSERT INTO FLOAT8_TBL(f1) VALUES ('    0.0   ');
 INSERT INTO FLOAT8_TBL(f1) VALUES ('1004.30  ');
@@ -229,20 +234,9 @@ INSERT INTO FLOAT8_TBL(f1) VALUES ('10e-400');
 
 INSERT INTO FLOAT8_TBL(f1) VALUES ('-10e-400');
 
--- maintain external table consistency across platforms
--- delete all values and reinsert well-behaved ones
-
-DELETE FROM FLOAT8_TBL;
-
-INSERT INTO FLOAT8_TBL(f1) VALUES ('0.0');
-
-INSERT INTO FLOAT8_TBL(f1) VALUES ('-34.84');
-
-INSERT INTO FLOAT8_TBL(f1) VALUES ('-1004.30');
-
-INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e+200');
+DROP TABLE FLOAT8_TBL;
 
-INSERT INTO FLOAT8_TBL(f1) VALUES ('-1.2345678901234e-200');
+-- Check the float8 values exported for use by other tests
 
 SELECT * FROM FLOAT8_TBL;
 
diff --git a/src/test/regress/sql/geometry.sql b/src/test/regress/sql/geometry.sql
index 0aa06bcc9d..8928d04aa3 100644
--- a/src/test/regress/sql/geometry.sql
+++ b/src/test/regress/sql/geometry.sql
@@ -497,3 +497,29 @@ SELECT c.f1, p.f1, c.f1 / p.f1 FROM CIRCLE_TBL c, POINT_TBL p WHERE p.f1 ~= '(0,
 
 -- Distance to polygon
 SELECT c.f1, p.f1, c.f1 <-> p.f1 FROM CIRCLE_TBL c, POLYGON_TBL p;
+
+-- Check index behavior for circles
+
+CREATE INDEX gcircleind ON circle_tbl USING gist (f1);
+
+SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
+    ORDER BY area(f1);
+
+EXPLAIN (COSTS OFF)
+SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
+    ORDER BY area(f1);
+SELECT * FROM circle_tbl WHERE f1 && circle(point(1,-2), 1)
+    ORDER BY area(f1);
+
+-- Check index behavior for polygons
+
+CREATE INDEX gpolygonind ON polygon_tbl USING gist (f1);
+
+SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon
+    ORDER BY (poly_center(f1))[0];
+
+EXPLAIN (COSTS OFF)
+SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon
+    ORDER BY (poly_center(f1))[0];
+SELECT * FROM polygon_tbl WHERE f1 @> '((1,1),(2,2),(2,1))'::polygon
+    ORDER BY (poly_center(f1))[0];
diff --git a/src/test/regress/sql/hash_index.sql b/src/test/regress/sql/hash_index.sql
index 4d1aa020a9..527024f710 100644
--- a/src/test/regress/sql/hash_index.sql
+++ b/src/test/regress/sql/hash_index.sql
@@ -1,8 +1,70 @@
 --
 -- HASH_INDEX
--- grep 843938989 hash.data
 --
 
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
+
+CREATE TABLE hash_i4_heap (
+	seqno 		int4,
+	random 		int4
+);
+
+CREATE TABLE hash_name_heap (
+	seqno 		int4,
+	random 		name
+);
+
+CREATE TABLE hash_txt_heap (
+	seqno 		int4,
+	random 		text
+);
+
+CREATE TABLE hash_f8_heap (
+	seqno		int4,
+	random 		float8
+);
+
+\set filename :abs_srcdir '/data/hash.data'
+COPY hash_i4_heap FROM :'filename';
+COPY hash_name_heap FROM :'filename';
+COPY hash_txt_heap FROM :'filename';
+COPY hash_f8_heap FROM :'filename';
+
+-- the data in hashovfl.data has a lot of duplicates in the index key
+-- fields, leading to long bucket chains and lots of table expansion.
+-- this is therefore a stress test of the bucket overflow code (unlike
+-- the data in hash.data, which has unique index keys).
+--
+-- \set filename :abs_srcdir '/data/hashovfl.data'
+-- COPY hash_ovfl_heap FROM :'filename';
+
+ANALYZE hash_i4_heap;
+ANALYZE hash_name_heap;
+ANALYZE hash_txt_heap;
+ANALYZE hash_f8_heap;
+
+CREATE INDEX hash_i4_index ON hash_i4_heap USING hash (random int4_ops);
+
+CREATE INDEX hash_name_index ON hash_name_heap USING hash (random name_ops);
+
+CREATE INDEX hash_txt_index ON hash_txt_heap USING hash (random text_ops);
+
+CREATE INDEX hash_f8_index ON hash_f8_heap USING hash (random float8_ops)
+  WITH (fillfactor=60);
+
+--
+-- Also try building functional, expressional, and partial indexes on
+-- tables that already contain data.
+--
+create unique index hash_f8_index_1 on hash_f8_heap(abs(random));
+create unique index hash_f8_index_2 on hash_f8_heap((seqno + 1), random);
+create unique index hash_f8_index_3 on hash_f8_heap(random) where seqno > 1000;
+
+--
+-- hash index
+-- grep 843938989 hash.data
+--
 SELECT * FROM hash_i4_heap
    WHERE hash_i4_heap.random = 843938989;
 
diff --git a/src/test/regress/sql/horology.sql b/src/test/regress/sql/horology.sql
index 78091112ca..2724a2bbc7 100644
--- a/src/test/regress/sql/horology.sql
+++ b/src/test/regress/sql/horology.sql
@@ -306,6 +306,34 @@ SELECT '2020-10-05'::timestamptz >= '4714-11-24 BC'::timestamp as t;
 
 RESET TimeZone;
 
+--
+-- Tests for BETWEEN
+--
+
+explain (costs off)
+select count(*) from date_tbl
+  where f1 between '1997-01-01' and '1998-01-01';
+select count(*) from date_tbl
+  where f1 between '1997-01-01' and '1998-01-01';
+
+explain (costs off)
+select count(*) from date_tbl
+  where f1 not between '1997-01-01' and '1998-01-01';
+select count(*) from date_tbl
+  where f1 not between '1997-01-01' and '1998-01-01';
+
+explain (costs off)
+select count(*) from date_tbl
+  where f1 between symmetric '1997-01-01' and '1998-01-01';
+select count(*) from date_tbl
+  where f1 between symmetric '1997-01-01' and '1998-01-01';
+
+explain (costs off)
+select count(*) from date_tbl
+  where f1 not between symmetric '1997-01-01' and '1998-01-01';
+select count(*) from date_tbl
+  where f1 not between symmetric '1997-01-01' and '1998-01-01';
+
 --
 -- Formats
 --
diff --git a/src/test/regress/sql/int2.sql b/src/test/regress/sql/int2.sql
index 613b344704..8e8d33892d 100644
--- a/src/test/regress/sql/int2.sql
+++ b/src/test/regress/sql/int2.sql
@@ -2,22 +2,10 @@
 -- INT2
 --
 
-CREATE TABLE INT2_TBL(f1 int2);
-
-INSERT INTO INT2_TBL(f1) VALUES ('0   ');
-
-INSERT INTO INT2_TBL(f1) VALUES ('  1234 ');
-
-INSERT INTO INT2_TBL(f1) VALUES ('    -1234');
+-- int2_tbl was already created and filled in test_setup.sql.
+-- Here we just try to insert bad values.
 
 INSERT INTO INT2_TBL(f1) VALUES ('34.5');
-
--- largest and smallest values
-INSERT INTO INT2_TBL(f1) VALUES ('32767');
-
-INSERT INTO INT2_TBL(f1) VALUES ('-32767');
-
--- bad input values -- should give errors
 INSERT INTO INT2_TBL(f1) VALUES ('100000');
 INSERT INTO INT2_TBL(f1) VALUES ('asdf');
 INSERT INTO INT2_TBL(f1) VALUES ('    ');
diff --git a/src/test/regress/sql/int4.sql b/src/test/regress/sql/int4.sql
index 55ec07a147..f19077f3da 100644
--- a/src/test/regress/sql/int4.sql
+++ b/src/test/regress/sql/int4.sql
@@ -2,22 +2,10 @@
 -- INT4
 --
 
-CREATE TABLE INT4_TBL(f1 int4);
-
-INSERT INTO INT4_TBL(f1) VALUES ('   0  ');
-
-INSERT INTO INT4_TBL(f1) VALUES ('123456     ');
-
-INSERT INTO INT4_TBL(f1) VALUES ('    -123456');
+-- int4_tbl was already created and filled in test_setup.sql.
+-- Here we just try to insert bad values.
 
 INSERT INTO INT4_TBL(f1) VALUES ('34.5');
-
--- largest and smallest values
-INSERT INTO INT4_TBL(f1) VALUES ('2147483647');
-
-INSERT INTO INT4_TBL(f1) VALUES ('-2147483647');
-
--- bad input values -- should give errors
 INSERT INTO INT4_TBL(f1) VALUES ('1000000000000');
 INSERT INTO INT4_TBL(f1) VALUES ('asdf');
 INSERT INTO INT4_TBL(f1) VALUES ('     ');
diff --git a/src/test/regress/sql/int8.sql b/src/test/regress/sql/int8.sql
index 32940b4daa..38b771964d 100644
--- a/src/test/regress/sql/int8.sql
+++ b/src/test/regress/sql/int8.sql
@@ -2,15 +2,10 @@
 -- INT8
 -- Test int8 64-bit integers.
 --
-CREATE TABLE INT8_TBL(q1 int8, q2 int8);
 
-INSERT INTO INT8_TBL VALUES('  123   ','  456');
-INSERT INTO INT8_TBL VALUES('123   ','4567890123456789');
-INSERT INTO INT8_TBL VALUES('4567890123456789','123');
-INSERT INTO INT8_TBL VALUES(+4567890123456789,'4567890123456789');
-INSERT INTO INT8_TBL VALUES('+4567890123456789','-4567890123456789');
+-- int8_tbl was already created and filled in test_setup.sql.
+-- Here we just try to insert bad values.
 
--- bad inputs
 INSERT INTO INT8_TBL(q1) VALUES ('      ');
 INSERT INTO INT8_TBL(q1) VALUES ('xxx');
 INSERT INTO INT8_TBL(q1) VALUES ('3908203590239580293850293850329485');
diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql
index 52240fea7e..6dd01b022e 100644
--- a/src/test/regress/sql/join.sql
+++ b/src/test/regress/sql/join.sql
@@ -391,11 +391,11 @@ rollback;
 --
 explain (costs off)
 select aa, bb, unique1, unique1
-  from tenk1 right join b on aa = unique1
+  from tenk1 right join b_star on aa = unique1
   where bb < bb and bb is null;
 
 select aa, bb, unique1, unique1
-  from tenk1 right join b on aa = unique1
+  from tenk1 right join b_star on aa = unique1
   where bb < bb and bb is null;
 
 --
diff --git a/src/test/regress/sql/jsonb.sql b/src/test/regress/sql/jsonb.sql
index 5016f29c15..8d25966267 100644
--- a/src/test/regress/sql/jsonb.sql
+++ b/src/test/regress/sql/jsonb.sql
@@ -1,3 +1,13 @@
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
+
+CREATE TABLE testjsonb (
+       j jsonb
+);
+
+\set filename :abs_srcdir '/data/jsonb.data'
+COPY testjsonb FROM :'filename';
+
 -- Strings.
 SELECT '""'::jsonb;				-- OK.
 SELECT $$''$$::jsonb;			-- ERROR, single quotes are not allowed
diff --git a/src/test/regress/sql/misc.sql b/src/test/regress/sql/misc.sql
index 8bf63bca3d..165a2e175f 100644
--- a/src/test/regress/sql/misc.sql
+++ b/src/test/regress/sql/misc.sql
@@ -45,6 +45,10 @@ UPDATE onek
 -- systems.    This non-func update stuff needs to be examined
 -- more closely.  			- jolly (2/22/96)
 --
+SELECT two, stringu1, ten, string4
+   INTO TABLE tmp
+   FROM onek;
+
 UPDATE tmp
    SET stringu1 = reverse_name(onek.stringu1)
    FROM onek
@@ -72,123 +76,56 @@ DROP TABLE tmp;
 \set filename :abs_builddir '/results/onek.data'
 COPY onek TO :'filename';
 
-DELETE FROM onek;
-
-COPY onek FROM :'filename';
+CREATE TEMP TABLE onek_copy (LIKE onek);
 
-SELECT unique1 FROM onek WHERE unique1 < 2 ORDER BY unique1;
+COPY onek_copy FROM :'filename';
 
-DELETE FROM onek2;
+SELECT * FROM onek EXCEPT ALL SELECT * FROM onek_copy;
 
-COPY onek2 FROM :'filename';
-
-SELECT unique1 FROM onek2 WHERE unique1 < 2 ORDER BY unique1;
+SELECT * FROM onek_copy EXCEPT ALL SELECT * FROM onek;
 
 \set filename :abs_builddir '/results/stud_emp.data'
 COPY BINARY stud_emp TO :'filename';
 
-DELETE FROM stud_emp;
-
-COPY BINARY stud_emp FROM :'filename';
+CREATE TEMP TABLE stud_emp_copy (LIKE stud_emp);
 
-SELECT * FROM stud_emp;
-
--- COPY aggtest FROM stdin;
--- 56	7.8
--- 100	99.097
--- 0	0.09561
--- 42	324.78
--- .
--- COPY aggtest TO stdout;
+COPY BINARY stud_emp_copy FROM :'filename';
 
+SELECT * FROM stud_emp_copy;
 
 --
--- inheritance stress test
+-- test data for postquel functions
 --
-SELECT * FROM a_star*;
-
-SELECT *
-   FROM b_star* x
-   WHERE x.b = text 'bumble' or x.a < 3;
-
-SELECT class, a
-   FROM c_star* x
-   WHERE x.c ~ text 'hi';
-
-SELECT class, b, c
-   FROM d_star* x
-   WHERE x.a < 100;
-
-SELECT class, c FROM e_star* x WHERE x.c NOTNULL;
-
-SELECT * FROM f_star* x WHERE x.c ISNULL;
-
--- grouping and aggregation on inherited sets have been busted in the past...
-
-SELECT sum(a) FROM a_star*;
-
-SELECT class, sum(a) FROM a_star* GROUP BY class ORDER BY class;
-
-
-ALTER TABLE f_star RENAME COLUMN f TO ff;
 
-ALTER TABLE e_star* RENAME COLUMN e TO ee;
+CREATE TABLE hobbies_r (
+	name		text,
+	person 		text
+);
 
-ALTER TABLE d_star* RENAME COLUMN d TO dd;
+CREATE TABLE equipment_r (
+	name 		text,
+	hobby		text
+);
 
-ALTER TABLE c_star* RENAME COLUMN c TO cc;
+INSERT INTO hobbies_r (name, person)
+   SELECT 'posthacking', p.name
+   FROM person* p
+   WHERE p.name = 'mike' or p.name = 'jeff';
 
-ALTER TABLE b_star* RENAME COLUMN b TO bb;
+INSERT INTO hobbies_r (name, person)
+   SELECT 'basketball', p.name
+   FROM person p
+   WHERE p.name = 'joe' or p.name = 'sally';
 
-ALTER TABLE a_star* RENAME COLUMN a TO aa;
+INSERT INTO hobbies_r (name) VALUES ('skywalking');
 
-SELECT class, aa
-   FROM a_star* x
-   WHERE aa ISNULL;
+INSERT INTO equipment_r (name, hobby) VALUES ('advil', 'posthacking');
 
--- As of Postgres 7.1, ALTER implicitly recurses,
--- so this should be same as ALTER a_star*
+INSERT INTO equipment_r (name, hobby) VALUES ('peet''s coffee', 'posthacking');
 
-ALTER TABLE a_star RENAME COLUMN aa TO foo;
+INSERT INTO equipment_r (name, hobby) VALUES ('hightops', 'basketball');
 
-SELECT class, foo
-   FROM a_star* x
-   WHERE x.foo >= 2;
-
-ALTER TABLE a_star RENAME COLUMN foo TO aa;
-
-SELECT *
-   from a_star*
-   WHERE aa < 1000;
-
-ALTER TABLE f_star ADD COLUMN f int4;
-
-UPDATE f_star SET f = 10;
-
-ALTER TABLE e_star* ADD COLUMN e int4;
-
---UPDATE e_star* SET e = 42;
-
-SELECT * FROM e_star*;
-
-ALTER TABLE a_star* ADD COLUMN a text;
-
--- That ALTER TABLE should have added TOAST tables.
-SELECT relname, reltoastrelid <> 0 AS has_toast_table
-   FROM pg_class
-   WHERE oid::regclass IN ('a_star', 'c_star')
-   ORDER BY 1;
-
---UPDATE b_star*
---   SET a = text 'gazpacho'
---   WHERE aa > 4;
-
-SELECT class, aa, a FROM a_star*;
-
-
---
--- versions
---
+INSERT INTO equipment_r (name, hobby) VALUES ('guts', 'skywalking');
 
 --
 -- postquel functions
diff --git a/src/test/regress/sql/point.sql b/src/test/regress/sql/point.sql
index f85379192c..435ff4b9b6 100644
--- a/src/test/regress/sql/point.sql
+++ b/src/test/regress/sql/point.sql
@@ -5,31 +5,11 @@
 -- avoid bit-exact output here because operations may not be bit-exact.
 SET extra_float_digits = 0;
 
-CREATE TABLE POINT_TBL(f1 point);
+-- point_tbl was already created and filled in test_setup.sql.
+-- Here we just try to insert bad values.
 
-INSERT INTO POINT_TBL(f1) VALUES ('(0.0,0.0)');
-
-INSERT INTO POINT_TBL(f1) VALUES ('(-10.0,0.0)');
-
-INSERT INTO POINT_TBL(f1) VALUES ('(-3.0,4.0)');
-
-INSERT INTO POINT_TBL(f1) VALUES ('(5.1, 34.5)');
-
-INSERT INTO POINT_TBL(f1) VALUES ('(-5.0,-12.0)');
-
-INSERT INTO POINT_TBL(f1) VALUES ('(1e-300,-1e-300)');	-- To underflow
-
-INSERT INTO POINT_TBL(f1) VALUES ('(1e+300,Inf)');		-- To overflow
-
-INSERT INTO POINT_TBL(f1) VALUES ('(Inf,1e+300)');		-- Transposed
-
-INSERT INTO POINT_TBL(f1) VALUES (' ( Nan , NaN ) ');
-
--- bad format points
 INSERT INTO POINT_TBL(f1) VALUES ('asdfasdf');
 
-INSERT INTO POINT_TBL(f1) VALUES ('10.0,10.0');
-
 INSERT INTO POINT_TBL(f1) VALUES ('(10.0 10.0)');
 
 INSERT INTO POINT_TBL(f1) VALUES ('(10.0, 10.0) x');
diff --git a/src/test/regress/sql/rules.sql b/src/test/regress/sql/rules.sql
index b732833e63..8bdab6dec3 100644
--- a/src/test/regress/sql/rules.sql
+++ b/src/test/regress/sql/rules.sql
@@ -776,11 +776,11 @@ drop table cchild;
 \a\t
 
 SELECT viewname, definition FROM pg_views
-WHERE schemaname IN ('pg_catalog', 'public')
+WHERE schemaname = 'pg_catalog'
 ORDER BY viewname;
 
 SELECT tablename, rulename, definition FROM pg_rules
-WHERE schemaname IN ('pg_catalog', 'public')
+WHERE schemaname = 'pg_catalog'
 ORDER BY tablename, rulename;
 
 -- restore normal output mode
diff --git a/src/test/regress/sql/sanity_check.sql b/src/test/regress/sql/sanity_check.sql
index a4ec00309f..162e5324b5 100644
--- a/src/test/regress/sql/sanity_check.sql
+++ b/src/test/regress/sql/sanity_check.sql
@@ -1,25 +1,7 @@
 VACUUM;
 
 --
--- sanity check, if we don't have indices the test will take years to
--- complete.  But skip TOAST relations (since they will have varying
--- names depending on the current OID counter) as well as temp tables
--- of other backends (to avoid timing-dependent behavior).
---
-
--- temporarily disable fancy output, so catalog changes create less diff noise
-\a\t
-
-SELECT relname, relhasindex
-   FROM pg_class c LEFT JOIN pg_namespace n ON n.oid = relnamespace
-   WHERE relkind IN ('r', 'p') AND (nspname ~ '^pg_temp_') IS NOT TRUE
-   ORDER BY relname;
-
--- restore normal output mode
-\a\t
-
---
--- another sanity check: every system catalog that has OIDs should have
+-- Sanity check: every system catalog that has OIDs should have
 -- a unique index on OID.  This ensures that the OIDs will be unique,
 -- even after the OID counter wraps around.
 -- We exclude non-system tables from the check by looking at nspname.
diff --git a/src/test/regress/sql/select.sql b/src/test/regress/sql/select.sql
index b5929b2eca..aa3d035204 100644
--- a/src/test/regress/sql/select.sql
+++ b/src/test/regress/sql/select.sql
@@ -90,11 +90,6 @@ RESET enable_seqscan;
 RESET enable_bitmapscan;
 RESET enable_sort;
 
-
-SELECT two, stringu1, ten, string4
-   INTO TABLE tmp
-   FROM onek;
-
 --
 -- awk '{print $1,$2;}' person.data |
 -- awk '{if(NF!=2){print $3,$2;}else{print;}}' - emp.data |
diff --git a/src/test/regress/sql/select_distinct.sql b/src/test/regress/sql/select_distinct.sql
index 1bfe59c26f..f27ff714f8 100644
--- a/src/test/regress/sql/select_distinct.sql
+++ b/src/test/regress/sql/select_distinct.sql
@@ -5,24 +5,24 @@
 --
 -- awk '{print $3;}' onek.data | sort -n | uniq
 --
-SELECT DISTINCT two FROM tmp ORDER BY 1;
+SELECT DISTINCT two FROM onek ORDER BY 1;
 
 --
 -- awk '{print $5;}' onek.data | sort -n | uniq
 --
-SELECT DISTINCT ten FROM tmp ORDER BY 1;
+SELECT DISTINCT ten FROM onek ORDER BY 1;
 
 --
 -- awk '{print $16;}' onek.data | sort -d | uniq
 --
-SELECT DISTINCT string4 FROM tmp ORDER BY 1;
+SELECT DISTINCT string4 FROM onek ORDER BY 1;
 
 --
 -- awk '{print $3,$16,$5;}' onek.data | sort -d | uniq |
 -- sort +0n -1 +1d -2 +2n -3
 --
 SELECT DISTINCT two, string4, ten
-   FROM tmp
+   FROM onek
    ORDER BY two using <, string4 using <, ten using <;
 
 --
diff --git a/src/test/regress/sql/select_distinct_on.sql b/src/test/regress/sql/select_distinct_on.sql
index d18733d274..0920bd64b9 100644
--- a/src/test/regress/sql/select_distinct_on.sql
+++ b/src/test/regress/sql/select_distinct_on.sql
@@ -3,16 +3,16 @@
 --
 
 SELECT DISTINCT ON (string4) string4, two, ten
-   FROM tmp
+   FROM onek
    ORDER BY string4 using <, two using >, ten using <;
 
 -- this will fail due to conflict of ordering requirements
 SELECT DISTINCT ON (string4, ten) string4, two, ten
-   FROM tmp
+   FROM onek
    ORDER BY string4 using <, two using <, ten using <;
 
 SELECT DISTINCT ON (string4, ten) string4, ten, two
-   FROM tmp
+   FROM onek
    ORDER BY string4 using <, ten using >, two using <;
 
 -- bug #5049: early 8.4.x chokes on volatile DISTINCT ON clauses
diff --git a/src/test/regress/sql/select_into.sql b/src/test/regress/sql/select_into.sql
index 7e903c339a..689c448cc2 100644
--- a/src/test/regress/sql/select_into.sql
+++ b/src/test/regress/sql/select_into.sql
@@ -110,11 +110,11 @@ DROP TABLE easi, easi2;
 --
 -- Disallowed uses of SELECT ... INTO.  All should fail
 --
-DECLARE foo CURSOR FOR SELECT 1 INTO b;
+DECLARE foo CURSOR FOR SELECT 1 INTO int4_tbl;
 COPY (SELECT 1 INTO frak UNION SELECT 2) TO 'blob';
 SELECT * FROM (SELECT 1 INTO f) bar;
-CREATE VIEW foo AS SELECT 1 INTO b;
-INSERT INTO b SELECT 1 INTO f;
+CREATE VIEW foo AS SELECT 1 INTO int4_tbl;
+INSERT INTO int4_tbl SELECT 1 INTO f;
 
 -- Test CREATE TABLE AS ... IF NOT EXISTS
 CREATE TABLE ctas_ine_tbl AS SELECT 1;
diff --git a/src/test/regress/sql/test_setup.sql b/src/test/regress/sql/test_setup.sql
index 61c953fb16..d0a73c473d 100644
--- a/src/test/regress/sql/test_setup.sql
+++ b/src/test/regress/sql/test_setup.sql
@@ -15,6 +15,208 @@
 --
 GRANT ALL ON SCHEMA public TO public;
 
+--
+-- These tables have traditionally been referenced by many tests,
+-- so create and populate them.  Insert only non-error values here.
+-- (Some subsequent tests try to insert erroneous values.  That's okay
+-- because the table won't actually change.  Do not change the contents
+-- of these tables in later tests, as it may affect other tests.)
+--
+
+CREATE TABLE CHAR_TBL(f1 char(4));
+
+INSERT INTO CHAR_TBL (f1) VALUES
+  ('a'),
+  ('ab'),
+  ('abcd'),
+  ('abcd    ');
+VACUUM CHAR_TBL;
+
+CREATE TABLE FLOAT8_TBL(f1 float8);
+
+INSERT INTO FLOAT8_TBL(f1) VALUES
+  ('0.0'),
+  ('-34.84'),
+  ('-1004.30'),
+  ('-1.2345678901234e+200'),
+  ('-1.2345678901234e-200');
+VACUUM FLOAT8_TBL;
+
+CREATE TABLE INT2_TBL(f1 int2);
+
+INSERT INTO INT2_TBL(f1) VALUES
+  ('0   '),
+  ('  1234 '),
+  ('    -1234'),
+  ('32767'),  -- largest and smallest values
+  ('-32767');
+VACUUM INT2_TBL;
+
+CREATE TABLE INT4_TBL(f1 int4);
+
+INSERT INTO INT4_TBL(f1) VALUES
+  ('   0  '),
+  ('123456     '),
+  ('    -123456'),
+  ('2147483647'),  -- largest and smallest values
+  ('-2147483647');
+VACUUM INT4_TBL;
+
+CREATE TABLE INT8_TBL(q1 int8, q2 int8);
+
+INSERT INTO INT8_TBL VALUES
+  ('  123   ','  456'),
+  ('123   ','4567890123456789'),
+  ('4567890123456789','123'),
+  (+4567890123456789,'4567890123456789'),
+  ('+4567890123456789','-4567890123456789');
+VACUUM INT8_TBL;
+
+CREATE TABLE POINT_TBL(f1 point);
+
+INSERT INTO POINT_TBL(f1) VALUES
+  ('(0.0,0.0)'),
+  ('(-10.0,0.0)'),
+  ('(-3.0,4.0)'),
+  ('(5.1, 34.5)'),
+  ('(-5.0,-12.0)'),
+  ('(1e-300,-1e-300)'),  -- To underflow
+  ('(1e+300,Inf)'),  -- To overflow
+  ('(Inf,1e+300)'),  -- Transposed
+  (' ( Nan , NaN ) '),
+  ('10.0,10.0');
+-- We intentionally don't vacuum point_tbl here; geometry depends on that
+
+CREATE TABLE TEXT_TBL (f1 text);
+
+INSERT INTO TEXT_TBL VALUES
+  ('doh!'),
+  ('hi de ho neighbor');
+VACUUM TEXT_TBL;
+
+CREATE TABLE VARCHAR_TBL(f1 varchar(4));
+
+INSERT INTO VARCHAR_TBL (f1) VALUES
+  ('a'),
+  ('ab'),
+  ('abcd'),
+  ('abcd    ');
+VACUUM VARCHAR_TBL;
+
+CREATE TABLE onek (
+	unique1		int4,
+	unique2		int4,
+	two			int4,
+	four		int4,
+	ten			int4,
+	twenty		int4,
+	hundred		int4,
+	thousand	int4,
+	twothousand	int4,
+	fivethous	int4,
+	tenthous	int4,
+	odd			int4,
+	even		int4,
+	stringu1	name,
+	stringu2	name,
+	string4		name
+);
+
+\set filename :abs_srcdir '/data/onek.data'
+COPY onek FROM :'filename';
+VACUUM ANALYZE onek;
+
+CREATE TABLE onek2 AS SELECT * FROM onek;
+VACUUM ANALYZE onek2;
+
+CREATE TABLE tenk1 (
+	unique1		int4,
+	unique2		int4,
+	two			int4,
+	four		int4,
+	ten			int4,
+	twenty		int4,
+	hundred		int4,
+	thousand	int4,
+	twothousand	int4,
+	fivethous	int4,
+	tenthous	int4,
+	odd			int4,
+	even		int4,
+	stringu1	name,
+	stringu2	name,
+	string4		name
+);
+
+\set filename :abs_srcdir '/data/tenk.data'
+COPY tenk1 FROM :'filename';
+VACUUM ANALYZE tenk1;
+
+CREATE TABLE tenk2 AS SELECT * FROM tenk1;
+VACUUM ANALYZE tenk2;
+
+CREATE TABLE person (
+	name 		text,
+	age			int4,
+	location 	point
+);
+
+\set filename :abs_srcdir '/data/person.data'
+COPY person FROM :'filename';
+VACUUM ANALYZE person;
+
+CREATE TABLE emp (
+	salary 		int4,
+	manager 	name
+) INHERITS (person);
+
+\set filename :abs_srcdir '/data/emp.data'
+COPY emp FROM :'filename';
+VACUUM ANALYZE emp;
+
+CREATE TABLE student (
+	gpa 		float8
+) INHERITS (person);
+
+\set filename :abs_srcdir '/data/student.data'
+COPY student FROM :'filename';
+VACUUM ANALYZE student;
+
+CREATE TABLE stud_emp (
+	percent 	int4
+) INHERITS (emp, student);
+
+\set filename :abs_srcdir '/data/stud_emp.data'
+COPY stud_emp FROM :'filename';
+VACUUM ANALYZE stud_emp;
+
+CREATE TABLE road (
+	name		text,
+	thepath 	path
+);
+
+\set filename :abs_srcdir '/data/streets.data'
+COPY road FROM :'filename';
+VACUUM ANALYZE road;
+
+CREATE TABLE ihighway () INHERITS (road);
+
+INSERT INTO ihighway
+   SELECT *
+   FROM ONLY road
+   WHERE name ~ 'I- .*';
+VACUUM ANALYZE ihighway;
+
+CREATE TABLE shighway (
+	surface		text
+) INHERITS (road);
+
+INSERT INTO shighway
+   SELECT *, 'asphalt'
+   FROM ONLY road
+   WHERE name ~ 'State Hwy.*';
+VACUUM ANALYZE shighway;
+
 --
 -- We must have some enum type in the database for opr_sanity and type_sanity.
 --
diff --git a/src/test/regress/sql/text.sql b/src/test/regress/sql/text.sql
index 56eee69abc..540e551254 100644
--- a/src/test/regress/sql/text.sql
+++ b/src/test/regress/sql/text.sql
@@ -6,11 +6,7 @@ SELECT text 'this is a text string' = text 'this is a text string' AS true;
 
 SELECT text 'this is a text string' = text 'this is a text strin' AS false;
 
-CREATE TABLE TEXT_TBL (f1 text);
-
-INSERT INTO TEXT_TBL VALUES ('doh!');
-INSERT INTO TEXT_TBL VALUES ('hi de ho neighbor');
-
+-- text_tbl was already created and filled in test_setup.sql.
 SELECT * FROM TEXT_TBL;
 
 -- As of 8.3 we have removed most implicit casts to text, so that for example
diff --git a/src/test/regress/sql/transactions.sql b/src/test/regress/sql/transactions.sql
index 8886280c0a..0a716b506b 100644
--- a/src/test/regress/sql/transactions.sql
+++ b/src/test/regress/sql/transactions.sql
@@ -4,10 +4,12 @@
 
 BEGIN;
 
-SELECT *
-   INTO TABLE xacttest
-   FROM aggtest;
-
+CREATE TABLE xacttest (a smallint, b real);
+INSERT INTO xacttest VALUES
+  (56, 7.8),
+  (100, 99.097),
+  (0, 0.09561),
+  (42, 324.78);
 INSERT INTO xacttest (a, b) VALUES (777, 777.777);
 
 END;
@@ -20,10 +22,10 @@ BEGIN;
 
 CREATE TABLE disappear (a int4);
 
-DELETE FROM aggtest;
+DELETE FROM xacttest;
 
 -- should be empty
-SELECT * FROM aggtest;
+SELECT * FROM xacttest;
 
 ABORT;
 
@@ -31,7 +33,7 @@ ABORT;
 SELECT oid FROM pg_class WHERE relname = 'disappear';
 
 -- should have members again
-SELECT * FROM aggtest;
+SELECT * FROM xacttest;
 
 
 -- Read-only tests
diff --git a/src/test/regress/sql/tsearch.sql b/src/test/regress/sql/tsearch.sql
index d929210998..0fa8ac4682 100644
--- a/src/test/regress/sql/tsearch.sql
+++ b/src/test/regress/sql/tsearch.sql
@@ -1,3 +1,6 @@
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
+
 --
 -- Sanity checks for text search catalogs
 --
@@ -39,6 +42,17 @@ RIGHT JOIN pg_ts_config_map AS m
 WHERE
     tt.cfgid IS NULL OR tt.tokid IS NULL;
 
+-- Load some test data
+CREATE TABLE test_tsvector(
+	t text,
+	a tsvector
+);
+
+\set filename :abs_srcdir '/data/tsearch.data'
+COPY test_tsvector FROM :'filename';
+
+ANALYZE test_tsvector;
+
 -- test basic text search behavior without indexes, then with
 
 SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh';
diff --git a/src/test/regress/sql/varchar.sql b/src/test/regress/sql/varchar.sql
index 35e24b84d3..a970821426 100644
--- a/src/test/regress/sql/varchar.sql
+++ b/src/test/regress/sql/varchar.sql
@@ -2,7 +2,12 @@
 -- VARCHAR
 --
 
-CREATE TABLE VARCHAR_TBL(f1 varchar(1));
+--
+-- Build a table for testing
+-- (This temporarily hides the table created in test_setup.sql)
+--
+
+CREATE TEMP TABLE VARCHAR_TBL(f1 varchar(1));
 
 INSERT INTO VARCHAR_TBL (f1) VALUES ('a');
 
@@ -54,13 +59,10 @@ DROP TABLE VARCHAR_TBL;
 --
 -- Now test longer arrays of char
 --
+-- This varchar_tbl was already created and filled in test_setup.sql.
+-- Here we just try to insert bad values.
+--
 
-CREATE TABLE VARCHAR_TBL(f1 varchar(4));
-
-INSERT INTO VARCHAR_TBL (f1) VALUES ('a');
-INSERT INTO VARCHAR_TBL (f1) VALUES ('ab');
-INSERT INTO VARCHAR_TBL (f1) VALUES ('abcd');
 INSERT INTO VARCHAR_TBL (f1) VALUES ('abcde');
-INSERT INTO VARCHAR_TBL (f1) VALUES ('abcd    ');
 
 SELECT * FROM VARCHAR_TBL;
diff --git a/src/test/regress/sql/with.sql b/src/test/regress/sql/with.sql
index 46668a903e..0aee0db9a3 100644
--- a/src/test/regress/sql/with.sql
+++ b/src/test/regress/sql/with.sql
@@ -1401,7 +1401,7 @@ SELECT * FROM parent;
 
 EXPLAIN (VERBOSE, COSTS OFF)
 WITH wcte AS ( INSERT INTO int8_tbl VALUES ( 42, 47 ) RETURNING q2 )
-DELETE FROM a USING wcte WHERE aa = q2;
+DELETE FROM a_star USING wcte WHERE aa = q2;
 
 -- error cases
 
