diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index 49547ee..b651858 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -2884,6 +2884,21 @@ include_dir 'conf.d'
       </listitem>
      </varlistentry>
 
+     <varlistentry id="guc-enable-hashagg-disk" xreflabel="enable_hashagg_disk">
+      <term><varname>enable_hashagg_disk</varname> (<type>boolean</type>)
+      <indexterm>
+       <primary><varname>enable_hashagg_disk</> configuration parameter</primary>
+      </indexterm>
+      </term>
+      <listitem>
+       <para>
+        Enables or disables the query planner's use of hashed aggregation plan
+        types when the planner expects the hash table size to exceed
+        <varname>work_mem</varname>. The default is <literal>on</>.
+       </para>
+      </listitem>
+     </varlistentry>
+
      <varlistentry id="guc-enable-hashjoin" xreflabel="enable_hashjoin">
       <term><varname>enable_hashjoin</varname> (<type>boolean</type>)
       <indexterm>
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 6455864..3ae9583 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -108,6 +108,7 @@
 #include "optimizer/tlist.h"
 #include "parser/parse_agg.h"
 #include "parser/parse_coerce.h"
+#include "storage/buffile.h"
 #include "utils/acl.h"
 #include "utils/builtins.h"
 #include "utils/lsyscache.h"
@@ -115,7 +116,11 @@
 #include "utils/syscache.h"
 #include "utils/tuplesort.h"
 #include "utils/datum.h"
+#include "utils/dynahash.h"
 
+#define HASH_DISK_MIN_PARTITIONS		1
+#define HASH_DISK_DEFAULT_PARTITIONS	4
+#define HASH_DISK_MAX_PARTITIONS		256
 
 /*
  * AggStatePerAggData - per-aggregate working state for the Agg scan
@@ -310,6 +315,24 @@ typedef struct AggHashEntryData
 
 }	AggHashEntryData;	/* VARIABLE LENGTH STRUCT */
 
+/*
+ * A unit of work when batching. Once work_mem is exhausted, no new
+ * groups are added to the hash table; tuples belonging to other groups
+ * are instead split into multiple output partitions (selected by a
+ * range of bits from their hash value).
+ *
+ * At the end, each output partition (represented by a temporary file)
+ * is turned into a new HashWork item and the process repeats.
+typedef struct HashWork
+{
+	BufFile		 *input_file;	/* input partition, NULL for outer plan */
+	int			  input_bits;	/* number of bits for input partition mask */
+
+	int			  n_output_partitions; /* number of output partitions */
+	BufFile		**output_partitions; /* output partition files */
+	int			 *output_ntuples; /* number of tuples in each partition */
+	int			  output_bits; /* log2(n_output_partitions) */
+} HashWork;
 
 static void initialize_aggregates(AggState *aggstate,
 					  AggStatePerAgg peragg,
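
Reviewer note: to make the bit bookkeeping above concrete, here is a small
standalone C sketch, an illustration only, not part of the patch; the
function select_partition() and all constants are invented. It mirrors the
selection arithmetic used by save_tuple() further down: each level reads a
disjoint slice of the same 32-bit hash, so a group's tuples always land in
the same partition file.

    #include <stdio.h>
    #include <stdint.h>

    /* mirrors save_tuple(): take 'output_bits' bits of the hash,
     * skipping the 'input_bits' already consumed by earlier levels */
    static int
    select_partition(uint32_t hash, int input_bits, int output_bits)
    {
        if (output_bits == 0)
            return 0;
        return (int) ((hash << input_bits) >> (32 - output_bits));
    }

    int
    main(void)
    {
        uint32_t hash = 0xDEADBEEF;

        /* initial item: hash_work(NULL, 0); spill into 4 partitions */
        printf("level 1: partition %d\n", select_partition(hash, 0, 2));
        /* a spilled file is requeued as hash_work(file, 0 + 2) */
        printf("level 2: partition %d\n", select_partition(hash, 2, 2));
        return 0;
    }

Because the slices are disjoint, recursion must stop once input_bits reaches
32; save_tuple() clamps partition_bits for exactly that reason.
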
@@ -331,10 +354,11 @@ static void finalize_aggregate(AggState *aggstate,
 static Bitmapset *find_unaggregated_cols(AggState *aggstate);
 static bool find_unaggregated_cols_walker(Node *node, Bitmapset **colnos);
 static void build_hash_table(AggState *aggstate, Size tuple_width);
-static AggHashEntry lookup_hash_entry(AggState *aggstate,
-				  TupleTableSlot *inputslot);
+static AggHashEntry lookup_hash_entry(AggState *aggstate, HashWork *work,
+					uint32 hashvalue, TupleTableSlot *inputslot);
+static HashWork *hash_work(BufFile *input_file, int input_bits);
 static TupleTableSlot *agg_retrieve_direct(AggState *aggstate);
-static void agg_fill_hash_table(AggState *aggstate);
+static bool agg_fill_hash_table(AggState *aggstate);
 static TupleTableSlot *agg_retrieve_hash_table(AggState *aggstate);
 static Datum GetAggInitVal(Datum textInitVal, Oid transtype);
 
@@ -348,6 +372,11 @@ static void reset_hash_table(AggHashTable htab);
 static void IteratorReset(AggHashTable htab);
 static AggHashEntry IteratorGetNext(AggHashTable htab);
 
+static TupleTableSlot *read_saved_tuple(BufFile *file, uint32 *hashvalue,
+				 TupleTableSlot *tupleSlot);
+static void save_tuple(AggState *aggstate, HashWork *work,
+		   TupleTableSlot *slot, uint32 hashvalue);
+
 /*
  * The size of the chunks for dense allocation. This needs to be >8kB
  * because the default (and only) memory context implementation uses
@@ -412,6 +442,7 @@ typedef struct AggHashTableData
 	 */
 	HashChunk		cur_chunk;
 	AggHashEntry	cur_entry;
+	int				niterated;
 
 	/* list of chunks with dense-packed entries / minimal tuples */
 	HashChunk		chunks_hash;
@@ -1096,12 +1127,15 @@ build_hash_table(AggState *aggstate, Size tuple_width)
 	htab = (AggHashTable)MemoryContextAllocZero(aggstate->aggcontext,
 											sizeof(AggHashTableData));
 
+	htab->niterated = 0;
+
 	/* TODO create a memory context for the hash table */
-	htab->htabctx = AllocSetContextCreate(aggstate->aggcontext,
+	htab->htabctx = AllocSetContextCreateTracked(aggstate->aggcontext,
 											"HashAggHashTable",
 											ALLOCSET_DEFAULT_MINSIZE,
 											ALLOCSET_DEFAULT_INITSIZE,
-											ALLOCSET_DEFAULT_MAXSIZE);
+											ALLOCSET_DEFAULT_MAXSIZE,
+											true);
 
 	/* buckets are just pointers to AggHashEntryData structures */
 	htab->buckets = (AggHashEntry*)MemoryContextAllocZero(htab->htabctx,
@@ -1198,15 +1232,14 @@ hash_agg_entry_size(int numAggs)
  * When called, CurrentMemoryContext should be the per-query context.
  */
 static AggHashEntry
-lookup_hash_entry(AggState *aggstate, TupleTableSlot *inputslot)
+lookup_hash_entry(AggState *aggstate, HashWork *work, uint32 hashvalue,
+				  TupleTableSlot *inputslot)
 {
 
 	AggHashEntry entry = NULL;
-	uint32		hashvalue;
 	uint32		bucketno;
 	MinimalTuple mintuple;
 
-	hashvalue = compute_hash_value(aggstate, inputslot);
 	bucketno = compute_bucket(aggstate, hashvalue);
 
 	entry = aggstate->hashtable->buckets[bucketno];
@@ -1223,10 +1256,13 @@ lookup_hash_entry(AggState *aggstate, TupleTableSlot *inputslot)
 		entry = entry->next;
 	}
 
-	/* There's not a maching entry in the bucket, so create a new one and
-	 * copy in data both for the aggregates, and the MinimalTuple containing
-	 * keys for the group columns. */
-	if (entry == NULL)
+	/*
+	 * There's no matching entry in the bucket (and we've not reached the
+	 * work_mem limit), so create a new one and copy in data both for the
+	 * aggregates and for the MinimalTuple containing the group key columns.
+	 */
+	if ((entry == NULL) &&
+		(MemoryContextGetAllocated(aggstate->hashtable->htabctx, true) < work_mem * 1024L))
 	{
 
 		MemoryContext old;
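
Reviewer note: the gate above only stops the creation of new groups; tuples
whose group already exists continue to be aggregated in memory. A minimal
standalone sketch of the pattern follows (plain C; try_new_group(), the
sizes, and the assumption that every tuple starts a new group are all
invented for illustration):

    #include <stdio.h>

    static long allocated = 0;          /* MemoryContextGetAllocated() stand-in */
    static const long mem_limit = 4096; /* work_mem * 1024L stand-in */

    /* returns 1 if a new group fits, 0 if the tuple must be spilled */
    static int
    try_new_group(long entry_size)
    {
        if (allocated >= mem_limit)
            return 0;           /* caller will save_tuple() instead */
        allocated += entry_size;
        return 1;
    }

    int
    main(void)
    {
        int created = 0, spilled = 0, i;

        for (i = 0; i < 100; i++)
        {
            if (try_new_group(64))
                created++;
            else
                spilled++;
        }
        printf("created %d groups, spilled %d tuples\n", created, spilled);
        return 0;
    }

One thing worth checking: the gate measures only the hash table's own
context (htabctx), so by-reference transition values growing in aggcontext
may not be counted against the limit.
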
@@ -1318,9 +1354,16 @@ ExecAgg(AggState *node)
 	/* Dispatch based on strategy */
 	if (((Agg *) node->ss.ps.plan)->aggstrategy == AGG_HASHED)
 	{
-		if (!node->table_filled)
-			agg_fill_hash_table(node);
-		return agg_retrieve_hash_table(node);
+		TupleTableSlot *slot = NULL;
+
+		while (slot == NULL)
+		{
+			if (!node->table_filled)
+				if (!agg_fill_hash_table(node))
+					break;	/* no more HashWork items to process */
+			slot = agg_retrieve_hash_table(node);
+		}
+		return slot;
 	}
 	else
 		return agg_retrieve_direct(node);
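
Reviewer note: the new control flow in ExecAgg is a fill/drain loop: each
agg_fill_hash_table() call consumes one HashWork item, and
agg_retrieve_hash_table() sets table_filled back to false once a batch is
exhausted (see that hunk further down). A standalone sketch of the state
machine, with fill(), drain(), and the batch counts all invented stand-ins:

    #include <stdio.h>
    #include <stdbool.h>

    static int  work_items = 3;     /* stand-in for aggstate->hash_work */
    static int  rows_left = 0;
    static bool table_filled = false;

    static bool
    fill(void)                      /* agg_fill_hash_table() */
    {
        if (work_items == 0)
            return false;           /* no more HashWork items */
        work_items--;
        rows_left = 2;              /* pretend each batch yields two groups */
        table_filled = true;
        return true;
    }

    static int
    drain(void)                     /* agg_retrieve_hash_table() */
    {
        if (rows_left == 0)
        {
            table_filled = false;   /* done with this batch */
            return 0;
        }
        return rows_left--;
    }

    int
    main(void)
    {
        for (;;)
        {
            int row = 0;

            while (row == 0)        /* mirrors the new ExecAgg loop */
            {
                if (!table_filled && !fill())
                    return 0;       /* agg_done */
                row = drain();
            }
            printf("returning group, id %d\n", row);
        }
    }
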
@@ -1536,13 +1579,15 @@ agg_retrieve_direct(AggState *aggstate)
 /*
  * ExecAgg for hashed case: phase 1, read input and build hash table
  */
-static void
+static bool
 agg_fill_hash_table(AggState *aggstate)
 {
 	PlanState  *outerPlan;
 	ExprContext *tmpcontext;
 	AggHashEntry entry;
-	TupleTableSlot *outerslot;
+	TupleTableSlot *outerslot = NULL;
+	HashWork   *work;
+	int			i;
 
 	/*
 	 * get state info from node
@@ -1551,33 +1596,120 @@ agg_fill_hash_table(AggState *aggstate)
 	/* tmpcontext is the per-input-tuple expression context */
 	tmpcontext = aggstate->tmpcontext;
 
+	/* if there's no HashWork item, we're done */
+	if (aggstate->hash_work == NIL)
+	{
+		aggstate->agg_done = true;
+		return false;
+	}
+
+	work = linitial(aggstate->hash_work);
+	aggstate->hash_work = list_delete_first(aggstate->hash_work);
+
+	/* if not the first time through, reinitialize */
+	if (!aggstate->hash_init_state)
+	{
+	/* FIXME get rid of all the previous aggregate states somehow,
+	 *       either by resetting the aggcontext or by clearing the
+	 *       hash table. Resetting the context seems better. */
+
+		/* reset the hash table (free the chunks, zero buckets) */
+		reset_hash_table(aggstate->hashtable);
+	}
+
+	/* mark that we've started, so the next item triggers a reset */
+	aggstate->hash_init_state = false;
+
 	/*
 	 * Process each outer-plan tuple, and then fetch the next one, until we
 	 * exhaust the outer plan.
 	 */
 	for (;;)
 	{
-		outerslot = ExecProcNode(outerPlan);
-		if (TupIsNull(outerslot))
-			break;
-		/* set up for advance_aggregates call */
-		tmpcontext->ecxt_outertuple = outerslot;
+
+		uint32 hashvalue;
+
+		CHECK_FOR_INTERRUPTS();
+
+		/* a NULL input file means we read from the outer plan */
+		if (work->input_file == NULL)
+		{
+			outerslot = ExecProcNode(outerPlan);
+			if (TupIsNull(outerslot))
+				break;
+
+			hashvalue = compute_hash_value(aggstate, outerslot);
+		}
+		else
+		{
+			/* first time through this HashWork item */
+			if (outerslot == NULL)
+				outerslot = MakeSingleTupleTableSlot(aggstate->hashtable->slot->tts_tupleDescriptor);
+
+			outerslot = read_saved_tuple(work->input_file, &hashvalue, outerslot);
+			if (TupIsNull(outerslot))
+			{
+				BufFileClose(work->input_file);
+				work->input_file = NULL;
+				break;
+			}
+		}
 
 		/* Find or build hashtable entry for this tuple's group */
-		entry = lookup_hash_entry(aggstate, outerslot);
+		entry = lookup_hash_entry(aggstate, work, hashvalue, outerslot);
 
-		/* Advance the aggregates */
-		advance_aggregates(aggstate, entry->pergroup);
+		if (entry != NULL)
+		{
 
-		/* Reset per-input-tuple context after each tuple */
-		ResetExprContext(tmpcontext);
+			/* set up for advance_aggregates call */
+			tmpcontext->ecxt_outertuple = outerslot;
+
+			/* Advance the aggregates */
+			advance_aggregates(aggstate, entry->pergroup);
+
+			/* Reset per-input-tuple context after each tuple */
+			ResetExprContext(tmpcontext);
+		}
+		else
+		{
+			/* no entry for this tuple, and we've reached work_mem */
+			save_tuple(aggstate, work, outerslot, hashvalue);
+		}
 	}
 
+	/* add each non-empty output partition as a new work item */
+	for (i = 0; i < work->n_output_partitions; i++)
+	{
+		BufFile			*file = work->output_partitions[i];
+		MemoryContext	 oldContext;
+
+		/* partition is empty */
+		if (work->output_ntuples[i] == 0)
+			continue;
+
+		/* rewind file for reading */
+		if (BufFileSeek(file, 0, 0L, SEEK_SET))
+			ereport(ERROR,
+					(errcode_for_file_access(),
+					 errmsg("could not rewind HashAgg temporary file: %m")));
+
+		oldContext = MemoryContextSwitchTo(aggstate->aggcontext);
+		aggstate->hash_work = lappend(aggstate->hash_work,
+									  hash_work(file,
+												work->output_bits + work->input_bits));
+		MemoryContextSwitchTo(oldContext);
+	}
+
+	pfree(work);
+
 	aggstate->table_filled = true;
 
 	/* Initialize for iteration through the table (first bucket / entry) */
 	IteratorReset(aggstate->hashtable);
 
+	/* ready to return groups from this hash table */
+	return true;
+
 }
 
 /*
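
Reviewer note: taken together with the spill logic, agg_fill_hash_table()
implements a work queue: the initial item reads the outer plan, and every
spill appends child items that re-read their partition file with more hash
bits consumed. A toy standalone model of just that queue discipline (no
files; the Work struct, push(), and the spill policy are invented):

    #include <stdio.h>
    #include <stdlib.h>

    /* a HashWork stripped down to its bit bookkeeping */
    typedef struct Work
    {
        int         input_bits;
        struct Work *next;
    } Work;

    static Work *queue = NULL;

    static void
    push(int input_bits)
    {
        Work *w = malloc(sizeof(Work));

        w->input_bits = input_bits;
        w->next = queue;
        queue = w;
    }

    int
    main(void)
    {
        int output_bits = 2;        /* 4 partitions per spill */
        int processed = 0;

        push(0);                    /* initial item: read the outer plan */
        while (queue != NULL)
        {
            Work *w = queue;
            int   i;

            queue = w->next;
            processed++;

            /* pretend everything below 4 consumed bits must respill */
            if (w->input_bits < 4)
                for (i = 0; i < (1 << output_bits); i++)
                    push(w->input_bits + output_bits);
            free(w);
        }
        printf("processed %d work items\n", processed); /* 1 + 4 + 16 = 21 */
        return 0;
    }
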
@@ -1620,6 +1752,8 @@ agg_retrieve_hash_table(AggState *aggstate)
 		 */
 		ResetExprContext(econtext);
 
+		htab->niterated++;
+
 		/*
 		* Store the copied first input tuple in the tuple table slot reserved
 		* for it, so that it can be used in ExecProject.
@@ -1677,7 +1811,8 @@ agg_retrieve_hash_table(AggState *aggstate)
 
 	}
 
-	aggstate->agg_done = true;
+	/* No more entries in hashtable, so done with this batch */
+	aggstate->table_filled = false;
 
 	/* No more groups */
 	return NULL;
@@ -1739,11 +1874,11 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 	 * recover no-longer-wanted space.
 	 */
 	aggstate->aggcontext =
-		AllocSetContextCreate(CurrentMemoryContext,
+		AllocSetContextCreateTracked(CurrentMemoryContext,
 							  "AggContext",
 							  ALLOCSET_DEFAULT_MINSIZE,
 							  ALLOCSET_DEFAULT_INITSIZE,
-							  ALLOCSET_DEFAULT_MAXSIZE);
+							  ALLOCSET_DEFAULT_MAXSIZE, true);
 
 	/*
 	 * tuple table initialization
@@ -1842,10 +1977,22 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 
 	if (node->aggstrategy == AGG_HASHED)
 	{
+		MemoryContext oldContext;
+
 		build_hash_table(aggstate, outerPlan->plan_width);
 		aggstate->table_filled = false;
+		aggstate->hash_init_state = true;
+		aggstate->hash_disk = false;
+
 		/* Compute the columns we actually need to hash on */
 		aggstate->hash_needed = find_hash_columns(aggstate);
+
+		/* prime with initial work item to read from outer plan */
+		oldContext = MemoryContextSwitchTo(aggstate->aggcontext);
+		aggstate->hash_work = lappend(aggstate->hash_work,
+									  hash_work(NULL, 0));
+		MemoryContextSwitchTo(oldContext);
+
 	}
 	else
 	{
@@ -2264,22 +2411,23 @@ ExecReScanAgg(AggState *node)
 	if (((Agg *) node->ss.ps.plan)->aggstrategy == AGG_HASHED)
 	{
 		/*
-		 * In the hashed case, if we haven't yet built the hash table then we
-		 * can just return; nothing done yet, so nothing to undo. If subnode's
-		 * chgParam is not NULL then it will be re-scanned by ExecProcNode,
-		 * else no reason to re-scan it at all.
+		 * In the hashed case, if we haven't done any execution work yet, we
+		 * can just return; nothing to undo. If subnode's chgParam is not NULL
+		 * then it will be re-scanned by ExecProcNode, else no reason to
+		 * re-scan it at all.
 		 */
-		if (!node->table_filled)
+		if (node->hash_init_state)
 			return;
 
 		/*
-		 * If we do have the hash table and the subplan does not have any
-		 * parameter changes, then we can just rescan the existing hash table;
-		 * no need to build it again.
+		 * If we do have the hash table, it never went to disk, and the
+		 * subplan does not have any parameter changes, then we can just
+		 * rescan the existing hash table; no need to build it again.
 		 */
-		if (node->ss.ps.lefttree->chgParam == NULL)
+		if (node->ss.ps.lefttree->chgParam == NULL && !node->hash_disk)
 		{
 			IteratorReset(node->hashtable);
+			node->table_filled = true;
 			return;
 		}
 	}
@@ -2318,10 +2466,21 @@ ExecReScanAgg(AggState *node)
 
 	if (((Agg *) node->ss.ps.plan)->aggstrategy == AGG_HASHED)
 	{
+		MemoryContext oldContext;
 		Plan * outerPlan = outerPlan((Agg *) node->ss.ps.plan);
+
 		/* Rebuild an empty hash table */
 		build_hash_table(node, outerPlan->plan_width);
+		node->hash_init_state = true;
 		node->table_filled = false;
+		node->hash_disk = false;
+		node->hash_work = NIL;
+
+		/* prime with initial work item to read from outer plan */
+		oldContext = MemoryContextSwitchTo(node->aggcontext);
+		node->hash_work = lappend(node->hash_work,
+								  hash_work(NULL, 0));
+		MemoryContextSwitchTo(oldContext);
 	}
 	else
 	{
@@ -2827,6 +2986,146 @@ AggHashEntry IteratorGetNext(AggHashTable htab)
 }
 
 /*
+ * hash_work
+ *
+ * Construct a HashWork item, which represents one iteration of HashAgg to be
+ * done. Must be called in the aggregate's memory context.
+ */
+static HashWork *
+hash_work(BufFile *input_file, int input_bits)
+{
+	HashWork *work = palloc(sizeof(HashWork));
+
+	work->input_file = input_file;
+	work->input_bits = input_bits;
+
+	/*
+	 * Will be set only if we run out of memory and need to partition an
+	 * additional level.
+	 */
+	work->n_output_partitions = 0;
+	work->output_partitions = NULL;
+	work->output_ntuples = NULL;
+	work->output_bits = 0;
+
+	return work;
+}
+
+/*
+ * save_tuple
+ *
+ * Not enough memory to add tuple as new entry in hash table. Save for later
+ * in the appropriate partition.
+ */
+static void
+save_tuple(AggState *aggstate, HashWork *work, TupleTableSlot *slot,
+		   uint32 hashvalue)
+{
+	int					 partition;
+	MinimalTuple		 tuple;
+	BufFile				*file;
+	int					 written;
+
+	/* once we spill anything, the in-memory hash table is incomplete */
+	aggstate->hash_disk = true;
+
+	if (work->output_partitions == NULL)
+	{
+		int npartitions = HASH_DISK_DEFAULT_PARTITIONS;	/* TODO: choose dynamically */
+		int partition_bits;
+		int i;
+
+		if (npartitions < HASH_DISK_MIN_PARTITIONS)
+			npartitions = HASH_DISK_MIN_PARTITIONS;
+		if (npartitions > HASH_DISK_MAX_PARTITIONS)
+			npartitions = HASH_DISK_MAX_PARTITIONS;
+
+		partition_bits = my_log2(npartitions);
+
+		/* make sure that we don't exhaust the hash bits */
+		if (partition_bits + work->input_bits >= 32)
+			partition_bits = 32 - work->input_bits;
+
+		/* number of partitions will be a power of two */
+		npartitions = 1L << partition_bits;
+
+		work->output_bits = partition_bits;
+		work->n_output_partitions = npartitions;
+		work->output_partitions = palloc(sizeof(BufFile *) * npartitions);
+		work->output_ntuples = palloc0(sizeof(int) * npartitions);
+
+		for (i = 0; i < npartitions; i++)
+			work->output_partitions[i] = BufFileCreateTemp(false);
+	}
+
+	if (work->output_bits == 0)
+		partition = 0;
+	else
+		partition = (hashvalue << work->input_bits) >>
+			(32 - work->output_bits);
+
+	work->output_ntuples[partition]++;
+	file = work->output_partitions[partition];
+	tuple = ExecFetchSlotMinimalTuple(slot);
+
+	written = BufFileWrite(file, (void *) &hashvalue, sizeof(uint32));
+	if (written != sizeof(uint32))
+		ereport(ERROR,
+				(errcode_for_file_access(),
+				 errmsg("could not write to HashAgg temporary file: %m")));
+
+	written = BufFileWrite(file, (void *) tuple, tuple->t_len);
+	if (written != tuple->t_len)
+		ereport(ERROR,
+				(errcode_for_file_access(),
+				 errmsg("could not write to HashAgg temporary file: %m")));
+}
+
+/*
+ * read_saved_tuple
+ *		read the next tuple from a batch file.  Return NULL if no more.
+ *
+ * On success, *hashvalue is set to the tuple's hash value, and the tuple
+ * itself is stored in the given slot.
+ *
+ * Copied with minor modifications from ExecHashJoinGetSavedTuple.
+ */
+static TupleTableSlot *
+read_saved_tuple(BufFile *file, uint32 *hashvalue, TupleTableSlot *tupleSlot)
+{
+	uint32		header[2];
+	size_t		nread;
+	MinimalTuple tuple;
+
+	/*
+	 * Since both the hash value and the MinimalTuple length word are uint32,
+	 * we can read them both in one BufFileRead() call without any type
+	 * cheating.
+	 */
+	nread = BufFileRead(file, (void *) header, sizeof(header));
+	if (nread == 0)				/* end of file */
+	{
+		ExecClearTuple(tupleSlot);
+		return NULL;
+	}
+	if (nread != sizeof(header))
+		ereport(ERROR,
+				(errcode_for_file_access(),
+				 errmsg("could not read from HashAgg temporary file: %m")));
+	*hashvalue = header[0];
+	tuple = (MinimalTuple) palloc(header[1]);
+	tuple->t_len = header[1];
+	nread = BufFileRead(file,
+						(void *) ((char *) tuple + sizeof(uint32)),
+						header[1] - sizeof(uint32));
+	if (nread != header[1] - sizeof(uint32))
+		ereport(ERROR,
+				(errcode_for_file_access(),
+				 errmsg("could not read from HashAgg temporary file: %m")));
+	return ExecStoreMinimalTuple(tuple, tupleSlot, true);
+}
+
+/*
  * Resets the contents of the hash table - removes all the entries and
  * tuples, but keeps the 'size' of the hash table (nbuckets).
  */
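
Reviewer note: the spill format written by save_tuple() and parsed by
read_saved_tuple() is [uint32 hash][MinimalTuple], and reading works in one
gulp because t_len is itself the first uint32 of a MinimalTuple. A
standalone illustration using stdio in place of BufFile (the key string and
all values are invented):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int
    main(void)
    {
        FILE       *f = tmpfile();  /* stands in for BufFileCreateTemp() */
        const char *key = "group key";
        uint32_t    hash = 0xDEADBEEF;
        /* like MinimalTuple, the length word counts itself plus the body */
        uint32_t    t_len = sizeof(uint32_t) + strlen(key) + 1;
        uint32_t    header[2];
        char        body[64];

        /* save_tuple(): hash value first, then the tuple (t_len + body) */
        fwrite(&hash, sizeof(uint32_t), 1, f);
        fwrite(&t_len, sizeof(uint32_t), 1, f);
        fwrite(key, strlen(key) + 1, 1, f);

        rewind(f);                  /* BufFileSeek(file, 0, 0L, SEEK_SET) */

        /* read_saved_tuple(): hash and t_len in one read, then the rest */
        if (fread(header, sizeof(uint32_t), 2, f) != 2 ||
            fread(body, header[1] - sizeof(uint32_t), 1, f) != 1)
            return 1;
        printf("hash %08x, t_len %u, key \"%s\"\n",
               (unsigned) header[0], (unsigned) header[1], body);
        fclose(f);
        return 0;
    }
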
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 0cdb790..926abad 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -113,6 +113,7 @@ bool		enable_bitmapscan = true;
 bool		enable_tidscan = true;
 bool		enable_sort = true;
 bool		enable_hashagg = true;
+bool		enable_hashagg_disk = true;
 bool		enable_nestloop = true;
 bool		enable_material = true;
 bool		enable_mergejoin = true;
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index e1480cd..7b8135d 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -2741,7 +2741,8 @@ choose_hashed_grouping(PlannerInfo *root,
 	/* plus the per-hash-entry overhead */
 	hashentrysize += hash_agg_entry_size(agg_costs->numAggs);
 
-	if (hashentrysize * dNumGroups > work_mem * 1024L)
+	if (!enable_hashagg_disk &&
+		hashentrysize * dNumGroups > work_mem * 1024L)
 		return false;
 
 	/*
@@ -2907,7 +2908,8 @@ choose_hashed_distinct(PlannerInfo *root,
 	/* plus the per-hash-entry overhead */
 	hashentrysize += hash_agg_entry_size(0);
 
-	if (hashentrysize * dNumDistinctRows > work_mem * 1024L)
+	if (!enable_hashagg_disk &&
+		hashentrysize * dNumDistinctRows > work_mem * 1024L)
 		return false;
 
 	/*
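
Reviewer note: the planner change is deliberately minimal; the memory
cross-check is simply skipped when enable_hashagg_disk is on, so a hash
aggregate stays eligible even when the estimate blows past work_mem. With
illustrative numbers (all made up; 4096 kB is the default work_mem):

    #include <stdio.h>

    int
    main(void)
    {
        long    work_mem = 4096;     /* kB, the default setting */
        double  dNumGroups = 2e6;    /* planner's group estimate */
        double  hashentrysize = 64;  /* bytes per entry, illustrative */

        /* choose_hashed_grouping()'s test: ~122 MB vs. 4 MB */
        if (hashentrysize * dNumGroups > work_mem * 1024L)
            printf("pre-patch (or enable_hashagg_disk = off): hashagg rejected\n");
        return 0;
    }

Note that only this eligibility test changes; the cost model is not (yet)
taught about the extra temp-file I/O incurred by spilling.
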
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 8c57803..5128e20 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -749,6 +749,15 @@ static struct config_bool ConfigureNamesBool[] =
 		NULL, NULL, NULL
 	},
 	{
+		{"enable_hashagg_disk", PGC_USERSET, QUERY_TUNING_METHOD,
+			gettext_noop("Enables the planner's use of disk-based hashed aggregation plans."),
+			NULL
+		},
+		&enable_hashagg_disk,
+		true,
+		NULL, NULL, NULL
+	},
+	{
 		{"enable_material", PGC_USERSET, QUERY_TUNING_METHOD,
 			gettext_noop("Enables the planner's use of materialization."),
 			NULL
diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample
index df98b02..8f5b73b 100644
--- a/src/backend/utils/misc/postgresql.conf.sample
+++ b/src/backend/utils/misc/postgresql.conf.sample
@@ -266,6 +266,7 @@
 
 #enable_bitmapscan = on
 #enable_hashagg = on
+#enable_hashagg_disk = on
 #enable_hashjoin = on
 #enable_indexscan = on
 #enable_indexonlyscan = on
diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c
index a70b296..97034f1 100644
--- a/src/backend/utils/mmgr/mcxt.c
+++ b/src/backend/utils/mmgr/mcxt.c
@@ -634,7 +634,7 @@ MemoryContextCreate(NodeTag tag, Size size,
 	 */
 	if (track_mem)
 	{
-		node->accounting = (MemoryAccounting)MemoryContextAlloc(TopMemoryContext,
+		node->accounting = (MemoryAccounting)MemoryContextAllocZero(TopMemoryContext,
 												sizeof(MemoryAccountingData));
 		if (parent)
 			node->accounting->parent = parent->accounting;
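
Reviewer note: the mcxt.c change matters because MemoryContextAlloc()
returns uninitialized memory, so the accounting counters would start from
garbage; MemoryContextAllocZero() guarantees they start at zero. The same
pitfall in miniature (plain C; the Accounting struct here is a hypothetical
stand-in for MemoryAccountingData):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Accounting
    {
        size_t      allocated;      /* running total; must start at 0 */
        struct Accounting *parent;
    } Accounting;

    int
    main(void)
    {
        /* calloc (like MemoryContextAllocZero) zeroes the counters;
         * plain malloc (like MemoryContextAlloc) would not */
        Accounting *a = calloc(1, sizeof(Accounting));

        a->allocated += 64;         /* well-defined only because of calloc */
        printf("allocated = %zu\n", a->allocated);
        free(a);
        return 0;
    }
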
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 995389b..1a61ac7 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -1726,6 +1726,11 @@ typedef struct AggState
 	bool		table_filled;	/* hash table filled yet? */
 	AggHashTable	hashtable;	/* instance of the simple hash table */
 
+	/* state for disk-based batching */
+	bool		hash_init_state; /* in initial state before execution? */
+	bool		hash_disk;		/* have we spilled tuples to disk? */
+	List	   *hash_work;		/* remaining HashWork items to process */
+
 } AggState;
 
 /* ----------------
diff --git a/src/include/optimizer/cost.h b/src/include/optimizer/cost.h
index 75e2afb..d363e65 100644
--- a/src/include/optimizer/cost.h
+++ b/src/include/optimizer/cost.h
@@ -57,6 +57,7 @@ extern bool enable_bitmapscan;
 extern bool enable_tidscan;
 extern bool enable_sort;
 extern bool enable_hashagg;
+extern bool enable_hashagg_disk;
 extern bool enable_nestloop;
 extern bool enable_material;
 extern bool enable_mergejoin;
diff --git a/src/test/regress/expected/rangefuncs.out b/src/test/regress/expected/rangefuncs.out
index 774e75e..e88c83c 100644
--- a/src/test/regress/expected/rangefuncs.out
+++ b/src/test/regress/expected/rangefuncs.out
@@ -3,6 +3,7 @@ SELECT name, setting FROM pg_settings WHERE name LIKE 'enable%';
 ----------------------+---------
  enable_bitmapscan    | on
  enable_hashagg       | on
+ enable_hashagg_disk  | on
  enable_hashjoin      | on
  enable_indexonlyscan | on
  enable_indexscan     | on
@@ -12,7 +13,7 @@ SELECT name, setting FROM pg_settings WHERE name LIKE 'enable%';
  enable_seqscan       | on
  enable_sort          | on
  enable_tidscan       | on
-(11 rows)
+(12 rows)
 
 CREATE TABLE foo2(fooid int, f2 int);
 INSERT INTO foo2 VALUES(1, 11);
