Index: src/backend/access/common/indextuple.c =================================================================== RCS file: /var/lib/cvs/pgsql/src/backend/access/common/indextuple.c,v retrieving revision 1.55 diff -c -r1.55 indextuple.c *** src/backend/access/common/indextuple.c 25 Oct 2001 05:49:20 -0000 1.55 --- src/backend/access/common/indextuple.c 12 Mar 2002 06:25:01 -0000 *************** *** 432,434 **** --- 432,467 ---- ret = *target; memmove((char *) ret, (char *) source, size); } + + bool + index_is_equal(TupleDesc itupdesc, IndexTuple itup, OffsetNumber offnum, + int keysz, ScanKey scankey) + { + int i; + for (i = 1; i <= keysz; i++) + { + ScanKey entry = &scankey[i - 1]; + AttrNumber attno; + Datum datum; + bool isNull; + int32 result; + + attno = entry->sk_attno; + Assert(attno == i); + datum = index_getattr(itup, attno, itupdesc, &isNull); + + /* NULLs are never equal to anything */ + if (entry->sk_flags & SK_ISNULL || isNull) + return false; + + result = DatumGetInt32(FunctionCall2(&entry->sk_func, + entry->sk_argument, + datum)); + + if (result != 0) + return false; + } + + /* if we get here, the keys are equal */ + return true; + } Index: src/backend/access/hash/hash.c =================================================================== RCS file: /var/lib/cvs/pgsql/src/backend/access/hash/hash.c,v retrieving revision 1.56 diff -c -r1.56 hash.c *** src/backend/access/hash/hash.c 9 Mar 2002 17:35:35 -0000 1.56 --- src/backend/access/hash/hash.c 12 Mar 2002 06:25:01 -0000 *************** *** 34,39 **** --- 34,41 ---- typedef struct { double indtuples; + bool isUnique; + Relation heapRel; } HashBuildState; static void hashbuildCallback(Relation index, *************** *** 77,86 **** /* build the index */ buildstate.indtuples = 0; /* do the heap scan */ reltuples = IndexBuildHeapScan(heap, index, indexInfo, ! 
hashbuildCallback, (void *) &buildstate); /* all done */ BuildingHash = false; --- 79,90 ---- /* build the index */ buildstate.indtuples = 0; + buildstate.isUnique = indexInfo->ii_Unique; + buildstate.heapRel = heap; /* do the heap scan */ reltuples = IndexBuildHeapScan(heap, index, indexInfo, ! hashbuildCallback, (void *) &buildstate); /* all done */ BuildingHash = false; *************** *** 126,132 **** HashItem hitem; InsertIndexResult res; ! /* form an index tuple and point it at the heap tuple */ itup = index_formtuple(RelationGetDescr(index), attdata, nulls); itup->t_tid = htup->t_self; --- 130,136 ---- HashItem hitem; InsertIndexResult res; ! /* Form an index tuple and point it at the heap tuple */ itup = index_formtuple(RelationGetDescr(index), attdata, nulls); itup->t_tid = htup->t_self; *************** *** 139,151 **** hitem = _hash_formitem(itup); ! res = _hash_doinsert(index, hitem); ! ! if (res) ! pfree(res); buildstate->indtuples += 1; pfree(hitem); pfree(itup); } --- 143,156 ---- hitem = _hash_formitem(itup); ! /* Do the insertion */ ! res = _hash_doinsert(index, hitem, buildstate->isUnique, ! buildstate->heapRel); buildstate->indtuples += 1; + if (res) + pfree(res); pfree(hitem); pfree(itup); } *************** *** 164,172 **** Datum *datum = (Datum *) PG_GETARG_POINTER(1); char *nulls = (char *) PG_GETARG_POINTER(2); ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3); - #ifdef NOT_USED Relation heapRel = (Relation) PG_GETARG_POINTER(4); - #endif InsertIndexResult res; HashItem hitem; --- 169,175 ---- *************** *** 193,199 **** hitem = _hash_formitem(itup); ! res = _hash_doinsert(rel, hitem); pfree(hitem); pfree(itup); --- 196,202 ---- hitem = _hash_formitem(itup); ! 
res = _hash_doinsert(rel, hitem, rel->rd_uniqueindex, heapRel); pfree(hitem); pfree(itup); Index: src/backend/access/hash/hashinsert.c =================================================================== RCS file: /var/lib/cvs/pgsql/src/backend/access/hash/hashinsert.c,v retrieving revision 1.24 diff -c -r1.24 hashinsert.c *** src/backend/access/hash/hashinsert.c 6 Mar 2002 20:49:40 -0000 1.24 --- src/backend/access/hash/hashinsert.c 12 Mar 2002 06:25:01 -0000 *************** *** 16,24 **** #include "postgres.h" #include "access/hash.h" ! static InsertIndexResult _hash_insertonpg(Relation rel, Buffer buf, int keysz, ScanKey scankey, HashItem hitem, Buffer metabuf); ! static OffsetNumber _hash_pgaddtup(Relation rel, Buffer buf, int keysz, ScanKey itup_scankey, Size itemsize, HashItem hitem); /* * _hash_doinsert() -- Handle insertion of a single HashItem in the table. --- 16,32 ---- #include "postgres.h" #include "access/hash.h" + #include "storage/lmgr.h" ! static TransactionId _hash_check_unique(Relation rel, HashItem hitem, ! Relation heapRel, Buffer buf, ! ScanKey scankey); ! static InsertIndexResult _hash_insertonpg(Relation rel, Buffer buf, ! int keysz, ScanKey scankey, ! HashItem hitem, Buffer metabuf); ! static OffsetNumber _hash_pgaddtup(Relation rel, Buffer buf, ! int keysz, ScanKey itup_scankey, ! Size itemsize, HashItem hitem); /* * _hash_doinsert() -- Handle insertion of a single HashItem in the table. *************** *** 29,41 **** * hashitem. */ InsertIndexResult ! _hash_doinsert(Relation rel, HashItem hitem) { Buffer buf; Buffer metabuf; - BlockNumber blkno; HashMetaPage metap; - IndexTuple itup; InsertIndexResult res; ScanKey itup_scankey; int natts; --- 37,50 ---- * hashitem. */ InsertIndexResult ! _hash_doinsert(Relation rel, ! HashItem hitem, ! bool index_is_unique, ! 
Relation heapRel) { Buffer buf; Buffer metabuf; HashMetaPage metap; InsertIndexResult res; ScanKey itup_scankey; int natts; *************** *** 45,60 **** metap = (HashMetaPage) BufferGetPage(metabuf); _hash_checkpage((Page) metap, LH_META_PAGE); - /* we need a scan key to do our search, so build one */ - itup = &(hitem->hash_itup); if ((natts = rel->rd_rel->relnatts) != 1) elog(ERROR, "Hash indices valid for only one index key."); ! itup_scankey = _hash_mkscankey(rel, itup); /* * find the first page in the bucket chain containing this key and * place it in buf. _hash_search obtains a read lock for us. */ _hash_search(rel, natts, itup_scankey, &buf, metap); page = BufferGetPage(buf); _hash_checkpage(page, LH_BUCKET_PAGE); --- 54,70 ---- metap = (HashMetaPage) BufferGetPage(metabuf); _hash_checkpage((Page) metap, LH_META_PAGE); if ((natts = rel->rd_rel->relnatts) != 1) elog(ERROR, "Hash indices valid for only one index key."); ! ! /* we need a scan key to do our search, so build one */ ! itup_scankey = _hash_mkscankey(rel, &(hitem->hash_itup)); /* * find the first page in the bucket chain containing this key and * place it in buf. _hash_search obtains a read lock for us. */ + top: _hash_search(rel, natts, itup_scankey, &buf, metap); page = BufferGetPage(buf); _hash_checkpage(page, LH_BUCKET_PAGE); *************** *** 63,72 **** * trade in our read lock for a write lock so that we can do the * insertion. */ ! blkno = BufferGetBlockNumber(buf); ! _hash_relbuf(rel, buf, HASH_READ); ! buf = _hash_getbuf(rel, blkno, HASH_WRITE); /* * XXX btree comment (haven't decided what to do in hash): don't think --- 73,111 ---- * trade in our read lock for a write lock so that we can do the * insertion. */ ! page = _hash_chgbufaccess(rel, &buf, HASH_READ, HASH_WRITE); + /* + * If we're not allowing duplicates, make sure the key isn't already + * in the index. 
+ * + * NOTE: obviously, _hash_check_unique can only detect keys that are + * already in the index; so it cannot defend against concurrent + * insertions of the same key. We protect against that by means + * of holding a write lock on the target page. Any other would-be + * inserter of the same key must acquire a write lock on the same + * target page, so only one would-be inserter can be making the check + * at one time. Furthermore, once we are past the check we hold + * write locks continuously until we have performed our insertion, + * so no later inserter can fail to see our insertion. (This + * requires some care in _hash_insertonpg.) + * + * If we must wait for another xact, we release the lock while waiting, + * and then must start over completely. + */ + if (index_is_unique) + { + TransactionId xwait = _hash_check_unique(rel, hitem, heapRel, + buf, itup_scankey); + + /* Are we waiting another transaction to finish? */ + if (TransactionIdIsValid(xwait)) + { + _hash_relbuf(rel, buf, HASH_WRITE); + XactLockTableWait(xwait); + goto top; + } + } /* * XXX btree comment (haven't decided what to do in hash): don't think *************** *** 77,85 **** * the right place for the key we want to insert. */ ! /* do the insertion */ ! res = _hash_insertonpg(rel, buf, natts, itup_scankey, ! hitem, metabuf); /* be tidy */ _hash_freeskey(itup_scankey); --- 116,123 ---- * the right place for the key we want to insert. */ ! /* do the insertion & release the write lock */ ! res = _hash_insertonpg(rel, buf, natts, itup_scankey, hitem, metabuf); /* be tidy */ _hash_freeskey(itup_scankey); *************** *** 88,93 **** --- 126,147 ---- } /* + * _hash_check_unique() -- Check for violation of unique index constraint + * + * Returns InvalidTransactionId if there is no conflict, else an xact ID + * we must wait for to see if it commits a conflicting tuple. If an actual + * conflict is detected, no return --- just elog(). 
+ */ + static TransactionId _hash_check_unique(Relation rel, + HashItem hitem, + Relation heapRel, + Buffer buf, + ScanKey scankey) + { + return InvalidTransactionId; /* FIXME: unique-key check not yet implemented; no conflict reported */ + } + + /* * _hash_insertonpg() -- Insert a tuple on a particular page in the table. * * This recursive procedure does the following things: *************** *** 127,135 **** pageopaque = (HashPageOpaque) PageGetSpecialPointer(page); bucket = pageopaque->hasho_bucket; ! itemsz = IndexTupleDSize(hitem->hash_itup) ! + (sizeof(HashItemData) - sizeof(IndexTupleData)); ! itemsz = MAXALIGN(itemsz); while (PageGetFreeSpace(page) < itemsz) { --- 181,188 ---- pageopaque = (HashPageOpaque) PageGetSpecialPointer(page); bucket = pageopaque->hasho_bucket; ! itemsz = MAXALIGN(IndexTupleDSize(hitem->hash_itup) + ! sizeof(HashItemData) - sizeof(IndexTupleData)); while (PageGetFreeSpace(page) < itemsz) { *************** *** 194,206 **** --- 247,262 ---- } + /* Releases the write lock */ _hash_wrtbuf(rel, buf); if (do_expand || (metap->hashm_nkeys / (metap->hashm_maxbucket + 1)) > metap->hashm_ffactor) _hash_expandtable(rel, metabuf); + _hash_relbuf(rel, metabuf, HASH_READ); + return res; } Index: src/backend/access/hash/hashpage.c =================================================================== RCS file: /var/lib/cvs/pgsql/src/backend/access/hash/hashpage.c,v retrieving revision 1.35 diff -c -r1.35 hashpage.c *** src/backend/access/hash/hashpage.c 6 Mar 2002 20:49:42 -0000 1.35 --- src/backend/access/hash/hashpage.c 12 Mar 2002 06:25:02 -0000 *************** *** 360,366 **** * the page, because no other backend can hold a READ lock on the page, * and that means no other backend currently has an indexscan stopped on * any item of the item being deleted. Our own backend might have such ! * an indexscan (in fact *will*, since that's how VACUUM found the item * in the first place), but _hash_adjscans will fix the scan position. 
*/ void --- 360,366 ---- * the page, because no other backend can hold a READ lock on the page, * and that means no other backend currently has an indexscan stopped on * any item of the item being deleted. Our own backend might have such ! * an indexscan (in fact it *will*, since that's how VACUUM found the item * in the first place), but _hash_adjscans will fix the scan position. */ void *************** *** 600,606 **** itup = &(hitem->hash_itup); itupdesc = RelationGetDescr(rel); datum = index_getattr(itup, 1, itupdesc, &null); ! bucket = _hash_call(rel, metap, datum); if (bucket == nbucket) { --- 600,606 ---- itup = &(hitem->hash_itup); itupdesc = RelationGetDescr(rel); datum = index_getattr(itup, 1, itupdesc, &null); ! bucket = _hash_bucket(rel, metap, datum); if (bucket == nbucket) { Index: src/backend/access/hash/hashsearch.c =================================================================== RCS file: /var/lib/cvs/pgsql/src/backend/access/hash/hashsearch.c,v retrieving revision 1.27 diff -c -r1.27 hashsearch.c *** src/backend/access/hash/hashsearch.c 25 Oct 2001 05:49:21 -0000 1.27 --- src/backend/access/hash/hashsearch.c 12 Mar 2002 06:25:02 -0000 *************** *** 20,26 **** /* * _hash_search() -- Finds the page/bucket that the contains the ! * scankey and loads it into *bufP. the buffer has a read lock. */ void _hash_search(Relation rel, --- 20,26 ---- /* * _hash_search() -- Finds the page/bucket that the contains the ! * scankey and loads it into *bufP. The buffer has a read lock. */ void _hash_search(Relation rel, *************** *** 33,40 **** Datum keyDatum; Bucket bucket; ! if (scankey == (ScanKey) NULL || ! (keyDatum = scankey[0].sk_argument) == (Datum) NULL) { /* * If the scankey argument is NULL, all tuples will satisfy the --- 33,40 ---- Datum keyDatum; Bucket bucket; ! keyDatum = (scankey != (ScanKey) NULL) ? scankey[0].sk_argument : (Datum) NULL; ! 
if (scankey == (ScanKey) NULL || keyDatum == (Datum) NULL) { /* * If the scankey argument is NULL, all tuples will satisfy the *************** *** 43,49 **** bucket = 0; } else ! bucket = _hash_call(rel, metap, keyDatum); blkno = BUCKET_TO_BLKNO(bucket); --- 43,49 ---- bucket = 0; } else ! bucket = _hash_bucket(rel, metap, keyDatum); blkno = BUCKET_TO_BLKNO(bucket); *************** *** 162,168 **** * _hash_first() -- Find the first item in a scan. * * Return the RetrieveIndexResult of the first item in the tree that ! * satisfies the qualificatin associated with the scan descriptor. On * exit, the page containing the current index tuple is read locked * and pinned, and the scan's opaque data entry is updated to * include the buffer. --- 162,168 ---- * _hash_first() -- Find the first item in a scan. * * Return the RetrieveIndexResult of the first item in the tree that ! * satisfies the qualification associated with the scan descriptor. On * exit, the page containing the current index tuple is read locked * and pinned, and the scan's opaque data entry is updated to * include the buffer. *************** *** 204,215 **** opaque = (HashPageOpaque) PageGetSpecialPointer(page); /* ! * if we are scanning forward, we need to find the first non-empty ! * page (if any) in the bucket chain. since overflow pages are never * empty, this had better be either the bucket page or the first * overflow page. * ! * if we are scanning backward, we always go all the way to the end of * the bucket chain. */ if (PageIsEmpty(page)) --- 204,215 ---- opaque = (HashPageOpaque) PageGetSpecialPointer(page); /* ! * If we are scanning forward, we need to find the first non-empty ! * page (if any) in the bucket chain. Since overflow pages are never * empty, this had better be either the bucket page or the first * overflow page. * ! * If we are scanning backward, we always go all the way to the end of * the bucket chain. 
*/ if (PageIsEmpty(page)) Index: src/backend/access/hash/hashutil.c =================================================================== RCS file: /var/lib/cvs/pgsql/src/backend/access/hash/hashutil.c,v retrieving revision 1.28 diff -c -r1.28 hashutil.c *** src/backend/access/hash/hashutil.c 6 Mar 2002 20:49:43 -0000 1.28 --- src/backend/access/hash/hashutil.c 12 Mar 2002 06:25:02 -0000 *************** *** 92,98 **** } Bucket ! _hash_call(Relation rel, HashMetaPage metap, Datum key) { FmgrInfo *procinfo; uint32 n; --- 92,98 ---- } Bucket ! _hash_bucket(Relation rel, HashMetaPage metap, Datum key) { FmgrInfo *procinfo; uint32 n; *************** *** 128,147 **** void _hash_checkpage(Page page, int flags) { ! HashPageOpaque opaque; ! Assert(page); Assert(((PageHeader) (page))->pd_lower >= (sizeof(PageHeaderData) - sizeof(ItemIdData))); - #if 1 Assert(((PageHeader) (page))->pd_upper <= (BLCKSZ - MAXALIGN(sizeof(HashPageOpaqueData)))); Assert(((PageHeader) (page))->pd_special == (BLCKSZ - MAXALIGN(sizeof(HashPageOpaqueData)))); Assert(((PageHeader) (page))->pd_opaque.od_pagesize == BLCKSZ); - #endif if (flags) { ! opaque = (HashPageOpaque) PageGetSpecialPointer(page); Assert(opaque->hasho_flag & flags); } } --- 128,145 ---- void _hash_checkpage(Page page, int flags) { ! #ifdef USE_ASSERT_CHECKING Assert(page); Assert(((PageHeader) (page))->pd_lower >= (sizeof(PageHeaderData) - sizeof(ItemIdData))); Assert(((PageHeader) (page))->pd_upper <= (BLCKSZ - MAXALIGN(sizeof(HashPageOpaqueData)))); Assert(((PageHeader) (page))->pd_special == (BLCKSZ - MAXALIGN(sizeof(HashPageOpaqueData)))); Assert(((PageHeader) (page))->pd_opaque.od_pagesize == BLCKSZ); if (flags) { ! 
HashPageOpaque opaque = (HashPageOpaque) PageGetSpecialPointer(page); Assert(opaque->hasho_flag & flags); } + #endif /* USE_ASSERT_CHECKING */ } Index: src/backend/access/nbtree/nbtinsert.c =================================================================== RCS file: /var/lib/cvs/pgsql/src/backend/access/nbtree/nbtinsert.c,v retrieving revision 1.90 diff -c -r1.90 nbtinsert.c *** src/backend/access/nbtree/nbtinsert.c 6 Mar 2002 06:09:17 -0000 1.90 --- src/backend/access/nbtree/nbtinsert.c 12 Mar 2002 06:25:02 -0000 *************** *** 17,22 **** --- 17,23 ---- #include "access/heapam.h" #include "access/nbtree.h" + #include "access/itup.h" #include "miscadmin.h" *************** *** 2049,2087 **** int keysz, ScanKey scankey) { BTItem btitem; - IndexTuple itup; - int i; /* Better be comparing to a leaf item */ Assert(P_ISLEAF((BTPageOpaque) PageGetSpecialPointer(page))); btitem = (BTItem) PageGetItem(page, PageGetItemId(page, offnum)); - itup = &(btitem->bti_itup); ! for (i = 1; i <= keysz; i++) ! { ! ScanKey entry = &scankey[i - 1]; ! AttrNumber attno; ! Datum datum; ! bool isNull; ! int32 result; ! ! attno = entry->sk_attno; ! Assert(attno == i); ! datum = index_getattr(itup, attno, itupdesc, &isNull); ! ! /* NULLs are never equal to anything */ ! if (entry->sk_flags & SK_ISNULL || isNull) ! return false; ! ! result = DatumGetInt32(FunctionCall2(&entry->sk_func, ! entry->sk_argument, ! datum)); ! ! if (result != 0) ! return false; ! } ! ! /* if we get here, the keys are equal */ ! return true; } --- 2050,2061 ---- int keysz, ScanKey scankey) { BTItem btitem; /* Better be comparing to a leaf item */ Assert(P_ISLEAF((BTPageOpaque) PageGetSpecialPointer(page))); btitem = (BTItem) PageGetItem(page, PageGetItemId(page, offnum)); ! return index_is_equal(itupdesc, &(btitem->bti_itup), ! 
offnum, keysz, scankey); } Index: src/backend/parser/analyze.c =================================================================== RCS file: /var/lib/cvs/pgsql/src/backend/parser/analyze.c,v retrieving revision 1.218 diff -c -r1.218 analyze.c *** src/backend/parser/analyze.c 8 Mar 2002 06:55:08 -0000 1.218 --- src/backend/parser/analyze.c 12 Mar 2002 06:25:02 -0000 *************** *** 1650,1656 **** /* * transformIndexStmt - ! * transforms the qualification of the index statement */ static Query * transformIndexStmt(ParseState *pstate, IndexStmt *stmt) --- 1650,1656 ---- /* * transformIndexStmt - ! * Transforms a CREATE INDEX statement */ static Query * transformIndexStmt(ParseState *pstate, IndexStmt *stmt) Index: src/backend/parser/gram.y =================================================================== RCS file: /var/lib/cvs/pgsql/src/backend/parser/gram.y,v retrieving revision 2.289 diff -c -r2.289 gram.y *** src/backend/parser/gram.y 9 Mar 2002 17:37:16 -0000 2.289 --- src/backend/parser/gram.y 12 Mar 2002 06:25:02 -0000 *************** *** 2491,2497 **** /***************************************************************************** * * QUERY: ! * create index on * [ using ] "(" ( with )+ ")" * [ where ] * --- 2491,2497 ---- /***************************************************************************** * * QUERY: ! * create [ unique ] index on * [ using ] "(" ( with )+ ")" * [ where ] * Index: src/include/access/hash.h =================================================================== RCS file: /var/lib/cvs/pgsql/src/include/access/hash.h,v retrieving revision 1.45 diff -c -r1.45 hash.h *** src/include/access/hash.h 9 Mar 2002 17:35:37 -0000 1.45 --- src/include/access/hash.h 12 Mar 2002 06:25:07 -0000 *************** *** 271,277 **** /* private routines */ /* hashinsert.c */ ! extern InsertIndexResult _hash_doinsert(Relation rel, HashItem hitem); /* hashovfl.c */ --- 271,280 ---- /* private routines */ /* hashinsert.c */ ! 
extern InsertIndexResult _hash_doinsert(Relation rel, ! HashItem hitem, ! bool index_is_unique, ! Relation heapRel); /* hashovfl.c */ *************** *** 317,323 **** extern void _hash_freeskey(ScanKey skey); extern bool _hash_checkqual(IndexScanDesc scan, IndexTuple itup); extern HashItem _hash_formitem(IndexTuple itup); ! extern Bucket _hash_call(Relation rel, HashMetaPage metap, Datum key); extern uint32 _hash_log2(uint32 num); extern void _hash_checkpage(Page page, int flags); --- 320,326 ---- extern void _hash_freeskey(ScanKey skey); extern bool _hash_checkqual(IndexScanDesc scan, IndexTuple itup); extern HashItem _hash_formitem(IndexTuple itup); ! extern Bucket _hash_bucket(Relation rel, HashMetaPage metap, Datum key); extern uint32 _hash_log2(uint32 num); extern void _hash_checkpage(Page page, int flags); Index: src/include/access/itup.h =================================================================== RCS file: /var/lib/cvs/pgsql/src/include/access/itup.h,v retrieving revision 1.33 diff -c -r1.33 itup.h *** src/include/access/itup.h 5 Nov 2001 17:46:31 -0000 1.33 --- src/include/access/itup.h 12 Mar 2002 06:25:07 -0000 *************** *** 17,22 **** --- 17,23 ---- #include "access/ibit.h" #include "access/tupdesc.h" #include "access/tupmacs.h" + #include "access/skey.h" #include "storage/itemptr.h" *************** *** 150,154 **** --- 151,157 ---- extern RetrieveIndexResult FormRetrieveIndexResult(ItemPointer indexItemPointer, ItemPointer heapItemPointer); extern void CopyIndexTuple(IndexTuple source, IndexTuple *target); + bool index_is_equal(TupleDesc itupdesc, IndexTuple itup, + OffsetNumber offnum, int keysz, ScanKey scankey); #endif /* ITUP_H */ Index: src/include/catalog/pg_am.h =================================================================== RCS file: /var/lib/cvs/pgsql/src/include/catalog/pg_am.h,v retrieving revision 1.21 diff -c -r1.21 pg_am.h *** src/include/catalog/pg_am.h 5 Nov 2001 17:46:32 -0000 1.21 --- src/include/catalog/pg_am.h 12 
Mar 2002 06:25:07 -0000 *************** *** 103,109 **** DATA(insert OID = 403 ( btree PGUID 5 1 1 t t t t btgettuple btinsert btbeginscan btrescan btendscan btmarkpos btrestrpos btbuild btbulkdelete btcostestimate )); DESCR("b-tree index access method"); #define BTREE_AM_OID 403 ! DATA(insert OID = 405 ( hash PGUID 1 1 0 f f f t hashgettuple hashinsert hashbeginscan hashrescan hashendscan hashmarkpos hashrestrpos hashbuild hashbulkdelete hashcostestimate )); DESCR("hash index access method"); DATA(insert OID = 783 ( gist PGUID 100 7 0 f t f f gistgettuple gistinsert gistbeginscan gistrescan gistendscan gistmarkpos gistrestrpos gistbuild gistbulkdelete gistcostestimate )); DESCR("GiST index access method"); --- 103,109 ---- DATA(insert OID = 403 ( btree PGUID 5 1 1 t t t t btgettuple btinsert btbeginscan btrescan btendscan btmarkpos btrestrpos btbuild btbulkdelete btcostestimate )); DESCR("b-tree index access method"); #define BTREE_AM_OID 403 ! DATA(insert OID = 405 ( hash PGUID 1 1 0 t f f t hashgettuple hashinsert hashbeginscan hashrescan hashendscan hashmarkpos hashrestrpos hashbuild hashbulkdelete hashcostestimate )); DESCR("hash index access method"); DATA(insert OID = 783 ( gist PGUID 100 7 0 f t f f gistgettuple gistinsert gistbeginscan gistrescan gistendscan gistmarkpos gistrestrpos gistbuild gistbulkdelete gistcostestimate )); DESCR("GiST index access method");