Re: Page access

From: Bruce Momjian <pgman(at)candle(dot)pha(dot)pa(dot)us>
To: Manfred Koizar <mkoi-pg(at)aon(dot)at>
Cc: pgsql-patches(at)postgresql(dot)org
Subject: Re: Page access
Date: 2002-07-02 05:48:40
Message-ID: 200207020548.g625meS25272@candle.pha.pa.us
Lists: pgsql-patches


Patch applied. Thanks.
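
For anyone skimming the diff: PageGetItemId() takes a 1-based OffsetNumber,
while pd_linp[] is a 0-based C array, which is why the old "pd_linp + i"
spellings become "PageGetItemId(page, i + 1)" throughout. Below is a minimal
usage sketch, not part of the patch; it assumes a Page variable "page" and
uses FirstOffsetNumber/OffsetNumberNext from storage/off.h:

    OffsetNumber offnum;
    OffsetNumber maxoff = PageGetMaxOffsetNumber(page);

    for (offnum = FirstOffsetNumber;
         offnum <= maxoff;
         offnum = OffsetNumberNext(offnum))
    {
        ItemId      lp = PageGetItemId(page, offnum);   /* 1-based offset */

        if (!(lp->lp_flags & LP_USED))
            continue;           /* skip unused slots, as the old code did */

        /* ... work with PageGetItem(page, lp) ... */
    }

BTMaxItemSize(page) likewise just packages the old inline expression:
roughly one third of the space left after the page header and the
MAXALIGN'd b-tree special area, minus one line pointer.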

---------------------------------------------------------------------------

Manfred Koizar wrote:
> There was already a macro PageGetItemId; it is now used in (almost)
> all places where pd_linp is accessed. Also introduce the new macros
> SizeOfPageHeaderData and BTMaxItemSize.
> This is purely a source code cleanup; no behaviour is changed.
> All 81 tests passed.
>
> Servus
> Manfred
>
> diff -ru ../base/src/backend/access/heap/heapam.c src/backend/access/heap/heapam.c
> --- ../base/src/backend/access/heap/heapam.c 2002-06-17 22:35:32.000000000 +0200
> +++ src/backend/access/heap/heapam.c 2002-06-19 16:57:07.000000000 +0200
> @@ -2067,7 +2067,7 @@
>
> while ((char *) unused < unend)
> {
> - lp = ((PageHeader) page)->pd_linp + *unused;
> + lp = PageGetItemId(page, *unused + 1);
> lp->lp_flags &= ~LP_USED;
> unused++;
> }
> diff -ru ../base/src/backend/access/nbtree/nbtinsert.c src/backend/access/nbtree/nbtinsert.c
> --- ../base/src/backend/access/nbtree/nbtinsert.c 2002-05-27 16:29:37.000000000 +0200
> +++ src/backend/access/nbtree/nbtinsert.c 2002-06-20 12:19:57.000000000 +0200
> @@ -383,10 +383,9 @@
> * to 1/3 the per-page available space. Note that at this point,
> * itemsz doesn't include the ItemId.
> */
> - if (itemsz > (PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData))
> + if (itemsz > BTMaxItemSize(page))
> elog(ERROR, "btree: index item size %lu exceeds maximum %lu",
> - (unsigned long) itemsz,
> - (PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData));
> + (unsigned long) itemsz, BTMaxItemSize(page));
>
> /*
> * Determine exactly where new item will go.
> @@ -1020,9 +1019,8 @@
>
> /* Total free space available on a btree page, after fixed overhead */
> leftspace = rightspace =
> - PageGetPageSize(page) - sizeof(PageHeaderData) -
> - MAXALIGN(sizeof(BTPageOpaqueData))
> - +sizeof(ItemIdData);
> + PageGetPageSize(page) - SizeOfPageHeaderData -
> + MAXALIGN(sizeof(BTPageOpaqueData));
>
> /*
> * Finding the best possible split would require checking all the
> diff -ru ../base/src/backend/access/nbtree/nbtsort.c src/backend/access/nbtree/nbtsort.c
> --- ../base/src/backend/access/nbtree/nbtsort.c 2001-11-11 01:51:13.000000000 +0100
> +++ src/backend/access/nbtree/nbtsort.c 2002-06-20 12:27:41.000000000 +0200
> @@ -346,10 +346,9 @@
> * oversize items being inserted into an already-existing index. But
> * during creation of an index, we don't go through there.
> */
> - if (btisz > (PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData))
> + if (btisz > BTMaxItemSize(npage))
> elog(ERROR, "btree: index item size %lu exceeds maximum %ld",
> - (unsigned long) btisz,
> - (PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData));
> + (unsigned long) btisz, BTMaxItemSize(npage));
>
> if (pgspc < btisz || pgspc < state->btps_full)
> {
> diff -ru ../base/src/backend/storage/page/bufpage.c src/backend/storage/page/bufpage.c
> --- ../base/src/backend/storage/page/bufpage.c 2002-03-06 08:10:07.000000000 +0100
> +++ src/backend/storage/page/bufpage.c 2002-06-20 09:40:12.000000000 +0200
> @@ -37,13 +37,12 @@
> specialSize = MAXALIGN(specialSize);
>
> Assert(pageSize == BLCKSZ);
> - Assert(pageSize >
> - specialSize + sizeof(PageHeaderData) - sizeof(ItemIdData));
> + Assert(pageSize > specialSize + SizeOfPageHeaderData);
>
> /* Make sure all fields of page are zero, as well as unused space */
> MemSet(p, 0, pageSize);
>
> - p->pd_lower = sizeof(PageHeaderData) - sizeof(ItemIdData);
> + p->pd_lower = SizeOfPageHeaderData;
> p->pd_upper = pageSize - specialSize;
> p->pd_special = pageSize - specialSize;
> PageSetPageSize(page, pageSize);
> @@ -88,7 +87,7 @@
> /*
> * Be wary about corrupted page pointers
> */
> - if (phdr->pd_lower < (sizeof(PageHeaderData) - sizeof(ItemIdData)) ||
> + if (phdr->pd_lower < SizeOfPageHeaderData ||
> phdr->pd_lower > phdr->pd_upper ||
> phdr->pd_upper > phdr->pd_special ||
> phdr->pd_special > BLCKSZ)
> @@ -112,7 +111,7 @@
> }
> if (offsetNumber < limit)
> {
> - itemId = &phdr->pd_linp[offsetNumber - 1];
> + itemId = PageGetItemId(phdr, offsetNumber);
> if (((*itemId).lp_flags & LP_USED) ||
> ((*itemId).lp_len != 0))
> {
> @@ -136,7 +135,7 @@
> /* look for "recyclable" (unused & deallocated) ItemId */
> for (offsetNumber = 1; offsetNumber < limit; offsetNumber++)
> {
> - itemId = &phdr->pd_linp[offsetNumber - 1];
> + itemId = PageGetItemId(phdr, offsetNumber);
> if ((((*itemId).lp_flags & LP_USED) == 0) &&
> ((*itemId).lp_len == 0))
> break;
> @@ -150,7 +149,7 @@
> * alignedSize > pd_upper.
> */
> if (offsetNumber > limit)
> - lower = (char *) (&phdr->pd_linp[offsetNumber]) - (char *) page;
> + lower = (char *) PageGetItemId(phdr, offsetNumber + 1) - (char *) page;
> else if (offsetNumber == limit || needshuffle)
> lower = phdr->pd_lower + sizeof(ItemIdData);
> else
> @@ -175,13 +174,13 @@
> ItemId fromitemId,
> toitemId;
>
> - fromitemId = &phdr->pd_linp[i - 1];
> - toitemId = &phdr->pd_linp[i];
> + fromitemId = PageGetItemId(phdr, i);
> + toitemId = PageGetItemId(phdr, i + 1);
> *toitemId = *fromitemId;
> }
> }
>
> - itemId = &phdr->pd_linp[offsetNumber - 1];
> + itemId = PageGetItemId(phdr, offsetNumber);
> (*itemId).lp_off = upper;
> (*itemId).lp_len = size;
> (*itemId).lp_flags = flags;
> @@ -214,12 +213,12 @@
> memcpy(temp, page, pageSize);
>
> /* clear out the middle */
> - size = (pageSize - sizeof(PageHeaderData)) + sizeof(ItemIdData);
> + size = pageSize - SizeOfPageHeaderData;
> size -= MAXALIGN(specialSize);
> - MemSet((char *) &(thdr->pd_linp[0]), 0, size);
> + MemSet(PageGetContents(thdr), 0, size);
>
> /* set high, low water marks */
> - thdr->pd_lower = sizeof(PageHeaderData) - sizeof(ItemIdData);
> + thdr->pd_lower = SizeOfPageHeaderData;
> thdr->pd_upper = pageSize - MAXALIGN(specialSize);
>
> return temp;
> @@ -291,7 +290,7 @@
> * pointers, lengths, etc could cause us to clobber adjacent disk
> * buffers, spreading the data loss further. So, check everything.
> */
> - if (pd_lower < (sizeof(PageHeaderData) - sizeof(ItemIdData)) ||
> + if (pd_lower < SizeOfPageHeaderData ||
> pd_lower > pd_upper ||
> pd_upper > pd_special ||
> pd_special > BLCKSZ ||
> @@ -303,7 +302,7 @@
> nused = 0;
> for (i = 0; i < nline; i++)
> {
> - lp = ((PageHeader) page)->pd_linp + i;
> + lp = PageGetItemId(page, i + 1);
> if ((*lp).lp_flags & LP_DELETE) /* marked for deletion */
> (*lp).lp_flags &= ~(LP_USED | LP_DELETE);
> if ((*lp).lp_flags & LP_USED)
> @@ -317,7 +316,7 @@
> /* Page is completely empty, so just reset it quickly */
> for (i = 0; i < nline; i++)
> {
> - lp = ((PageHeader) page)->pd_linp + i;
> + lp = PageGetItemId(page, i + 1);
> (*lp).lp_len = 0; /* indicate unused & deallocated */
> }
> ((PageHeader) page)->pd_upper = pd_special;
> @@ -331,7 +330,7 @@
> totallen = 0;
> for (i = 0; i < nline; i++)
> {
> - lp = ((PageHeader) page)->pd_linp + i;
> + lp = PageGetItemId(page, i + 1);
> if ((*lp).lp_flags & LP_USED)
> {
> itemidptr->offsetindex = i;
> @@ -363,7 +362,7 @@
>
> for (i = 0, itemidptr = itemidbase; i < nused; i++, itemidptr++)
> {
> - lp = ((PageHeader) page)->pd_linp + itemidptr->offsetindex;
> + lp = PageGetItemId(page, itemidptr->offsetindex + 1);
> upper -= itemidptr->alignedlen;
> memmove((char *) page + upper,
> (char *) page + itemidptr->itemoff,
> @@ -426,7 +425,7 @@
> /*
> * As with PageRepairFragmentation, paranoia seems justified.
> */
> - if (phdr->pd_lower < (sizeof(PageHeaderData) - sizeof(ItemIdData)) ||
> + if (phdr->pd_lower < SizeOfPageHeaderData ||
> phdr->pd_lower > phdr->pd_upper ||
> phdr->pd_upper > phdr->pd_special ||
> phdr->pd_special > BLCKSZ)
> @@ -452,6 +451,8 @@
> /*
> * First, we want to get rid of the pd_linp entry for the index tuple.
> * We copy all subsequent linp's back one slot in the array.
> + * We don't use PageGetItemId, because we are manipulating the _array_,
> + * not individual linp's.
> */
> nbytes = phdr->pd_lower -
> ((char *) &phdr->pd_linp[offidx + 1] - (char *) phdr);
> @@ -490,8 +491,8 @@
> nline--; /* there's one less than when we started */
> for (i = nline; --i >= 0; )
> {
> - if (phdr->pd_linp[i].lp_off <= offset)
> - phdr->pd_linp[i].lp_off += size;
> + if (PageGetItemId(phdr, i + 1)->lp_off <= offset)
> + PageGetItemId(phdr, i + 1)->lp_off += size;
> }
> }
> }
> diff -ru ../base/src/include/access/nbtree.h src/include/access/nbtree.h
> --- ../base/src/include/access/nbtree.h 2002-05-27 16:29:39.000000000 +0200
> +++ src/include/access/nbtree.h 2002-06-20 12:11:01.000000000 +0200
> @@ -65,7 +65,7 @@
> } BTMetaPageData;
>
> #define BTPageGetMeta(p) \
> - ((BTMetaPageData *) &((PageHeader) p)->pd_linp[0])
> + ((BTMetaPageData *) PageGetContents(p))
>
> #define BTREE_METAPAGE 0 /* first page is meta */
> #define BTREE_MAGIC 0x053162 /* magic number of btree pages */
> @@ -77,6 +77,14 @@
> #define BTREE_VERSION 1
>
> /*
> + * We actually need to be able to fit three items on every page,
> + * so restrict any one item to 1/3 the per-page available space.
> + */
> +#define BTMaxItemSize(page) \
> + ((PageGetPageSize(page) - \
> + sizeof(PageHeaderData) - \
> + MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData))
> +/*
> * BTScanOpaqueData is used to remember which buffers we're currently
> * examining in the scan. We keep these buffers pinned (but not locked,
> * see nbtree.c) and recorded in the opaque entry of the scan to avoid
> diff -ru ../base/src/include/storage/bufpage.h src/include/storage/bufpage.h
> --- ../base/src/include/storage/bufpage.h 2002-01-16 00:14:17.000000000 +0100
> +++ src/include/storage/bufpage.h 2002-06-20 12:22:21.000000000 +0200
> @@ -158,12 +158,16 @@
> )
>
> /*
> + * line pointer does not count as part of header
> + */
> +#define SizeOfPageHeaderData (offsetof(PageHeaderData, pd_linp[0]))
> +
> +/*
> * PageIsEmpty
> * returns true iff no itemid has been allocated on the page
> */
> #define PageIsEmpty(page) \
> - (((PageHeader) (page))->pd_lower <= \
> - (sizeof(PageHeaderData) - sizeof(ItemIdData)))
> + (((PageHeader) (page))->pd_lower <= SizeOfPageHeaderData)
>
> /*
> * PageIsNew
> @@ -179,6 +183,13 @@
> #define PageGetItemId(page, offsetNumber) \
> ((ItemId) (&((PageHeader) (page))->pd_linp[(offsetNumber) - 1]))
>
> +/*
> + * PageGetContents
> + * To be used in case the page does not contain item pointers.
> + */
> +#define PageGetContents(page) \
> + ((char *) (&((PageHeader) (page))->pd_linp[0]))
> +
> /* ----------------
> * macros to access opaque space
> * ----------------
> @@ -290,8 +301,7 @@
> * That way we get -1 or so, not a huge positive number...
> */
> #define PageGetMaxOffsetNumber(page) \
> - (((int) (((PageHeader) (page))->pd_lower - \
> - (sizeof(PageHeaderData) - sizeof(ItemIdData)))) \
> + (((int) (((PageHeader) (page))->pd_lower - SizeOfPageHeaderData)) \
> / ((int) sizeof(ItemIdData)))
>
> #define PageGetLSN(page) \
>
>
>

--
Bruce Momjian | http://candle.pha.pa.us
pgman(at)candle(dot)pha(dot)pa(dot)us | (610) 853-3000
+ If your life is a hard drive, | 830 Blythe Avenue
+ Christ can be your backup. | Drexel Hill, Pennsylvania 19026

In response to

  • Page access at 2002-06-20 14:12:39 from Manfred Koizar
