From 9ada415253bf5c36c48145cdda2548f25588d59b Mon Sep 17 00:00:00 2001 From: David Rowley Date: Tue, 31 Dec 2024 09:19:24 +1300 Subject: [PATCH v9 3/5] Optimize tuple deformation This commit includes various optimizations to improve the performance of tuple deformation. We now precalculate CompactAttribute's attcacheoff, which allows us to remove the code from the deform routines which was setting the attcacheoff. Setting the attcacheoff is handled by TupleDescFinalize(), which must be called before the TupleDesc is used for anything. Having this TupleDescFinalize() function means we can store the first attribute in the TupleDesc which does not have an offset cached. That allows us to add a dedicated deforming loop to deform all attributes up to the final one with an attcacheoff set, or up to the first NULL attribute, whichever comes first. We also record the maximum attribute number which is guaranteed to exist in the tuple, that is, has a NOT NULL constraint and isn't an atthasmissing attribute. When deforming only attributes prior to the guaranteed attnum, we've no need to access the tuple's natt count. As an additional optimization, we only count fixed-width columns when calculating the maximum guaranteed column as this eliminates the need to emit code to fetch byref types in the deformation loop for guaranteed attributes. Some locations in the code deform tuples that have yet to go through NOT NULL constraint validation. We're unable to perform the guaranteed attribute optimization when that's the case. The optimization is opt-in via the TupleTableSlot using the TTS_FLAG_OBEYS_NOT_NULL_CONSTRAINTS flag. This commit also adds a more efficient way of populating the isnull array by using a bit-wise trick which performs multiplication on the inverse of the tuple's bitmap byte and masking out all but the lower bit of each boolean's byte. This results in much more optimal code when compared to determining the NULLness via att_isnull(). 
8 isnull elements are processed at once using this method, which means we need to round the tts_isnull array size up to the next 8 bytes. The palloc code does this anyway, but the round-up needed to be formalized so as not to overwrite the sentinel byte in debug builds. --- src/backend/access/common/heaptuple.c | 362 ++++++++---------- src/backend/access/common/indextuple.c | 371 ++++++++----------- src/backend/access/common/tupdesc.c | 53 +++ src/backend/access/spgist/spgutils.c | 3 - src/backend/executor/execTuples.c | 360 +++++++++--------- src/backend/executor/nodeSeqscan.c | 2 + src/backend/jit/llvm/llvmjit_deform.c | 6 - src/backend/utils/cache/relcache.c | 12 - src/include/access/tupdesc.h | 19 +- src/include/access/tupmacs.h | 191 +++++++++- src/include/executor/tuptable.h | 16 +- src/test/modules/deform_bench/deform_bench.c | 1 + 12 files changed, 773 insertions(+), 623 deletions(-) diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c index 11bec20e82e..94d11bdc4fd 100644 --- a/src/backend/access/common/heaptuple.c +++ b/src/backend/access/common/heaptuple.c @@ -498,19 +498,7 @@ heap_attisnull(HeapTuple tup, int attnum, TupleDesc tupleDesc) * nocachegetattr * * This only gets called from fastgetattr(), in cases where we - * can't use a cacheoffset and the value is not null. - * - * This caches attribute offsets in the attribute descriptor. - * - * An alternative way to speed things up would be to cache offsets - * with the tuple, but that seems more difficult unless you take - * the storage hit of actually putting those offsets into the - * tuple you send to disk. Yuck. - * - * This scheme will be slightly slower than that, but should - * perform well for queries which hit large #'s of tuples. After - * you cache the offsets once, examining all the other tuples using - * the same attribute descriptor will go much quicker. -cim 5/4/91 + * can't use the attcacheoff and the value is not null. 
* * NOTE: if you need to change this code, see also heap_deform_tuple. * Also see nocache_index_getattr, which is the same code for index @@ -522,194 +510,123 @@ nocachegetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc) { + CompactAttribute *cattr; HeapTupleHeader td = tup->t_data; char *tp; /* ptr to data part of tuple */ bits8 *bp = td->t_bits; /* ptr to null bitmap in tuple */ - bool slow = false; /* do we have to walk attrs? */ int off; /* current offset within data */ + int startAttr; + int firstNullAttr; + int i; + bool hasnulls = HeapTupleHasNulls(tup); - /* ---------------- - * Three cases: - * - * 1: No nulls and no variable-width attributes. - * 2: Has a null or a var-width AFTER att. - * 3: Has nulls or var-widths BEFORE att. - * ---------------- - */ + /* Did someone forget to call TupleDescFinalize()? */ + Assert(tupleDesc->firstNonCachedOffsetAttr >= 0); attnum--; - if (!HeapTupleNoNulls(tup)) - { - /* - * there's a null somewhere in the tuple - * - * check to see if any preceding bits are null... - */ - int byte = attnum >> 3; - int finalbit = attnum & 0x07; - - /* check for nulls "before" final bit of last byte */ - if ((~bp[byte]) & ((1 << finalbit) - 1)) - slow = true; - else - { - /* check for nulls in any "earlier" bytes */ - int i; + /* + * To reduce the number of attributes we need to look at, we start at the + * highest attribute that we can which has a cached offset. Since the + * attcacheoff for an attribute is only valid if there are no NULLs in + * prior attribute, we must look for NULLs to determine the start attr. 
+ */ + if (hasnulls) + firstNullAttr = first_null_attr(bp, attnum); + else + firstNullAttr = attnum; - for (i = 0; i < byte; i++) - { - if (bp[i] != 0xFF) - { - slow = true; - break; - } - } - } + if (tupleDesc->firstNonCachedOffsetAttr > 0) + { + startAttr = Min(tupleDesc->firstNonCachedOffsetAttr - 1, firstNullAttr); + off = TupleDescCompactAttr(tupleDesc, startAttr)->attcacheoff; + } + else + { + startAttr = 0; + off = 0; } tp = (char *) td + td->t_hoff; - if (!slow) + /* + * Calculate 'off' up to the first NULL attr. We use two cheaper loops + * when the tuple has no variable-width columns. When variable-width + * columns exist we use att_addlength_pointer() to move the offset beyond + * the current attribute. + */ + if (!HeapTupleHasVarWidth(tup)) { - CompactAttribute *att; - - /* - * If we get here, there are no nulls up to and including the target - * attribute. If we have a cached offset, we can use it. - */ - att = TupleDescCompactAttr(tupleDesc, attnum); - if (att->attcacheoff >= 0) - return fetchatt(att, tp + att->attcacheoff); - - /* - * Otherwise, check for non-fixed-length attrs up to and including - * target. If there aren't any, it's safe to cheaply initialize the - * cached offsets for these attrs. - */ - if (HeapTupleHasVarWidth(tup)) + for (i = startAttr; i < firstNullAttr; i++) { - int j; + cattr = TupleDescCompactAttr(tupleDesc, i); - for (j = 0; j <= attnum; j++) - { - if (TupleDescCompactAttr(tupleDesc, j)->attlen <= 0) - { - slow = true; - break; - } - } + off = att_nominal_alignby(off, cattr->attalignby); + off += cattr->attlen; } - } - - if (!slow) - { - int natts = tupleDesc->natts; - int j = 1; - - /* - * If we get here, we have a tuple with no nulls or var-widths up to - * and including the target attribute, so we can use the cached offset - * ... only we don't have it yet, or we'd not have got here. 
Since - * it's cheap to compute offsets for fixed-width columns, we take the - * opportunity to initialize the cached offsets for *all* the leading - * fixed-width columns, in hope of avoiding future visits to this - * routine. - */ - TupleDescCompactAttr(tupleDesc, 0)->attcacheoff = 0; - - /* we might have set some offsets in the slow path previously */ - while (j < natts && TupleDescCompactAttr(tupleDesc, j)->attcacheoff > 0) - j++; - off = TupleDescCompactAttr(tupleDesc, j - 1)->attcacheoff + - TupleDescCompactAttr(tupleDesc, j - 1)->attlen; - - for (; j < natts; j++) + for (; i < attnum; i++) { - CompactAttribute *att = TupleDescCompactAttr(tupleDesc, j); - - if (att->attlen <= 0) - break; - - off = att_nominal_alignby(off, att->attalignby); + if (att_isnull(i, bp)) + continue; - att->attcacheoff = off; + cattr = TupleDescCompactAttr(tupleDesc, i); - off += att->attlen; + off = att_pointer_alignby(off, cattr->attalignby, cattr->attlen, + tp + off); + off += cattr->attlen; } - - Assert(j > attnum); - - off = TupleDescCompactAttr(tupleDesc, attnum)->attcacheoff; } else { - bool usecache = true; - int i; - - /* - * Now we know that we have to walk the tuple CAREFULLY. But we still - * might be able to cache some offsets for next time. - * - * Note - This loop is a little tricky. For each non-null attribute, - * we have to first account for alignment padding before the attr, - * then advance over the attr based on its length. Nulls have no - * storage and no alignment padding either. We can use/set - * attcacheoff until we reach either a null or a var-width attribute. 
- */ - off = 0; - for (i = 0;; i++) /* loop exit is at "break" */ + for (i = startAttr; i < firstNullAttr; i++) { - CompactAttribute *att = TupleDescCompactAttr(tupleDesc, i); + int attlen; - if (HeapTupleHasNulls(tup) && att_isnull(i, bp)) - { - usecache = false; - continue; /* this cannot be the target att */ - } + cattr = TupleDescCompactAttr(tupleDesc, i); + attlen = cattr->attlen; - /* If we know the next offset, we can skip the rest */ - if (usecache && att->attcacheoff >= 0) - off = att->attcacheoff; - else if (att->attlen == -1) - { - /* - * We can only cache the offset for a varlena attribute if the - * offset is already suitably aligned, so that there would be - * no pad bytes in any case: then the offset will be valid for - * either an aligned or unaligned value. - */ - if (usecache && - off == att_nominal_alignby(off, att->attalignby)) - att->attcacheoff = off; - else - { - off = att_pointer_alignby(off, att->attalignby, -1, - tp + off); - usecache = false; - } - } - else - { - /* not varlena, so safe to use att_nominal_alignby */ - off = att_nominal_alignby(off, att->attalignby); + /* + * cstrings don't exist in heap tuples. Use pg_assume to instruct + * the compiler not to emit the cstring related code in + * att_addlength_pointer(). 
+ */ + pg_assume(attlen > 0 || attlen == -1); - if (usecache) - att->attcacheoff = off; - } + off = att_pointer_alignby(off, + cattr->attalignby, + attlen, + tp + off); + off = att_addlength_pointer(off, attlen, tp + off); + } - if (i == attnum) - break; + for (; i < attnum; i++) + { + int attlen; - off = att_addlength_pointer(off, att->attlen, tp + off); + if (att_isnull(i, bp)) + continue; - if (usecache && att->attlen <= 0) - usecache = false; + cattr = TupleDescCompactAttr(tupleDesc, i); + attlen = cattr->attlen; + + /* As above, heaptuples have no cstrings */ + pg_assume(attlen > 0 || attlen == -1); + + off = att_pointer_alignby(off, cattr->attalignby, attlen, + tp + off); + off = att_addlength_pointer(off, attlen, tp + off); } + } - return fetchatt(TupleDescCompactAttr(tupleDesc, attnum), tp + off); + cattr = TupleDescCompactAttr(tupleDesc, attnum); + off = att_pointer_alignby(off, + cattr->attalignby, + cattr->attlen, + tp + off); + + return fetchatt(cattr, tp + off); } /* ---------------- @@ -1347,6 +1264,7 @@ heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull) { HeapTupleHeader tup = tuple->t_data; + CompactAttribute *cattr; bool hasnulls = HeapTupleHasNulls(tuple); int tdesc_natts = tupleDesc->natts; int natts; /* number of atts to extract */ @@ -1354,70 +1272,98 @@ heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, char *tp; /* ptr to tuple data */ uint32 off; /* offset in tuple data */ bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */ - bool slow = false; /* can we use/set attcacheoff? */ + int firstNonCacheOffsetAttr; + int firstNullAttr; natts = HeapTupleHeaderGetNatts(tup); + /* Did someone forget to call TupleDescFinalize()? */ + Assert(tupleDesc->firstNonCachedOffsetAttr >= 0); + /* * In inheritance situations, it is possible that the given tuple actually * has more fields than the caller is expecting. Don't run off the end of * the caller's arrays. 
*/ natts = Min(natts, tdesc_natts); + firstNonCacheOffsetAttr = Min(tupleDesc->firstNonCachedOffsetAttr, natts); + + if (hasnulls) + { + firstNullAttr = first_null_attr(bp, natts); + + /* + * XXX: it'd be nice to use populate_isnull_array() here, but that + * requires that the isnull array's size is rounded up to the next 8 + * elements. Doing that would require adjusting many location that + * allocate the array. + */ + firstNonCacheOffsetAttr = Min(firstNonCacheOffsetAttr, firstNullAttr); + } + else + firstNullAttr = natts; tp = (char *) tup + tup->t_hoff; + attnum = 0; - off = 0; + if (firstNonCacheOffsetAttr > 0) + { +#ifdef USE_ASSERT_CHECKING + /* In Assert enabled builds, verify attcacheoff is correct */ + int offcheck = 0; +#endif + do + { + isnull[attnum] = false; + cattr = TupleDescCompactAttr(tupleDesc, attnum); + off = cattr->attcacheoff; - for (attnum = 0; attnum < natts; attnum++) +#ifdef USE_ASSERT_CHECKING + offcheck = att_nominal_alignby(offcheck, cattr->attalignby); + Assert(offcheck == cattr->attcacheoff); + offcheck += cattr->attlen; +#endif + + values[attnum] = fetch_att_noerr(tp + off, + cattr->attbyval, + cattr->attlen); + } while (++attnum < firstNonCacheOffsetAttr); + off += cattr->attlen; + } + else + off = 0; + + for (; attnum < firstNullAttr; attnum++) { - CompactAttribute *thisatt = TupleDescCompactAttr(tupleDesc, attnum); + isnull[attnum] = false; + cattr = TupleDescCompactAttr(tupleDesc, attnum); + values[attnum] = align_fetch_then_add(tp, + &off, + cattr->attbyval, + cattr->attlen, + cattr->attalignby); + } + + for (; attnum < natts; attnum++) + { + Assert(hasnulls); - if (hasnulls && att_isnull(attnum, bp)) + if (att_isnull(attnum, bp)) { values[attnum] = (Datum) 0; isnull[attnum] = true; - slow = true; /* can't use attcacheoff anymore */ continue; } isnull[attnum] = false; - - if (!slow && thisatt->attcacheoff >= 0) - off = thisatt->attcacheoff; - else if (thisatt->attlen == -1) - { - /* - * We can only cache the offset for a 
varlena attribute if the - * offset is already suitably aligned, so that there would be no - * pad bytes in any case: then the offset will be valid for either - * an aligned or unaligned value. - */ - if (!slow && - off == att_nominal_alignby(off, thisatt->attalignby)) - thisatt->attcacheoff = off; - else - { - off = att_pointer_alignby(off, thisatt->attalignby, -1, - tp + off); - slow = true; - } - } - else - { - /* not varlena, so safe to use att_nominal_alignby */ - off = att_nominal_alignby(off, thisatt->attalignby); - - if (!slow) - thisatt->attcacheoff = off; - } - - values[attnum] = fetchatt(thisatt, tp + off); - - off = att_addlength_pointer(off, thisatt->attlen, tp + off); - - if (thisatt->attlen <= 0) - slow = true; /* can't use attcacheoff anymore */ + cattr = TupleDescCompactAttr(tupleDesc, attnum); + + /* align 'off', fetch the datum, and increment off beyond the datum */ + values[attnum] = align_fetch_then_add(tp, + &off, + cattr->attbyval, + cattr->attlen, + cattr->attalignby); } /* diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c index d6350201e01..92282039671 100644 --- a/src/backend/access/common/indextuple.c +++ b/src/backend/access/common/indextuple.c @@ -223,18 +223,6 @@ index_form_tuple_context(TupleDesc tupleDescriptor, * * This gets called from index_getattr() macro, and only in cases * where we can't use cacheoffset and the value is not null. - * - * This caches attribute offsets in the attribute descriptor. - * - * An alternative way to speed things up would be to cache offsets - * with the tuple, but that seems more difficult unless you take - * the storage hit of actually putting those offsets into the - * tuple you send to disk. Yuck. - * - * This scheme will be slightly slower than that, but should - * perform well for queries which hit large #'s of tuples. After - * you cache the offsets once, examining all the other tuples using - * the same attribute descriptor will go much quicker. 
-cim 5/4/91 * ---------------- */ Datum @@ -242,205 +230,126 @@ nocache_index_getattr(IndexTuple tup, int attnum, TupleDesc tupleDesc) { + CompactAttribute *cattr; char *tp; /* ptr to data part of tuple */ bits8 *bp = NULL; /* ptr to null bitmap in tuple */ - bool slow = false; /* do we have to walk attrs? */ int data_off; /* tuple data offset */ int off; /* current offset within data */ + int startAttr; + int firstNullAttr; + bool hasnulls = IndexTupleHasNulls(tup); + int i; - /* ---------------- - * Three cases: - * - * 1: No nulls and no variable-width attributes. - * 2: Has a null or a var-width AFTER att. - * 3: Has nulls or var-widths BEFORE att. - * ---------------- - */ - - data_off = IndexInfoFindDataOffset(tup->t_info); + /* Did someone forget to call TupleDescFinalize()? */ + Assert(tupleDesc->firstNonCachedOffsetAttr >= 0); attnum--; - if (IndexTupleHasNulls(tup)) - { - /* - * there's a null somewhere in the tuple - * - * check to see if desired att is null - */ + data_off = IndexInfoFindDataOffset(tup->t_info); + tp = (char *) tup + data_off; - /* XXX "knows" t_bits are just after fixed tuple header! */ + /* + * To reduce the number of attributes we need to look at, we start at the + * highest attribute that we can which has a cached offset. Since the + * attcacheoff for an attribute is only valid if there are no NULLs in + * prior attribute, we must look for NULLs to determine the start attr. + */ + if (hasnulls) + { bp = (bits8 *) ((char *) tup + sizeof(IndexTupleData)); - - /* - * Now check to see if any preceding bits are null... 
- */ - { - int byte = attnum >> 3; - int finalbit = attnum & 0x07; - - /* check for nulls "before" final bit of last byte */ - if ((~bp[byte]) & ((1 << finalbit) - 1)) - slow = true; - else - { - /* check for nulls in any "earlier" bytes */ - int i; - - for (i = 0; i < byte; i++) - { - if (bp[i] != 0xFF) - { - slow = true; - break; - } - } - } - } + firstNullAttr = first_null_attr(bp, attnum); } + else + firstNullAttr = attnum; - tp = (char *) tup + data_off; - - if (!slow) + if (tupleDesc->firstNonCachedOffsetAttr > 0) { - CompactAttribute *att; - - /* - * If we get here, there are no nulls up to and including the target - * attribute. If we have a cached offset, we can use it. - */ - att = TupleDescCompactAttr(tupleDesc, attnum); - if (att->attcacheoff >= 0) - return fetchatt(att, tp + att->attcacheoff); - - /* - * Otherwise, check for non-fixed-length attrs up to and including - * target. If there aren't any, it's safe to cheaply initialize the - * cached offsets for these attrs. - */ - if (IndexTupleHasVarwidths(tup)) - { - int j; - - for (j = 0; j <= attnum; j++) - { - if (TupleDescCompactAttr(tupleDesc, j)->attlen <= 0) - { - slow = true; - break; - } - } - } + startAttr = Min(tupleDesc->firstNonCachedOffsetAttr - 1, firstNullAttr); + off = TupleDescCompactAttr(tupleDesc, startAttr)->attcacheoff; } - - if (!slow) + else { - int natts = tupleDesc->natts; - int j = 1; - - /* - * If we get here, we have a tuple with no nulls or var-widths up to - * and including the target attribute, so we can use the cached offset - * ... only we don't have it yet, or we'd not have got here. Since - * it's cheap to compute offsets for fixed-width columns, we take the - * opportunity to initialize the cached offsets for *all* the leading - * fixed-width columns, in hope of avoiding future visits to this - * routine. 
- */ - TupleDescCompactAttr(tupleDesc, 0)->attcacheoff = 0; + startAttr = 0; + off = 0; + } - /* we might have set some offsets in the slow path previously */ - while (j < natts && TupleDescCompactAttr(tupleDesc, j)->attcacheoff > 0) - j++; + /* + * Calculate 'off' up to the first NULL attr. We use two cheaper loops + * when the tuple has no variable-width columns. When variable-width + * columns exist we use att_addlength_pointer() to move the offset beyond + * the current attribute. + */ + if (IndexTupleHasVarwidths(tup)) + { + /* Calculate the offset up until the first NULL */ + for (i = startAttr; i < firstNullAttr; i++) + { + cattr = TupleDescCompactAttr(tupleDesc, i); - off = TupleDescCompactAttr(tupleDesc, j - 1)->attcacheoff + - TupleDescCompactAttr(tupleDesc, j - 1)->attlen; + off = att_pointer_alignby(off, + cattr->attalignby, + cattr->attlen, + tp + off); + off = att_addlength_pointer(off, cattr->attlen, tp + off); + } - for (; j < natts; j++) + /* Calculate the offset for any remaining columns. */ + for (; i < attnum; i++) { - CompactAttribute *att = TupleDescCompactAttr(tupleDesc, j); + Assert(hasnulls); - if (att->attlen <= 0) - break; + if (att_isnull(i, bp)) + continue; - off = att_nominal_alignby(off, att->attalignby); + cattr = TupleDescCompactAttr(tupleDesc, i); - att->attcacheoff = off; - - off += att->attlen; + off = att_pointer_alignby(off, + cattr->attalignby, + cattr->attlen, + tp + off); + off = att_addlength_pointer(off, cattr->attlen, tp + off); } - - Assert(j > attnum); - - off = TupleDescCompactAttr(tupleDesc, attnum)->attcacheoff; } else { - bool usecache = true; - int i; + /* Handle tuples with only fixed-width attributes */ - /* - * Now we know that we have to walk the tuple CAREFULLY. But we still - * might be able to cache some offsets for next time. - * - * Note - This loop is a little tricky. 
For each non-null attribute, - * we have to first account for alignment padding before the attr, - * then advance over the attr based on its length. Nulls have no - * storage and no alignment padding either. We can use/set - * attcacheoff until we reach either a null or a var-width attribute. - */ - off = 0; - for (i = 0;; i++) /* loop exit is at "break" */ + /* Calculate the offset up until the first NULL */ + for (i = startAttr; i < firstNullAttr; i++) { - CompactAttribute *att = TupleDescCompactAttr(tupleDesc, i); - - if (IndexTupleHasNulls(tup) && att_isnull(i, bp)) - { - usecache = false; - continue; /* this cannot be the target att */ - } - - /* If we know the next offset, we can skip the rest */ - if (usecache && att->attcacheoff >= 0) - off = att->attcacheoff; - else if (att->attlen == -1) - { - /* - * We can only cache the offset for a varlena attribute if the - * offset is already suitably aligned, so that there would be - * no pad bytes in any case: then the offset will be valid for - * either an aligned or unaligned value. - */ - if (usecache && - off == att_nominal_alignby(off, att->attalignby)) - att->attcacheoff = off; - else - { - off = att_pointer_alignby(off, att->attalignby, -1, - tp + off); - usecache = false; - } - } - else - { - /* not varlena, so safe to use att_nominal_alignby */ - off = att_nominal_alignby(off, att->attalignby); + cattr = TupleDescCompactAttr(tupleDesc, i); + + Assert(cattr->attlen > 0); + off = att_pointer_alignby(off, + cattr->attalignby, + cattr->attlen, + tp + off); + off += cattr->attlen; + } - if (usecache) - att->attcacheoff = off; - } + /* Calculate the offset for any remaining columns. 
*/ + for (; i < attnum; i++) + { + Assert(hasnulls); - if (i == attnum) - break; + if (att_isnull(i, bp)) + continue; - off = att_addlength_pointer(off, att->attlen, tp + off); + cattr = TupleDescCompactAttr(tupleDesc, i); - if (usecache && att->attlen <= 0) - usecache = false; + Assert(cattr->attlen > 0); + off = att_pointer_alignby(off, + cattr->attalignby, + cattr->attlen, + tp + off); + off += cattr->attlen; } } - return fetchatt(TupleDescCompactAttr(tupleDesc, attnum), tp + off); + cattr = TupleDescCompactAttr(tupleDesc, attnum); + off = att_pointer_alignby(off, cattr->attalignby, + cattr->attlen, tp + off); + return fetchatt(cattr, tp + off); } /* @@ -480,63 +389,87 @@ index_deform_tuple_internal(TupleDesc tupleDescriptor, Datum *values, bool *isnull, char *tp, bits8 *bp, int hasnulls) { + CompactAttribute *cattr; int natts = tupleDescriptor->natts; /* number of atts to extract */ - int attnum; - int off = 0; /* offset in tuple data */ - bool slow = false; /* can we use/set attcacheoff? */ + int attnum = 0; + uint32 off = 0; /* offset in tuple data */ + int firstNonCacheOffsetAttr; + int firstNullAttr; /* Assert to protect callers who allocate fixed-size arrays */ Assert(natts <= INDEX_MAX_KEYS); - for (attnum = 0; attnum < natts; attnum++) + /* Did someone forget to call TupleDescFinalize()? 
*/ + Assert(tupleDescriptor->firstNonCachedOffsetAttr >= 0); + + firstNonCacheOffsetAttr = Min(tupleDescriptor->firstNonCachedOffsetAttr, natts); + + if (hasnulls) + { + firstNullAttr = first_null_attr(bp, natts); + firstNonCacheOffsetAttr = Min(firstNonCacheOffsetAttr, firstNullAttr); + } + else + firstNullAttr = natts; + + if (firstNonCacheOffsetAttr > 0) + { +#ifdef USE_ASSERT_CHECKING + /* In Assert enabled builds, verify attcacheoff is correct */ + off = 0; +#endif + + do + { + isnull[attnum] = false; + cattr = TupleDescCompactAttr(tupleDescriptor, attnum); + +#ifdef USE_ASSERT_CHECKING + off = att_nominal_alignby(off, cattr->attalignby); + Assert(off == cattr->attcacheoff); + off += cattr->attlen; +#endif + + values[attnum] = fetch_att_noerr(tp + cattr->attcacheoff, cattr->attbyval, + cattr->attlen); + } while (++attnum < firstNonCacheOffsetAttr); + + off = cattr->attcacheoff + cattr->attlen; + } + + for (; attnum < firstNullAttr; attnum++) + { + isnull[attnum] = false; + cattr = TupleDescCompactAttr(tupleDescriptor, attnum); + + /* align 'off', fetch the datum, and increment off beyond the datum */ + values[attnum] = align_fetch_then_add(tp, + &off, + cattr->attbyval, + cattr->attlen, + cattr->attalignby); + } + + for (; attnum < natts; attnum++) { - CompactAttribute *thisatt = TupleDescCompactAttr(tupleDescriptor, attnum); + Assert(hasnulls); - if (hasnulls && att_isnull(attnum, bp)) + if (att_isnull(attnum, bp)) { values[attnum] = (Datum) 0; isnull[attnum] = true; - slow = true; /* can't use attcacheoff anymore */ continue; } isnull[attnum] = false; - - if (!slow && thisatt->attcacheoff >= 0) - off = thisatt->attcacheoff; - else if (thisatt->attlen == -1) - { - /* - * We can only cache the offset for a varlena attribute if the - * offset is already suitably aligned, so that there would be no - * pad bytes in any case: then the offset will be valid for either - * an aligned or unaligned value. 
- */ - if (!slow && - off == att_nominal_alignby(off, thisatt->attalignby)) - thisatt->attcacheoff = off; - else - { - off = att_pointer_alignby(off, thisatt->attalignby, -1, - tp + off); - slow = true; - } - } - else - { - /* not varlena, so safe to use att_nominal_alignby */ - off = att_nominal_alignby(off, thisatt->attalignby); - - if (!slow) - thisatt->attcacheoff = off; - } - - values[attnum] = fetchatt(thisatt, tp + off); - - off = att_addlength_pointer(off, thisatt->attlen, tp + off); - - if (thisatt->attlen <= 0) - slow = true; /* can't use attcacheoff anymore */ + cattr = TupleDescCompactAttr(tupleDescriptor, attnum); + + /* align 'off', fetch the datum, and increment off beyond the datum */ + values[attnum] = align_fetch_then_add(tp, + &off, + cattr->attbyval, + cattr->attlen, + cattr->attalignby); } } diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c index 2137385a833..028fcd53734 100644 --- a/src/backend/access/common/tupdesc.c +++ b/src/backend/access/common/tupdesc.c @@ -197,6 +197,10 @@ CreateTemplateTupleDesc(int natts) desc->tdtypmod = -1; desc->tdrefcount = -1; /* assume not reference-counted */ + /* This will be set to the correct value by TupleDescFinalize() */ + desc->firstNonCachedOffsetAttr = -1; + desc->firstNonGuaranteedAttr = -1; + return desc; } @@ -457,6 +461,9 @@ TupleDescCopy(TupleDesc dst, TupleDesc src) * descriptor to another. * * !!! Constraints and defaults are not copied !!! + * + * The caller must take care of calling TupleDescFinalize() on once all + * TupleDesc changes have been made. */ void TupleDescCopyEntry(TupleDesc dst, AttrNumber dstAttno, @@ -489,6 +496,52 @@ TupleDescCopyEntry(TupleDesc dst, AttrNumber dstAttno, populate_compact_attribute(dst, dstAttno - 1); } +/* + * TupleDescFinalize + * Finalize the given TupleDesc. This must be called after the + * attributes arrays have been populated or adjusted by any code. 
+ * + * Must be called after populate_compact_attribute() and before + * BlessTupleDesc(). + */ +void +TupleDescFinalize(TupleDesc tupdesc) +{ + int firstNonCachedOffsetAttr = 0; + int firstNonGuaranteedAttr = tupdesc->natts; + int off = 0; + + for (int i = 0; i < tupdesc->natts; i++) + { + CompactAttribute *cattr = TupleDescCompactAttr(tupdesc, i); + + if (firstNonGuaranteedAttr == tupdesc->natts && + (cattr->attnullability != ATTNULLABLE_VALID || !cattr->attbyval || + cattr->atthasmissing || cattr->attisdropped || cattr->attlen <= 0)) + firstNonGuaranteedAttr = i; + + if (cattr->attlen <= 0) + break; + + off = att_nominal_alignby(off, cattr->attalignby); + + /* + * attcacheoff is an int16, so don't try and cache any offsets larger + * than will fit in that type. + */ + if (off > PG_INT16_MAX) + break; + + cattr->attcacheoff = off; + + off += cattr->attlen; + firstNonCachedOffsetAttr = i + 1; + } + + tupdesc->firstNonCachedOffsetAttr = firstNonCachedOffsetAttr; + tupdesc->firstNonGuaranteedAttr = firstNonGuaranteedAttr; +} + /* * Free a TupleDesc including all substructure */ diff --git a/src/backend/access/spgist/spgutils.c b/src/backend/access/spgist/spgutils.c index b246e8127db..a4694bd8065 100644 --- a/src/backend/access/spgist/spgutils.c +++ b/src/backend/access/spgist/spgutils.c @@ -335,9 +335,6 @@ getSpGistTupleDesc(Relation index, SpGistTypeDesc *keyType) /* We shouldn't need to bother with making these valid: */ att->attcompression = InvalidCompressionMethod; att->attcollation = InvalidOid; - /* In case we changed typlen, we'd better reset following offsets */ - for (int i = spgFirstIncludeColumn; i < outTupDesc->natts; i++) - TupleDescCompactAttr(outTupDesc, i)->attcacheoff = -1; populate_compact_attribute(outTupDesc, spgKeyColumn); TupleDescFinalize(outTupDesc); diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c index e6ab51e6404..80faf29b797 100644 --- a/src/backend/executor/execTuples.c +++ 
b/src/backend/executor/execTuples.c @@ -993,218 +993,242 @@ tts_buffer_heap_store_tuple(TupleTableSlot *slot, HeapTuple tuple, } /* - * slot_deform_heap_tuple_internal - * An always inline helper function for use in slot_deform_heap_tuple to - * allow the compiler to emit specialized versions of this function for - * various combinations of "slow" and "hasnulls". For example, if a - * given tuple has no nulls, then we needn't check "hasnulls" for every - * attribute that we're deforming. The caller can just call this - * function with hasnulls set to constant-false and have the compiler - * remove the constant-false branches and emit more optimal code. - * - * Returns the next attnum to deform, which can be equal to natts when the - * function manages to deform all requested attributes. *offp is an input and - * output parameter which is the byte offset within the tuple to start deforming - * from which, on return, gets set to the offset where the next attribute - * should be deformed from. *slowp is set to true when subsequent deforming - * of this tuple must use a version of this function with "slow" passed as - * true. - * - * Callers cannot assume when we return "attnum" (i.e. all requested - * attributes have been deformed) that slow mode isn't required for any - * additional deforming as the final attribute may have caused a switch to - * slow mode. + * slot_deform_heap_tuple + * Given a TupleTableSlot, extract data from the slot's physical tuple + * into its Datum/isnull arrays. Data is extracted up through the + * natts'th column (caller must ensure this is a legal column number). + * + * This is essentially an incremental version of heap_deform_tuple: + * on each call we extract attributes up to the one needed, without + * re-computing information about previously extracted attributes. + * slot->tts_nvalid is the number of attributes already extracted. 
+ * + * This is marked as always inline, so the different offp for different types + * of slots gets optimized away. */ -static pg_attribute_always_inline int -slot_deform_heap_tuple_internal(TupleTableSlot *slot, HeapTuple tuple, - int attnum, int natts, bool slow, - bool hasnulls, uint32 *offp, bool *slowp) +static pg_attribute_always_inline void +slot_deform_heap_tuple(TupleTableSlot *slot, HeapTuple tuple, uint32 *offp, + int natts) { + CompactAttribute *cattr; TupleDesc tupleDesc = slot->tts_tupleDescriptor; - Datum *values = slot->tts_values; - bool *isnull = slot->tts_isnull; HeapTupleHeader tup = tuple->t_data; + int attnum; + int firstNonCacheOffsetAttr; + int firstNonGuaranteedAttr; + int firstNullAttr; + Datum *values; + bool *isnull; char *tp; /* ptr to tuple data */ - bits8 *bp = tup->t_bits; /* ptr to null bitmap in tuple */ - bool slownext = false; + uint32 off; /* offset in tuple data */ - tp = (char *) tup + tup->t_hoff; + /* Did someone forget to call TupleDescFinalize()? */ + Assert(tupleDesc->firstNonCachedOffsetAttr >= 0); - for (; attnum < natts; attnum++) + isnull = slot->tts_isnull; + + /* + * Some callers may form and deform tuples prior to NOT NULL constraints + * being checked. Here we'd like to optimize the case where we only need + * to fetch attributes before or up to the point where the attribute is + * guaranteed to exist in the tuple. We rely on the slot flag being set + * correctly to only enable this optimization when it's valid to do so. + * This optimization allows us to save fetching the number of attributes + * from the tuple and saves the additional cost of handling non-byval + * attrs. 
+ */ + if (TTS_OBEYS_NOT_NULL_CONSTRAINTS(slot)) + firstNonGuaranteedAttr = Min(natts, tupleDesc->firstNonGuaranteedAttr); + else + firstNonGuaranteedAttr = 0; + + firstNonCacheOffsetAttr = tupleDesc->firstNonCachedOffsetAttr; + + if (HeapTupleHasNulls(tuple)) { - CompactAttribute *thisatt = TupleDescCompactAttr(tupleDesc, attnum); + int tupnatts = HeapTupleHeaderGetNatts(tup); - if (hasnulls && att_isnull(attnum, bp)) + tp = (char *) tup + MAXALIGN(offsetof(HeapTupleHeaderData, t_bits) + + BITMAPLEN(tupnatts)); + + natts = Min(tupnatts, natts); + if (natts > firstNonGuaranteedAttr) { - values[attnum] = (Datum) 0; - isnull[attnum] = true; - if (!slow) - { - *slowp = true; - return attnum + 1; - } - else - continue; - } + bits8 *bp = tup->t_bits; - isnull[attnum] = false; + /* Find the first NULL attr */ + firstNullAttr = first_null_attr(bp, natts); - /* calculate the offset of this attribute */ - if (!slow && thisatt->attcacheoff >= 0) - *offp = thisatt->attcacheoff; - else if (thisatt->attlen == -1) - { /* - * We can only cache the offset for a varlena attribute if the - * offset is already suitably aligned, so that there would be no - * pad bytes in any case: then the offset will be valid for either - * an aligned or unaligned value. + * And populate the isnull array for all attributes being fetched + * from the tuple. 
*/ - if (!slow && *offp == att_nominal_alignby(*offp, thisatt->attalignby)) - thisatt->attcacheoff = *offp; - else - { - *offp = att_pointer_alignby(*offp, - thisatt->attalignby, - -1, - tp + *offp); + populate_isnull_array(bp, natts, isnull); - if (!slow) - slownext = true; - } + /* We can only use any cached offsets until the first NULL attr */ + firstNonCacheOffsetAttr = Min(firstNonCacheOffsetAttr, firstNullAttr); } else { - /* not varlena, so safe to use att_nominal_alignby */ - *offp = att_nominal_alignby(*offp, thisatt->attalignby); - - if (!slow) - thisatt->attcacheoff = *offp; + /* Otherwise all required columns are guaranteed to exist */ + firstNullAttr = natts; } + } + else + { + tp = (char *) tup + MAXALIGN(offsetof(HeapTupleHeaderData, t_bits)); + + /* + * We only need to look at the tuple's natts if we need more than the + * guaranteed number of columns + */ + if (natts > firstNonGuaranteedAttr) + natts = Min(HeapTupleHeaderGetNatts(tup), natts); + + /* All attrs can be fetched without checking for NULLs */ + firstNullAttr = natts; + } - values[attnum] = fetchatt(thisatt, tp + *offp); + attnum = slot->tts_nvalid; + values = slot->tts_values; + slot->tts_nvalid = natts; - *offp = att_addlength_pointer(*offp, thisatt->attlen, tp + *offp); + /* Ensure we calculated tp correctly */ + Assert(tp == (char *) tup + tup->t_hoff); - /* check if we need to switch to slow mode */ - if (!slow) + if (attnum < firstNonGuaranteedAttr) + { + do { + int attlen; + + isnull[attnum] = false; + cattr = TupleDescCompactAttr(tupleDesc, attnum); + attlen = cattr->attlen; + + /* We don't expect any non-byval types */ + pg_assume(attlen > 0); + /* - * We're unable to deform any further if the above code set - * 'slownext', or if this isn't a fixed-width attribute. + * Technically we could support non-byval fixed-width types, but + * not doing so allows us to pass true to fetch_att_noerr() which + * eliminates the !attbyval branch. 
*/ - if (slownext || thisatt->attlen <= 0) - { - *slowp = true; - return attnum + 1; - } - } - } + Assert(cattr->attbyval == true); - return natts; -} - -/* - * slot_deform_heap_tuple - * Given a TupleTableSlot, extract data from the slot's physical tuple - * into its Datum/isnull arrays. Data is extracted up through the - * natts'th column (caller must ensure this is a legal column number). - * - * This is essentially an incremental version of heap_deform_tuple: - * on each call we extract attributes up to the one needed, without - * re-computing information about previously extracted attributes. - * slot->tts_nvalid is the number of attributes already extracted. - * - * This is marked as always inline, so the different offp for different types - * of slots gets optimized away. - */ -static pg_attribute_always_inline void -slot_deform_heap_tuple(TupleTableSlot *slot, HeapTuple tuple, uint32 *offp, - int natts) -{ - bool hasnulls = HeapTupleHasNulls(tuple); - int attnum; - uint32 off; /* offset in tuple data */ - bool slow; /* can we use/set attcacheoff? */ + off = cattr->attcacheoff; + values[attnum] = fetch_att_noerr(tp + off, true, attlen); + attnum++; + } while (attnum < firstNonGuaranteedAttr); - /* We can only fetch as many attributes as the tuple has. */ - natts = Min(HeapTupleHeaderGetNatts(tuple->t_data), natts); + off += cattr->attlen; - /* - * Check whether the first call for this tuple, and initialize or restore - * loop state. - */ - attnum = slot->tts_nvalid; - if (attnum == 0) - { - /* Start from the first attribute */ - off = 0; - slow = false; + if (attnum == natts) + goto done; } else { /* Restore state from previous execution */ off = *offp; - slow = TTS_SLOW(slot); + + /* We expect *offp to be set to 0 when attnum == 0 */ + Assert(off == 0 || attnum > 0); } + /* We can only fetch as many attributes as the tuple has. 
*/ + firstNonCacheOffsetAttr = Min(firstNonCacheOffsetAttr, natts); + /* - * If 'slow' isn't set, try deforming using deforming code that does not - * contain any of the extra checks required for non-fixed offset - * deforming. During deforming, if or when we find a NULL or a variable - * length attribute, we'll switch to a deforming method which includes the - * extra code required for non-fixed offset deforming, a.k.a slow mode. - * Because this is performance critical, we inline - * slot_deform_heap_tuple_internal passing the 'slow' and 'hasnull' - * parameters as constants to allow the compiler to emit specialized code - * with the known-const false comparisons and subsequent branches removed. + * Handle the portion of the tuple that we have cached the offset for up + * to the first NULL attribute. The offset is effectively fixed for these + * so we can use the CompactAttribute's attcacheoff. */ - if (!slow) + if (attnum < firstNonCacheOffsetAttr) { - /* Tuple without any NULLs? We can skip doing any NULL checking */ - if (!hasnulls) - attnum = slot_deform_heap_tuple_internal(slot, - tuple, - attnum, - natts, - false, /* slow */ - false, /* hasnulls */ - &off, - &slow); - else - attnum = slot_deform_heap_tuple_internal(slot, - tuple, - attnum, - natts, - false, /* slow */ - true, /* hasnulls */ - &off, - &slow); + do + { + isnull[attnum] = false; + cattr = TupleDescCompactAttr(tupleDesc, attnum); + + off = cattr->attcacheoff; + values[attnum] = fetch_att_noerr(tp + off, + cattr->attbyval, + cattr->attlen); + } while (++attnum < firstNonCacheOffsetAttr); + + /* + * Point the offset after the end of the last attribute with a cached + * offset. We expect the final cached offset attribute to have a + * fixed width, so just add the attlen to the attcacheoff + */ + Assert(cattr->attlen > 0); + off += cattr->attlen; + } + + /* + * Handle any portion of the tuple that doesn't have a fixed offset up + * until the first NULL attribute. 
This loop only differs from the one + * after it by the NULL checks. + */ + for (; attnum < firstNullAttr; attnum++) + { + int attlen; + + isnull[attnum] = false; + cattr = TupleDescCompactAttr(tupleDesc, attnum); + attlen = cattr->attlen; + + /* + * cstrings don't exist in heap tuples. Use pg_assume to instruct the + * compiler not to emit the cstring related code in + * align_fetch_then_add(). + */ + pg_assume(attlen > 0 || attlen == -1); + + /* align 'off', fetch the datum, and increment off beyond the datum */ + values[attnum] = align_fetch_then_add(tp, + &off, + cattr->attbyval, + attlen, + cattr->attalignby); } - /* If there's still work to do then we must be in slow mode */ - if (attnum < natts) + /* + * Now handle any remaining attributes in the tuple up to the requested + * attnum. This time, include NULL checks as we're now at the first NULL + * attribute. + */ + for (; attnum < natts; attnum++) { - /* XXX is it worth adding a separate call when hasnulls is false? */ - attnum = slot_deform_heap_tuple_internal(slot, - tuple, - attnum, - natts, - true, /* slow */ - hasnulls, - &off, - &slow); + int attlen; + + if (isnull[attnum]) + { + values[attnum] = (Datum) 0; + continue; + } + + cattr = TupleDescCompactAttr(tupleDesc, attnum); + attlen = cattr->attlen; + + /* As above, we don't expect cstrings */ + pg_assume(attlen > 0 || attlen == -1); + + /* align 'off', fetch the datum, and increment off beyond the datum */ + values[attnum] = align_fetch_then_add(tp, + &off, + cattr->attbyval, + attlen, + cattr->attalignby); } +done: + /* * Save state for next execution */ slot->tts_nvalid = attnum; *offp = off; - if (slow) - slot->tts_flags |= TTS_FLAG_SLOW; - else - slot->tts_flags &= ~TTS_FLAG_SLOW; } const TupleTableSlotOps TTSOpsVirtual = { @@ -1508,7 +1532,7 @@ ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */ slot->tts_values = (Datum *) MemoryContextAlloc(slot->tts_mcxt, tupdesc->natts * sizeof(Datum)); slot->tts_isnull = (bool *) -
MemoryContextAlloc(slot->tts_mcxt, tupdesc->natts * sizeof(bool)); + MemoryContextAlloc(slot->tts_mcxt, MAXALIGN(tupdesc->natts * sizeof(bool))); } /* -------------------------------- @@ -2259,10 +2283,16 @@ ExecTypeSetColNames(TupleDesc typeInfo, List *namesList) * This happens "for free" if the tupdesc came from a relcache entry, but * not if we have manufactured a tupdesc for a transient RECORD datatype. * In that case we have to notify typcache.c of the existence of the type. + * + * TupleDescFinalize() must be called on the TupleDesc before calling this + * function. */ TupleDesc BlessTupleDesc(TupleDesc tupdesc) { + /* Did someone forget to call TupleDescFinalize()? */ + Assert(tupdesc->firstNonCachedOffsetAttr >= 0); + if (tupdesc->tdtypeid == RECORDOID && tupdesc->tdtypmod < 0) assign_record_type_typmod(tupdesc); diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c index af3c788ce8b..7f74a8ddcb2 100644 --- a/src/backend/executor/nodeSeqscan.c +++ b/src/backend/executor/nodeSeqscan.c @@ -246,6 +246,8 @@ ExecInitSeqScan(SeqScan *node, EState *estate, int eflags) RelationGetDescr(scanstate->ss.ss_currentRelation), table_slot_callbacks(scanstate->ss.ss_currentRelation)); + scanstate->ss.ss_ScanTupleSlot->tts_flags |= TTS_FLAG_OBEYS_NOT_NULL_CONSTRAINTS; + /* * Initialize result type and projection. 
*/ diff --git a/src/backend/jit/llvm/llvmjit_deform.c b/src/backend/jit/llvm/llvmjit_deform.c index 3eb087eb56b..12521e3e46a 100644 --- a/src/backend/jit/llvm/llvmjit_deform.c +++ b/src/backend/jit/llvm/llvmjit_deform.c @@ -62,7 +62,6 @@ slot_compile_deform(LLVMJitContext *context, TupleDesc desc, LLVMValueRef v_tts_values; LLVMValueRef v_tts_nulls; LLVMValueRef v_slotoffp; - LLVMValueRef v_flagsp; LLVMValueRef v_nvalidp; LLVMValueRef v_nvalid; LLVMValueRef v_maxatt; @@ -178,7 +177,6 @@ slot_compile_deform(LLVMJitContext *context, TupleDesc desc, v_tts_nulls = l_load_struct_gep(b, StructTupleTableSlot, v_slot, FIELDNO_TUPLETABLESLOT_ISNULL, "tts_ISNULL"); - v_flagsp = l_struct_gep(b, StructTupleTableSlot, v_slot, FIELDNO_TUPLETABLESLOT_FLAGS, ""); v_nvalidp = l_struct_gep(b, StructTupleTableSlot, v_slot, FIELDNO_TUPLETABLESLOT_NVALID, ""); if (ops == &TTSOpsHeapTuple || ops == &TTSOpsBufferHeapTuple) @@ -747,14 +745,10 @@ slot_compile_deform(LLVMJitContext *context, TupleDesc desc, { LLVMValueRef v_off = l_load(b, TypeSizeT, v_offp, ""); - LLVMValueRef v_flags; LLVMBuildStore(b, l_int16_const(lc, natts), v_nvalidp); v_off = LLVMBuildTrunc(b, v_off, LLVMInt32TypeInContext(lc), ""); LLVMBuildStore(b, v_off, v_slotoffp); - v_flags = l_load(b, LLVMInt16TypeInContext(lc), v_flagsp, "tts_flags"); - v_flags = LLVMBuildOr(b, v_flags, l_int16_const(lc, TTS_FLAG_SLOW), ""); - LLVMBuildStore(b, v_flags, v_flagsp); LLVMBuildRetVoid(b); } diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index 770edb34e08..998be24ac41 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -666,14 +666,6 @@ RelationBuildTupleDesc(Relation relation) elog(ERROR, "pg_attribute catalog is missing %d attribute(s) for relation OID %u", need, RelationGetRelid(relation)); - /* - * We can easily set the attcacheoff value for the first attribute: it - * must be zero. 
This eliminates the need for special cases for attnum=1 - * that used to exist in fastgetattr() and index_getattr(). - */ - if (RelationGetNumberOfAttributes(relation) > 0) - TupleDescCompactAttr(relation->rd_att, 0)->attcacheoff = 0; - /* * Set up constraint/default info */ @@ -1985,8 +1977,6 @@ formrdesc(const char *relationName, Oid relationReltype, populate_compact_attribute(relation->rd_att, i); } - /* initialize first attribute's attcacheoff, cf RelationBuildTupleDesc */ - TupleDescCompactAttr(relation->rd_att, 0)->attcacheoff = 0; TupleDescFinalize(relation->rd_att); /* mark not-null status */ @@ -4446,8 +4436,6 @@ BuildHardcodedDescriptor(int natts, const FormData_pg_attribute *attrs) populate_compact_attribute(result, i); } - /* initialize first attribute's attcacheoff, cf RelationBuildTupleDesc */ - TupleDescCompactAttr(result, 0)->attcacheoff = 0; TupleDescFinalize(result); /* Note: we don't bother to set up a TupleConstr entry */ diff --git a/src/include/access/tupdesc.h b/src/include/access/tupdesc.h index 595413dbbc5..d4c3a749558 100644 --- a/src/include/access/tupdesc.h +++ b/src/include/access/tupdesc.h @@ -131,6 +131,19 @@ typedef struct CompactAttribute * Any code making changes manually to and fields in the FormData_pg_attribute * array must subsequently call populate_compact_attribute() to flush the * changes out to the corresponding 'compact_attrs' element. + * + * firstNonCachedOffsetAttr stores the index into the compact_attrs array for + * the first attribute that we don't have a known attcacheoff for. + * + * firstNonGuaranteedAttr stores the index into the compact_attrs array for + * the first attribute that is either NULLable, missing, or !attbyval. This + * can be used in locations as a guarantee that attributes before this will + * always exist in tuples. The !attbyval part isn't required for this, but + * including this allows various tuple deforming routines to forego any checks + * for !attbyval.
+ * + * Once a TupleDesc has been populated, before it is used for any purpose + * TupleDescFinalize() must be called on it. */ typedef struct TupleDescData { @@ -138,6 +151,10 @@ typedef struct TupleDescData Oid tdtypeid; /* composite type ID for tuple type */ int32 tdtypmod; /* typmod for tuple type */ int tdrefcount; /* reference count, or -1 if not counting */ + int firstNonCachedOffsetAttr; /* index of the first att without + * an attcacheoff */ + int firstNonGuaranteedAttr; /* index of the first nullable, + * missing or !attbyval attribute. */ TupleConstr *constr; /* constraints, or NULL if none */ /* compact_attrs[N] is the compact metadata of Attribute Number N+1 */ CompactAttribute compact_attrs[FLEXIBLE_ARRAY_MEMBER]; @@ -195,7 +212,6 @@ extern TupleDesc CreateTupleDescTruncatedCopy(TupleDesc tupdesc, int natts); extern TupleDesc CreateTupleDescCopyConstr(TupleDesc tupdesc); -#define TupleDescFinalize(d) ((void) 0) #define TupleDescSize(src) \ (offsetof(struct TupleDescData, compact_attrs) + \ (src)->natts * sizeof(CompactAttribute) + \ @@ -206,6 +222,7 @@ extern void TupleDescCopy(TupleDesc dst, TupleDesc src); extern void TupleDescCopyEntry(TupleDesc dst, AttrNumber dstAttno, TupleDesc src, AttrNumber srcAttno); +extern void TupleDescFinalize(TupleDesc tupdesc); extern void FreeTupleDesc(TupleDesc tupdesc); extern void IncrTupleDescRefCount(TupleDesc tupdesc); diff --git a/src/include/access/tupmacs.h b/src/include/access/tupmacs.h index d64c18b950b..c9587a1adc6 100644 --- a/src/include/access/tupmacs.h +++ b/src/include/access/tupmacs.h @@ -15,7 +15,8 @@ #define TUPMACS_H #include "catalog/pg_type_d.h" /* for TYPALIGN macros */ - +#include "port/pg_bitutils.h" +#include "varatt.h" /* * Check a tuple's null bitmap to determine whether the attribute is null. @@ -28,6 +29,49 @@ att_isnull(int ATT, const bits8 *BITS) return !(BITS[ATT >> 3] & (1 << (ATT & 0x07))); } +/* + * populate_isnull_array + * Transform a tuple's null bitmap into a boolean array. 
+ * + * Caller must ensure that the isnull array is sized so it contains + * at least as many elements as there are bits in the 'bits' array. + * This is required because we always round 'natts' up to the next multiple + * of 8. + */ +static inline void +populate_isnull_array(const bits8 *bits, int natts, bool *isnull) +{ + int nbytes = (natts + 7) >> 3; + + /* + * Multiplying an inverted NULL bitmap byte by this value results in the + * lowest bit in each byte being set the same as each bit of the inverted + * byte. We perform this as 2 32-bit operations rather than a single + * 64-bit operation as multiplying by the required value to do this in + * 64-bits would result in overflowing a uint64 in some cases. + */ +#define SPREAD_BITS_MULTIPLIER_32 0x204081U + + for (int i = 0; i < nbytes; i++, isnull += 8) + { + uint64 isnull_8; + bits8 nullbyte = ~bits[i]; + + /* convert the lower 4 bits of null bitmap word into 32 bit int */ + isnull_8 = (nullbyte & 0xf) * SPREAD_BITS_MULTIPLIER_32; + + /* + * convert the upper 4 bits of null bitmap word into 32 bit int, shift + * into the upper 32 bit + */ + isnull_8 |= ((uint64) ((nullbyte >> 4) * SPREAD_BITS_MULTIPLIER_32)) << 32; + + /* mask out all other bits apart from the lowest bit of each byte */ + isnull_8 &= UINT64CONST(0x0101010101010101); + memcpy(isnull, &isnull_8, sizeof(uint64)); + } +} + #ifndef FRONTEND /* * Given an attbyval and an attlen from either a Form_pg_attribute or @@ -69,6 +113,151 @@ fetch_att(const void *T, bool attbyval, int attlen) else return PointerGetDatum(T); } + +/* + * Same, but no error checking for invalid attlens for byval types. This + * is safe to use when attlen comes from CompactAttribute as we validate the + * length when populating that struct. 
+ */ +static inline Datum +fetch_att_noerr(const void *T, bool attbyval, int attlen) +{ + if (attbyval) + { + switch (attlen) + { + case sizeof(int32): + return Int32GetDatum(*((const int32 *) T)); + case sizeof(int16): + return Int16GetDatum(*((const int16 *) T)); + case sizeof(char): + return CharGetDatum(*((const char *) T)); + default: + Assert(attlen == sizeof(int64)); + return Int64GetDatum(*((const int64 *) T)); + } + } + else + return PointerGetDatum(T); +} + + +/* + * align_fetch_then_add + * Applies all the functionality of att_pointer_alignby(), + * fetch_att_noerr() and att_addlength_pointer() resulting in *off + * pointer to the perhaps unaligned number of bytes into 'tupptr', ready + * to deform the next attribute. + * + * tupptr: pointer to the beginning of the tuple, after the header and any + * NULL bitmask. + * off: offset in bytes for reading tuple data, possibly unaligned. + * attbyval, attlen, attalignby are values from CompactAttribute. + */ +static inline Datum +align_fetch_then_add(const char *tupptr, uint32 *off, bool attbyval, int attlen, + uint8 attalignby) +{ + Datum res; + + if (attlen > 0) + { + const char *offset_ptr; + + *off = TYPEALIGN(attalignby, *off); + offset_ptr = tupptr + *off; + *off += attlen; + if (attbyval) + { + switch (attlen) + { + case sizeof(char): + return CharGetDatum(*((const char *) offset_ptr)); + case sizeof(int16): + return Int16GetDatum(*((const int16 *) offset_ptr)); + case sizeof(int32): + return Int32GetDatum(*((const int32 *) offset_ptr)); + default: + + /* + * populate_compact_attribute_internal() should have + * checked + */ + Assert(attlen == sizeof(int64)); + return Int64GetDatum(*((const int64 *) offset_ptr)); + } + } + return PointerGetDatum(offset_ptr); + } + else if (attlen == -1) + { + if (!VARATT_IS_SHORT(tupptr + *off)) + *off = TYPEALIGN(attalignby, *off); + + res = PointerGetDatum(tupptr + *off); + *off += VARSIZE_ANY(DatumGetPointer(res)); + return res; + } + else + { + Assert(attlen == -2); 
+ *off = TYPEALIGN(attalignby, *off); + res = PointerGetDatum(tupptr + *off); + *off += strlen(tupptr + *off) + 1; + return res; + } +} + +/* + * first_null_attr + * Inspect a NULL bitmask from a tuple and return the 0-based attnum of the + * first NULL attribute. Returns natts if no NULLs were found. + * + * We expect that 'bits' contains at least one 0 bit somewhere in the mask, + * not necessarily < natts. + */ +static inline int +first_null_attr(const bits8 *bits, int natts) +{ + int lastByte = natts >> 3; + int bytenum; + int res; + +#ifdef USE_ASSERT_CHECKING + int firstnull_check = natts; + + /* Do it the slow way and check we get the same answer. */ + for (int i = 0; i < natts; i++) + { + if (att_isnull(i, bits)) + { + firstnull_check = i; + break; + } + } +#endif + + /* Process all bytes up to just before the byte for the natts index */ + for (bytenum = 0; bytenum < lastByte; bytenum++) + { + /* break if there's any NULL attrs (a 0 bit) */ + if (bits[bytenum] != 0xFF) + break; + } + + res = bytenum << 3; + res += pg_rightmost_one_pos32(~bits[bytenum]); + + /* + * Since we did no masking to mask out bits beyond natts, we may have + * found a bit higher than natts, so we must cap to natts + */ + res = Min(res, natts); + + Assert(res == firstnull_check); + + return res; +} #endif /* FRONTEND */ /* diff --git a/src/include/executor/tuptable.h b/src/include/executor/tuptable.h index a2dfd707e78..8346be77302 100644 --- a/src/include/executor/tuptable.h +++ b/src/include/executor/tuptable.h @@ -84,9 +84,6 @@ * tts_values/tts_isnull are allocated either when the slot is created (when * the descriptor is provided), or when a descriptor is assigned to the slot; * they are of length equal to the descriptor's natts. - * - * The TTS_FLAG_SLOW flag is saved state for - * slot_deform_heap_tuple, and should not be touched by any other code. 
*---------- */ @@ -98,12 +95,13 @@ #define TTS_FLAG_SHOULDFREE (1 << 2) #define TTS_SHOULDFREE(slot) (((slot)->tts_flags & TTS_FLAG_SHOULDFREE) != 0) -/* saved state for slot_deform_heap_tuple */ -#define TTS_FLAG_SLOW (1 << 3) -#define TTS_SLOW(slot) (((slot)->tts_flags & TTS_FLAG_SLOW) != 0) +/* true = formed tuple guaranteed to not have NULLs in NOT NULLable columns */ +#define TTS_FLAG_OBEYS_NOT_NULL_CONSTRAINTS (1 << 3) +#define TTS_OBEYS_NOT_NULL_CONSTRAINTS(slot) \ + (((slot)->tts_flags & TTS_FLAG_OBEYS_NOT_NULL_CONSTRAINTS) != 0) /* fixed tuple descriptor */ -#define TTS_FLAG_FIXED (1 << 4) +#define TTS_FLAG_FIXED (1 << 4) /* XXX change to #3? */ #define TTS_FIXED(slot) (((slot)->tts_flags & TTS_FLAG_FIXED) != 0) struct TupleTableSlotOps; @@ -123,7 +121,9 @@ typedef struct TupleTableSlot #define FIELDNO_TUPLETABLESLOT_VALUES 5 Datum *tts_values; /* current per-attribute values */ #define FIELDNO_TUPLETABLESLOT_ISNULL 6 - bool *tts_isnull; /* current per-attribute isnull flags */ + bool *tts_isnull; /* current per-attribute isnull flags. Array + * size must always be rounded up to the next + * multiple of 8 elements. */ MemoryContext tts_mcxt; /* slot itself is in this context */ ItemPointerData tts_tid; /* stored tuple's tid */ Oid tts_tableOid; /* table oid of tuple */ diff --git a/src/test/modules/deform_bench/deform_bench.c b/src/test/modules/deform_bench/deform_bench.c index 7838f639bef..de39fecf8fd 100644 --- a/src/test/modules/deform_bench/deform_bench.c +++ b/src/test/modules/deform_bench/deform_bench.c @@ -49,6 +49,7 @@ deform_bench(PG_FUNCTION_ARGS) tupdesc = RelationGetDescr(rel); slot = MakeTupleTableSlot(tupdesc, &TTSOpsBufferHeapTuple); + slot->tts_flags |= TTS_FLAG_OBEYS_NOT_NULL_CONSTRAINTS; scan = table_beginscan_strat(rel, GetActiveSnapshot(), 0, NULL, true, false); /* -- 2.51.0