From b52c1b1eed2d66ab3263a337300816860620bab6 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Sat, 9 Sep 2017 17:19:54 -0400 Subject: [PATCH v2 5/9] Change TRUE/FALSE to true/false in comments We use the lower-case spelling for the C concepts and the upper-case spelling for the SQL concepts. --- contrib/file_fdw/file_fdw.c | 2 +- contrib/pg_trgm/trgm_regexp.c | 4 +-- src/backend/access/brin/brin_revmap.c | 2 +- src/backend/access/common/heaptuple.c | 2 +- src/backend/access/hash/hashpage.c | 2 +- src/backend/access/heap/heapam.c | 20 +++++++------- src/backend/access/heap/pruneheap.c | 4 +-- src/backend/access/index/amvalidate.c | 6 ++--- src/backend/access/index/indexam.c | 2 +- src/backend/access/nbtree/nbtinsert.c | 4 +-- src/backend/access/nbtree/nbtsearch.c | 10 +++---- src/backend/access/nbtree/nbtutils.c | 12 ++++----- src/backend/access/spgist/spgdoinsert.c | 2 +- src/backend/access/transam/clog.c | 2 +- src/backend/access/transam/commit_ts.c | 2 +- src/backend/access/transam/multixact.c | 2 +- src/backend/access/transam/slru.c | 4 +-- src/backend/access/transam/twophase.c | 6 ++--- src/backend/access/transam/xact.c | 8 +++--- src/backend/access/transam/xlog.c | 44 +++++++++++++++---------------- src/backend/access/transam/xlogarchive.c | 4 +-- src/backend/access/transam/xloginsert.c | 8 +++--- src/backend/access/transam/xlogreader.c | 4 +-- src/backend/catalog/heap.c | 20 +++++++------- src/backend/catalog/namespace.c | 4 +-- src/backend/catalog/pg_constraint.c | 2 +- src/backend/catalog/pg_depend.c | 2 +- src/backend/catalog/pg_operator.c | 4 +-- src/backend/catalog/pg_type.c | 4 +-- src/backend/commands/copy.c | 6 ++--- src/backend/commands/dbcommands.c | 6 ++--- src/backend/commands/proclang.c | 2 +- src/backend/commands/tablespace.c | 2 +- src/backend/commands/trigger.c | 12 ++++----- src/backend/commands/typecmds.c | 8 +++--- src/backend/executor/execCurrent.c | 2 +- src/backend/executor/execMain.c | 4 +-- src/backend/executor/nodeHash.c | 6 ++--- src/backend/executor/nodeIndexscan.c | 8 +++--- src/backend/executor/nodeMergejoin.c | 2 +- src/backend/executor/nodeSubplan.c | 2 +- src/backend/executor/spi.c | 6 ++--- src/backend/executor/tqueue.c | 10 +++---- src/backend/foreign/foreign.c | 2 +- src/backend/libpq/hba.c | 6 ++--- src/backend/libpq/pqcomm.c | 2 +- src/backend/nodes/bitmapset.c | 4 +-- src/backend/nodes/nodeFuncs.c | 8 +++--- src/backend/nodes/tidbitmap.c | 2 +- src/backend/optimizer/path/allpaths.c | 6 ++--- src/backend/optimizer/path/equivclass.c | 10 +++---- src/backend/optimizer/path/indxpath.c | 8 +++--- src/backend/optimizer/path/joinpath.c | 4 +-- src/backend/optimizer/path/joinrels.c | 4 +-- src/backend/optimizer/path/pathkeys.c | 8 +++--- src/backend/optimizer/plan/analyzejoins.c | 4 +-- src/backend/optimizer/plan/createplan.c | 4 +-- src/backend/optimizer/plan/initsplan.c | 20 +++++++------- src/backend/optimizer/plan/planagg.c | 8 +++--- src/backend/optimizer/plan/planner.c | 4 +-- src/backend/optimizer/plan/subselect.c | 2 +- src/backend/optimizer/prep/prepjointree.c | 14 +++++----- src/backend/optimizer/util/clauses.c | 18 ++++++------- src/backend/optimizer/util/placeholder.c | 2 +- src/backend/optimizer/util/var.c | 4 +-- src/backend/parser/parse_clause.c | 2 +- src/backend/parser/parse_coerce.c | 4 +-- src/backend/parser/parse_oper.c | 2 +- src/backend/parser/parse_relation.c | 6 ++--- src/backend/parser/scansup.c | 2 +- src/backend/postmaster/pgstat.c | 2 +- src/backend/rewrite/rewriteDefine.c | 2 +- src/backend/rewrite/rewriteHandler.c | 12 
++++----- src/backend/rewrite/rewriteManip.c | 2 +- src/backend/storage/file/buffile.c | 2 +- src/backend/storage/file/fd.c | 2 +- src/backend/storage/ipc/procarray.c | 6 ++--- src/backend/storage/ipc/sinvaladt.c | 2 +- src/backend/storage/lmgr/deadlock.c | 14 +++++----- src/backend/storage/lmgr/lmgr.c | 10 +++---- src/backend/storage/lmgr/lwlock.c | 2 +- src/backend/tcop/postgres.c | 2 +- src/backend/tcop/pquery.c | 2 +- src/backend/tsearch/spell.c | 2 +- src/backend/utils/adt/misc.c | 2 +- src/backend/utils/adt/regexp.c | 8 +++--- src/backend/utils/adt/regproc.c | 2 +- src/backend/utils/adt/ri_triggers.c | 8 +++--- src/backend/utils/adt/ruleutils.c | 18 ++++++------- src/backend/utils/adt/selfuncs.c | 24 ++++++++--------- src/backend/utils/adt/varlena.c | 4 +-- src/backend/utils/adt/xml.c | 2 +- src/backend/utils/cache/lsyscache.c | 14 +++++----- src/backend/utils/cache/plancache.c | 2 +- src/backend/utils/cache/relcache.c | 6 ++--- src/backend/utils/cache/relmapper.c | 6 ++--- src/backend/utils/error/elog.c | 4 +-- src/backend/utils/fmgr/funcapi.c | 6 ++--- src/backend/utils/hash/dynahash.c | 8 +++--- src/backend/utils/init/miscinit.c | 4 +-- src/backend/utils/misc/tzparser.c | 6 ++--- src/backend/utils/mmgr/portalmem.c | 4 +-- src/backend/utils/sort/tuplesort.c | 16 +++++------ src/backend/utils/sort/tuplestore.c | 12 ++++----- src/backend/utils/time/combocid.c | 4 +-- src/backend/utils/time/tqual.c | 2 +- src/bin/pg_dump/dumputils.c | 4 +-- src/bin/pg_dump/pg_dump.c | 2 +- src/bin/pg_dump/pg_dump.h | 12 ++++----- src/bin/pg_dump/pg_dump_sort.c | 8 +++--- src/bin/pg_upgrade/pg_upgrade.h | 4 +-- src/bin/psql/command.c | 2 +- src/bin/psql/common.c | 4 +-- src/bin/psql/large_obj.c | 2 +- src/bin/psql/stringutils.c | 6 ++--- src/common/md5.c | 2 +- src/include/access/gin_private.h | 2 +- src/include/access/hash_xlog.h | 10 +++---- src/include/access/slru.h | 2 +- src/include/access/xlog.h | 2 +- src/include/c.h | 2 +- src/include/catalog/pg_conversion.h | 2 +- src/include/catalog/pg_type.h | 2 +- src/include/commands/vacuum.h | 8 +++--- src/include/executor/instrument.h | 6 ++--- src/include/executor/tuptable.h | 2 +- src/include/nodes/execnodes.h | 4 +-- src/include/nodes/parsenodes.h | 10 +++---- src/include/nodes/primnodes.h | 8 +++--- src/include/nodes/relation.h | 12 ++++----- src/include/parser/parse_node.h | 2 +- src/include/storage/s_lock.h | 2 +- src/include/storage/spin.h | 2 +- src/include/tsearch/ts_utils.h | 4 +-- src/interfaces/libpq/fe-secure.c | 4 +-- src/pl/plpgsql/src/pl_comp.c | 6 ++--- src/pl/plpgsql/src/pl_funcs.c | 2 +- 137 files changed, 396 insertions(+), 396 deletions(-) diff --git a/contrib/file_fdw/file_fdw.c b/contrib/file_fdw/file_fdw.c index 94e50e92f7..370cc365d6 100644 --- a/contrib/file_fdw/file_fdw.c +++ b/contrib/file_fdw/file_fdw.c @@ -824,7 +824,7 @@ fileIsForeignScanParallelSafe(PlannerInfo *root, RelOptInfo *rel, * * Check to see if it's useful to convert only a subset of the file's columns * to binary. If so, construct a list of the column names to be converted, - * return that at *columns, and return TRUE. (Note that it's possible to + * return that at *columns, and return true. (Note that it's possible to * determine that no columns need be converted, for instance with a COUNT(*) * query. So we can't use returning a NIL list to indicate failure.) 
*/ diff --git a/contrib/pg_trgm/trgm_regexp.c b/contrib/pg_trgm/trgm_regexp.c index 1d474e2aac..6593ac7ba5 100644 --- a/contrib/pg_trgm/trgm_regexp.c +++ b/contrib/pg_trgm/trgm_regexp.c @@ -634,7 +634,7 @@ createTrgmNFAInternal(regex_t *regex, TrgmPackedGraph **graph, * Main entry point for evaluating a graph during index scanning. * * The check[] array is indexed by trigram number (in the array of simple - * trigrams returned by createTrgmNFA), and holds TRUE for those trigrams + * trigrams returned by createTrgmNFA), and holds true for those trigrams * that are present in the index entry being checked. */ bool @@ -1451,7 +1451,7 @@ prefixContains(TrgmPrefix *prefix1, TrgmPrefix *prefix2) * Get vector of all color trigrams in graph and select which of them * to expand into simple trigrams. * - * Returns TRUE if OK, FALSE if exhausted resource limits. + * Returns true if OK, false if exhausted resource limits. */ static bool selectColorTrigrams(TrgmNFA *trgmNFA) diff --git a/src/backend/access/brin/brin_revmap.c b/src/backend/access/brin/brin_revmap.c index 22f2076887..aaffe8dc1d 100644 --- a/src/backend/access/brin/brin_revmap.c +++ b/src/backend/access/brin/brin_revmap.c @@ -315,7 +315,7 @@ brinGetTupleForHeapBlock(BrinRevmap *revmap, BlockNumber heapBlk, * * Index must be locked in ShareUpdateExclusiveLock mode. * - * Return FALSE if caller should retry. + * Return false if caller should retry. */ bool brinRevmapDesummarizeRange(Relation idxrel, BlockNumber heapBlk) diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c index 13ee528e26..a1a9d9905b 100644 --- a/src/backend/access/common/heaptuple.c +++ b/src/backend/access/common/heaptuple.c @@ -289,7 +289,7 @@ heap_fill_tuple(TupleDesc tupleDesc, */ /* ---------------- - * heap_attisnull - returns TRUE iff tuple attribute is not present + * heap_attisnull - returns true iff tuple attribute is not present * ---------------- */ bool diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c index 05798419fc..070c026580 100644 --- a/src/backend/access/hash/hashpage.c +++ b/src/backend/access/hash/hashpage.c @@ -991,7 +991,7 @@ _hash_expandtable(Relation rel, Buffer metabuf) * for the purpose. OTOH, adding a splitpoint is a very infrequent operation, * so it may not be worth worrying about. * - * Returns TRUE if successful, or FALSE if allocation failed due to + * Returns true if successful, or false if allocation failed due to * BlockNumber overflow. */ static bool diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index d20f0381f3..edb38988a4 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -1379,7 +1379,7 @@ heap_openrv_extended(const RangeVar *relation, LOCKMODE lockmode, * heap_beginscan_strat offers an extended API that lets the caller control * whether a nondefault buffer access strategy can be used, and whether * syncscan can be chosen (possibly resulting in the scan not starting from - * block zero). Both of these default to TRUE with plain heap_beginscan. + * block zero). Both of these default to true with plain heap_beginscan. * * heap_beginscan_bm is an alternative entry point for setting up a * HeapScanDesc for a bitmap heap scan. Although that scan technology is @@ -1842,16 +1842,16 @@ heap_getnext(HeapScanDesc scan, ScanDirection direction) * against the specified snapshot. 
* * If successful (tuple found and passes snapshot time qual), then *userbuf - * is set to the buffer holding the tuple and TRUE is returned. The caller + * is set to the buffer holding the tuple and true is returned. The caller * must unpin the buffer when done with the tuple. * * If the tuple is not found (ie, item number references a deleted slot), - * then tuple->t_data is set to NULL and FALSE is returned. + * then tuple->t_data is set to NULL and false is returned. * - * If the tuple is found but fails the time qual check, then FALSE is returned + * If the tuple is found but fails the time qual check, then false is returned * but tuple->t_data is left pointing to the tuple. * - * keep_buf determines what is done with the buffer in the FALSE-result cases. + * keep_buf determines what is done with the buffer in the false-result cases. * When the caller specifies keep_buf = true, we retain the pin on the buffer * and return it in *userbuf (so the caller must eventually unpin it); when * keep_buf = false, the pin is released and *userbuf is set to InvalidBuffer. @@ -1993,15 +1993,15 @@ heap_fetch(Relation relation, * of a HOT chain), and buffer is the buffer holding this tuple. We search * for the first chain member satisfying the given snapshot. If one is * found, we update *tid to reference that tuple's offset number, and - * return TRUE. If no match, return FALSE without modifying *tid. + * return true. If no match, return false without modifying *tid. * * heapTuple is a caller-supplied buffer. When a match is found, we return * the tuple here, in addition to updating *tid. If no match is found, the * contents of this buffer on return are undefined. * * If all_dead is not NULL, we check non-visible tuples to see if they are - * globally dead; *all_dead is set TRUE if all members of the HOT chain - * are vacuumable, FALSE if not. + * globally dead; *all_dead is set true if all members of the HOT chain + * are vacuumable, false if not. * * Unlike heap_fetch, the caller must already have pin and (at least) share * lock on the buffer; it is still pinned/locked at exit. Also unlike @@ -6592,7 +6592,7 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac) * are older than the specified cutoff XID and cutoff MultiXactId. If so, * setup enough state (in the *frz output argument) to later execute and - * WAL-log what we would need to do, and return TRUE. Return FALSE if nothing + * WAL-log what we would need to do, and return true. Return false if nothing * is to be changed. In addition, set *totally_frozen_p to true if the tuple * will be totally frozen after these operations are performed and false if * more freezing will eventually be required. @@ -7240,7 +7240,7 @@ heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple) * heap_tuple_needs_freeze * * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac) - * are older than the specified cutoff XID or MultiXactId. If so, return TRUE. + * are older than the specified cutoff XID or MultiXactId. If so, return true. * * It doesn't matter whether the tuple is alive or dead, we are checking * to see if a tuple needs to be removed or frozen to avoid wraparound. 
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c index 52231ac417..9f33e0ce07 100644 --- a/src/backend/access/heap/pruneheap.c +++ b/src/backend/access/heap/pruneheap.c @@ -39,7 +39,7 @@ typedef struct OffsetNumber redirected[MaxHeapTuplesPerPage * 2]; OffsetNumber nowdead[MaxHeapTuplesPerPage]; OffsetNumber nowunused[MaxHeapTuplesPerPage]; - /* marked[i] is TRUE if item i is entered in one of the above arrays */ + /* marked[i] is true if item i is entered in one of the above arrays */ bool marked[MaxHeapTuplesPerPage + 1]; } PruneState; @@ -170,7 +170,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer) * or RECENTLY_DEAD (see HeapTupleSatisfiesVacuum). * * If report_stats is true then we send the number of reclaimed heap-only - * tuples to pgstats. (This must be FALSE during vacuum, since vacuum will + * tuples to pgstats. (This must be false during vacuum, since vacuum will * send its own new total to pgstats, and we don't want this delta applied * on top of that.) * diff --git a/src/backend/access/index/amvalidate.c b/src/backend/access/index/amvalidate.c index 80865e9ff9..728c48179f 100644 --- a/src/backend/access/index/amvalidate.c +++ b/src/backend/access/index/amvalidate.c @@ -140,9 +140,9 @@ identify_opfamily_groups(CatCList *oprlist, CatCList *proclist) /* * Validate the signature (argument and result types) of an opclass support - * function. Return TRUE if OK, FALSE if not. + * function. Return true if OK, false if not. * - * The "..." represents maxargs argument-type OIDs. If "exact" is TRUE, they + * The "..." represents maxargs argument-type OIDs. If "exact" is true, they * must match the function arg types exactly, else only binary-coercibly. * In any case the function result type must match restype exactly. */ @@ -184,7 +184,7 @@ check_amproc_signature(Oid funcid, Oid restype, bool exact, /* * Validate the signature (argument and result types) of an opclass operator. - * Return TRUE if OK, FALSE if not. + * Return true if OK, false if not. * * Currently, we can hard-wire this as accepting only binary operators. Also, * we can insist on exact type matches, since the given lefttype/righttype diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c index bef4255369..edf4172eb2 100644 --- a/src/backend/access/index/indexam.c +++ b/src/backend/access/index/indexam.c @@ -784,7 +784,7 @@ index_can_return(Relation indexRelation, int attno) { RELATION_CHECKS; - /* amcanreturn is optional; assume FALSE if not provided by AM */ + /* amcanreturn is optional; assume false if not provided by AM */ if (indexRelation->rd_amroutine->amcanreturn == NULL) return false; diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c index bf963fcdef..6b99fc4d39 100644 --- a/src/backend/access/nbtree/nbtinsert.c +++ b/src/backend/access/nbtree/nbtinsert.c @@ -99,8 +99,8 @@ static void _bt_vacuum_one_page(Relation rel, Buffer buffer, Relation heapRel); * don't actually insert. * * The result value is only significant for UNIQUE_CHECK_PARTIAL: - * it must be TRUE if the entry is known unique, else FALSE. - * (In the current implementation we'll also return TRUE after a + * it must be true if the entry is known unique, else false. + * (In the current implementation we'll also return true after a * successful UNIQUE_CHECK_YES or UNIQUE_CHECK_EXISTING call, but * that's just a coding artifact.) 
*/ diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c index 642c8943e7..558113bd13 100644 --- a/src/backend/access/nbtree/nbtsearch.c +++ b/src/backend/access/nbtree/nbtsearch.c @@ -524,7 +524,7 @@ _bt_compare(Relation rel, * scan->xs_ctup.t_self is set to the heap TID of the current tuple, * and if requested, scan->xs_itup points to a copy of the index tuple. * - * If there are no matching items in the index, we return FALSE, with no + * If there are no matching items in the index, we return false, with no * pins or locks held. * * Note that scan->keyData[], and the so->keyData[] scankey built from it, @@ -1336,7 +1336,7 @@ _bt_saveitem(BTScanOpaque so, int itemIndex, * * For success on a scan using a non-MVCC snapshot we hold a pin, but not a * read lock, on that page. If we do not hold the pin, we set so->currPos.buf - * to InvalidBuffer. We return TRUE to indicate success. + * to InvalidBuffer. We return true to indicate success. */ static bool _bt_steppage(IndexScanDesc scan, ScanDirection dir) @@ -1440,10 +1440,10 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir) * * On success exit, so->currPos is updated to contain data from the next * interesting page. Caller is responsible to release lock and pin on - * buffer on success. We return TRUE to indicate success. + * buffer on success. We return true to indicate success. * * If there are no more matching records in the given direction, we drop all - * locks and pins, set so->currPos.buf to InvalidBuffer, and return FALSE. + * locks and pins, set so->currPos.buf to InvalidBuffer, and return false. */ static bool _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, ScanDirection dir) @@ -1608,7 +1608,7 @@ _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno, ScanDirection dir) /* * _bt_parallel_readpage() -- Read current page containing valid data for scan * - * On success, release lock and maybe pin on buffer. We return TRUE to + * On success, release lock and maybe pin on buffer. We return true to * indicate success. */ static bool diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c index dbfb775dec..9b53aa3320 100644 --- a/src/backend/access/nbtree/nbtutils.c +++ b/src/backend/access/nbtree/nbtutils.c @@ -540,8 +540,8 @@ _bt_start_array_keys(IndexScanDesc scan, ScanDirection dir) /* * _bt_advance_array_keys() -- Advance to next set of array elements * - * Returns TRUE if there is another set of values to consider, FALSE if not. - * On TRUE result, the scankeys are initialized with the next set of values. + * Returns true if there is another set of values to consider, false if not. + * On true result, the scankeys are initialized with the next set of values. */ bool _bt_advance_array_keys(IndexScanDesc scan, ScanDirection dir) @@ -724,7 +724,7 @@ _bt_restore_array_keys(IndexScanDesc scan) * for a forward scan; or after the last match for a backward scan.) * * As a byproduct of this work, we can detect contradictory quals such - * as "x = 1 AND x > 2". If we see that, we return so->qual_ok = FALSE, + * as "x = 1 AND x > 2". If we see that, we return so->qual_ok = false, * indicating the scan need not be run at all since no tuples can match. * (In this case we do not bother completing the output key array!) 
* Again, missing cross-type operators might cause us to fail to prove the @@ -1020,7 +1020,7 @@ _bt_preprocess_keys(IndexScanDesc scan) * * If the opfamily doesn't supply a complete set of cross-type operators we * may not be able to make the comparison. If we can make the comparison - * we store the operator result in *result and return TRUE. We return FALSE + * we store the operator result in *result and return true. We return false * if the comparison could not be made. * * Note: op always points at the same ScanKey as either leftarg or rightarg. @@ -1185,8 +1185,8 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op, * * Lastly, for ordinary scankeys (not IS NULL/NOT NULL), we check for a * NULL comparison value. Since all btree operators are assumed strict, - * a NULL means that the qual cannot be satisfied. We return TRUE if the - * comparison value isn't NULL, or FALSE if the scan should be abandoned. + * a NULL means that the qual cannot be satisfied. We return true if the + * comparison value isn't NULL, or false if the scan should be abandoned. * * This function is applied to the *input* scankey structure; therefore * on a rescan we will be looking at already-processed scankeys. Hence diff --git a/src/backend/access/spgist/spgdoinsert.c b/src/backend/access/spgist/spgdoinsert.c index b0702a7f92..a5f4c4059c 100644 --- a/src/backend/access/spgist/spgdoinsert.c +++ b/src/backend/access/spgist/spgdoinsert.c @@ -580,7 +580,7 @@ setRedirectionTuple(SPPageDesc *current, OffsetNumber position, * Test to see if the user-defined picksplit function failed to do its job, * ie, it put all the leaf tuples into the same node. * If so, randomly divide the tuples into several nodes (all with the same - * label) and return TRUE to select allTheSame mode for this inner tuple. + * label) and return true to select allTheSame mode for this inner tuple. * * (This code is also used to forcibly select allTheSame mode for nulls.) * diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c index 9003b22193..18eaab2688 100644 --- a/src/backend/access/transam/clog.c +++ b/src/backend/access/transam/clog.c @@ -721,7 +721,7 @@ BootStrapCLOG(void) /* * Initialize (or reinitialize) a page of CLOG to zeroes. - * If writeXlog is TRUE, also emit an XLOG record saying we did this. + * If writeXlog is true, also emit an XLOG record saying we did this. * * The page is not actually written, just set up in shared memory. * The slot number of the new page is returned. diff --git a/src/backend/access/transam/commit_ts.c b/src/backend/access/transam/commit_ts.c index 60fb9eeb06..7b7bf2b2bf 100644 --- a/src/backend/access/transam/commit_ts.c +++ b/src/backend/access/transam/commit_ts.c @@ -531,7 +531,7 @@ BootStrapCommitTs(void) /* * Initialize (or reinitialize) a page of CommitTs to zeroes. - * If writeXlog is TRUE, also emit an XLOG record saying we did this. + * If writeXlog is true, also emit an XLOG record saying we did this. * * The page is not actually written, just set up in shared memory. * The slot number of the new page is returned. diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index 7142ecede0..0fb6bf2f02 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -1892,7 +1892,7 @@ BootStrapMultiXact(void) /* * Initialize (or reinitialize) a page of MultiXactOffset to zeroes. - * If writeXlog is TRUE, also emit an XLOG record saying we did this. 
+ * If writeXlog is true, also emit an XLOG record saying we did this. * * The page is not actually written, just set up in shared memory. * The slot number of the new page is returned. diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c index 77edc51e1c..588d2da531 100644 --- a/src/backend/access/transam/slru.c +++ b/src/backend/access/transam/slru.c @@ -629,7 +629,7 @@ SimpleLruDoesPhysicalPageExist(SlruCtl ctl, int pageno) * Physical read of a (previously existing) page into a buffer slot * * On failure, we cannot just ereport(ERROR) since caller has put state in - * shared memory that must be undone. So, we return FALSE and save enough + * shared memory that must be undone. So, we return false and save enough * info in static variables to let SlruReportIOError make the report. * * For now, assume it's not worth keeping a file pointer open across @@ -705,7 +705,7 @@ SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno) * Physical write of a page from a buffer slot * * On failure, we cannot just ereport(ERROR) since caller has put state in - * shared memory that must be undone. So, we return FALSE and save enough + * shared memory that must be undone. So, we return false and save enough * info in static variables to let SlruReportIOError make the report. * * For now, assume it's not worth keeping a file pointer open across diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c index ae832917ce..3a123ecf1e 100644 --- a/src/backend/access/transam/twophase.c +++ b/src/backend/access/transam/twophase.c @@ -170,9 +170,9 @@ typedef struct GlobalTransactionData Oid owner; /* ID of user that executed the xact */ BackendId locking_backend; /* backend currently working on the xact */ - bool valid; /* TRUE if PGPROC entry is in proc array */ - bool ondisk; /* TRUE if prepare state file is on disk */ - bool inredo; /* TRUE if entry was added via xlog_redo */ + bool valid; /* true if PGPROC entry is in proc array */ + bool ondisk; /* true if prepare state file is on disk */ + bool inredo; /* true if entry was added via xlog_redo */ char gid[GIDSIZE]; /* The GID assigned to the prepared xact */ } GlobalTransactionData; diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index 93dca7a72a..25fe50dac4 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -679,8 +679,8 @@ SubTransactionIsActive(SubTransactionId subxid) /* * GetCurrentCommandId * - * "used" must be TRUE if the caller intends to use the command ID to mark - * inserted/updated/deleted tuples. FALSE means the ID is being fetched + * "used" must be true if the caller intends to use the command ID to mark + * inserted/updated/deleted tuples. false means the ID is being fetched * for read-only purposes (ie, as a snapshot validity cutoff). See * CommandCounterIncrement() for discussion. */ @@ -3478,7 +3478,7 @@ BeginTransactionBlock(void) * This executes a PREPARE command. * * Since PREPARE may actually do a ROLLBACK, the result indicates what - * happened: TRUE for PREPARE, FALSE for ROLLBACK. + * happened: true for PREPARE, false for ROLLBACK. * * Note that we don't actually do anything here except change blockState. * The real work will be done in the upcoming PrepareTransaction(). @@ -3530,7 +3530,7 @@ PrepareTransactionBlock(char *gid) * This executes a COMMIT command. * * Since COMMIT may actually do a ROLLBACK, the result indicates what - * happened: TRUE for COMMIT, FALSE for ROLLBACK. 
+ * happened: true for COMMIT, false for ROLLBACK. * * Note that we don't actually do anything here except change blockState. * The real work will be done in the upcoming CommitTransactionCommand(). diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index a3e8ce092f..273f5378e5 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -2324,7 +2324,7 @@ XLogCheckpointNeeded(XLogSegNo new_segno) /* * Write and/or fsync the log at least as far as WriteRqst indicates. * - * If flexible == TRUE, we don't have to write as far as WriteRqst, but + * If flexible == true, we don't have to write as far as WriteRqst, but * may stop at any convenient boundary (such as a cache or logfile boundary). * This option allows us to avoid uselessly issuing multiple writes when a * single one would do. @@ -2939,7 +2939,7 @@ XLogFlush(XLogRecPtr record) * * This routine is invoked periodically by the background walwriter process. * - * Returns TRUE if there was any work to do, even if we skipped flushing due + * Returns true if there was any work to do, even if we skipped flushing due * to wal_writer_delay/wal_writer_flush_after. */ bool @@ -3134,12 +3134,12 @@ XLogNeedsFlush(XLogRecPtr record) * * log, seg: identify segment to be created/opened. * - * *use_existent: if TRUE, OK to use a pre-existing file (else, any - * pre-existing file will be deleted). On return, TRUE if a pre-existing + * *use_existent: if true, OK to use a pre-existing file (else, any + * pre-existing file will be deleted). On return, true if a pre-existing * file was used. * - * use_lock: if TRUE, acquire ControlFileLock while moving file into - * place. This should be TRUE except during bootstrap log creation. The + * use_lock: if true, acquire ControlFileLock while moving file into + * place. This should be true except during bootstrap log creation. The * caller must *not* hold the lock at call. * * Returns FD of opened file. @@ -3438,24 +3438,24 @@ XLogFileCopy(XLogSegNo destsegno, TimeLineID srcTLI, XLogSegNo srcsegno, * filename while it's being created) and to recycle an old segment. * * *segno: identify segment to install as (or first possible target). - * When find_free is TRUE, this is modified on return to indicate the + * When find_free is true, this is modified on return to indicate the * actual installation location or last segment searched. * * tmppath: initial name of file to install. It will be renamed into place. * - * find_free: if TRUE, install the new segment at the first empty segno - * number at or after the passed numbers. If FALSE, install the new segment + * find_free: if true, install the new segment at the first empty segno + * number at or after the passed numbers. If false, install the new segment * exactly where specified, deleting any existing segment file there. * * max_segno: maximum segment number to install the new file as. Fail if no * free slot is found between *segno and max_segno. (Ignored when find_free - * is FALSE.) + * is false.) * - * use_lock: if TRUE, acquire ControlFileLock while moving file into - * place. This should be TRUE except during bootstrap log creation. The + * use_lock: if true, acquire ControlFileLock while moving file into + * place. This should be true except during bootstrap log creation. The * caller must *not* hold the lock at call. * - * Returns TRUE if the file was installed successfully. FALSE indicates that + * Returns true if the file was installed successfully. 
false indicates that * max_segno limit was exceeded, or an error occurred while renaming the * file into place. */ @@ -5622,7 +5622,7 @@ getRecordTimestamp(XLogReaderState *record, TimestampTz *recordXtime) * For point-in-time recovery, this function decides whether we want to * stop applying the XLOG before the current record. * - * Returns TRUE if we are stopping, FALSE otherwise. If stopping, some + * Returns true if we are stopping, false otherwise. If stopping, some * information is saved in recoveryStopXid et al for use in annotating the * new timeline's history file. */ @@ -6619,7 +6619,7 @@ StartupXLOG(void) ereport(DEBUG1, (errmsg_internal("redo record is at %X/%X; shutdown %s", (uint32) (checkPoint.redo >> 32), (uint32) checkPoint.redo, - wasShutdown ? "TRUE" : "FALSE"))); + wasShutdown ? "true" : "false"))); ereport(DEBUG1, (errmsg_internal("next transaction ID: %u:%u; next OID: %u", checkPoint.nextXidEpoch, checkPoint.nextXid, @@ -11174,11 +11174,11 @@ GetOldestRestartPoint(XLogRecPtr *oldrecptr, TimeLineID *oldtli) * later than the start of the dump, and so if we rely on it as the start * point, we will fail to restore a consistent database state. * - * Returns TRUE if a backup_label was found (and fills the checkpoint + * Returns true if a backup_label was found (and fills the checkpoint * location and its REDO location into *checkPointLoc and RedoStartLSN, - * respectively); returns FALSE if not. If this backup_label came from a - * streamed backup, *backupEndRequired is set to TRUE. If this backup_label - * was created during recovery, *backupFromStandby is set to TRUE. + * respectively); returns false if not. If this backup_label came from a + * streamed backup, *backupEndRequired is set to true. If this backup_label + * was created during recovery, *backupFromStandby is set to true. */ static bool read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired, @@ -11261,8 +11261,8 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired, * recovering from a backup dump file, and we therefore need to create symlinks * as per the information present in tablespace_map file. * - * Returns TRUE if a tablespace_map file was found (and fills the link - * information for all the tablespace links present in file); returns FALSE + * Returns true if a tablespace_map file was found (and fills the link + * information for all the tablespace links present in file); returns false * if not. */ static bool @@ -11694,7 +11694,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, * If primary_conninfo is set, launch walreceiver to try * to stream the missing WAL. * - * If fetching_ckpt is TRUE, RecPtr points to the initial + * If fetching_ckpt is true, RecPtr points to the initial * checkpoint location. In that case, we use RedoStartLSN * as the streaming start position instead of RecPtr, so * that when we later jump backwards to start redo at diff --git a/src/backend/access/transam/xlogarchive.c b/src/backend/access/transam/xlogarchive.c index 7afb73579b..cb2db743b6 100644 --- a/src/backend/access/transam/xlogarchive.c +++ b/src/backend/access/transam/xlogarchive.c @@ -33,11 +33,11 @@ * Attempt to retrieve the specified file from off-line archival storage. * If successful, fill "path" with its complete path (note that this will be * a temp file name that doesn't follow the normal naming convention), and - * return TRUE. + * return true. 
* * If not successful, fill "path" with the name of the normal on-line file * (which may or may not actually exist, but we'll try to use it), and return - * FALSE. + * false. * * For fixed-size files, the caller may pass the expected size as an * additional crosscheck on successful recovery. If the file size is not diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c index 3af03ecdb1..2a41667c39 100644 --- a/src/backend/access/transam/xloginsert.c +++ b/src/backend/access/transam/xloginsert.c @@ -797,8 +797,8 @@ XLogRecordAssemble(RmgrId rmid, uint8 info, /* * Create a compressed version of a backup block image. * - * Returns FALSE if compression fails (i.e., compressed result is actually - * bigger than original). Otherwise, returns TRUE and sets 'dlen' to + * Returns false if compression fails (i.e., compressed result is actually + * bigger than original). Otherwise, returns true and sets 'dlen' to * the length of compressed block image. */ static bool @@ -965,7 +965,7 @@ XLogSaveBufferForHint(Buffer buffer, bool buffer_std) * log_newpage_buffer instead. * * If the page follows the standard page layout, with a PageHeader and unused - * space between pd_lower and pd_upper, set 'page_std' to TRUE. That allows + * space between pd_lower and pd_upper, set 'page_std' to true. That allows * the unused space to be left out from the WAL record, making it smaller. */ XLogRecPtr @@ -1002,7 +1002,7 @@ log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno, * function. This function will set the page LSN. * * If the page follows the standard page layout, with a PageHeader and unused - * space between pd_lower and pd_upper, set 'page_std' to TRUE. That allows + * space between pd_lower and pd_upper, set 'page_std' to true. That allows * the unused space to be left out from the WAL record, making it smaller. */ XLogRecPtr diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c index 0781a7b9de..a20d12c6c3 100644 --- a/src/backend/access/transam/xlogreader.c +++ b/src/backend/access/transam/xlogreader.c @@ -1300,8 +1300,8 @@ DecodeXLogRecord(XLogReaderState *state, XLogRecord *record, char **errormsg) * Returns information about the block that a block reference refers to. * * If the WAL record contains a block reference with the given ID, *rnode, - * *forknum, and *blknum are filled in (if not NULL), and returns TRUE. - * Otherwise returns FALSE. + * *forknum, and *blknum are filled in (if not NULL), and returns true. + * Otherwise returns false. 
*/ bool XLogRecGetBlockTag(XLogReaderState *record, uint8 block_id, diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c index 05e70818e7..2bc9e90dcf 100644 --- a/src/backend/catalog/heap.c +++ b/src/backend/catalog/heap.c @@ -1000,15 +1000,15 @@ AddNewRelationType(const char *typeName, * cooked_constraints: list of precooked check constraints and defaults * relkind: relkind for new rel * relpersistence: rel's persistence status (permanent, temp, or unlogged) - * shared_relation: TRUE if it's to be a shared relation - * mapped_relation: TRUE if the relation will use the relfilenode map - * oidislocal: TRUE if oid column (if any) should be marked attislocal + * shared_relation: true if it's to be a shared relation + * mapped_relation: true if the relation will use the relfilenode map + * oidislocal: true if oid column (if any) should be marked attislocal * oidinhcount: attinhcount to assign to oid column (if any) * oncommit: ON COMMIT marking (only relevant if it's a temp table) * reloptions: reloptions in Datum form, or (Datum) 0 if none - * use_user_acl: TRUE if should look for user-defined default permissions; - * if FALSE, relacl is always set NULL - * allow_system_table_mods: TRUE to allow creation in system namespaces + * use_user_acl: true if should look for user-defined default permissions; + * if false, relacl is always set NULL + * allow_system_table_mods: true to allow creation in system namespaces * is_internal: is this a system-generated catalog? * * Output parameters: @@ -2208,9 +2208,9 @@ StoreConstraints(Relation rel, List *cooked_constraints, bool is_internal) * rel: relation to be modified * newColDefaults: list of RawColumnDefault structures * newConstraints: list of Constraint nodes - * allow_merge: TRUE if check constraints may be merged with existing ones - * is_local: TRUE if definition is local, FALSE if it's inherited - * is_internal: TRUE if result of some internal process, not a user request + * allow_merge: true if check constraints may be merged with existing ones + * is_local: true if definition is local, false if it's inherited + * is_internal: true if result of some internal process, not a user request * * All entries in newColDefaults will be processed. Entries in newConstraints * will be processed only if they are CONSTR_CHECK type. @@ -2455,7 +2455,7 @@ AddRelationNewConstraints(Relation rel, * new one, and either adjust its conislocal/coninhcount settings or throw * error as needed. * - * Returns TRUE if merged (constraint is a duplicate), or FALSE if it's + * Returns true if merged (constraint is a duplicate), or false if it's * got a so-far-unique name, or throws error if conflict. * * XXX See MergeConstraintsIntoExisting too if you change this code. diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c index 5d71302ded..0a2fb1b93a 100644 --- a/src/backend/catalog/namespace.c +++ b/src/backend/catalog/namespace.c @@ -95,7 +95,7 @@ * set up until the first attempt to create something in it. (The reason for * klugery is that we can't create the temp namespace outside a transaction, * but initial GUC processing of search_path happens outside a transaction.) - * activeTempCreationPending is TRUE if "pg_temp" appears first in the string + * activeTempCreationPending is true if "pg_temp" appears first in the string * but is not reflected in activeCreationNamespace because the namespace isn't * set up yet. 
* @@ -136,7 +136,7 @@ static List *activeSearchPath = NIL; /* default place to create stuff; if InvalidOid, no default */ static Oid activeCreationNamespace = InvalidOid; -/* if TRUE, activeCreationNamespace is wrong, it should be temp namespace */ +/* if true, activeCreationNamespace is wrong, it should be temp namespace */ static bool activeTempCreationPending = false; /* These variables are the values last derived from namespace_search_path: */ diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c index 1336c46d3f..f7e091a954 100644 --- a/src/backend/catalog/pg_constraint.c +++ b/src/backend/catalog/pg_constraint.c @@ -958,7 +958,7 @@ get_primary_key_attnos(Oid relid, bool deferrableOk, Oid *constraintOid) /* * Determine whether a relation can be proven functionally dependent on - * a set of grouping columns. If so, return TRUE and add the pg_constraint + * a set of grouping columns. If so, return true and add the pg_constraint * OIDs of the constraints needed for the proof to the *constraintDeps list. * * grouping_columns is a list of grouping expressions, in which columns of diff --git a/src/backend/catalog/pg_depend.c b/src/backend/catalog/pg_depend.c index dd6ca3e8f7..cf0086b9bd 100644 --- a/src/backend/catalog/pg_depend.c +++ b/src/backend/catalog/pg_depend.c @@ -490,7 +490,7 @@ getExtensionOfObject(Oid classId, Oid objectId) * * An ownership marker is an AUTO or INTERNAL dependency from the sequence to the * column. If we find one, store the identity of the owning column - * into *tableId and *colId and return TRUE; else return FALSE. + * into *tableId and *colId and return true; else return false. * * Note: if there's more than one such pg_depend entry then you get * a random one of them returned into the out parameters. This should diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c index ef81102150..61093dc473 100644 --- a/src/backend/catalog/pg_operator.c +++ b/src/backend/catalog/pg_operator.c @@ -124,7 +124,7 @@ validOperatorName(const char *name) * finds an operator given an exact specification (name, namespace, * left and right type IDs). * - * *defined is set TRUE if defined (not a shell) + * *defined is set true if defined (not a shell) */ static Oid OperatorGet(const char *operatorName, @@ -164,7 +164,7 @@ OperatorGet(const char *operatorName, * looks up an operator given a possibly-qualified name and * left and right type IDs. * - * *defined is set TRUE if defined (not a shell) + * *defined is set true if defined (not a shell) */ static Oid OperatorLookup(List *operatorName, diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c index 59ffd2104d..e02d312008 100644 --- a/src/backend/catalog/pg_type.c +++ b/src/backend/catalog/pg_type.c @@ -821,9 +821,9 @@ makeArrayTypeName(const char *typeName, Oid typeNamespace) * determine the new type's own array type name; else the latter will * certainly pick the same name. * - * Returns TRUE if successfully moved the type, FALSE if not. + * Returns true if successfully moved the type, false if not. * - * We also return TRUE if the given type is a shell type. In this case + * We also return true if the given type is a shell type. In this case * the type has not been renamed out of the way, but nonetheless it can * be expected that TypeCreate will succeed. 
This behavior is convenient * for most callers --- those that need to distinguish the shell-type case diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index cfa3f059c2..71d729123a 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -89,7 +89,7 @@ typedef enum EolType * characters, else we might find a false match to a trailing byte. In * supported server encodings, there is no possibility of a false match, and * it's faster to make useless comparisons to trailing bytes than it is to - * invoke pg_encoding_mblen() to skip over them. encoding_embeds_ascii is TRUE + * invoke pg_encoding_mblen() to skip over them. encoding_embeds_ascii is true * when we have to do it the hard way. */ typedef struct CopyStateData @@ -727,7 +727,7 @@ CopyGetInt16(CopyState cstate, int16 *val) /* * CopyLoadRawBuf loads some more data into raw_buf * - * Returns TRUE if able to obtain at least one more byte, else FALSE. + * Returns true if able to obtain at least one more byte, else false. * * If raw_buf_index < raw_buf_len, the unprocessed bytes are transferred * down to the start of the buffer and then we load more data after that. @@ -764,7 +764,7 @@ CopyLoadRawBuf(CopyState cstate) * DoCopy executes the SQL COPY statement * * Either unload or reload contents of table <relation>, depending on <from>. - * (<from> = TRUE means we are inserting into the table.) In the "TO" case + * (<from> = true means we are inserting into the table.) In the "TO" case * we also support copying the output of an arbitrary SELECT, INSERT, UPDATE * or DELETE query. * diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c index e138539035..eb1a4695c0 100644 --- a/src/backend/commands/dbcommands.c +++ b/src/backend/commands/dbcommands.c @@ -1718,8 +1718,8 @@ AlterDatabaseOwner(const char *dbname, Oid newOwnerId) /* * Look up info about the database named "name". If the database exists, * obtain the specified lock type on it, fill in any of the remaining - * parameters that aren't NULL, and return TRUE. If no such database, - * return FALSE. + * parameters that aren't NULL, and return true. If no such database, + * return false. */ static bool get_db_info(const char *name, LOCKMODE lockmode, @@ -1923,7 +1923,7 @@ remove_dbtablespaces(Oid db_id) /* * Check for existing files that conflict with a proposed new DB OID; - * return TRUE if there are any + * return true if there are any * * If there were a subdirectory in any tablespace matching the proposed new * OID, we'd get a create failure due to the duplicate name ... and then we'd diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c index 9d2d43fe6b..1a239fabea 100644 --- a/src/backend/commands/proclang.c +++ b/src/backend/commands/proclang.c @@ -513,7 +513,7 @@ find_language_template(const char *languageName) /* - * This just returns TRUE if we have a valid template for a given language + * This just returns true if we have a valid template for a given language */ bool PLTemplateExists(const char *languageName) diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c index 8559c3b6b3..d574e4dd00 100644 --- a/src/backend/commands/tablespace.c +++ b/src/backend/commands/tablespace.c @@ -655,7 +655,7 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid) * does not justify throwing an error that would require manual intervention * to get the database running again.
* - * Returns TRUE if successful, FALSE if some subdirectory is not empty + * Returns true if successful, false if some subdirectory is not empty */ static bool destroy_tablespace_directories(Oid tablespaceoid, bool redo) diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index 269c9e17dd..7975297f7c 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -126,7 +126,7 @@ static void AfterTriggerEnlargeQueryState(void); * * If isInternal is true then this is an internally-generated trigger. * This argument sets the tgisinternal field of the pg_trigger entry, and - * if TRUE causes us to modify the given trigger name to ensure uniqueness. + * if true causes us to modify the given trigger name to ensure uniqueness. * * When isInternal is not true we require ACL_TRIGGER permissions on the * relation, as well as ACL_EXECUTE on the trigger function. For internal @@ -4089,10 +4089,10 @@ AfterTriggerExecute(AfterTriggerEvent event, * If move_list isn't NULL, events that are not to be invoked now are * transferred to move_list. * - * When immediate_only is TRUE, do not invoke currently-deferred triggers. - * (This will be FALSE only at main transaction exit.) + * When immediate_only is true, do not invoke currently-deferred triggers. + * (This will be false only at main transaction exit.) * - * Returns TRUE if any invokable events were found. + * Returns true if any invokable events were found. */ static bool afterTriggerMarkEvents(AfterTriggerEventList *events, @@ -4156,14 +4156,14 @@ afterTriggerMarkEvents(AfterTriggerEventList *events, * make one locally to cache the info in case there are multiple trigger * events per rel. * - * When delete_ok is TRUE, it's safe to delete fully-processed events. + * When delete_ok is true, it's safe to delete fully-processed events. * (We are not very tense about that: we simply reset a chunk to be empty * if all its events got fired. The objective here is just to avoid useless * rescanning of events when a trigger queues new events during transaction * end, so it's not necessary to worry much about the case where only * some events are fired.) * - * Returns TRUE if no unfired events remain in the list (this allows us + * Returns true if no unfired events remain in the list (this allows us * to avoid repeating afterTriggerMarkEvents). */ static bool diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c index 7ed16aeff4..3c0b7129c5 100644 --- a/src/backend/commands/typecmds.c +++ b/src/backend/commands/typecmds.c @@ -3327,9 +3327,9 @@ AlterTypeOwner(List *names, Oid newOwnerId, ObjectType objecttype) * AlterTypeOwner_oid - change type owner unconditionally * * This function recurses to handle a pg_class entry, if necessary. It - * invokes any necessary access object hooks. If hasDependEntry is TRUE, this + * invokes any necessary access object hooks. If hasDependEntry is true, this * function modifies the pg_shdepend entry appropriately (this should be - * passed as FALSE only for table rowtypes and array types). + * passed as false only for table rowtypes and array types). * * This is used by ALTER TABLE/TYPE OWNER commands, as well as by REASSIGN * OWNED BY. It assumes the caller has done all needed check. @@ -3495,10 +3495,10 @@ AlterTypeNamespace_oid(Oid typeOid, Oid nspOid, ObjectAddresses *objsMoved) * Caller must have already checked privileges. * * The function automatically recurses to process the type's array type, - * if any. 
isImplicitArray should be TRUE only when doing this internal + * if any. isImplicitArray should be true only when doing this internal * recursion (outside callers must never try to move an array type directly). * - * If errorOnTableType is TRUE, the function errors out if the type is + * If errorOnTableType is true, the function errors out if the type is * a table type. ALTER TABLE has to be used to move a table to a new * namespace. * diff --git a/src/backend/executor/execCurrent.c b/src/backend/executor/execCurrent.c index f42df3916e..a3e962ee67 100644 --- a/src/backend/executor/execCurrent.c +++ b/src/backend/executor/execCurrent.c @@ -32,7 +32,7 @@ static ScanState *search_plan_tree(PlanState *node, Oid table_oid); * of the table is currently being scanned by the cursor named by CURRENT OF, * and return the row's TID into *current_tid. * - * Returns TRUE if a row was identified. Returns FALSE if the cursor is valid + * Returns true if a row was identified. Returns false if the cursor is valid * for the table but is not currently scanning a row of the table (this is a * legal situation in inheritance cases). Raises error if cursor is not a * valid updatable scan of the specified table. diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index 4b594d489c..0b00945893 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -1486,8 +1486,8 @@ ExecCleanUpTriggerState(EState *estate) * going to be stored into a relation that has OIDs. In other contexts * we are free to choose whether to leave space for OIDs in result tuples * (we generally don't want to, but we do if a physical-tlist optimization - * is possible). This routine checks the plan context and returns TRUE if the - * choice is forced, FALSE if the choice is not forced. In the TRUE case, + * is possible). This routine checks the plan context and returns true if the + * choice is forced, false if the choice is not forced. In the true case, * *hasoids is set to the required value. * * One reason this is ugly is that all plan nodes in the plan tree will emit diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c index d10d94ccc2..f7cd8fb347 100644 --- a/src/backend/executor/nodeHash.c +++ b/src/backend/executor/nodeHash.c @@ -918,10 +918,10 @@ ExecHashTableInsert(HashJoinTable hashtable, * econtext->ecxt_innertuple. Vars in the hashkeys expressions should have * varno either OUTER_VAR or INNER_VAR. * - * A TRUE result means the tuple's hash value has been successfully computed - * and stored at *hashvalue. A FALSE result means the tuple cannot match + * A true result means the tuple's hash value has been successfully computed + * and stored at *hashvalue. A false result means the tuple cannot match * because it contains a null attribute, and hence it should be discarded - * immediately. (If keep_nulls is true then FALSE is never returned.) + * immediately. (If keep_nulls is true then false is never returned.) */ bool ExecHashGetHashValue(HashJoinTable hashtable, diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c index 638b17b07c..e6adba98a4 100644 --- a/src/backend/executor/nodeIndexscan.c +++ b/src/backend/executor/nodeIndexscan.c @@ -676,8 +676,8 @@ ExecIndexEvalRuntimeKeys(ExprContext *econtext, * ExecIndexEvalArrayKeys * Evaluate any array key values, and set up to iterate through arrays. 
* - * Returns TRUE if there are array elements to consider; FALSE means there - * is at least one null or empty array, so no match is possible. On TRUE + * Returns true if there are array elements to consider; false means there + * is at least one null or empty array, so no match is possible. On true * result, the scankeys are initialized with the first elements of the arrays. */ bool @@ -756,8 +756,8 @@ ExecIndexEvalArrayKeys(ExprContext *econtext, * ExecIndexAdvanceArrayKeys * Advance to the next set of array key values, if any. * - * Returns TRUE if there is another set of values to consider, FALSE if not. - * On TRUE result, the scankeys are initialized with the next set of values. + * Returns true if there is another set of values to consider, false if not. + * On true result, the scankeys are initialized with the next set of values. */ bool ExecIndexAdvanceArrayKeys(IndexArrayKeyInfo *arrayKeys, int numArrayKeys) diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c index 925b4cf553..ef9e1ee471 100644 --- a/src/backend/executor/nodeMergejoin.c +++ b/src/backend/executor/nodeMergejoin.c @@ -510,7 +510,7 @@ MJFillInner(MergeJoinState *node) /* * Check that a qual condition is constant true or constant false. - * If it is constant false (or null), set *is_const_false to TRUE. + * If it is constant false (or null), set *is_const_false to true. * * Constant true would normally be represented by a NIL list, but we allow an * actual bool Const as well. We do expect that the planner will have thrown diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c index 77ef6f3df1..a93fbf646c 100644 --- a/src/backend/executor/nodeSubplan.c +++ b/src/backend/executor/nodeSubplan.c @@ -220,7 +220,7 @@ ExecScanSubPlan(SubPlanState *node, MemoryContext oldcontext; TupleTableSlot *slot; Datum result; - bool found = false; /* TRUE if got at least one subplan tuple */ + bool found = false; /* true if got at least one subplan tuple */ ListCell *pvar; ListCell *l; ArrayBuildStateAny *astate = NULL; diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index afe231fca9..b881b5c5e1 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -1901,9 +1901,9 @@ _SPI_prepare_oneshot_plan(const char *src, SPIPlanPtr plan) * snapshot: query snapshot to use, or InvalidSnapshot for the normal * behavior of taking a new snapshot for each query. * crosscheck_snapshot: for RI use, all others pass InvalidSnapshot - * read_only: TRUE for read-only execution (no CommandCounterIncrement) - * fire_triggers: TRUE to fire AFTER triggers at end of query (normal case); - * FALSE means any AFTER triggers are postponed to end of outer query + * read_only: true for read-only execution (no CommandCounterIncrement) + * fire_triggers: true to fire AFTER triggers at end of query (normal case); + * false means any AFTER triggers are postponed to end of outer query * tcount: execution tuple-count limit, or 0 for none */ static int diff --git a/src/backend/executor/tqueue.c b/src/backend/executor/tqueue.c index 6afcd1a30a..d31369b38b 100644 --- a/src/backend/executor/tqueue.c +++ b/src/backend/executor/tqueue.c @@ -218,7 +218,7 @@ static TupleRemapInfo **BuildFieldRemapInfo(TupleDesc tupledesc, /* * Receive a tuple from a query, and send it to the designated shm_mq. * - * Returns TRUE if successful, FALSE if shm_mq has been detached. + * Returns true if successful, false if shm_mq has been detached. 
*/ static bool tqueueReceiveSlot(TupleTableSlot *slot, DestReceiver *self) @@ -806,7 +806,7 @@ TQRemapTuple(TupleQueueReader *reader, /* * Process the given datum and replace any transient record typmods - * contained in it. Set *changed to TRUE if we actually changed the datum. + * contained in it. Set *changed to true if we actually changed the datum. * * remapinfo is previously-computed remapping info about the datum's type. * @@ -838,7 +838,7 @@ TQRemap(TupleQueueReader *reader, TupleRemapInfo *remapinfo, /* * Process the given array datum and replace any transient record typmods - * contained in it. Set *changed to TRUE if we actually changed the datum. + * contained in it. Set *changed to true if we actually changed the datum. */ static Datum TQRemapArray(TupleQueueReader *reader, ArrayRemapInfo *remapinfo, @@ -884,7 +884,7 @@ TQRemapArray(TupleQueueReader *reader, ArrayRemapInfo *remapinfo, /* * Process the given range datum and replace any transient record typmods - * contained in it. Set *changed to TRUE if we actually changed the datum. + * contained in it. Set *changed to true if we actually changed the datum. */ static Datum TQRemapRange(TupleQueueReader *reader, RangeRemapInfo *remapinfo, @@ -925,7 +925,7 @@ TQRemapRange(TupleQueueReader *reader, RangeRemapInfo *remapinfo, /* * Process the given record datum and replace any transient record typmods - * contained in it. Set *changed to TRUE if we actually changed the datum. + * contained in it. Set *changed to true if we actually changed the datum. */ static Datum TQRemapRecord(TupleQueueReader *reader, RecordRemapInfo *remapinfo, diff --git a/src/backend/foreign/foreign.c b/src/backend/foreign/foreign.c index a113bf540d..45fca52621 100644 --- a/src/backend/foreign/foreign.c +++ b/src/backend/foreign/foreign.c @@ -428,7 +428,7 @@ GetFdwRoutineForRelation(Relation relation, bool makecopy) /* * IsImportableForeignTable - filter table names for IMPORT FOREIGN SCHEMA * - * Returns TRUE if given table name should be imported according to the + * Returns true if given table name should be imported according to the * statement's import filter options. */ bool diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c index b2c487a8e8..210f13cc87 100644 --- a/src/backend/libpq/hba.c +++ b/src/backend/libpq/hba.c @@ -187,9 +187,9 @@ pg_isblank(const char c) * set *err_msg to a string describing the error. Currently the only * possible error is token too long for buf. * - * If successful: store null-terminated token at *buf and return TRUE. - * If no more tokens on line: set *buf = '\0' and return FALSE. - * If error: fill buf with truncated or misformatted token and return FALSE. + * If successful: store null-terminated token at *buf and return true. + * If no more tokens on line: set *buf = '\0' and return false. + * If error: fill buf with truncated or misformatted token and return false. */ static bool next_token(char **lineptr, char *buf, int bufsz, diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c index 4452ea4228..a08c760121 100644 --- a/src/backend/libpq/pqcomm.c +++ b/src/backend/libpq/pqcomm.c @@ -914,7 +914,7 @@ RemoveSocketFiles(void) /* -------------------------------- * socket_set_nonblocking - set socket blocking/non-blocking * - * Sets the socket non-blocking if nonblocking is TRUE, or sets it + * Sets the socket non-blocking if nonblocking is true, or sets it * blocking otherwise. 
* -------------------------------- */ diff --git a/src/backend/nodes/bitmapset.c b/src/backend/nodes/bitmapset.c index bf8545d437..d4b82c6305 100644 --- a/src/backend/nodes/bitmapset.c +++ b/src/backend/nodes/bitmapset.c @@ -558,8 +558,8 @@ bms_singleton_member(const Bitmapset *a) * bms_get_singleton_member * * Test whether the given set is a singleton. - * If so, set *member to the value of its sole member, and return TRUE. - * If not, return FALSE, without changing *member. + * If so, set *member to the value of its sole member, and return true. + * If not, return false, without changing *member. * * This is more convenient and faster than calling bms_membership() and then * bms_singleton_member(), if we don't care about distinguishing empty sets diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c index e3eb0c5788..c15aff7e24 100644 --- a/src/backend/nodes/nodeFuncs.c +++ b/src/backend/nodes/nodeFuncs.c @@ -663,7 +663,7 @@ strip_implicit_coercions(Node *node) * Test whether an expression returns a set result. * * Because we use expression_tree_walker(), this can also be applied to - * whole targetlists; it'll produce TRUE if any one of the tlist items + * whole targetlists; it'll produce true if any one of the tlist items * returns a set. */ bool @@ -1632,9 +1632,9 @@ set_sa_opfuncid(ScalarArrayOpExpr *opexpr) * check_functions_in_node - * apply checker() to each function OID contained in given expression node * - * Returns TRUE if the checker() function does; for nodes representing more - * than one function call, returns TRUE if the checker() function does so - * for any of those functions. Returns FALSE if node does not invoke any + * Returns true if the checker() function does; for nodes representing more + * than one function call, returns true if the checker() function does so + * for any of those functions. Returns false if node does not invoke any * SQL-visible function. Caller must not pass node == NULL. * * This function examines only the given node; it does not recurse into any diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c index c4e53adb0c..db6abbde1e 100644 --- a/src/backend/nodes/tidbitmap.c +++ b/src/backend/nodes/tidbitmap.c @@ -609,7 +609,7 @@ tbm_intersect(TIDBitmap *a, const TIDBitmap *b) /* * Process one page of a during an intersection op * - * Returns TRUE if apage is now empty and should be deleted from a + * Returns true if apage is now empty and should be deleted from a */ static bool tbm_intersect_page(TIDBitmap *a, PagetableEntry *apage, const TIDBitmap *b) diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c index e8e7202e11..ff9303b436 100644 --- a/src/backend/optimizer/path/allpaths.c +++ b/src/backend/optimizer/path/allpaths.c @@ -1822,7 +1822,7 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel, * Zero out result area for subquery_is_pushdown_safe, so that it can set * flags as needed while recursing. In particular, we need a workspace * for keeping track of unsafe-to-reference columns. unsafeColumns[i] - * will be set TRUE if we find that output column i of the subquery is + * will be set true if we find that output column i of the subquery is * unsafe to use in a pushed-down qual. */ memset(&safetyInfo, 0, sizeof(safetyInfo)); @@ -2498,7 +2498,7 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels) * In addition, we make several checks on the subquery's output columns to see * if it is safe to reference them in pushed-down quals. 
If output column k * is found to be unsafe to reference, we set safetyInfo->unsafeColumns[k] - * to TRUE, but we don't reject the subquery overall since column k might not + * to true, but we don't reject the subquery overall since column k might not * be referenced by some/all quals. The unsafeColumns[] array will be * consulted later by qual_is_pushdown_safe(). It's better to do it this way * than to make the checks directly in qual_is_pushdown_safe(), because when @@ -2620,7 +2620,7 @@ recurse_pushdown_safe(Node *setOp, Query *topquery, * * There are several cases in which it's unsafe to push down an upper-level * qual if it references a particular output column of a subquery. We check - * each output column of the subquery and set unsafeColumns[k] to TRUE if + * each output column of the subquery and set unsafeColumns[k] to true if * that column is unsafe for a pushed-down qual to reference. The conditions * checked here are: * diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c index 7997f50c18..2915c0deac 100644 --- a/src/backend/optimizer/path/equivclass.c +++ b/src/backend/optimizer/path/equivclass.c @@ -71,7 +71,7 @@ static bool reconsider_full_join_clause(PlannerInfo *root, * any delay by an outer join, so its two sides can be considered equal * anywhere they are both computable; moreover that equality can be * extended transitively. Record this knowledge in the EquivalenceClass - * data structure. Returns TRUE if successful, FALSE if not (in which + * data structure. Returns true if successful, false if not (in which * case caller should treat the clause as ordinary, not an equivalence). * * If below_outer_join is true, then the clause was found below the nullable @@ -564,8 +564,8 @@ add_eq_member(EquivalenceClass *ec, Expr *expr, Relids relids, * so for now we live with just reporting the first match. See also * generate_implied_equalities_for_column and match_pathkeys_to_index.) * - * If create_it is TRUE, we'll build a new EquivalenceClass when there is no - * match. If create_it is FALSE, we just return NULL when no match. + * If create_it is true, we'll build a new EquivalenceClass when there is no + * match. If create_it is false, we just return NULL when no match. * * This can be used safely both before and after EquivalenceClass merging; * since it never causes merging it does not invalidate any existing ECs @@ -1637,7 +1637,7 @@ reconsider_outer_join_clauses(PlannerInfo *root) /* * reconsider_outer_join_clauses for a single LEFT/RIGHT JOIN clause * - * Returns TRUE if we were able to propagate a constant through the clause. + * Returns true if we were able to propagate a constant through the clause. */ static bool reconsider_outer_join_clause(PlannerInfo *root, RestrictInfo *rinfo, @@ -1762,7 +1762,7 @@ reconsider_outer_join_clause(PlannerInfo *root, RestrictInfo *rinfo, /* * reconsider_outer_join_clauses for a single FULL JOIN clause * - * Returns TRUE if we were able to propagate a constant through the clause. + * Returns true if we were able to propagate a constant through the clause. 
*/ static bool reconsider_full_join_clause(PlannerInfo *root, RestrictInfo *rinfo) diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c index f35380391a..18f6bafcdd 100644 --- a/src/backend/optimizer/path/indxpath.c +++ b/src/backend/optimizer/path/indxpath.c @@ -838,12 +838,12 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel, * * If skip_nonnative_saop is non-NULL, we ignore ScalarArrayOpExpr clauses * unless the index AM supports them directly, and we set *skip_nonnative_saop - * to TRUE if we found any such clauses (caller must initialize the variable - * to FALSE). If it's NULL, we do not ignore ScalarArrayOpExpr clauses. + * to true if we found any such clauses (caller must initialize the variable + * to false). If it's NULL, we do not ignore ScalarArrayOpExpr clauses. * * If skip_lower_saop is non-NULL, we ignore ScalarArrayOpExpr clauses for - * non-first index columns, and we set *skip_lower_saop to TRUE if we found - * any such clauses (caller must initialize the variable to FALSE). If it's + * non-first index columns, and we set *skip_lower_saop to true if we found + * any such clauses (caller must initialize the variable to false). If it's * NULL, we do not ignore non-first ScalarArrayOpExpr clauses, but they will * result in considering the scan's output to be unordered. * diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c index 43833ea9c9..d471d11ee3 100644 --- a/src/backend/optimizer/path/joinpath.c +++ b/src/backend/optimizer/path/joinpath.c @@ -313,7 +313,7 @@ add_paths_to_joinrel(PlannerInfo *root, * across joins unless there's a join-order-constraint-based reason to do so. * So we ignore the param_source_rels restriction when this case applies. * - * allow_star_schema_join() returns TRUE if the param_source_rels restriction + * allow_star_schema_join() returns true if the param_source_rels restriction * should be overridden, ie, it's okay to perform this join. */ static inline bool @@ -1794,7 +1794,7 @@ hash_inner_and_outer(PlannerInfo *root, * Select mergejoin clauses that are usable for a particular join. * Returns a list of RestrictInfo nodes for those clauses. * - * *mergejoin_allowed is normally set to TRUE, but it is set to FALSE if + * *mergejoin_allowed is normally set to true, but it is set to false if * this is a right/full join and there are nonmergejoinable join clauses. * The executor's mergejoin machinery cannot handle such cases, so we have * to avoid generating a mergejoin plan. (Note that this flag does NOT diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c index 6ee23509c5..5a627a9327 100644 --- a/src/backend/optimizer/path/joinrels.c +++ b/src/backend/optimizer/path/joinrels.c @@ -323,7 +323,7 @@ make_rels_by_clauseless_joins(PlannerInfo *root, * * On success, *sjinfo_p is set to NULL if this is to be a plain inner join, * else it's set to point to the associated SpecialJoinInfo node. Also, - * *reversed_p is set TRUE if the given relations need to be swapped to + * *reversed_p is set true if the given relations need to be swapped to * match the SpecialJoinInfo node. */ static bool @@ -1235,7 +1235,7 @@ mark_dummy_rel(RelOptInfo *rel) * decide there's no match for an outer row, which is pretty stupid. So, * we need to detect the case. * - * If only_pushed_down is TRUE, then consider only pushed-down quals. + * If only_pushed_down is true, then consider only pushed-down quals. 
*/ static bool restriction_is_constant_false(List *restrictlist, bool only_pushed_down) diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c index 9d83a5ca62..c6870d314e 100644 --- a/src/backend/optimizer/path/pathkeys.c +++ b/src/backend/optimizer/path/pathkeys.c @@ -162,8 +162,8 @@ pathkey_is_redundant(PathKey *new_pathkey, List *pathkeys) * considered. Otherwise child members are ignored. (See the comments for * get_eclass_for_sort_expr.) * - * create_it is TRUE if we should create any missing EquivalenceClass - * needed to represent the sort key. If it's FALSE, we return NULL if the + * create_it is true if we should create any missing EquivalenceClass + * needed to represent the sort key. If it's false, we return NULL if the * sort key isn't already present in any EquivalenceClass. */ static PathKey * @@ -987,8 +987,8 @@ update_mergeclause_eclasses(PlannerInfo *root, RestrictInfo *restrictinfo) * If successful, it returns a list of mergeclauses. * * 'pathkeys' is a pathkeys list showing the ordering of an input path. - * 'outer_keys' is TRUE if these keys are for the outer input path, - * FALSE if for inner. + * 'outer_keys' is true if these keys are for the outer input path, + * false if for inner. * 'restrictinfos' is a list of mergejoinable restriction clauses for the * join relation being formed. * diff --git a/src/backend/optimizer/plan/analyzejoins.c b/src/backend/optimizer/plan/analyzejoins.c index 34317fe778..0ac3379307 100644 --- a/src/backend/optimizer/plan/analyzejoins.c +++ b/src/backend/optimizer/plan/analyzejoins.c @@ -582,7 +582,7 @@ reduce_unique_semijoins(PlannerInfo *root) * Could the relation possibly be proven distinct on some set of columns? * * This is effectively a pre-checking function for rel_is_distinct_for(). - * It must return TRUE if rel_is_distinct_for() could possibly return TRUE + * It must return true if rel_is_distinct_for() could possibly return true * with this rel, but it should not expend a lot of cycles. The idea is * that callers can avoid doing possibly-expensive processing to compute * rel_is_distinct_for()'s argument lists if the call could not possibly @@ -727,7 +727,7 @@ rel_is_distinct_for(PlannerInfo *root, RelOptInfo *rel, List *clause_list) * on some set of output columns? * * This is effectively a pre-checking function for query_is_distinct_for(). - * It must return TRUE if query_is_distinct_for() could possibly return TRUE + * It must return true if query_is_distinct_for() could possibly return true * with this query, but it should not expend a lot of cycles. 
The idea is * that callers can avoid doing possibly-expensive processing to compute * query_is_distinct_for()'s argument lists if the call could not possibly diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c index 28216629aa..bede0d0d28 100644 --- a/src/backend/optimizer/plan/createplan.c +++ b/src/backend/optimizer/plan/createplan.c @@ -5517,7 +5517,7 @@ make_sort(Plan *lefttree, int numCols, * 'pathkeys' is the list of pathkeys by which the result is to be sorted * 'relids' identifies the child relation being sorted, if any * 'reqColIdx' is NULL or an array of required sort key column numbers - * 'adjust_tlist_in_place' is TRUE if lefttree must be modified in-place + * 'adjust_tlist_in_place' is true if lefttree must be modified in-place * * We must convert the pathkey information into arrays of sort key column * numbers, sort operator OIDs, collation OIDs, and nulls-first flags, @@ -5537,7 +5537,7 @@ make_sort(Plan *lefttree, int numCols, * compute these expressions, since a Sort or MergeAppend node itself won't * do any such calculations. If the input plan type isn't one that can do * projections, this means adding a Result node just to do the projection. - * However, the caller can pass adjust_tlist_in_place = TRUE to force the + * However, the caller can pass adjust_tlist_in_place = true to force the * lefttree tlist to be modified in-place regardless of whether the node type * can project --- we use this for fixing the tlist of MergeAppend itself. * diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c index 987c20ac9f..382ee75716 100644 --- a/src/backend/optimizer/plan/initsplan.c +++ b/src/backend/optimizer/plan/initsplan.c @@ -719,7 +719,7 @@ deconstruct_jointree(PlannerInfo *root) * * Inputs: * jtnode is the jointree node to examine - * below_outer_join is TRUE if this node is within the nullable side of a + * below_outer_join is true if this node is within the nullable side of a * higher-level outer join * Outputs: * *qualscope gets the set of base Relids syntactically included in this @@ -1588,8 +1588,8 @@ compute_semijoin_info(SpecialJoinInfo *sjinfo, List *clause) * as belonging to a higher join level, just add it to postponed_qual_list. * * 'clause': the qual clause to be distributed - * 'is_deduced': TRUE if the qual came from implied-equality deduction - * 'below_outer_join': TRUE if the qual is from a JOIN/ON that is below the + * 'is_deduced': true if the qual came from implied-equality deduction + * 'below_outer_join': true if the qual is from a JOIN/ON that is below the * nullable side of a higher-level outer join * 'jointype': type of join the qual is from (JOIN_INNER for a WHERE clause) * 'security_level': security_level to assign to the qual @@ -1600,7 +1600,7 @@ compute_semijoin_info(SpecialJoinInfo *sjinfo, List *clause) * baserels appearing on the outer (nonnullable) side of the join * (for FULL JOIN this includes both sides of the join, and must in fact * equal qualscope) - * 'deduced_nullable_relids': if is_deduced is TRUE, the nullable relids to + * 'deduced_nullable_relids': if is_deduced is true, the nullable relids to * impute to the clause; otherwise NULL * 'postponed_qual_list': list of PostponedQual structs, which we can add * this qual to if it turns out to belong to a higher join level. 
@@ -1610,9 +1610,9 @@ compute_semijoin_info(SpecialJoinInfo *sjinfo, List *clause) * 'ojscope' is needed if we decide to force the qual up to the outer-join * level, which will be ojscope not necessarily qualscope. * - * In normal use (when is_deduced is FALSE), at the time this is called, + * In normal use (when is_deduced is false), at the time this is called, * root->join_info_list must contain entries for all and only those special - * joins that are syntactically below this qual. But when is_deduced is TRUE, + * joins that are syntactically below this qual. But when is_deduced is true, * we are adding new deduced clauses after completion of deconstruct_jointree, * so it cannot be assumed that root->join_info_list has anything to do with * qual placement. @@ -2001,8 +2001,8 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, * may force extra delay of higher-level outer joins. * * If the qual must be delayed, add relids to *relids_p to reflect the lowest - * safe level for evaluating the qual, and return TRUE. Any extra delay for - * higher-level joins is reflected by setting delay_upper_joins to TRUE in + * safe level for evaluating the qual, and return true. Any extra delay for + * higher-level joins is reflected by setting delay_upper_joins to true in * SpecialJoinInfo structs. We also compute nullable_relids, the set of * referenced relids that are nullable by lower outer joins (note that this * can be nonempty even for a non-delayed qual). @@ -2034,7 +2034,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, * Lastly, a pushed-down qual that references the nullable side of any current * join_info_list member and has to be evaluated above that OJ (because its * required relids overlap the LHS too) causes that OJ's delay_upper_joins - * flag to be set TRUE. This will prevent any higher-level OJs from + * flag to be set true. This will prevent any higher-level OJs from * being interchanged with that OJ, which would result in not having any * correct place to evaluate the qual. (The case we care about here is a * sub-select WHERE clause within the RHS of some outer join. The WHERE @@ -2118,7 +2118,7 @@ check_outerjoin_delay(PlannerInfo *root, /* * check_equivalence_delay * Detect whether a potential equivalence clause is rendered unsafe - * by outer-join-delay considerations. Return TRUE if it's safe. + * by outer-join-delay considerations. Return true if it's safe. * * The initial tests in distribute_qual_to_rels will consider a mergejoinable * clause to be a potential equivalence clause if it is not outerjoin_delayed. diff --git a/src/backend/optimizer/plan/planagg.c b/src/backend/optimizer/plan/planagg.c index bba8a1ff58..889e8af33b 100644 --- a/src/backend/optimizer/plan/planagg.c +++ b/src/backend/optimizer/plan/planagg.c @@ -232,9 +232,9 @@ preprocess_minmax_aggregates(PlannerInfo *root, List *tlist) * that each one is a MIN/MAX aggregate. If so, build a list of the * distinct aggregate calls in the tree. * - * Returns TRUE if a non-MIN/MAX aggregate is found, FALSE otherwise. + * Returns true if a non-MIN/MAX aggregate is found, false otherwise. * (This seemingly-backward definition is used because expression_tree_walker - * aborts the scan on TRUE return, which is what we want.) + * aborts the scan on true return, which is what we want.) * * Found aggregates are added to the list at *context; it's up to the caller * to initialize the list to NIL. 
@@ -335,8 +335,8 @@ find_minmax_aggs_walker(Node *node, List **context) * Given a MIN/MAX aggregate, try to build an indexscan Path it can be * optimized with. * - * If successful, stash the best path in *mminfo and return TRUE. - * Otherwise, return FALSE. + * If successful, stash the best path in *mminfo and return true. + * Otherwise, return false. */ static bool build_minmax_path(PlannerInfo *root, MinMaxAggInfo *mminfo, diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index 907622eadb..afc04a130d 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -5614,7 +5614,7 @@ make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, * below the Sort step (and the Distinct step, if any). This will be * exactly final_target if we decide a projection step wouldn't be helpful. * - * In addition, *have_postponed_srfs is set to TRUE if we choose to postpone + * In addition, *have_postponed_srfs is set to true if we choose to postpone * any set-returning functions to after the Sort. */ static PathTarget * @@ -5966,7 +5966,7 @@ expression_planner(Expr *expr) * tableOid is the OID of a table to be clustered on its index indexOid * (which is already known to be a btree index). Decide whether it's * cheaper to do an indexscan or a seqscan-plus-sort to execute the CLUSTER. - * Return TRUE to use sorting, FALSE to use an indexscan. + * Return true to use sorting, false to use an indexscan. * * Note: caller had better already hold some type of lock on the table. */ diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c index 1103984779..8f75fa98ed 100644 --- a/src/backend/optimizer/plan/subselect.c +++ b/src/backend/optimizer/plan/subselect.c @@ -1563,7 +1563,7 @@ convert_EXISTS_sublink_to_join(PlannerInfo *root, SubLink *sublink, * won't occur, nor will other side-effects of volatile functions. This seems * unlikely to bother anyone in practice. * - * Returns TRUE if was able to discard the targetlist, else FALSE. + * Returns true if was able to discard the targetlist, else false. */ static bool simplify_EXISTS_query(PlannerInfo *root, Query *query) diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c index f3bb73a664..1d7e4994f5 100644 --- a/src/backend/optimizer/prep/prepjointree.c +++ b/src/backend/optimizer/prep/prepjointree.c @@ -644,9 +644,9 @@ pull_up_subqueries(PlannerInfo *root) * This forces use of the PlaceHolderVar mechanism for all non-Var targetlist * items, and puts some additional restrictions on what can be pulled up. * - * deletion_ok is TRUE if the caller can cope with us returning NULL for a + * deletion_ok is true if the caller can cope with us returning NULL for a * deletable leaf node (for example, a VALUES RTE that could be pulled up). - * If it's FALSE, we'll avoid pullup in such cases. + * If it's false, we'll avoid pullup in such cases. * * A tricky aspect of this code is that if we pull up a subquery we have * to replace Vars that reference the subquery's outputs throughout the @@ -1401,7 +1401,7 @@ make_setop_translation_list(Query *query, Index newvarno, * (Note subquery is not necessarily equal to rte->subquery; it could be a * processed copy of that.) * lowest_outer_join is the lowest outer join above the subquery, or NULL. - * deletion_ok is TRUE if it'd be okay to delete the subquery entirely. + * deletion_ok is true if it'd be okay to delete the subquery entirely. 
*/ static bool is_simple_subquery(Query *subquery, RangeTblEntry *rte, @@ -1457,7 +1457,7 @@ is_simple_subquery(Query *subquery, RangeTblEntry *rte, /* * Don't pull up a subquery with an empty jointree, unless it has no quals - * and deletion_ok is TRUE and we're not underneath an outer join. + * and deletion_ok is true and we're not underneath an outer join. * * query_planner() will correctly generate a Result plan for a jointree * that's totally empty, but we can't cope with an empty FromExpr @@ -1681,7 +1681,7 @@ pull_up_simple_values(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte) * to pull up into the parent query. * * rte is the RTE_VALUES RangeTblEntry to check. - * deletion_ok is TRUE if it'd be okay to delete the VALUES RTE entirely. + * deletion_ok is true if it'd be okay to delete the VALUES RTE entirely. */ static bool is_simple_values(PlannerInfo *root, RangeTblEntry *rte, bool deletion_ok) @@ -1689,7 +1689,7 @@ is_simple_values(PlannerInfo *root, RangeTblEntry *rte, bool deletion_ok) Assert(rte->rtekind == RTE_VALUES); /* - * We can only pull up a VALUES RTE if deletion_ok is TRUE. It's + * We can only pull up a VALUES RTE if deletion_ok is true. It's * basically the same case as a sub-select with empty FROM list; see * comments in is_simple_subquery(). */ @@ -1844,7 +1844,7 @@ is_safe_append_member(Query *subquery) * * If restricted is false, all level-1 Vars are allowed (but we still must * search the jointree, since it might contain outer joins below which there - * will be restrictions). If restricted is true, return TRUE when any qual + * will be restrictions). If restricted is true, return true when any qual * in the jointree contains level-1 Vars coming from outside the rels listed * in safe_upper_varnos. */ diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c index 93add27dbe..cb3002fc4f 100644 --- a/src/backend/optimizer/util/clauses.c +++ b/src/backend/optimizer/util/clauses.c @@ -832,7 +832,7 @@ expression_returns_set_rows(Node *clause) * contain_subplans * Recursively search for subplan nodes within a clause. * - * If we see a SubLink node, we will return TRUE. This is only possible if + * If we see a SubLink node, we will return true. This is only possible if * the expression tree hasn't yet been transformed by subselect.c. We do not * know whether the node will produce a true subplan or just an initplan, * but we make the conservative assumption that it will be a subplan. @@ -1614,8 +1614,8 @@ contain_leaked_vars_walker(Node *node, void *context) * that either v1 or v2 can't be NULL, but it does prove that the t1 row * as a whole can't be all-NULL. * - * top_level is TRUE while scanning top-level AND/OR structure; here, showing - * the result is either FALSE or NULL is good enough. top_level is FALSE when + * top_level is true while scanning top-level AND/OR structure; here, showing + * the result is either FALSE or NULL is good enough. top_level is false when * we have descended below a NOT or a strict function: now we must be able to * prove that the subexpression goes to NULL. * @@ -1822,8 +1822,8 @@ find_nonnullable_rels_walker(Node *node, bool top_level) * The result is a palloc'd List, but we have not copied the member Var nodes. * Also, we don't bother trying to eliminate duplicate entries. * - * top_level is TRUE while scanning top-level AND/OR structure; here, showing - * the result is either FALSE or NULL is good enough. 
top_level is FALSE when + * top_level is true while scanning top-level AND/OR structure; here, showing + * the result is either FALSE or NULL is good enough. top_level is false when * we have descended below a NOT or a strict function: now we must be able to * prove that the subexpression goes to NULL. * @@ -3598,8 +3598,8 @@ eval_const_expressions_mutator(Node *node, * input is TRUE and at least one is NULL. We don't actually include the NULL * here, that's supposed to be done by the caller. * - * The output arguments *haveNull and *forceTrue must be initialized FALSE - * by the caller. They will be set TRUE if a null constant or true constant, + * The output arguments *haveNull and *forceTrue must be initialized false + * by the caller. They will be set true if a NULL constant or TRUE constant, * respectively, is detected anywhere in the argument list. */ static List * @@ -3710,8 +3710,8 @@ simplify_or_arguments(List *args, * no input is FALSE and at least one is NULL. We don't actually include the * NULL here, that's supposed to be done by the caller. * - * The output arguments *haveNull and *forceFalse must be initialized FALSE - * by the caller. They will be set TRUE if a null constant or false constant, + * The output arguments *haveNull and *forceFalse must be initialized false + * by the caller. They will be set true if a NULL constant or FALSE constant, * respectively, is detected anywhere in the argument list. */ static List * diff --git a/src/backend/optimizer/util/placeholder.c b/src/backend/optimizer/util/placeholder.c index 970542dde5..d3e7c571e0 100644 --- a/src/backend/optimizer/util/placeholder.c +++ b/src/backend/optimizer/util/placeholder.c @@ -62,7 +62,7 @@ make_placeholder_expr(PlannerInfo *root, Expr *expr, Relids phrels) * simplified query passed to query_planner(). * * Note: this should only be called after query_planner() has started. Also, - * create_new_ph must not be TRUE after deconstruct_jointree begins, because + * create_new_ph must not be true after deconstruct_jointree begins, because * make_outerjoininfo assumes that we already know about all placeholders. */ PlaceHolderInfo * diff --git a/src/backend/optimizer/util/var.c b/src/backend/optimizer/util/var.c index b8d7d3ffad..81c60dce5e 100644 --- a/src/backend/optimizer/util/var.c +++ b/src/backend/optimizer/util/var.c @@ -657,9 +657,9 @@ pull_var_clause_walker(Node *node, pull_var_clause_context *context) * entries might now be arbitrary expressions, not just Vars. This affects * this function in one important way: we might find ourselves inserting * SubLink expressions into subqueries, and we must make sure that their - * Query.hasSubLinks fields get set to TRUE if so. If there are any + * Query.hasSubLinks fields get set to true if so. If there are any * SubLinks in the join alias lists, the outer Query should already have - * hasSubLinks = TRUE, so this is only relevant to un-flattened subqueries. + * hasSubLinks = true, so this is only relevant to un-flattened subqueries. * * NOTE: this is used on not-yet-planned expressions.
We do not expect it * to be applied directly to the whole Query, so if we see a Query to start diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c index 9ff80b8b40..bffe811a52 100644 --- a/src/backend/parser/parse_clause.c +++ b/src/backend/parser/parse_clause.c @@ -2076,7 +2076,7 @@ findTargetlistEntrySQL99(ParseState *pstate, Node *node, List **tlist, /* * If no matches, construct a new target entry which is appended to the - * end of the target list. This target is given resjunk = TRUE so that it + * end of the target list. This target is given resjunk = true so that it * will not be projected into the final tuple. */ target_result = transformTargetEntry(pstate, node, expr, exprKind, diff --git a/src/backend/parser/parse_coerce.c b/src/backend/parser/parse_coerce.c index e79ad26e71..b1aa5a1f9e 100644 --- a/src/backend/parser/parse_coerce.c +++ b/src/backend/parser/parse_coerce.c @@ -1399,7 +1399,7 @@ coerce_to_common_type(ParseState *pstate, Node *node, * that is, so long as there is no use of ANYELEMENT. This is mostly for * backwards compatibility with the pre-7.4 behavior of ANYARRAY. * - * We do not ereport here, but just return FALSE if a rule is violated. + * We do not ereport here, but just return false if a rule is violated. */ bool check_generic_type_consistency(Oid *actual_arg_types, @@ -2022,7 +2022,7 @@ TypeCategory(Oid type) /* IsPreferredType() * Check if this type is a preferred type for the given category. * - * If category is TYPCATEGORY_INVALID, then we'll return TRUE for preferred + * If category is TYPCATEGORY_INVALID, then we'll return true for preferred * types of any category; otherwise, only for preferred types of that * category. */ diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c index d7971cc3d9..568eda0cf7 100644 --- a/src/backend/parser/parse_oper.c +++ b/src/backend/parser/parse_oper.c @@ -1023,7 +1023,7 @@ static HTAB *OprCacheHash = NULL; * make_oper_cache_key * Fill the lookup key struct given operator name and arg types. * - * Returns TRUE if successful, FALSE if the search_path overflowed + * Returns true if successful, false if the search_path overflowed * (hence no caching is possible). * * pstate/location are used only to report the error position; pass NULL/-1 diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c index 4c5c684b44..af2f3aa7e1 100644 --- a/src/backend/parser/parse_relation.c +++ b/src/backend/parser/parse_relation.c @@ -2162,8 +2162,8 @@ addRTEtoQuery(ParseState *pstate, RangeTblEntry *rte, * * This creates lists of an RTE's column names (aliases if provided, else * real names) and Vars for each column. Only user columns are considered. - * If include_dropped is FALSE then dropped columns are omitted from the - * results. If include_dropped is TRUE then empty strings and NULL constants + * If include_dropped is false then dropped columns are omitted from the + * results. If include_dropped is true then empty strings and NULL constants * (not Vars!) are returned for dropped columns. * * rtindex, sublevels_up, and location are the varno, varlevelsup, and location @@ -3310,7 +3310,7 @@ errorMissingColumn(ParseState *pstate, /* - * Examine a fully-parsed query, and return TRUE iff any relation underlying + * Examine a fully-parsed query, and return true iff any relation underlying * the query is a temporary relation (table, view, or materialized view). 
*/ bool diff --git a/src/backend/parser/scansup.c b/src/backend/parser/scansup.c index c3d2805803..dff7a04147 100644 --- a/src/backend/parser/scansup.c +++ b/src/backend/parser/scansup.c @@ -209,7 +209,7 @@ truncate_identifier(char *ident, int len, bool warn) } /* - * scanner_isspace() --- return TRUE if flex scanner considers char whitespace + * scanner_isspace() --- return true if flex scanner considers char whitespace * * This should be used instead of the potentially locale-dependent isspace() * function when it's important to match the lexer's behavior. diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c index accf302cf7..6aede0ce14 100644 --- a/src/backend/postmaster/pgstat.c +++ b/src/backend/postmaster/pgstat.c @@ -5262,7 +5262,7 @@ pgstat_read_db_statsfile(Oid databaseid, HTAB *tabhash, HTAB *funchash, * pgstat_read_db_statsfile_timestamp() - * * Attempt to determine the timestamp of the last db statfile write. - * Returns TRUE if successful; the timestamp is stored in *ts. + * Returns true if successful; the timestamp is stored in *ts. * * This needs to be careful about handling databases for which no stats file * exists, such as databases without a stat entry or those not yet written: diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c index 071b3a9ec9..007d3dabc1 100644 --- a/src/backend/rewrite/rewriteDefine.c +++ b/src/backend/rewrite/rewriteDefine.c @@ -531,7 +531,7 @@ DefineQueryRewrite(char *rulename, replace); /* - * Set pg_class 'relhasrules' field TRUE for event relation. + * Set pg_class 'relhasrules' field true for event relation. * * Important side effect: an SI notice is broadcast to force all * backends (including me!) to update relcache entries with the new diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c index ef52dd5b95..ef5da05e88 100644 --- a/src/backend/rewrite/rewriteHandler.c +++ b/src/backend/rewrite/rewriteHandler.c @@ -325,8 +325,8 @@ acquireLocksOnSubLinks(Node *node, acquireLocksOnSubLinks_context *context) * rt_index - RT index of result relation in original query * event - type of rule event * Output arguments: - * *returning_flag - set TRUE if we rewrite RETURNING clause in rule_action - * (must be initialized to FALSE) + * *returning_flag - set true if we rewrite RETURNING clause in rule_action + * (must be initialized to false) * Return value: * rewritten form of rule_action */ @@ -2017,10 +2017,10 @@ CopyAndAddInvertedQual(Query *parsetree, * event - type of rule event * locks - list of rules to fire * Output arguments: - * *instead_flag - set TRUE if any unqualified INSTEAD rule is found - * (must be initialized to FALSE) - * *returning_flag - set TRUE if we rewrite RETURNING clause in any rule - * (must be initialized to FALSE) + * *instead_flag - set true if any unqualified INSTEAD rule is found + * (must be initialized to false) + * *returning_flag - set true if we rewrite RETURNING clause in any rule + * (must be initialized to false) * *qual_product - filled with modified original query if any qualified * INSTEAD rule is found (must be initialized to NULL) * Return value: diff --git a/src/backend/rewrite/rewriteManip.c b/src/backend/rewrite/rewriteManip.c index 5c17213720..64ae464878 100644 --- a/src/backend/rewrite/rewriteManip.c +++ b/src/backend/rewrite/rewriteManip.c @@ -1203,7 +1203,7 @@ replace_rte_variables_mutator(Node *node, * appear in the expression. 
* * If the expression tree contains a whole-row Var for the target RTE, - * *found_whole_row is returned as TRUE. In addition, if to_rowtype is + * *found_whole_row is returned as true. In addition, if to_rowtype is * not InvalidOid, we modify the Var's vartype and insert a ConvertRowTypeExpr * to map back to the orignal rowtype. Callers that don't provide to_rowtype * should report an error if *found_row_type is true; we don't do that here diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c index 4ca0ea4f2a..410980991d 100644 --- a/src/backend/storage/file/buffile.c +++ b/src/backend/storage/file/buffile.c @@ -68,7 +68,7 @@ struct BufFile * avoid making redundant FileSeek calls. */ - bool isTemp; /* can only add files if this is TRUE */ + bool isTemp; /* can only add files if this is true */ bool isInterXact; /* keep open over transactions? */ bool dirty; /* does buffer need to be written? */ diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c index 83b061a036..c03fb1f7dc 100644 --- a/src/backend/storage/file/fd.c +++ b/src/backend/storage/file/fd.c @@ -2545,7 +2545,7 @@ SetTempTablespaces(Oid *tableSpaces, int numSpaces) /* * TempTablespacesAreSet * - * Returns TRUE if SetTempTablespaces has been called in current transaction. + * Returns true if SetTempTablespaces has been called in current transaction. * (This is just so that tablespaces.c doesn't need its own per-transaction * state.) */ diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c index ffa6180eff..37e12bd829 100644 --- a/src/backend/storage/ipc/procarray.c +++ b/src/backend/storage/ipc/procarray.c @@ -1791,7 +1791,7 @@ GetSnapshotData(Snapshot snapshot) * check that the source transaction is still running, and we'd better do * that atomically with installing the new xmin. * - * Returns TRUE if successful, FALSE if source xact is no longer running. + * Returns true if successful, false if source xact is no longer running. */ bool ProcArrayInstallImportedXmin(TransactionId xmin, @@ -1866,7 +1866,7 @@ ProcArrayInstallImportedXmin(TransactionId xmin, * PGPROC of the transaction from which we imported the snapshot, rather than * an XID. * - * Returns TRUE if successful, FALSE if source xact is no longer running. + * Returns true if successful, false if source xact is no longer running. */ bool ProcArrayInstallRestoredXmin(TransactionId xmin, PGPROC *proc) @@ -2873,7 +2873,7 @@ CountUserBackends(Oid roleid) * The current backend is always ignored; it is caller's responsibility to * check whether the current backend uses the given DB, if it's important. * - * Returns TRUE if there are (still) other backends in the DB, FALSE if not. + * Returns true if there are (still) other backends in the DB, false if not. * Also, *nbackends and *nprepared are set to the number of other backends * and prepared transactions in the DB, respectively. * diff --git a/src/backend/storage/ipc/sinvaladt.c b/src/backend/storage/ipc/sinvaladt.c index 1b9d32b7cf..0d517a00e6 100644 --- a/src/backend/storage/ipc/sinvaladt.c +++ b/src/backend/storage/ipc/sinvaladt.c @@ -627,7 +627,7 @@ SIGetDataEntries(SharedInvalidationMessage *data, int datasize) * SICleanupQueue * Remove messages that have been consumed by all active backends * - * callerHasWriteLock is TRUE if caller is holding SInvalWriteLock. + * callerHasWriteLock is true if caller is holding SInvalWriteLock. * minFree is the minimum number of message slots to make free. 
* * Possible side effects of this routine include marking one or more diff --git a/src/backend/storage/lmgr/deadlock.c b/src/backend/storage/lmgr/deadlock.c index 5e49c78905..968e6c0e6d 100644 --- a/src/backend/storage/lmgr/deadlock.c +++ b/src/backend/storage/lmgr/deadlock.c @@ -307,7 +307,7 @@ GetBlockingAutoVacuumPgproc(void) * by an outer level of recursion. Add to this each possible solution * constraint for any cycle detected at this level. * - * Returns TRUE if no solution exists. Returns FALSE if a deadlock-free + * Returns true if no solution exists. Returns false if a deadlock-free * state is attainable, in which case waitOrders[] shows the required * rearrangements of lock wait queues (if any). */ @@ -432,8 +432,8 @@ TestConfiguration(PGPROC *startProc) * FindLockCycle -- basic check for deadlock cycles * * Scan outward from the given proc to see if there is a cycle in the - * waits-for graph that includes this proc. Return TRUE if a cycle - * is found, else FALSE. If a cycle is found, we return a list of + * waits-for graph that includes this proc. Return true if a cycle + * is found, else false. If a cycle is found, we return a list of * the "soft edges", if any, included in the cycle. These edges could * potentially be eliminated by rearranging wait queues. We also fill * deadlockDetails[] with information about the detected cycle; this info @@ -792,8 +792,8 @@ FindLockCycleRecurseMember(PGPROC *checkProc, * of nWaitOrders WAIT_ORDER structs in waitOrders[], with PGPROC array * workspace in waitOrderProcs[]. * - * Returns TRUE if able to build an ordering that satisfies all the - * constraints, FALSE if not (there are contradictory constraints). + * Returns true if able to build an ordering that satisfies all the + * constraints, false if not (there are contradictory constraints). */ static bool ExpandConstraints(EDGE *constraints, @@ -864,8 +864,8 @@ ExpandConstraints(EDGE *constraints, * the "blocker" in the output array. The EDGE array may well contain * edges associated with other locks; these should be ignored. * - * Returns TRUE if able to build an ordering that satisfies all the - * constraints, FALSE if not (there are contradictory constraints). + * Returns true if able to build an ordering that satisfies all the + * constraints, false if not (there are contradictory constraints). */ static bool TopoSort(LOCK *lock, diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c index fe9889894b..da5679b7a3 100644 --- a/src/backend/storage/lmgr/lmgr.c +++ b/src/backend/storage/lmgr/lmgr.c @@ -129,7 +129,7 @@ LockRelationOid(Oid relid, LOCKMODE lockmode) * ConditionalLockRelationOid * * As above, but only lock if we can get the lock without blocking. - * Returns TRUE iff the lock was acquired. + * Returns true iff the lock was acquired. * * NOTE: we do not currently need conditional versions of all the * LockXXX routines in this file, but they could easily be added if needed. @@ -344,7 +344,7 @@ LockRelationForExtension(Relation relation, LOCKMODE lockmode) * ConditionalLockRelationForExtension * * As above, but only lock if we can get the lock without blocking. - * Returns TRUE iff the lock was acquired. + * Returns true iff the lock was acquired. */ bool ConditionalLockRelationForExtension(Relation relation, LOCKMODE lockmode) @@ -413,7 +413,7 @@ LockPage(Relation relation, BlockNumber blkno, LOCKMODE lockmode) * ConditionalLockPage * * As above, but only lock if we can get the lock without blocking. - * Returns TRUE iff the lock was acquired. 
+ * Returns true iff the lock was acquired. */ bool ConditionalLockPage(Relation relation, BlockNumber blkno, LOCKMODE lockmode) @@ -469,7 +469,7 @@ LockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode) * ConditionalLockTuple * * As above, but only lock if we can get the lock without blocking. - * Returns TRUE iff the lock was acquired. + * Returns true iff the lock was acquired. */ bool ConditionalLockTuple(Relation relation, ItemPointer tid, LOCKMODE lockmode) @@ -601,7 +601,7 @@ XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, * ConditionalXactLockTableWait * * As above, but only lock if we can get the lock without blocking. - * Returns TRUE if the lock was acquired. + * Returns true if the lock was acquired. */ bool ConditionalXactLockTableWait(TransactionId xid) diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c index 82a1cf5150..5d54956bbb 100644 --- a/src/backend/storage/lmgr/lwlock.c +++ b/src/backend/storage/lmgr/lwlock.c @@ -1275,7 +1275,7 @@ LWLockAcquire(LWLock *lock, LWLockMode mode) /* * LWLockConditionalAcquire - acquire a lightweight lock in the specified mode * - * If the lock is not available, return FALSE with no side-effects. + * If the lock is not available, return false with no side-effects. * * If successful, cancel/die interrupts are held off until lock release. */ diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c index 4eb85720a7..a55b9a239b 100644 --- a/src/backend/tcop/postgres.c +++ b/src/backend/tcop/postgres.c @@ -2106,7 +2106,7 @@ check_log_statement(List *stmt_list) * If logging is needed, the duration in msec is formatted into msec_str[], * which must be a 32-byte buffer. * - * was_logged should be TRUE if caller already logged query details (this + * was_logged should be true if caller already logged query details (this * essentially prevents 2 from being returned). */ int diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c index cc462efc37..2f98135a59 100644 --- a/src/backend/tcop/pquery.c +++ b/src/backend/tcop/pquery.c @@ -682,7 +682,7 @@ PortalSetResultFormat(Portal portal, int nFormats, int16 *formats) * in which to store a command completion status string. * May be NULL if caller doesn't want a status string. * - * Returns TRUE if the portal's execution is complete, FALSE if it was + * Returns true if the portal's execution is complete, false if it was * suspended due to exhaustion of the count parameter. */ bool diff --git a/src/backend/tsearch/spell.c b/src/backend/tsearch/spell.c index 6527c73731..e82a69d337 100644 --- a/src/backend/tsearch/spell.c +++ b/src/backend/tsearch/spell.c @@ -776,7 +776,7 @@ NIAddAffix(IspellDict *Conf, const char *flag, char flagflags, const char *mask, * * The buffer at "next" must be of size BUFSIZ; we truncate the input to fit. * - * Returns TRUE if we found a field, FALSE if not. + * Returns true if we found a field, false if not. */ static bool get_nextfield(char **str, char *next) diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c index 62341b84d1..1980ff5ac7 100644 --- a/src/backend/utils/adt/misc.c +++ b/src/backend/utils/adt/misc.c @@ -47,7 +47,7 @@ /* * Common subroutine for num_nulls() and num_nonnulls(). - * Returns TRUE if successful, FALSE if function should return NULL. + * Returns true if successful, false if function should return NULL. * If successful, total argument count and number of nulls are * returned into *nargs and *nulls. 
*/ diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c index 139bb583b1..e858b5910f 100644 --- a/src/backend/utils/adt/regexp.c +++ b/src/backend/utils/adt/regexp.c @@ -247,7 +247,7 @@ RE_compile_and_cache(text *text_re, int cflags, Oid collation) /* * RE_wchar_execute - execute a RE on pg_wchar data * - * Returns TRUE on match, FALSE on no match + * Returns true on match, false on no match * * re --- the compiled pattern as returned by RE_compile_and_cache * data --- the data to match against (need not be null-terminated) @@ -291,7 +291,7 @@ RE_wchar_execute(regex_t *re, pg_wchar *data, int data_len, /* * RE_execute - execute a RE * - * Returns TRUE on match, FALSE on no match + * Returns true on match, false on no match * * re --- the compiled pattern as returned by RE_compile_and_cache * dat --- the data to match against (need not be null-terminated) @@ -323,7 +323,7 @@ RE_execute(regex_t *re, char *dat, int dat_len, /* * RE_compile_and_execute - compile and execute a RE * - * Returns TRUE on match, FALSE on no match + * Returns true on match, false on no match * * text_re --- the pattern, expressed as a TEXT object * dat --- the data to match against (need not be null-terminated) @@ -1294,7 +1294,7 @@ build_regexp_split_result(regexp_matches_ctx *splitctx) * regexp_fixed_prefix - extract fixed prefix, if any, for a regexp * * The result is NULL if there is no fixed prefix, else a palloc'd string. - * If it is an exact match, not just a prefix, *exact is returned as TRUE. + * If it is an exact match, not just a prefix, *exact is returned as true. */ char * regexp_fixed_prefix(text *text_re, bool case_insensitive, Oid collation, diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c index 6fe81fab7e..afd0c00b8a 100644 --- a/src/backend/utils/adt/regproc.c +++ b/src/backend/utils/adt/regproc.c @@ -1728,7 +1728,7 @@ stringToQualifiedNameList(const char *string) * the argtypes array should be of size FUNC_MAX_ARGS). The function or * operator name is returned to *names as a List of Strings. * - * If allowNone is TRUE, accept "NONE" and return it as InvalidOid (this is + * If allowNone is true, accept "NONE" and return it as InvalidOid (this is * for unary operators). */ static void diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c index c2891e6fa1..f1e74927e0 100644 --- a/src/backend/utils/adt/ri_triggers.c +++ b/src/backend/utils/adt/ri_triggers.c @@ -2075,8 +2075,8 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS) * * Check if we really need to fire the RI trigger for an update to a PK * relation. This is called by the AFTER trigger queue manager to see if - * it can skip queuing an instance of an RI trigger. Returns TRUE if the - * trigger must be fired, FALSE if we can prove the constraint will still + * it can skip queuing an instance of an RI trigger. Returns true if the + * trigger must be fired, false if we can prove the constraint will still * be satisfied. * ---------- */ @@ -2132,8 +2132,8 @@ RI_FKey_pk_upd_check_required(Trigger *trigger, Relation pk_rel, * * Check if we really need to fire the RI trigger for an update to an FK * relation. This is called by the AFTER trigger queue manager to see if - * it can skip queuing an instance of an RI trigger. Returns TRUE if the - * trigger must be fired, FALSE if we can prove the constraint will still + * it can skip queuing an instance of an RI trigger. 
Returns true if the + * trigger must be fired, false if we can prove the constraint will still * be satisfied. * ---------- */ diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index 0ea5078218..2224d6cfe5 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -112,7 +112,7 @@ typedef struct int prettyFlags; /* enabling of pretty-print functions */ int wrapColumn; /* max line length, or -1 for no limit */ int indentLevel; /* current indent level for prettyprint */ - bool varprefix; /* TRUE to print prefixes on Vars */ + bool varprefix; /* true to print prefixes on Vars */ ParseExprKind special_exprkind; /* set only for exprkinds needing special * handling */ } deparse_context; @@ -130,7 +130,7 @@ typedef struct * rtable_columns holds the column alias names to be used for each RTE. * * In some cases we need to make names of merged JOIN USING columns unique - * across the whole query, not only per-RTE. If so, unique_using is TRUE + * across the whole query, not only per-RTE. If so, unique_using is true * and using_names is a list of C strings representing names already assigned * to USING columns. * @@ -2998,9 +2998,9 @@ deparse_expression(Node *expr, List *dpcontext, * for interpreting Vars in the node tree. It can be NIL if no Vars are * expected. * - * forceprefix is TRUE to force all Vars to be prefixed with their table names. + * forceprefix is true to force all Vars to be prefixed with their table names. * - * showimplicit is TRUE to force all implicit casts to be shown explicitly. + * showimplicit is true to force all implicit casts to be shown explicitly. * * Tries to pretty up the output according to prettyFlags and startIndent. * @@ -3573,7 +3573,7 @@ set_using_names(deparse_namespace *dpns, Node *jtnode, List *parentUsing) * If there's a USING clause, select the USING column names and push * those names down to the children. We have two strategies: * - * If dpns->unique_using is TRUE, we force all USING names to be + * If dpns->unique_using is true, we force all USING names to be * unique across the whole query level. In principle we'd only need * the names of dangerous USING columns to be globally unique, but to * safely assign all USING names in a single pass, we have to enforce @@ -3586,7 +3586,7 @@ set_using_names(deparse_namespace *dpns, Node *jtnode, List *parentUsing) * this simplifies the logic and seems likely to lead to less aliasing * overall. * - * If dpns->unique_using is FALSE, we only need USING names to be + * If dpns->unique_using is false, we only need USING names to be * unique within their own join RTE. We still need to honor * pushed-down names, though. * @@ -5153,7 +5153,7 @@ get_simple_values_rte(Query *query) ListCell *lc; /* - * We want to return TRUE even if the Query also contains OLD or NEW rule + * We want to return true even if the Query also contains OLD or NEW rule * RTEs. So the idea is to scan the rtable and see if there is only one * inFromCl RTE that is a VALUES RTE. */ @@ -6402,7 +6402,7 @@ get_utility_query_def(Query *query, deparse_context *context) * the Var's varlevelsup has to be interpreted with respect to a context * above the current one; levelsup indicates the offset. * - * If istoplevel is TRUE, the Var is at the top level of a SELECT's + * If istoplevel is true, the Var is at the top level of a SELECT's * targetlist, which means we need special treatment of whole-row Vars. 
* Instead of the normal "tab.*", we'll print "tab.*::typename", which is a * dirty hack to prevent "tab.*" from being expanded into multiple columns. @@ -10608,7 +10608,7 @@ generate_qualified_relation_name(Oid relid) * means a FuncExpr or Aggref, not some other way of calling a function), then * has_variadic must specify whether variadic arguments have been merged, * and *use_variadic_p will be set to indicate whether to print VARIADIC in - * the output. For non-FuncExpr cases, has_variadic should be FALSE and + * the output. For non-FuncExpr cases, has_variadic should be false and * use_variadic_p can be NULL. * * The result includes all necessary quoting and schema-prefixing. diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c index db1792bf8d..8c65b1dbe8 100644 --- a/src/backend/utils/adt/selfuncs.c +++ b/src/backend/utils/adt/selfuncs.c @@ -790,7 +790,7 @@ ineq_histogram_selectivity(PlannerInfo *root, * hand! (For example, we might have a '<=' operator rather than the '<' * operator that will appear in staop.) For now, assume that whatever * appears in pg_statistic is sorted the same way our operator sorts, or - * the reverse way if isgt is TRUE. + * the reverse way if isgt is true. */ if (HeapTupleIsValid(vardata->statsTuple) && statistic_proc_security_check(vardata, opproc->fn_oid) && @@ -3805,7 +3805,7 @@ estimate_hash_bucket_stats(PlannerInfo *root, Node *hashkey, double nbuckets, * * Varinfos that aren't for simple Vars are ignored. * - * Return TRUE if we're able to find a match, FALSE otherwise. + * Return true if we're able to find a match, false otherwise. */ static bool estimate_multivariate_ndistinct(PlannerInfo *root, RelOptInfo *rel, @@ -4518,12 +4518,12 @@ convert_timevalue_to_scalar(Datum value, Oid typid) * args: clause argument list * varRelid: see specs for restriction selectivity functions * - * Outputs: (these are valid only if TRUE is returned) + * Outputs: (these are valid only if true is returned) * *vardata: gets information about variable (see examine_variable) * *other: gets other clause argument, aggressively reduced to a constant - * *varonleft: set TRUE if variable is on the left, FALSE if on the right + * *varonleft: set true if variable is on the left, false if on the right * - * Returns TRUE if a variable is identified, otherwise FALSE. + * Returns true if a variable is identified, otherwise false. * * Note: if there are Vars on both sides of the clause, we must fail, because * callers are expecting that the other side will act like a pseudoconstant. @@ -4639,12 +4639,12 @@ get_join_variables(PlannerInfo *root, List *args, SpecialJoinInfo *sjinfo, * atttype, atttypmod: actual type/typmod of the "var" expression. This is * commonly the same as the exposed type of the variable argument, * but can be different in binary-compatible-type cases. - * isunique: TRUE if we were able to match the var to a unique index or a + * isunique: true if we were able to match the var to a unique index or a * single-column DISTINCT clause, implying its values are unique for * this query. (Caution: this should be trusted for statistical * purposes only, since we do not check indimmediate nor verify that * the exact same definition of equality applies.) - * acl_ok: TRUE if current user has permission to read the column(s) + * acl_ok: true if current user has permission to read the column(s) * underlying the pg_statistic entry. This is consulted by * statistic_proc_security_check(). 
* @@ -5051,7 +5051,7 @@ statistic_proc_security_check(VariableStatData *vardata, Oid func_oid) * Estimate the number of distinct values of a variable. * * vardata: results of examine_variable - * *isdefault: set to TRUE if the result is a default rather than based on + * *isdefault: set to true if the result is a default rather than based on * anything meaningful. * * NB: be careful to produce a positive integral result, since callers may @@ -5184,8 +5184,8 @@ get_variable_numdistinct(VariableStatData *vardata, bool *isdefault) /* * get_variable_range * Estimate the minimum and maximum value of the specified variable. - * If successful, store values in *min and *max, and return TRUE. - * If no data available, return FALSE. + * If successful, store values in *min and *max, and return true. + * If no data available, return false. * * sortop is the "<" comparison operator to use. This should generally * be "<" not ">", as only the former is likely to be found in pg_statistic. @@ -5318,9 +5318,9 @@ get_variable_range(PlannerInfo *root, VariableStatData *vardata, Oid sortop, * Attempt to identify the current *actual* minimum and/or maximum * of the specified variable, by looking for a suitable btree index * and fetching its low and/or high values. - * If successful, store values in *min and *max, and return TRUE. + * If successful, store values in *min and *max, and return true. * (Either pointer can be NULL if that endpoint isn't needed.) - * If no data available, return FALSE. + * If no data available, return false. * * sortop is the "<" comparison operator to use. */ diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c index ebfb823fb8..0659feb973 100644 --- a/src/backend/utils/adt/varlena.c +++ b/src/backend/utils/adt/varlena.c @@ -3251,7 +3251,7 @@ textToQualifiedNameList(text *textval) * namelist: filled with a palloc'd list of pointers to identifiers within * rawstring. Caller should list_free() this even on error return. * - * Returns TRUE if okay, FALSE if there is a syntax error in the string. + * Returns true if okay, false if there is a syntax error in the string. * * Note that an empty string is considered okay here, though not in * textToQualifiedNameList. @@ -3379,7 +3379,7 @@ SplitIdentifierString(char *rawstring, char separator, * namelist: filled with a palloc'd list of directory names. * Caller should list_free_deep() this even on error return. * - * Returns TRUE if okay, FALSE if there is a syntax error in the string. + * Returns true if okay, false if there is a syntax error in the string. * * Note that an empty string is considered okay here. */ diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c index 24229c2dff..c9d07f2ae9 100644 --- a/src/backend/utils/adt/xml.c +++ b/src/backend/utils/adt/xml.c @@ -4376,7 +4376,7 @@ XmlTableSetColumnFilter(TableFuncScanState *state, char *path, int colnum) /* * XmlTableFetchRow * Prepare the next "current" tuple for upcoming GetValue calls. - * Returns FALSE if the row-filter expression returned no more rows. + * Returns false if the row-filter expression returned no more rows. 
*/ static bool XmlTableFetchRow(TableFuncScanState *state) diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c index b7a14dc87e..d60714f75a 100644 --- a/src/backend/utils/cache/lsyscache.c +++ b/src/backend/utils/cache/lsyscache.c @@ -186,7 +186,7 @@ get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype, * determine its opfamily, its declared input datatype, and its * strategy number (BTLessStrategyNumber or BTGreaterStrategyNumber). * - * Returns TRUE if successful, FALSE if no matching pg_amop entry exists. + * Returns true if successful, false if no matching pg_amop entry exists. * (This indicates that the operator is not a valid ordering operator.) * * Note: the operator could be registered in multiple families, for example @@ -254,8 +254,8 @@ get_ordering_op_properties(Oid opno, * Get the OID of the datatype-specific btree equality operator * associated with an ordering operator (a "<" or ">" operator). * - * If "reverse" isn't NULL, also set *reverse to FALSE if the operator is "<", - * TRUE if it's ">" + * If "reverse" isn't NULL, also set *reverse to false if the operator is "<", + * true if it's ">" * * Returns InvalidOid if no matching equality operator can be found. * (This indicates that the operator is not a valid ordering operator.) @@ -682,7 +682,7 @@ get_op_btree_interpretation(Oid opno) /* * equality_ops_are_compatible - * Return TRUE if the two given equality operators have compatible + * Return true if the two given equality operators have compatible * semantics. * * This is trivially true if they are the same operator. Otherwise, @@ -2854,7 +2854,7 @@ get_attavgwidth(Oid relid, AttrNumber attnum) * get_attstatsslot * * Extract the contents of a "slot" of a pg_statistic tuple. - * Returns TRUE if requested slot type was found, else FALSE. + * Returns true if requested slot type was found, else false. * * Unlike other routines in this file, this takes a pointer to an * already-looked-up tuple in the pg_statistic cache. We do this since @@ -2870,7 +2870,7 @@ get_attavgwidth(Oid relid, AttrNumber attnum) * reqop: STAOP value wanted, or InvalidOid if don't care. * flags: bitmask of ATTSTATSSLOT_VALUES and/or ATTSTATSSLOT_NUMBERS. * - * If a matching slot is found, TRUE is returned, and *sslot is filled thus: + * If a matching slot is found, true is returned, and *sslot is filled thus: * staop: receives the actual STAOP value. * valuetype: receives actual datatype of the elements of stavalues. * values: receives pointer to an array of the slot's stavalues. @@ -2882,7 +2882,7 @@ get_attavgwidth(Oid relid, AttrNumber attnum) * wasn't specified. Likewise, numbers/nnumbers are NULL/0 if * ATTSTATSSLOT_NUMBERS wasn't specified. * - * If no matching slot is found, FALSE is returned, and *sslot is zeroed. + * If no matching slot is found, false is returned, and *sslot is zeroed. * * The data referred to by the fields of sslot is locally palloc'd and * is independent of the original pg_statistic tuple. 
When the caller diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c index ad8a82f1e3..853c1f6e85 100644 --- a/src/backend/utils/cache/plancache.c +++ b/src/backend/utils/cache/plancache.c @@ -319,7 +319,7 @@ CreateOneShotCachedPlan(RawStmt *raw_parse_tree, * parserSetup: alternate method for handling query parameters * parserSetupArg: data to pass to parserSetup * cursor_options: options bitmask to pass to planner - * fixed_result: TRUE to disallow future changes in query's result tupdesc + * fixed_result: true to disallow future changes in query's result tupdesc */ void CompleteCachedPlan(CachedPlanSource *plansource, diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index b8e37809b0..7aa78700ef 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -232,7 +232,7 @@ do { \ typedef struct opclasscacheent { Oid opclassoid; /* lookup key: OID of opclass */ - bool valid; /* set TRUE after successful fill-in */ + bool valid; /* set true after successful fill-in */ StrategyNumber numSupport; /* max # of support procs (from pg_am) */ Oid opcfamily; /* OID of opclass's family */ Oid opcintype; /* OID of opclass's declared input type */ @@ -5363,9 +5363,9 @@ errtableconstraint(Relation rel, const char *conname) * load_relcache_init_file -- attempt to load cache from the shared * or local cache init file * - * If successful, return TRUE and set criticalRelcachesBuilt or + * If successful, return true and set criticalRelcachesBuilt or * criticalSharedRelcachesBuilt to true. - * If not successful, return FALSE. + * If not successful, return false. * * NOTE: we assume we are already switched into CacheMemoryContext. */ diff --git a/src/backend/utils/cache/relmapper.c b/src/backend/utils/cache/relmapper.c index f5394dc43d..4205422313 100644 --- a/src/backend/utils/cache/relmapper.c +++ b/src/backend/utils/cache/relmapper.c @@ -694,13 +694,13 @@ load_relmap_file(bool shared) * The magic number and CRC are automatically updated in *newmap. On * success, we copy the data to the appropriate permanent static variable. * - * If write_wal is TRUE then an appropriate WAL message is emitted. + * If write_wal is true then an appropriate WAL message is emitted. * (It will be false for bootstrap and WAL replay cases.) * - * If send_sinval is TRUE then a SI invalidation message is sent. + * If send_sinval is true then a SI invalidation message is sent. * (This should be true except in bootstrap case.) * - * If preserve_files is TRUE then the storage manager is warned not to + * If preserve_files is true then the storage manager is warned not to * delete the files listed in the map. * * Because this may be called during WAL replay when MyDatabaseId, diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c index 977c03834a..f6bb05f135 100644 --- a/src/backend/utils/error/elog.c +++ b/src/backend/utils/error/elog.c @@ -226,7 +226,7 @@ err_gettext(const char *str) * the stack entry. Finally, errfinish() will be called to actually process * the error report. * - * Returns TRUE in normal case. Returns FALSE to short-circuit the error + * Returns true in normal case. Returns false to short-circuit the error * report (if it's a warning or lower and not to be reported anywhere). 
*/ bool @@ -285,7 +285,7 @@ errstart(int elevel, const char *filename, int lineno, /* * Now decide whether we need to process this report at all; if it's - * warning or less and not enabled for logging, just return FALSE without + * warning or less and not enabled for logging, just return false without * starting up any error logging machinery. */ diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c index 9c3f4510ce..e742e8c9f5 100644 --- a/src/backend/utils/fmgr/funcapi.c +++ b/src/backend/utils/fmgr/funcapi.c @@ -396,8 +396,8 @@ internal_get_result_type(Oid funcid, /* * Given the result tuple descriptor for a function with OUT parameters, * replace any polymorphic columns (ANYELEMENT etc) with correct data types - * deduced from the input arguments. Returns TRUE if able to deduce all types, - * FALSE if not. + * deduced from the input arguments. Returns true if able to deduce all types, + * false if not. */ static bool resolve_polymorphic_tupdesc(TupleDesc tupdesc, oidvector *declared_args, @@ -589,7 +589,7 @@ resolve_polymorphic_tupdesc(TupleDesc tupdesc, oidvector *declared_args, /* * Given the declared argument types and modes for a function, replace any * polymorphic types (ANYELEMENT etc) with correct data types deduced from the - * input arguments. Returns TRUE if able to deduce all types, FALSE if not. + * input arguments. Returns true if able to deduce all types, false if not. * This is the same logic as resolve_polymorphic_tupdesc, but with a different * argument representation. * diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c index 6f6b03c815..71f5f0688a 100644 --- a/src/backend/utils/hash/dynahash.c +++ b/src/backend/utils/hash/dynahash.c @@ -891,8 +891,8 @@ calc_bucket(HASHHDR *hctl, uint32 hash_val) * HASH_ENTER_NULL cannot be used with the default palloc-based allocator, * since palloc internally ereports on out-of-memory. * - * If foundPtr isn't NULL, then *foundPtr is set TRUE if we found an - * existing entry in the table, FALSE otherwise. This is needed in the + * If foundPtr isn't NULL, then *foundPtr is set true if we found an + * existing entry in the table, false otherwise. This is needed in the * HASH_ENTER case, but is redundant with the return value otherwise. * * For hash_search_with_hash_value, the hashvalue parameter must have been @@ -1096,12 +1096,12 @@ hash_search_with_hash_value(HTAB *hashp, * Therefore this cannot suffer an out-of-memory failure, even if there are * other processes operating in other partitions of the hashtable. * - * Returns TRUE if successful, FALSE if the requested new hash key is already + * Returns true if successful, false if the requested new hash key is already * present. Throws error if the specified entry pointer isn't actually a * table member. * * NB: currently, there is no special case for old and new hash keys being - * identical, which means we'll report FALSE for that situation. This is + * identical, which means we'll report false for that situation. This is * preferable for existing uses. * * NB: for a partitioned hashtable, caller must hold lock on both relevant diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c index afbf8f8691..544fed8096 100644 --- a/src/backend/utils/init/miscinit.c +++ b/src/backend/utils/init/miscinit.c @@ -1273,11 +1273,11 @@ AddToDataDirLockFile(int target_line, const char *str) /* * Recheck that the data directory lock file still exists with expected - * content. 
Return TRUE if the lock file appears OK, FALSE if it isn't. + * content. Return true if the lock file appears OK, false if it isn't. * * We call this periodically in the postmaster. The idea is that if the * lock file has been removed or replaced by another postmaster, we should - * do a panic database shutdown. Therefore, we should return TRUE if there + * do a panic database shutdown. Therefore, we should return true if there * is any doubt: we do not want to cause a panic shutdown unnecessarily. * Transient failures like EINTR or ENFILE should not cause us to fail. * (If there really is something wrong, we'll detect it on a future recheck.) diff --git a/src/backend/utils/misc/tzparser.c b/src/backend/utils/misc/tzparser.c index 04d6ee3503..3986141899 100644 --- a/src/backend/utils/misc/tzparser.c +++ b/src/backend/utils/misc/tzparser.c @@ -45,7 +45,7 @@ static int ParseTzFile(const char *filename, int depth, /* * Apply additional validation checks to a tzEntry * - * Returns TRUE if OK, else false + * Returns true if OK, else false */ static bool validateTzEntry(tzEntry *tzentry) @@ -92,7 +92,7 @@ validateTzEntry(tzEntry *tzentry) * name zone * name offset dst * - * Returns TRUE if OK, else false; data is stored in *tzentry + * Returns true if OK, else false; data is stored in *tzentry */ static bool splitTzLine(const char *filename, int lineno, char *line, tzEntry *tzentry) @@ -180,7 +180,7 @@ splitTzLine(const char *filename, int lineno, char *line, tzEntry *tzentry) * *arraysize: allocated length of array (changeable if must enlarge array) * n: current number of valid elements in array * entry: new data to insert - * override: TRUE if OK to override + * override: true if OK to override * * Returns the new array length (new value for n), or -1 if error */ diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c index 89db08464f..d03b779407 100644 --- a/src/backend/utils/mmgr/portalmem.c +++ b/src/backend/utils/mmgr/portalmem.c @@ -623,8 +623,8 @@ PortalHashTableDeleteAll(void) * simply removed. Portals remaining from prior transactions should be * left untouched. * - * Returns TRUE if any portals changed state (possibly causing user-defined - * code to be run), FALSE if not. + * Returns true if any portals changed state (possibly causing user-defined + * code to be run), false if not. */ bool PreCommit_Portals(bool isPrepare) diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c index 17e1b6860b..ed992a0b30 100644 --- a/src/backend/utils/sort/tuplesort.c +++ b/src/backend/utils/sort/tuplesort.c @@ -1229,7 +1229,7 @@ tuplesort_end(Tuplesortstate *state) /* * Grow the memtuples[] array, if possible within our memory constraint. We * must not exceed INT_MAX tuples in memory or the caller-provided memory - * limit. Return TRUE if we were able to enlarge the array, FALSE if not. + * limit. Return true if we were able to enlarge the array, false if not. * * Normally, at each increment we double the size of the array. When doing * that would exceed a limit, we attempt one last, smaller increase (and then @@ -1850,7 +1850,7 @@ tuplesort_performsort(Tuplesortstate *state) /* * Internal routine to fetch the next tuple in either forward or back - * direction into *stup. Returns FALSE if no more tuples. + * direction into *stup. Returns false if no more tuples. * Returned tuple belongs to tuplesort memory context, and must not be freed * by caller. Note that fetched tuple is stored in memory that may be * recycled by any future fetch. 
@@ -2092,10 +2092,10 @@ tuplesort_gettuple_common(Tuplesortstate *state, bool forward, /* * Fetch the next tuple in either forward or back direction. - * If successful, put tuple in slot and return TRUE; else, clear the slot - * and return FALSE. + * If successful, put tuple in slot and return true; else, clear the slot + * and return false. * - * Caller may optionally be passed back abbreviated value (on TRUE return + * Caller may optionally be passed back abbreviated value (on true return * value) when abbreviation was used, which can be used to cheaply avoid * equality checks that might otherwise be required. Caller can safely make a * determination of "non-equal tuple" based on simple binary inequality. A @@ -2182,13 +2182,13 @@ tuplesort_getindextuple(Tuplesortstate *state, bool forward) /* * Fetch the next Datum in either forward or back direction. - * Returns FALSE if no more datums. + * Returns false if no more datums. * * If the Datum is pass-by-ref type, the returned value is freshly palloc'd * and is now owned by the caller (this differs from similar routines for * other types of tuplesorts). * - * Caller may optionally be passed back abbreviated value (on TRUE return + * Caller may optionally be passed back abbreviated value (on true return * value) when abbreviation was used, which can be used to cheaply avoid * equality checks that might otherwise be required. Caller can safely make a * determination of "non-equal tuple" based on simple binary inequality. A @@ -2232,7 +2232,7 @@ tuplesort_getdatum(Tuplesortstate *state, bool forward, /* * Advance over N tuples in either forward or back direction, * without returning any data. N==0 is a no-op. - * Returns TRUE if successful, FALSE if ran out of tuples. + * Returns true if successful, false if ran out of tuples. */ bool tuplesort_skiptuples(Tuplesortstate *state, int64 ntuples, bool forward) diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c index 98c006b663..1977b61fd9 100644 --- a/src/backend/utils/sort/tuplestore.c +++ b/src/backend/utils/sort/tuplestore.c @@ -562,7 +562,7 @@ tuplestore_ateof(Tuplestorestate *state) /* * Grow the memtuples[] array, if possible within our memory constraint. We * must not exceed INT_MAX tuples in memory or the caller-provided memory - * limit. Return TRUE if we were able to enlarge the array, FALSE if not. + * limit. Return true if we were able to enlarge the array, false if not. * * Normally, at each increment we double the size of the array. When doing * that would exceed a limit, we attempt one last, smaller increase (and then @@ -1064,12 +1064,12 @@ tuplestore_gettuple(Tuplestorestate *state, bool forward, /* * tuplestore_gettupleslot - exported function to fetch a MinimalTuple * - * If successful, put tuple in slot and return TRUE; else, clear the slot - * and return FALSE. + * If successful, put tuple in slot and return true; else, clear the slot + * and return false. * - * If copy is TRUE, the slot receives a copied tuple (allocated in current + * If copy is true, the slot receives a copied tuple (allocated in current * memory context) that will stay valid regardless of future manipulations of - * the tuplestore's state. If copy is FALSE, the slot may just receive a + * the tuplestore's state. If copy is false, the slot may just receive a * pointer to a tuple held within the tuplestore. The latter is more * efficient but the slot contents may be corrupted if additional writes to * the tuplestore occur. (If using tuplestore_trim, see comments therein.) 
@@ -1129,7 +1129,7 @@ tuplestore_advance(Tuplestorestate *state, bool forward) /* * Advance over N tuples in either forward or back direction, * without returning any data. N<=0 is a no-op. - * Returns TRUE if successful, FALSE if ran out of tuples. + * Returns true if successful, false if ran out of tuples. */ bool tuplestore_skiptuples(Tuplestorestate *state, int64 ntuples, bool forward) diff --git a/src/backend/utils/time/combocid.c b/src/backend/utils/time/combocid.c index c7e4331efb..200fa3765f 100644 --- a/src/backend/utils/time/combocid.c +++ b/src/backend/utils/time/combocid.c @@ -142,8 +142,8 @@ HeapTupleHeaderGetCmax(HeapTupleHeader tup) * into its t_cid field. * * If we don't need a combo CID, *cmax is unchanged and *iscombo is set to - * FALSE. If we do need one, *cmax is replaced by a combo CID and *iscombo - * is set to TRUE. + * false. If we do need one, *cmax is replaced by a combo CID and *iscombo + * is set to true. * * The reason this is separate from the actual HeapTupleHeaderSetCmax() * operation is that this could fail due to out-of-memory conditions. Hence diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c index bbac4083c9..900be55f61 100644 --- a/src/backend/utils/time/tqual.c +++ b/src/backend/utils/time/tqual.c @@ -1424,7 +1424,7 @@ HeapTupleSatisfiesNonVacuumable(HeapTuple htup, Snapshot snapshot, * should already be set. We assume that if no hint bits are set, the xmin * or xmax transaction is still running. This is therefore faster than * HeapTupleSatisfiesVacuum, because we don't consult PGXACT nor CLOG. - * It's okay to return FALSE when in doubt, but we must return TRUE only + * It's okay to return false when in doubt, but we must return true only * if the tuple is removable. */ bool diff --git a/src/bin/pg_dump/dumputils.c b/src/bin/pg_dump/dumputils.c index e4c95feb63..70d8f24d17 100644 --- a/src/bin/pg_dump/dumputils.c +++ b/src/bin/pg_dump/dumputils.c @@ -42,7 +42,7 @@ static void AddAcl(PQExpBuffer aclbuf, const char *keyword, * prefix: string to prefix to each generated command; typically empty * remoteVersion: version of database * - * Returns TRUE if okay, FALSE if could not parse the acl string. + * Returns true if okay, false if could not parse the acl string. * The resulting commands (if any) are appended to the contents of 'sql'. * * Note: when processing a default ACL, prefix is "ALTER DEFAULT PRIVILEGES " @@ -359,7 +359,7 @@ buildACLCommands(const char *name, const char *subname, * owner: username of privileges owner (will be passed through fmtId) * remoteVersion: version of database * - * Returns TRUE if okay, FALSE if could not parse the acl string. + * Returns true if okay, false if could not parse the acl string. * The resulting commands (if any) are appended to the contents of 'sql'. */ bool diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index 75f08cd792..a864dd9b1f 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -2452,7 +2452,7 @@ getTableDataFKConstraints(void) * In 8.4 and up we can rely on the conislocal field to decide which * constraints must be dumped; much safer. * - * This function assumes all conislocal flags were initialized to TRUE. + * This function assumes all conislocal flags were initialized to true. * It clears the flag on anything that seems to be inherited. 
*/ static void diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h index e7593e6da7..da884ffd09 100644 --- a/src/bin/pg_dump/pg_dump.h +++ b/src/bin/pg_dump/pg_dump.h @@ -339,7 +339,7 @@ typedef struct _attrDefInfo TableInfo *adtable; /* link to table of attribute */ int adnum; char *adef_expr; /* decompiled DEFAULT expression */ - bool separate; /* TRUE if must dump as separate item */ + bool separate; /* true if must dump as separate item */ } AttrDefInfo; typedef struct _tableDataInfo @@ -380,7 +380,7 @@ typedef struct _ruleInfo char ev_type; bool is_instead; char ev_enabled; - bool separate; /* TRUE if must dump as separate item */ + bool separate; /* true if must dump as separate item */ /* separate is always true for non-ON SELECT rules */ } RuleInfo; @@ -430,10 +430,10 @@ typedef struct _constraintInfo char *condef; /* definition, if CHECK or FOREIGN KEY */ Oid confrelid; /* referenced table, if FOREIGN KEY */ DumpId conindex; /* identifies associated index if any */ - bool condeferrable; /* TRUE if constraint is DEFERRABLE */ - bool condeferred; /* TRUE if constraint is INITIALLY DEFERRED */ - bool conislocal; /* TRUE if constraint has local definition */ - bool separate; /* TRUE if must dump as separate item */ + bool condeferrable; /* true if constraint is DEFERRABLE */ + bool condeferred; /* true if constraint is INITIALLY DEFERRED */ + bool conislocal; /* true if constraint has local definition */ + bool separate; /* true if must dump as separate item */ } ConstraintInfo; typedef struct _procLangInfo diff --git a/src/bin/pg_dump/pg_dump_sort.c b/src/bin/pg_dump/pg_dump_sort.c index 5044a76787..48b6dd594c 100644 --- a/src/bin/pg_dump/pg_dump_sort.c +++ b/src/bin/pg_dump/pg_dump_sort.c @@ -342,13 +342,13 @@ sortDumpableObjects(DumpableObject **objs, int numObjs, * The input is the list of numObjs objects in objs[]. This list is not * modified. * - * Returns TRUE if able to build an ordering that satisfies all the - * constraints, FALSE if not (there are contradictory constraints). + * Returns true if able to build an ordering that satisfies all the + * constraints, false if not (there are contradictory constraints). * - * On success (TRUE result), ordering[] is filled with a sorted array of + * On success (true result), ordering[] is filled with a sorted array of * DumpableObject pointers, of length equal to the input list length. * - * On failure (FALSE result), ordering[] is filled with an unsorted array of + * On failure (false result), ordering[] is filled with an unsorted array of * DumpableObject pointers of length *nOrdering, listing the objects that * prevented the sort from being completed. In general, these objects either * participate directly in a dependency cycle, or are depended on by objects diff --git a/src/bin/pg_upgrade/pg_upgrade.h b/src/bin/pg_upgrade/pg_upgrade.h index e44c23654d..a21dd48c42 100644 --- a/src/bin/pg_upgrade/pg_upgrade.h +++ b/src/bin/pg_upgrade/pg_upgrade.h @@ -284,7 +284,7 @@ typedef struct typedef struct { FILE *internal; /* internal log FILE */ - bool verbose; /* TRUE -> be verbose in messages */ + bool verbose; /* true -> be verbose in messages */ bool retain; /* retain log files on success */ } LogOpts; @@ -294,7 +294,7 @@ typedef struct */ typedef struct { - bool check; /* TRUE -> ask user for permission to make + bool check; /* true -> ask user for permission to make * changes */ transferMode transfer_mode; /* copy files or link them? 
*/ int jobs; diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c index 041b5e0c87..8cc4de3878 100644 --- a/src/bin/psql/command.c +++ b/src/bin/psql/command.c @@ -4370,7 +4370,7 @@ echo_hidden_command(const char *query) /* * Look up the object identified by obj_type and desc. If successful, - * store its OID in *obj_oid and return TRUE, else return FALSE. + * store its OID in *obj_oid and return true, else return false. * * Note that we'll fail if the object doesn't exist OR if there are multiple * matching candidates OR if there's something syntactically wrong with the diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c index 9b59ee840b..7a91a44b2b 100644 --- a/src/bin/psql/common.c +++ b/src/bin/psql/common.c @@ -44,7 +44,7 @@ static bool is_select_command(const char *query); * Returns output file pointer into *fout, and is-a-pipe flag into *is_pipe. * Caller is responsible for adjusting SIGPIPE state if it's a pipe. * - * On error, reports suitable error message and returns FALSE. + * On error, reports suitable error message and returns false. */ bool openQueryOutputFile(const char *fname, FILE **fout, bool *is_pipe) @@ -266,7 +266,7 @@ NoticeProcessor(void *arg, const char *message) * database queries. In most places, this is accomplished by checking * cancel_pressed during long-running loops. However, that won't work when * blocked on user input (in readline() or fgets()). In those places, we - * set sigint_interrupt_enabled TRUE while blocked, instructing the signal + * set sigint_interrupt_enabled true while blocked, instructing the signal * catcher to longjmp through sigint_interrupt_jmp. We assume readline and * fgets are coded to handle possible interruption. (XXX currently this does * not work on win32, so control-C is less useful there) diff --git a/src/bin/psql/large_obj.c b/src/bin/psql/large_obj.c index 2a3416b369..8a8887202a 100644 --- a/src/bin/psql/large_obj.c +++ b/src/bin/psql/large_obj.c @@ -48,7 +48,7 @@ print_lo_result(const char *fmt,...) * Prepare to do a large-object operation. We *must* be inside a transaction * block for all these operations, so start one if needed. * - * Returns TRUE if okay, FALSE if failed. *own_transaction is set to indicate + * Returns true if okay, false if failed. *own_transaction is set to indicate * if we started our own transaction or not. */ static bool diff --git a/src/bin/psql/stringutils.c b/src/bin/psql/stringutils.c index 959381d085..eefd18fbd9 100644 --- a/src/bin/psql/stringutils.c +++ b/src/bin/psql/stringutils.c @@ -27,8 +27,8 @@ * delim - set of non-whitespace separator characters (or NULL) * quote - set of characters that can quote a token (NULL if none) * escape - character that can quote quotes (0 if none) - * e_strings - if TRUE, treat E'...' syntax as a valid token - * del_quotes - if TRUE, strip quotes from the returned token, else return + * e_strings - if true, treat E'...' syntax as a valid token + * del_quotes - if true, strip quotes from the returned token, else return * it exactly as found in the string * encoding - the active character-set encoding * @@ -39,7 +39,7 @@ * a single quote character in the data. If escape isn't 0, then escape * followed by anything (except \0) is a data character too. * - * The combination of e_strings and del_quotes both TRUE is not currently + * The combination of e_strings and del_quotes both true is not currently * handled. This could be fixed but it's not needed anywhere at the moment. * * Note that the string s is _not_ overwritten in this implementation. 
diff --git a/src/common/md5.c b/src/common/md5.c index ba65b02af6..9144cab6ee 100644 --- a/src/common/md5.c +++ b/src/common/md5.c @@ -317,7 +317,7 @@ pg_md5_binary(const void *buff, size_t len, void *outbuf) * Output format is "md5" followed by a 32-hex-digit MD5 checksum. * Hence, the output buffer "buf" must be at least 36 bytes long. * - * Returns TRUE if okay, FALSE on error (out of memory). + * Returns true if okay, false on error (out of memory). */ bool pg_md5_encrypt(const char *passwd, const char *salt, size_t salt_len, diff --git a/src/include/access/gin_private.h b/src/include/access/gin_private.h index adfdb0c6d9..7b5c845b83 100644 --- a/src/include/access/gin_private.h +++ b/src/include/access/gin_private.h @@ -300,7 +300,7 @@ typedef struct GinScanKeyData /* * Match status data. curItem is the TID most recently tested (could be a - * lossy-page pointer). curItemMatches is TRUE if it passes the + * lossy-page pointer). curItemMatches is true if it passes the * consistentFn test; if so, recheckCurItem is the recheck flag. * isFinished means that all the input entry streams are finished, so this * key cannot succeed for any later TIDs. diff --git a/src/include/access/hash_xlog.h b/src/include/access/hash_xlog.h index c778fdc8df..abe8579249 100644 --- a/src/include/access/hash_xlog.h +++ b/src/include/access/hash_xlog.h @@ -149,7 +149,7 @@ typedef struct xl_hash_split_complete typedef struct xl_hash_move_page_contents { uint16 ntups; - bool is_prim_bucket_same_wrt; /* TRUE if the page to which + bool is_prim_bucket_same_wrt; /* true if the page to which * tuples are moved is same as * primary bucket page */ } xl_hash_move_page_contents; @@ -174,10 +174,10 @@ typedef struct xl_hash_squeeze_page BlockNumber prevblkno; BlockNumber nextblkno; uint16 ntups; - bool is_prim_bucket_same_wrt; /* TRUE if the page to which + bool is_prim_bucket_same_wrt; /* true if the page to which * tuples are moved is same as * primary bucket page */ - bool is_prev_bucket_same_wrt; /* TRUE if the page to which + bool is_prev_bucket_same_wrt; /* true if the page to which * tuples are moved is the page * previous to the freed overflow * page */ @@ -196,9 +196,9 @@ typedef struct xl_hash_squeeze_page */ typedef struct xl_hash_delete { - bool clear_dead_marking; /* TRUE if this operation clears + bool clear_dead_marking; /* true if this operation clears * LH_PAGE_HAS_DEAD_TUPLES flag */ - bool is_primary_bucket_page; /* TRUE if the operation is for + bool is_primary_bucket_page; /* true if the operation is for * primary bucket page */ } xl_hash_delete; diff --git a/src/include/access/slru.h b/src/include/access/slru.h index d829a6fab4..20114c4d44 100644 --- a/src/include/access/slru.h +++ b/src/include/access/slru.h @@ -37,7 +37,7 @@ /* * Page status codes. Note that these do not include the "dirty" bit. - * page_dirty can be TRUE only in the VALID or WRITE_IN_PROGRESS states; + * page_dirty can be true only in the VALID or WRITE_IN_PROGRESS states; * in the latter case it implies that the page has been re-dirtied since * the write started. */ diff --git a/src/include/access/xlog.h b/src/include/access/xlog.h index 66bfb77295..16093ed09f 100644 --- a/src/include/access/xlog.h +++ b/src/include/access/xlog.h @@ -43,7 +43,7 @@ extern bool InRecovery; /* * Like InRecovery, standbyState is only valid in the startup process. * In all other processes it will have the value STANDBY_DISABLED (so - * InHotStandby will read as FALSE). + * InHotStandby will read as false). 
* * In DISABLED state, we're performing crash recovery or hot standby was * disabled in postgresql.conf. diff --git a/src/include/c.h b/src/include/c.h index fd53010e24..78b1b0526f 100644 --- a/src/include/c.h +++ b/src/include/c.h @@ -493,7 +493,7 @@ typedef NameData *Name; #define NameStr(name) ((name).data) /* - * Support macros for escaping strings. escape_backslash should be TRUE + * Support macros for escaping strings. escape_backslash should be true * if generating a non-standard-conforming string. Prefixing a string * with ESCAPE_STRING_SYNTAX guarantees it is non-standard-conforming. * Beware of multiple evaluation of the "ch" argument! diff --git a/src/include/catalog/pg_conversion.h b/src/include/catalog/pg_conversion.h index 0682d7eb22..9344585e66 100644 --- a/src/include/catalog/pg_conversion.h +++ b/src/include/catalog/pg_conversion.h @@ -32,7 +32,7 @@ * conforencoding FOR encoding id * contoencoding TO encoding id * conproc OID of the conversion proc - * condefault TRUE if this is a default conversion + * condefault true if this is a default conversion * ---------------------------------------------------------------- */ #define ConversionRelationId 2607 diff --git a/src/include/catalog/pg_type.h b/src/include/catalog/pg_type.h index ffdb452b02..e3551440a0 100644 --- a/src/include/catalog/pg_type.h +++ b/src/include/catalog/pg_type.h @@ -51,7 +51,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO /* * typbyval determines whether internal Postgres routines pass a value of - * this type by value or by reference. typbyval had better be FALSE if + * this type by value or by reference. typbyval had better be false if * the length is not 1, 2, or 4 (or 8 on 8-byte-Datum machines). * Variable-length types are always passed by reference. Note that * typbyval can be false even if the length would allow pass-by-value; diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h index a9035112e9..17bbfb2cd6 100644 --- a/src/include/commands/vacuum.h +++ b/src/include/commands/vacuum.h @@ -29,8 +29,8 @@ * so they live until the end of the ANALYZE operation. * * The type-specific typanalyze function is passed a pointer to this struct - * and must return TRUE to continue analysis, FALSE to skip analysis of this - * column. In the TRUE case it must set the compute_stats and minrows fields, + * and must return true to continue analysis, false to skip analysis of this + * column. In the true case it must set the compute_stats and minrows fields, * and can optionally set extra_data to pass additional info to compute_stats. * minrows is its request for the minimum number of sample rows to be gathered * (but note this request might not be honored, eg if there are fewer rows @@ -45,7 +45,7 @@ * The fetchfunc may be called with rownum running from 0 to samplerows-1. * It returns a Datum and an isNull flag. * - * compute_stats should set stats_valid TRUE if it is able to compute + * compute_stats should set stats_valid true if it is able to compute * any useful statistics. If it does, the remainder of the struct holds * the information to be stored in a pg_statistic row for the column. Be * careful to allocate any pointed-to data in anl_context, which will NOT @@ -86,7 +86,7 @@ typedef struct VacAttrStats /* * These fields must be filled in by the typanalyze routine, unless it - * returns FALSE. + * returns false. 
*/ AnalyzeAttrComputeStatsFunc compute_stats; /* function pointer */ int minrows; /* Minimum # of rows wanted for stats */ diff --git a/src/include/executor/instrument.h b/src/include/executor/instrument.h index 31573145a9..f1bae7a44d 100644 --- a/src/include/executor/instrument.h +++ b/src/include/executor/instrument.h @@ -44,10 +44,10 @@ typedef enum InstrumentOption typedef struct Instrumentation { /* Parameters set at node creation: */ - bool need_timer; /* TRUE if we need timer data */ - bool need_bufusage; /* TRUE if we need buffer usage data */ + bool need_timer; /* true if we need timer data */ + bool need_bufusage; /* true if we need buffer usage data */ /* Info about current plan cycle: */ - bool running; /* TRUE if we've completed first tuple */ + bool running; /* true if we've completed first tuple */ instr_time starttime; /* Start time of current iteration of node */ instr_time counter; /* Accumulated runtime for this node */ double firsttuple; /* Time for first tuple of this cycle */ diff --git a/src/include/executor/tuptable.h b/src/include/executor/tuptable.h index 55f4cce4ee..db2a42af5e 100644 --- a/src/include/executor/tuptable.h +++ b/src/include/executor/tuptable.h @@ -68,7 +68,7 @@ * A TupleTableSlot can also be "empty", holding no valid data. This is * the only valid state for a freshly-created slot that has not yet had a * tuple descriptor assigned to it. In this state, tts_isempty must be - * TRUE, tts_shouldFree FALSE, tts_tuple NULL, tts_buffer InvalidBuffer, + * true, tts_shouldFree false, tts_tuple NULL, tts_buffer InvalidBuffer, * and tts_nvalid zero. * * The tupleDescriptor is simply referenced, not copied, by the TupleTableSlot diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index 90a60abc4d..d0c9573577 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -766,8 +766,8 @@ typedef struct SubPlanState ProjectionInfo *projRight; /* for projecting subselect output */ TupleHashTable hashtable; /* hash table for no-nulls subselect rows */ TupleHashTable hashnulls; /* hash table for rows with null(s) */ - bool havehashrows; /* TRUE if hashtable is not empty */ - bool havenullrows; /* TRUE if hashnulls is not empty */ + bool havehashrows; /* true if hashtable is not empty */ + bool havenullrows; /* true if hashnulls is not empty */ MemoryContext hashtablecxt; /* memory context containing hash tables */ MemoryContext hashtempcxt; /* temp memory context for hash tables */ ExprContext *innerecontext; /* econtext for computing inner tuples */ diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h index f3e4c69753..af7f2ba276 100644 --- a/src/include/nodes/parsenodes.h +++ b/src/include/nodes/parsenodes.h @@ -198,7 +198,7 @@ typedef struct Query * Similarly, if "typmods" is NIL then the actual typmod is expected to * be prespecified in typemod, otherwise typemod is unused. * - * If pct_type is TRUE, then names is actually a field name and we look up + * If pct_type is true, then names is actually a field name and we look up * the type of that field. Otherwise (the normal case), names is a type * name possibly qualified with schema and database name. */ @@ -888,8 +888,8 @@ typedef struct PartitionCmd * them from the joinaliasvars list, because that would affect the attnums * of Vars referencing the rest of the list.) * - * inh is TRUE for relation references that should be expanded to include - * inheritance children, if the rel has any. 
This *must* be FALSE for + * inh is true for relation references that should be expanded to include + * inheritance children, if the rel has any. This *must* be false for * RTEs other than RTE_RELATION entries. * * inFromCl marks those range variables that are listed in the FROM clause. @@ -1147,7 +1147,7 @@ typedef struct WithCheckOption * or InvalidOid if not available. * nulls_first means about what you'd expect. If sortop is InvalidOid * then nulls_first is meaningless and should be set to false. - * hashable is TRUE if eqop is hashable (note this condition also depends + * hashable is true if eqop is hashable (note this condition also depends * on the datatype of the input expression). * * In an ORDER BY item, all fields must be valid. (The eqop isn't essential @@ -2679,7 +2679,7 @@ typedef struct FetchStmt FetchDirection direction; /* see above */ long howMany; /* number of rows, or position argument */ char *portalname; /* name of portal (cursor) */ - bool ismove; /* TRUE if MOVE */ + bool ismove; /* true if MOVE */ } FetchStmt; /* ---------------------- diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h index 8c536a8d38..1157691b08 100644 --- a/src/include/nodes/primnodes.h +++ b/src/include/nodes/primnodes.h @@ -302,7 +302,7 @@ typedef struct Aggref List *aggorder; /* ORDER BY (list of SortGroupClause) */ List *aggdistinct; /* DISTINCT (list of SortGroupClause) */ Expr *aggfilter; /* FILTER expression, if any */ - bool aggstar; /* TRUE if argument list was really '*' */ + bool aggstar; /* true if argument list was really '*' */ bool aggvariadic; /* true if variadic arguments have been * combined into an array last argument */ char aggkind; /* aggregate kind (see pg_aggregate.h) */ @@ -359,7 +359,7 @@ typedef struct WindowFunc List *args; /* arguments to the window function */ Expr *aggfilter; /* FILTER expression, if any */ Index winref; /* index of associated WindowClause */ - bool winstar; /* TRUE if argument list was really '*' */ + bool winstar; /* true if argument list was really '*' */ bool winagg; /* is function a simple aggregate? */ int location; /* token location, or -1 if unknown */ } WindowFunc; @@ -695,9 +695,9 @@ typedef struct SubPlan Oid firstColCollation; /* Collation of first column of subplan * result */ /* Information about execution strategy: */ - bool useHashTable; /* TRUE to store subselect output in a hash + bool useHashTable; /* true to store subselect output in a hash * table (implies we are doing "IN") */ - bool unknownEqFalse; /* TRUE if it's okay to return FALSE when the + bool unknownEqFalse; /* true if it's okay to return FALSE when the * spec result is UNKNOWN; this allows much * simpler handling of null values */ bool parallel_safe; /* is the subplan parallel-safe? */ diff --git a/src/include/nodes/relation.h b/src/include/nodes/relation.h index a39e59d8ac..385fdafc97 100644 --- a/src/include/nodes/relation.h +++ b/src/include/nodes/relation.h @@ -1335,14 +1335,14 @@ typedef JoinPath NestPath; * mergejoin. If it is not NIL then it is a PathKeys list describing * the ordering that must be created by an explicit Sort node. * - * skip_mark_restore is TRUE if the executor need not do mark/restore calls. + * skip_mark_restore is true if the executor need not do mark/restore calls. * Mark/restore overhead is usually required, but can be skipped if we know * that the executor need find only one match per outer tuple, and that the * mergeclauses are sufficient to identify a match. 
In such cases the * executor can immediately advance the outer relation after processing a * match, and therefoere it need never back up the inner relation. * - * materialize_inner is TRUE if a Material node should be placed atop the + * materialize_inner is true if a Material node should be placed atop the * inner input. This may appear with or without an inner Sort step. */ @@ -1746,15 +1746,15 @@ typedef struct RestrictInfo Expr *clause; /* the represented clause of WHERE or JOIN */ - bool is_pushed_down; /* TRUE if clause was pushed down in level */ + bool is_pushed_down; /* true if clause was pushed down in level */ - bool outerjoin_delayed; /* TRUE if delayed by lower outer join */ + bool outerjoin_delayed; /* true if delayed by lower outer join */ bool can_join; /* see comment above */ bool pseudoconstant; /* see comment above */ - bool leakproof; /* TRUE if known to contain no leaked Vars */ + bool leakproof; /* true if known to contain no leaked Vars */ Index security_level; /* see comment above */ @@ -1885,7 +1885,7 @@ typedef struct PlaceHolderVar * syntactically below this special join. (These are needed to help compute * min_lefthand and min_righthand for higher joins.) * - * delay_upper_joins is set TRUE if we detect a pushed-down clause that has + * delay_upper_joins is set true if we detect a pushed-down clause that has * to be evaluated after this join is formed (because it references the RHS). * Any outer joins that have such a clause and this join in their RHS cannot * commute with this join, because that would leave noplace to check the diff --git a/src/include/parser/parse_node.h b/src/include/parser/parse_node.h index 68930c1f4a..f0e210ad8d 100644 --- a/src/include/parser/parse_node.h +++ b/src/include/parser/parse_node.h @@ -111,7 +111,7 @@ typedef Node *(*CoerceParamHook) (ParseState *pstate, Param *param, * namespace for table and column lookup. (The RTEs listed here may be just * a subset of the whole rtable. See ParseNamespaceItem comments below.) * - * p_lateral_active: TRUE if we are currently parsing a LATERAL subexpression + * p_lateral_active: true if we are currently parsing a LATERAL subexpression * of this parse level. This makes p_lateral_only namespace items visible, * whereas they are not visible when p_lateral_active is FALSE. * diff --git a/src/include/storage/s_lock.h b/src/include/storage/s_lock.h index bbf505e246..99e109853f 100644 --- a/src/include/storage/s_lock.h +++ b/src/include/storage/s_lock.h @@ -22,7 +22,7 @@ * Unlock a previously acquired lock. * * bool S_LOCK_FREE(slock_t *lock) - * Tests if the lock is free. Returns TRUE if free, FALSE if locked. + * Tests if the lock is free. Returns true if free, false if locked. * This does *not* change the state of the lock. * * void SPIN_DELAY(void) diff --git a/src/include/storage/spin.h b/src/include/storage/spin.h index 66698645c2..16413856ca 100644 --- a/src/include/storage/spin.h +++ b/src/include/storage/spin.h @@ -19,7 +19,7 @@ * Unlock a previously acquired lock. * * bool SpinLockFree(slock_t *lock) - * Tests if the lock is free. Returns TRUE if free, FALSE if locked. + * Tests if the lock is free. Returns true if free, false if locked. * This does *not* change the state of the lock. 
* * Callers must beware that the macro argument may be evaluated multiple diff --git a/src/include/tsearch/ts_utils.h b/src/include/tsearch/ts_utils.h index 3312353026..782548c0af 100644 --- a/src/include/tsearch/ts_utils.h +++ b/src/include/tsearch/ts_utils.h @@ -146,7 +146,7 @@ typedef struct ExecPhraseData * val: lexeme to test for presence of * data: to be filled with lexeme positions; NULL if position data not needed * - * Return TRUE if lexeme is present in data, else FALSE. If data is not + * Return true if lexeme is present in data, else false. If data is not * NULL, it should be filled with lexeme positions, but function can leave * it as zeroes if position data is not available. */ @@ -167,7 +167,7 @@ typedef bool (*TSExecuteCallback) (void *arg, QueryOperand *val, #define TS_EXEC_CALC_NOT (0x01) /* * If TS_EXEC_PHRASE_NO_POS is set, allow OP_PHRASE to be executed lossily - * in the absence of position information: a TRUE result indicates that the + * in the absence of position information: a true result indicates that the * phrase might be present. Without this flag, OP_PHRASE always returns * false if lexeme position information is not available. */ diff --git a/src/interfaces/libpq/fe-secure.c b/src/interfaces/libpq/fe-secure.c index 7c2d0cb4e6..c122c63106 100644 --- a/src/interfaces/libpq/fe-secure.c +++ b/src/interfaces/libpq/fe-secure.c @@ -465,10 +465,10 @@ pq_block_sigpipe(sigset_t *osigset, bool *sigpipe_pending) * As long as it doesn't queue multiple events, we're OK because the caller * can't tell the difference. * - * The caller should say got_epipe = FALSE if it is certain that it + * The caller should say got_epipe = false if it is certain that it * didn't get an EPIPE error; in that case we'll skip the clear operation * and things are definitely OK, queuing or no. If it got one or might have - * gotten one, pass got_epipe = TRUE. + * gotten one, pass got_epipe = true. * * We do not want this to change errno, since if it did that could lose * the error code from a preceding send(). We essentially assume that if diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c index 9931ee038f..d0afa59242 100644 --- a/src/pl/plpgsql/src/pl_comp.c +++ b/src/pl/plpgsql/src/pl_comp.c @@ -869,7 +869,7 @@ plpgsql_compile_inline(char *proc_source) /* * Remember if function is STABLE/IMMUTABLE. XXX would it be better to - * set this TRUE inside a read-only transaction? Not clear. + * set this true inside a read-only transaction? Not clear. */ function->fn_readonly = false; @@ -1350,8 +1350,8 @@ make_datum_param(PLpgSQL_expr *expr, int dno, int location) * yytxt is the original token text; we need this to check for quoting, * so that later checks for unreserved keywords work properly. * - * If recognized as a variable, fill in *wdatum and return TRUE; - * if not recognized, fill in *word and return FALSE. + * If recognized as a variable, fill in *wdatum and return true; + * if not recognized, fill in *word and return false. * (Note: those two pointers actually point to members of the same union, * but for notational reasons we pass them separately.) * ---------- diff --git a/src/pl/plpgsql/src/pl_funcs.c b/src/pl/plpgsql/src/pl_funcs.c index cd44a8e9a3..23f54e1c21 100644 --- a/src/pl/plpgsql/src/pl_funcs.c +++ b/src/pl/plpgsql/src/pl_funcs.c @@ -113,7 +113,7 @@ plpgsql_ns_additem(PLpgSQL_nsitem_type itemtype, int itemno, const char *name) * * Note that this only searches for variables, not labels. * - * If localmode is TRUE, only the topmost block level is searched. 
+ * If localmode is true, only the topmost block level is searched.
  *
  * name1 must be non-NULL. Pass NULL for name2 and/or name3 if parsing a name
  * with fewer than three components.
-- 
2.11.0 (Apple Git-81)