From ba420a0da247cd1a2fb0a311928af2937ebf5b69 Mon Sep 17 00:00:00 2001
From: Peter Eisentraut
Date: Mon, 9 Nov 2020 15:33:47 +0100
Subject: [PATCH v1 3/3] WIP: Fix remaining -Wformat-signedness warnings

---
 contrib/amcheck/verify_heapam.c | 28 +++++++-------
 contrib/amcheck/verify_nbtree.c | 20 +++++-----
 contrib/isn/isn.c | 2 +-
 contrib/pageinspect/btreefuncs.c | 24 ++++++------
 contrib/pageinspect/ginfuncs.c | 4 +-
 contrib/pageinspect/hashfuncs.c | 10 ++---
 contrib/pageinspect/heapfuncs.c | 2 +-
 contrib/pg_surgery/heap_surgery.c | 2 +-
 contrib/pgrowlocks/pgrowlocks.c | 6 +--
 contrib/pgstattuple/pgstatindex.c | 14 +++----
 contrib/pgstattuple/pgstattuple.c | 12 +++---
 contrib/postgres_fdw/deparse.c | 12 +++---
 src/backend/access/common/bufmask.c | 6 +--
 src/backend/access/common/printtup.c | 6 +--
 src/backend/access/gin/gindatapage.c | 8 ++--
 src/backend/access/gist/gistbuild.c | 4 +-
 src/backend/access/heap/heapam.c | 2 +-
 src/backend/access/heap/heapam_handler.c | 2 +-
 src/backend/access/heap/rewriteheap.c | 2 +-
 src/backend/access/nbtree/nbtpage.c | 4 +-
 src/backend/access/nbtree/nbtutils.c | 4 +-
 src/backend/access/rmgrdesc/gindesc.c | 2 +-
 src/backend/access/rmgrdesc/heapdesc.c | 2 +-
 src/backend/access/rmgrdesc/relmapdesc.c | 2 +-
 src/backend/access/spgist/spgdoinsert.c | 6 +--
 src/backend/access/spgist/spgutils.c | 8 ++--
 src/backend/access/spgist/spgxlog.c | 6 +--
 src/backend/access/transam/multixact.c | 10 ++---
 src/backend/access/transam/slru.c | 12 +++---
 src/backend/access/transam/twophase.c | 2 +-
 src/backend/access/transam/xlog.c | 38 +++++++++----------
 src/backend/access/transam/xlogarchive.c | 2 +-
 src/backend/access/transam/xlogreader.c | 2 +-
 src/backend/access/transam/xlogutils.c | 6 +--
 src/backend/catalog/Catalog.pm | 2 +-
 src/backend/catalog/Makefile | 2 +-
 src/backend/catalog/dependency.c | 4 +-
 src/backend/catalog/genbki.pl | 6 +--
 src/backend/catalog/pg_shdepend.c | 2 +-
 src/backend/commands/copy.c | 2 +-
 src/backend/commands/tablecmds.c | 2 +-
 src/backend/executor/nodeWindowAgg.c | 4 +-
 src/backend/libpq/auth-scram.c | 2 +-
 src/backend/libpq/auth.c | 6 +--
 src/backend/libpq/be-secure-openssl.c | 4 +-
 src/backend/nodes/outfuncs.c | 2 +-
 src/backend/nodes/readfuncs.c | 2 +-
 src/backend/port/sysv_sema.c | 2 +-
 src/backend/port/sysv_shmem.c | 4 +-
 src/backend/postmaster/autovacuum.c | 2 +-
 src/backend/postmaster/bgworker.c | 2 +-
 src/backend/postmaster/postmaster.c | 2 +-
 src/backend/regex/regerror.c | 2 +-
 src/backend/replication/basebackup.c | 4 +-
 src/backend/replication/logical/logical.c | 2 +-
 src/backend/replication/logical/snapbuild.c | 6 +--
 src/backend/replication/logical/worker.c | 2 +-
 src/backend/replication/pgoutput/pgoutput.c | 6 +--
 src/backend/replication/slot.c | 2 +-
 src/backend/replication/syncrep.c | 6 +--
 src/backend/replication/walreceiver.c | 2 +-
 src/backend/rewrite/rewriteHandler.c | 2 +-
 src/backend/statistics/dependencies.c | 8 ++--
 src/backend/statistics/mcv.c | 8 ++--
 src/backend/statistics/mvdistinct.c | 6 +--
 src/backend/storage/buffer/localbuf.c | 4 +-
 src/backend/storage/ipc/ipc.c | 2 +-
 src/backend/storage/ipc/standby.c | 4 +-
 src/backend/storage/lmgr/lmgr.c | 2 +-
 src/backend/storage/page/bufpage.c | 10 ++---
 src/backend/tsearch/dict_thesaurus.c | 4 +-
 src/backend/utils/adt/inet_cidr_ntop.c | 6 +--
 src/backend/utils/adt/json.c | 2 +-
 src/backend/utils/adt/jsonb_util.c | 10 ++---
 src/backend/utils/adt/jsonfuncs.c | 2 +-
 src/backend/utils/adt/mac.c | 2 +-
 src/backend/utils/adt/numeric.c | 2 +-
 src/backend/utils/adt/oracle_compat.c | 8 ++--
 src/backend/utils/adt/ruleutils.c | 8 ++--
 src/backend/utils/adt/timestamp.c | 4 +-
 src/backend/utils/adt/tsvector_op.c | 4 +-
 src/backend/utils/adt/xml.c | 8 ++--
 src/backend/utils/cache/catcache.c | 2 +-
 src/backend/utils/cache/relmapper.c | 2 +-
 src/backend/utils/error/elog.c | 6 +--
 src/backend/utils/misc/guc-file.l | 2 +-
 src/backend/utils/misc/guc.c | 6 +--
 src/backend/utils/mmgr/aset.c | 2 +-
 src/backend/utils/mmgr/generation.c | 2 +-
 src/backend/utils/mmgr/mcxt.c | 4 +-
 src/backend/utils/mmgr/slab.c | 2 +-
 src/backend/utils/time/snapmgr.c | 6 +--
 src/bin/initdb/initdb.c | 6 ++-
 src/bin/pg_basebackup/pg_basebackup.c | 6 +--
 src/bin/pg_basebackup/pg_recvlogical.c | 4 +-
 src/bin/pg_basebackup/receivelog.c | 12 +++---
 src/bin/pg_basebackup/streamutil.c | 6 +--
 src/bin/pg_checksums/pg_checksums.c | 8 ++--
 src/bin/pg_resetwal/pg_resetwal.c | 4 +-
 src/bin/pg_rewind/pg_rewind.c | 6 +--
 src/bin/pg_test_fsync/pg_test_fsync.c | 2 +-
 src/bin/pg_test_timing/pg_test_timing.c | 2 +-
 src/bin/pg_upgrade/check.c | 2 +-
 src/bin/pg_upgrade/tablespace.c | 2 +-
 src/bin/pg_waldump/pg_waldump.c | 20 +++++-----
 src/bin/pgbench/pgbench.c | 2 +-
 src/fe_utils/print.c | 14 +++----
 src/include/access/transam.h | 6 +--
 src/include/catalog/pg_control.h | 2 +-
 src/include/lib/simplehash.h | 2 +-
 src/include/replication/walsender_private.h | 2 +-
 src/interfaces/ecpg/test/expected/sql-bytea.c | 2 +-
 src/interfaces/ecpg/test/sql/bytea.pgc | 2 +-
 src/interfaces/libpq/fe-misc.c | 2 +-
 src/interfaces/libpq/fe-protocol2.c | 6 +--
 src/interfaces/libpq/fe-protocol3.c | 4 +-
 src/port/inet_net_ntop.c | 4 +-
 .../test_ginpostinglist/test_ginpostinglist.c | 6 +--
 src/test/modules/worker_spi/worker_spi.c | 3 +-
 src/test/regress/regress.c | 6 +--
 120 files changed, 320 insertions(+), 319 deletions(-)

diff --git a/contrib/amcheck/verify_heapam.c b/contrib/amcheck/verify_heapam.c
index 570f44b59e..e296e29b43 100644
--- a/contrib/amcheck/verify_heapam.c
+++ b/contrib/amcheck/verify_heapam.c
@@ -642,7 +642,7 @@ check_tuple_header_and_visibilty(HeapTupleHeader tuphdr, HeapCheckContext *ctx)
                                        expected_hoff, ctx->tuphdr->t_hoff));
     else if ((infomask & HEAP_HASNULL))
         report_corruption(ctx,
-                          psprintf("tuple data should begin at byte %u, but actually begins at byte %u (%u attributes, has nulls)",
+                          psprintf("tuple data should begin at byte %u, but actually begins at byte %u (%d attributes, has nulls)",
                                    expected_hoff, ctx->tuphdr->t_hoff, ctx->natts));
     else if (ctx->natts == 1)
         report_corruption(ctx,
@@ -650,7 +650,7 @@ check_tuple_header_and_visibilty(HeapTupleHeader tuphdr, HeapCheckContext *ctx)
                                        expected_hoff, ctx->tuphdr->t_hoff));
     else
         report_corruption(ctx,
-                          psprintf("tuple data should begin at byte %u, but actually begins at byte %u (%u attributes, no nulls)",
+                          psprintf("tuple data should begin at byte %u, but actually begins at byte %u (%d attributes, no nulls)",
                                    expected_hoff, ctx->tuphdr->t_hoff, ctx->natts));
     header_garbled = true;
 }
@@ -886,14 +886,14 @@ check_toast_tuple(HeapTuple toasttup, HeapCheckContext *ctx)
     if (curchunk != ctx->chunkno)
     {
         report_corruption(ctx,
-                          psprintf("toast chunk sequence number %u does not match the expected sequence number %u",
+                          psprintf("toast chunk sequence number %d does not match the expected sequence number %d",
                                    curchunk, ctx->chunkno));
         return;
     }
     if (curchunk > ctx->endchunk)
     {
         report_corruption(ctx,
-                          psprintf("toast chunk sequence number %u exceeds the end chunk sequence number %u",
+                          psprintf("toast chunk sequence number %d exceeds the end chunk sequence number %d",
                                    curchunk,
ctx->endchunk)); return; } @@ -903,7 +903,7 @@ check_toast_tuple(HeapTuple toasttup, HeapCheckContext *ctx) if (chunksize != expected_size) { report_corruption(ctx, - psprintf("toast chunk size %u differs from the expected size %u", + psprintf("toast chunk size %d differs from the expected size %d", chunksize, expected_size)); return; } @@ -952,7 +952,7 @@ check_tuple_attribute(HeapCheckContext *ctx) if (ctx->tuphdr->t_hoff + ctx->offset > ctx->lp_len) { report_corruption(ctx, - psprintf("attribute %u with length %u starts at offset %u beyond total tuple length %u", + psprintf("attribute %d with length %d starts at offset %u beyond total tuple length %u", ctx->attnum, thisatt->attlen, ctx->tuphdr->t_hoff + ctx->offset, @@ -973,7 +973,7 @@ check_tuple_attribute(HeapCheckContext *ctx) if (ctx->tuphdr->t_hoff + ctx->offset > ctx->lp_len) { report_corruption(ctx, - psprintf("attribute %u with length %u ends at offset %u beyond total tuple length %u", + psprintf("attribute %d with length %d ends at offset %u beyond total tuple length %u", ctx->attnum, thisatt->attlen, ctx->tuphdr->t_hoff + ctx->offset, @@ -1006,7 +1006,7 @@ check_tuple_attribute(HeapCheckContext *ctx) if (va_tag != VARTAG_ONDISK) { report_corruption(ctx, - psprintf("toasted attribute %u has unexpected TOAST tag %u", + psprintf("toasted attribute %d has unexpected TOAST tag %u", ctx->attnum, va_tag)); /* We can't know where the next attribute begins */ @@ -1021,7 +1021,7 @@ check_tuple_attribute(HeapCheckContext *ctx) if (ctx->tuphdr->t_hoff + ctx->offset > ctx->lp_len) { report_corruption(ctx, - psprintf("attribute %u with length %u ends at offset %u beyond total tuple length %u", + psprintf("attribute %d with length %d ends at offset %u beyond total tuple length %u", ctx->attnum, thisatt->attlen, ctx->tuphdr->t_hoff + ctx->offset, @@ -1053,7 +1053,7 @@ check_tuple_attribute(HeapCheckContext *ctx) if (!(infomask & HEAP_HASEXTERNAL)) { report_corruption(ctx, - psprintf("attribute %u is external but tuple header flag HEAP_HASEXTERNAL not set", + psprintf("attribute %d is external but tuple header flag HEAP_HASEXTERNAL not set", ctx->attnum)); return true; } @@ -1062,7 +1062,7 @@ check_tuple_attribute(HeapCheckContext *ctx) if (!ctx->rel->rd_rel->reltoastrelid) { report_corruption(ctx, - psprintf("attribute %u is external but relation has no toast relation", + psprintf("attribute %d is external but relation has no toast relation", ctx->attnum)); return true; } @@ -1109,11 +1109,11 @@ check_tuple_attribute(HeapCheckContext *ctx) } if (ctx->chunkno != (ctx->endchunk + 1)) report_corruption(ctx, - psprintf("final toast chunk number %u differs from expected value %u", + psprintf("final toast chunk number %d differs from expected value %d", ctx->chunkno, (ctx->endchunk + 1))); if (!found_toasttup) report_corruption(ctx, - psprintf("toasted value for attribute %u missing from toast table", + psprintf("toasted value for attribute %d missing from toast table", ctx->attnum)); systable_endscan_ordered(toastscan); @@ -1265,7 +1265,7 @@ check_tuple(HeapCheckContext *ctx) if (RelationGetDescr(ctx->rel)->natts < ctx->natts) { report_corruption(ctx, - psprintf("number of attributes %u exceeds maximum expected for table %u", + psprintf("number of attributes %d exceeds maximum expected for table %d", ctx->natts, RelationGetDescr(ctx->rel)->natts)); return; diff --git a/contrib/amcheck/verify_nbtree.c b/contrib/amcheck/verify_nbtree.c index 6d86e3ccda..30f3df5d35 100644 --- a/contrib/amcheck/verify_nbtree.c +++ b/contrib/amcheck/verify_nbtree.c @@ 
-1074,7 +1074,7 @@ bt_target_page_check(BtreeCheckState *state) (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("wrong number of high key index tuple attributes in index \"%s\"", RelationGetRelationName(state->rel)), - errdetail_internal("Index block=%u natts=%u block type=%s page lsn=%X/%X.", + errdetail_internal("Index block=%u natts=%d block type=%s page lsn=%X/%X.", state->targetblock, BTreeTupleGetNAtts(itup, state->rel), P_ISLEAF(topaque) ? "heap" : "index", @@ -1117,7 +1117,7 @@ bt_target_page_check(BtreeCheckState *state) (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("index tuple size does not equal lp_len in index \"%s\"", RelationGetRelationName(state->rel)), - errdetail_internal("Index tid=(%u,%u) tuple size=%zu lp_len=%u page lsn=%X/%X.", + errdetail_internal("Index tid=(%u,%u) tuple size=%zu lp_len=%d page lsn=%X/%X.", state->targetblock, offset, tupsize, ItemIdGetLength(itemid), (uint32) (state->targetlsn >> 32), @@ -1142,7 +1142,7 @@ bt_target_page_check(BtreeCheckState *state) (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("wrong number of index tuple attributes in index \"%s\"", RelationGetRelationName(state->rel)), - errdetail_internal("Index tid=%s natts=%u points to %s tid=%s page lsn=%X/%X.", + errdetail_internal("Index tid=%s natts=%d points to %s tid=%s page lsn=%X/%X.", itid, BTreeTupleGetNAtts(itup, state->rel), P_ISLEAF(topaque) ? "heap" : "index", @@ -1186,7 +1186,7 @@ bt_target_page_check(BtreeCheckState *state) *htid; itid = psprintf("(%u,%u)", state->targetblock, offset); - htid = psprintf("(%u,%u)", ItemPointerGetBlockNumber(tid), + htid = psprintf("(%u,%d)", ItemPointerGetBlockNumber(tid), ItemPointerGetOffsetNumber(tid)); ereport(ERROR, @@ -2487,7 +2487,7 @@ bt_tuple_present_callback(Relation index, ItemPointer tid, Datum *values, IndexTupleSize(norm))) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("heap tuple (%u,%u) from table \"%s\" lacks matching index tuple within index \"%s\"", + errmsg("heap tuple (%u,%d) from table \"%s\" lacks matching index tuple within index \"%s\"", ItemPointerGetBlockNumber(&(itup->t_tid)), ItemPointerGetOffsetNumber(&(itup->t_tid)), RelationGetRelationName(state->heaprel), @@ -2575,7 +2575,7 @@ bt_normalize_tuple(BtreeCheckState *state, IndexTuple itup) if (VARATT_IS_EXTERNAL(DatumGetPointer(normalized[i]))) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), - errmsg("external varlena datum in tuple that references heap row (%u,%u) in index \"%s\"", + errmsg("external varlena datum in tuple that references heap row (%u,%d) in index \"%s\"", ItemPointerGetBlockNumber(&(itup->t_tid)), ItemPointerGetOffsetNumber(&(itup->t_tid)), RelationGetRelationName(state->rel)))); @@ -2991,7 +2991,7 @@ palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum) metad->btm_version > BTREE_VERSION) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), - errmsg("version mismatch in index \"%s\": file version %d, " + errmsg("version mismatch in index \"%s\": file version %u, " "current version %d, minimum supported version %d", RelationGetRelationName(state->rel), metad->btm_version, BTREE_VERSION, @@ -3041,7 +3041,7 @@ palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum) if (maxoffset > MaxIndexTuplesPerPage) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), - errmsg("Number of items on block %u of index \"%s\" exceeds MaxIndexTuplesPerPage (%u)", + errmsg("Number of items on block %u of index \"%s\" exceeds MaxIndexTuplesPerPage (%d)", blocknum, RelationGetRelationName(state->rel), MaxIndexTuplesPerPage))); @@ -3132,7 +3132,7 @@ 
PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block, Page page, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("line pointer points past end of tuple space in index \"%s\"", RelationGetRelationName(state->rel)), - errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.", + errdetail_internal("Index tid=(%u,%u) lp_off=%d, lp_len=%d lp_flags=%d.", block, offset, ItemIdGetOffset(itemid), ItemIdGetLength(itemid), ItemIdGetFlags(itemid)))); @@ -3148,7 +3148,7 @@ PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block, Page page, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("invalid line pointer storage in index \"%s\"", RelationGetRelationName(state->rel)), - errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.", + errdetail_internal("Index tid=(%u,%u) lp_off=%d, lp_len=%d lp_flags=%d.", block, offset, ItemIdGetOffset(itemid), ItemIdGetLength(itemid), ItemIdGetFlags(itemid)))); diff --git a/contrib/isn/isn.c b/contrib/isn/isn.c index cf36bb69d4..b613ba651f 100644 --- a/contrib/isn/isn.c +++ b/contrib/isn/isn.c @@ -890,7 +890,7 @@ string2ean(const char *str, bool errorOK, ean13 *result, ereport(ERROR, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), errmsg("invalid check digit for %s number: \"%s\", should be %c", - isn_names[accept], str, (rcheck == 10) ? ('X') : (rcheck + '0')))); + isn_names[accept], str, (rcheck == 10) ? ('X') : (int) (rcheck + '0')))); } } return false; diff --git a/contrib/pageinspect/btreefuncs.c b/contrib/pageinspect/btreefuncs.c index 445605db58..958ccc77e4 100644 --- a/contrib/pageinspect/btreefuncs.c +++ b/contrib/pageinspect/btreefuncs.c @@ -219,16 +219,16 @@ bt_page_stats(PG_FUNCTION_ARGS) elog(ERROR, "return type must be a row type"); j = 0; - values[j++] = psprintf("%d", stat.blkno); + values[j++] = psprintf("%u", stat.blkno); values[j++] = psprintf("%c", stat.type); - values[j++] = psprintf("%d", stat.live_items); - values[j++] = psprintf("%d", stat.dead_items); - values[j++] = psprintf("%d", stat.avg_item_size); - values[j++] = psprintf("%d", stat.page_size); - values[j++] = psprintf("%d", stat.free_size); - values[j++] = psprintf("%d", stat.btpo_prev); - values[j++] = psprintf("%d", stat.btpo_next); - values[j++] = psprintf("%d", (stat.type == 'd') ? stat.btpo.xact : stat.btpo.level); + values[j++] = psprintf("%u", stat.live_items); + values[j++] = psprintf("%u", stat.dead_items); + values[j++] = psprintf("%u", stat.avg_item_size); + values[j++] = psprintf("%u", stat.page_size); + values[j++] = psprintf("%u", stat.free_size); + values[j++] = psprintf("%u", stat.btpo_prev); + values[j++] = psprintf("%u", stat.btpo_next); + values[j++] = psprintf("%u", (stat.type == 'd') ? 
stat.btpo.xact : stat.btpo.level); values[j++] = psprintf("%d", stat.btpo_flags); tuple = BuildTupleFromCStrings(TupleDescGetAttInMetadata(tupleDesc), @@ -334,7 +334,7 @@ bt_page_print_tuples(struct user_args *uargs) { if (off > 0) *dump++ = ' '; - sprintf(dump, "%02x", *(ptr + off) & 0xff); + sprintf(dump, "%02x", (unsigned) (*(ptr + off) & 0xff)); dump += 2; } values[j++] = CStringGetTextDatum(datacstring); @@ -666,8 +666,8 @@ bt_metap(PG_FUNCTION_ARGS) errhint("To resolve the problem, update the \"pageinspect\" extension to the latest version."))); j = 0; - values[j++] = psprintf("%d", metad->btm_magic); - values[j++] = psprintf("%d", metad->btm_version); + values[j++] = psprintf("%u", metad->btm_magic); + values[j++] = psprintf("%u", metad->btm_version); values[j++] = psprintf(INT64_FORMAT, (int64) metad->btm_root); values[j++] = psprintf(INT64_FORMAT, (int64) metad->btm_level); values[j++] = psprintf(INT64_FORMAT, (int64) metad->btm_fastroot); diff --git a/contrib/pageinspect/ginfuncs.c b/contrib/pageinspect/ginfuncs.c index 711473579a..5e796e3afe 100644 --- a/contrib/pageinspect/ginfuncs.c +++ b/contrib/pageinspect/ginfuncs.c @@ -55,7 +55,7 @@ gin_metapage_info(PG_FUNCTION_ARGS) (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("input page is not a GIN metapage"), errdetail("Flags %04X, expected %04X", - opaq->flags, GIN_META))); + opaq->flags, (unsigned int) GIN_META))); /* Build a tuple descriptor for our result type */ if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE) @@ -199,7 +199,7 @@ gin_leafpage_items(PG_FUNCTION_ARGS) errmsg("input page is not a compressed GIN data leaf page"), errdetail("Flags %04X, expected %04X", opaq->flags, - (GIN_DATA | GIN_LEAF | GIN_COMPRESSED)))); + (unsigned int) (GIN_DATA | GIN_LEAF | GIN_COMPRESSED)))); inter_call_data = palloc(sizeof(gin_leafpage_items_state)); diff --git a/contrib/pageinspect/hashfuncs.c b/contrib/pageinspect/hashfuncs.c index 3b2f0339cf..4eb51fcc10 100644 --- a/contrib/pageinspect/hashfuncs.c +++ b/contrib/pageinspect/hashfuncs.c @@ -75,7 +75,7 @@ verify_hash_page(bytea *raw_page, int flags) (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("page is not a hash page"), errdetail("Expected %08x, got %08x.", - HASHO_PAGE_ID, pageopaque->hasho_page_id))); + (unsigned) HASHO_PAGE_ID, pageopaque->hasho_page_id))); pagetype = pageopaque->hasho_flag & LH_PAGE_TYPE; } @@ -86,7 +86,7 @@ verify_hash_page(bytea *raw_page, int flags) pagetype != LH_UNUSED_PAGE) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("invalid hash page type %08x", pagetype))); + errmsg("invalid hash page type %08x", (unsigned) pagetype))); /* If requested, verify page type. 
*/ if (flags != 0 && (pagetype & flags) == 0) @@ -111,7 +111,7 @@ verify_hash_page(bytea *raw_page, int flags) default: elog(ERROR, "hash page of type %08x not in mask %08x", - pagetype, flags); + (unsigned) pagetype, (unsigned) flags); break; } } @@ -128,13 +128,13 @@ verify_hash_page(bytea *raw_page, int flags) (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("invalid magic number for metadata"), errdetail("Expected 0x%08x, got 0x%08x.", - HASH_MAGIC, metap->hashm_magic))); + (unsigned) HASH_MAGIC, metap->hashm_magic))); if (metap->hashm_version != HASH_VERSION) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), errmsg("invalid version for metadata"), - errdetail("Expected %d, got %d", + errdetail("Expected %d, got %u", HASH_VERSION, metap->hashm_version))); } diff --git a/contrib/pageinspect/heapfuncs.c b/contrib/pageinspect/heapfuncs.c index f04455da12..91dd611bae 100644 --- a/contrib/pageinspect/heapfuncs.c +++ b/contrib/pageinspect/heapfuncs.c @@ -468,7 +468,7 @@ tuple_data_split(PG_FUNCTION_ARGS) if (bits_len != bits_str_len) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("unexpected length of t_bits string: %u, expected %u", + errmsg("unexpected length of t_bits string: %d, expected %d", bits_str_len, bits_len))); /* do the conversion */ diff --git a/contrib/pg_surgery/heap_surgery.c b/contrib/pg_surgery/heap_surgery.c index eb96b4bb36..d92fda10cc 100644 --- a/contrib/pg_surgery/heap_surgery.c +++ b/contrib/pg_surgery/heap_surgery.c @@ -185,7 +185,7 @@ heap_force_common(FunctionCallInfo fcinfo, HeapTupleForceOption heap_force_opt) if (ItemIdIsRedirected(itemid)) { ereport(NOTICE, - errmsg("skipping tid (%u, %u) for relation \"%s\" because it redirects to item %u", + errmsg("skipping tid (%u, %u) for relation \"%s\" because it redirects to item %d", blkno, offno, RelationGetRelationName(rel), ItemIdGetRedirect(itemid))); continue; diff --git a/contrib/pgrowlocks/pgrowlocks.c b/contrib/pgrowlocks/pgrowlocks.c index 714398831b..2eb9d1cff8 100644 --- a/contrib/pgrowlocks/pgrowlocks.c +++ b/contrib/pgrowlocks/pgrowlocks.c @@ -168,7 +168,7 @@ pgrowlocks(PG_FUNCTION_ARGS) PointerGetDatum(&tuple->t_self)); values[Atnum_xmax] = palloc(NCHARS * sizeof(char)); - snprintf(values[Atnum_xmax], NCHARS, "%d", xmax); + snprintf(values[Atnum_xmax], NCHARS, "%u", xmax); if (infomask & HEAP_XMAX_IS_MULTI) { MultiXactMember *members; @@ -209,7 +209,7 @@ pgrowlocks(PG_FUNCTION_ARGS) strcat(values[Atnum_modes], ","); strcat(values[Atnum_pids], ","); } - snprintf(buf, NCHARS, "%d", members[j].xid); + snprintf(buf, NCHARS, "%u", members[j].xid); strcat(values[Atnum_xids], buf); switch (members[j].status) { @@ -250,7 +250,7 @@ pgrowlocks(PG_FUNCTION_ARGS) values[Atnum_ismulti] = pstrdup("false"); values[Atnum_xids] = palloc(NCHARS * sizeof(char)); - snprintf(values[Atnum_xids], NCHARS, "{%d}", xmax); + snprintf(values[Atnum_xids], NCHARS, "{%u}", xmax); values[Atnum_modes] = palloc(NCHARS); if (infomask & HEAP_XMAX_LOCK_ONLY) diff --git a/contrib/pgstattuple/pgstatindex.c b/contrib/pgstattuple/pgstatindex.c index b1ce0d77d7..919d40f3b2 100644 --- a/contrib/pgstattuple/pgstatindex.c +++ b/contrib/pgstattuple/pgstatindex.c @@ -331,19 +331,19 @@ pgstatindex_impl(Relation rel, FunctionCallInfo fcinfo) elog(ERROR, "return type must be a row type"); j = 0; - values[j++] = psprintf("%d", indexStat.version); - values[j++] = psprintf("%d", indexStat.level); - values[j++] = psprintf(INT64_FORMAT, + values[j++] = psprintf("%u", indexStat.version); + values[j++] = psprintf("%u", indexStat.level); + values[j++] = 
psprintf(UINT64_FORMAT, (1 + /* include the metapage in index_size */ indexStat.leaf_pages + indexStat.internal_pages + indexStat.deleted_pages + indexStat.empty_pages) * BLCKSZ); values[j++] = psprintf("%u", indexStat.root_blkno); - values[j++] = psprintf(INT64_FORMAT, indexStat.internal_pages); - values[j++] = psprintf(INT64_FORMAT, indexStat.leaf_pages); - values[j++] = psprintf(INT64_FORMAT, indexStat.empty_pages); - values[j++] = psprintf(INT64_FORMAT, indexStat.deleted_pages); + values[j++] = psprintf(UINT64_FORMAT, indexStat.internal_pages); + values[j++] = psprintf(UINT64_FORMAT, indexStat.leaf_pages); + values[j++] = psprintf(UINT64_FORMAT, indexStat.empty_pages); + values[j++] = psprintf(UINT64_FORMAT, indexStat.deleted_pages); if (indexStat.max_avail > 0) values[j++] = psprintf("%.2f", 100.0 - (double) indexStat.free_space / (double) indexStat.max_avail * 100.0); diff --git a/contrib/pgstattuple/pgstattuple.c b/contrib/pgstattuple/pgstattuple.c index 69179d4104..42bd4ce95b 100644 --- a/contrib/pgstattuple/pgstattuple.c +++ b/contrib/pgstattuple/pgstattuple.c @@ -133,14 +133,14 @@ build_pgstattuple_type(pgstattuple_type *stat, FunctionCallInfo fcinfo) for (i = 0; i < NCOLUMNS; i++) values[i] = values_buf[i]; i = 0; - snprintf(values[i++], NCHARS, INT64_FORMAT, stat->table_len); - snprintf(values[i++], NCHARS, INT64_FORMAT, stat->tuple_count); - snprintf(values[i++], NCHARS, INT64_FORMAT, stat->tuple_len); + snprintf(values[i++], NCHARS, UINT64_FORMAT, stat->table_len); + snprintf(values[i++], NCHARS, UINT64_FORMAT, stat->tuple_count); + snprintf(values[i++], NCHARS, UINT64_FORMAT, stat->tuple_len); snprintf(values[i++], NCHARS, "%.2f", tuple_percent); - snprintf(values[i++], NCHARS, INT64_FORMAT, stat->dead_tuple_count); - snprintf(values[i++], NCHARS, INT64_FORMAT, stat->dead_tuple_len); + snprintf(values[i++], NCHARS, UINT64_FORMAT, stat->dead_tuple_count); + snprintf(values[i++], NCHARS, UINT64_FORMAT, stat->dead_tuple_len); snprintf(values[i++], NCHARS, "%.2f", dead_tuple_percent); - snprintf(values[i++], NCHARS, INT64_FORMAT, stat->free_space); + snprintf(values[i++], NCHARS, UINT64_FORMAT, stat->free_space); snprintf(values[i++], NCHARS, "%.2f", free_percent); /* build a tuple */ diff --git a/contrib/postgres_fdw/deparse.c b/contrib/postgres_fdw/deparse.c index 9e3bfe7621..dc5a91c837 100644 --- a/contrib/postgres_fdw/deparse.c +++ b/contrib/postgres_fdw/deparse.c @@ -105,7 +105,7 @@ typedef struct deparse_expr_cxt #define REL_ALIAS_PREFIX "r" /* Handy macro to add relation name qualification */ #define ADD_REL_QUALIFIER(buf, varno) \ - appendStringInfo((buf), "%s%d.", REL_ALIAS_PREFIX, (varno)) + appendStringInfo((buf), "%s%u.", REL_ALIAS_PREFIX, (varno)) #define SUBQUERY_REL_ALIAS_PREFIX "s" #define SUBQUERY_COL_ALIAS_PREFIX "c" @@ -140,7 +140,7 @@ static void deparseReturningList(StringInfo buf, RangeTblEntry *rte, List *withCheckOptionList, List *returningList, List **retrieved_attrs); -static void deparseColumnRef(StringInfo buf, int varno, int varattno, +static void deparseColumnRef(StringInfo buf, Index varno, AttrNumber varattno, RangeTblEntry *rte, bool qualify_col); static void deparseRelation(StringInfo buf, Relation rel); static void deparseExpr(Expr *expr, deparse_expr_cxt *context); @@ -1621,7 +1621,7 @@ deparseFromExprForRel(StringInfo buf, PlannerInfo *root, RelOptInfo *foreignrel, * join. 
*/ if (use_alias) - appendStringInfo(buf, " %s%d", REL_ALIAS_PREFIX, foreignrel->relid); + appendStringInfo(buf, " %s%u", REL_ALIAS_PREFIX, foreignrel->relid); table_close(rel, NoLock); } @@ -1844,7 +1844,7 @@ deparseDirectUpdateSql(StringInfo buf, PlannerInfo *root, appendStringInfoString(buf, "UPDATE "); deparseRelation(buf, rel); if (foreignrel->reloptkind == RELOPT_JOINREL) - appendStringInfo(buf, " %s%d", REL_ALIAS_PREFIX, rtindex); + appendStringInfo(buf, " %s%u", REL_ALIAS_PREFIX, rtindex); appendStringInfoString(buf, " SET "); /* Make sure any constants in the exprs are printed portably */ @@ -1952,7 +1952,7 @@ deparseDirectDeleteSql(StringInfo buf, PlannerInfo *root, appendStringInfoString(buf, "DELETE FROM "); deparseRelation(buf, rel); if (foreignrel->reloptkind == RELOPT_JOINREL) - appendStringInfo(buf, " %s%d", REL_ALIAS_PREFIX, rtindex); + appendStringInfo(buf, " %s%u", REL_ALIAS_PREFIX, rtindex); if (foreignrel->reloptkind == RELOPT_JOINREL) { @@ -2121,7 +2121,7 @@ deparseAnalyzeSql(StringInfo buf, Relation rel, List **retrieved_attrs) * If qualify_col is true, qualify column name with the alias of relation. */ static void -deparseColumnRef(StringInfo buf, int varno, int varattno, RangeTblEntry *rte, +deparseColumnRef(StringInfo buf, Index varno, AttrNumber varattno, RangeTblEntry *rte, bool qualify_col) { /* We support fetching the remote side's CTID and OID. */ diff --git a/src/backend/access/common/bufmask.c b/src/backend/access/common/bufmask.c index 4bdb1848ad..c2bd201b4a 100644 --- a/src/backend/access/common/bufmask.c +++ b/src/backend/access/common/bufmask.c @@ -70,9 +70,9 @@ mask_page_hint_bits(Page page) void mask_unused_space(Page page) { - int pd_lower = ((PageHeader) page)->pd_lower; - int pd_upper = ((PageHeader) page)->pd_upper; - int pd_special = ((PageHeader) page)->pd_special; + LocationIndex pd_lower = ((PageHeader) page)->pd_lower; + LocationIndex pd_upper = ((PageHeader) page)->pd_upper; + LocationIndex pd_special = ((PageHeader) page)->pd_special; /* Sanity check */ if (pd_lower > pd_upper || pd_special < pd_upper || diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c index dd1bac0aa9..7e387147f2 100644 --- a/src/backend/access/common/printtup.c +++ b/src/backend/access/common/printtup.c @@ -572,7 +572,7 @@ printtup_destroy(DestReceiver *self) * ---------------- */ static void -printatt(unsigned attributeId, +printatt(int attributeId, Form_pg_attribute attributeP, char *value) { @@ -602,7 +602,7 @@ debugStartup(DestReceiver *self, int operation, TupleDesc typeinfo) * show the return type of the tuples */ for (i = 0; i < natts; ++i) - printatt((unsigned) i + 1, TupleDescAttr(typeinfo, i), NULL); + printatt(i + 1, TupleDescAttr(typeinfo, i), NULL); printf("\t----\n"); } @@ -632,7 +632,7 @@ debugtup(TupleTableSlot *slot, DestReceiver *self) value = OidOutputFunctionCall(typoutput, attr); - printatt((unsigned) i + 1, TupleDescAttr(typeinfo, i), value); + printatt(i + 1, TupleDescAttr(typeinfo, i), value); } printf("\t----\n"); diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c index 7a2690e97f..4b4e35a5f9 100644 --- a/src/backend/access/gin/gindatapage.c +++ b/src/backend/access/gin/gindatapage.c @@ -606,11 +606,11 @@ dataBeginPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack, *ptp_workspace = leaf; if (append) - elog(DEBUG2, "appended %d new items to block %u; %d bytes (%d to go)", + elog(DEBUG2, "appended %d new items to block %u; %d bytes (%u to go)", maxitems, 
BufferGetBlockNumber(buf), (int) leaf->lsize, items->nitem - items->curitem - maxitems); else - elog(DEBUG2, "inserted %d new items to block %u; %d bytes (%d to go)", + elog(DEBUG2, "inserted %d new items to block %u; %d bytes (%u to go)", maxitems, BufferGetBlockNumber(buf), (int) leaf->lsize, items->nitem - items->curitem - maxitems); } @@ -692,11 +692,11 @@ dataBeginPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack, GinDataPageGetRightBound(*newrpage)) < 0); if (append) - elog(DEBUG2, "appended %d items to block %u; split %d/%d (%d to go)", + elog(DEBUG2, "appended %d items to block %u; split %d/%d (%u to go)", maxitems, BufferGetBlockNumber(buf), (int) leaf->lsize, (int) leaf->rsize, items->nitem - items->curitem - maxitems); else - elog(DEBUG2, "inserted %d items to block %u; split %d/%d (%d to go)", + elog(DEBUG2, "inserted %d items to block %u; split %d/%d (%u to go)", maxitems, BufferGetBlockNumber(buf), (int) leaf->lsize, (int) leaf->rsize, items->nitem - items->curitem - maxitems); } diff --git a/src/backend/access/gist/gistbuild.c b/src/backend/access/gist/gistbuild.c index 9d3fa9c3b7..752dfadab7 100644 --- a/src/backend/access/gist/gistbuild.c +++ b/src/backend/access/gist/gistbuild.c @@ -1212,7 +1212,7 @@ gistBufferingFindCorrectParent(GISTBuildState *buildstate, * number. */ if (*parentblkno == InvalidBlockNumber) - elog(ERROR, "no parent buffer provided of child %d", childblkno); + elog(ERROR, "no parent buffer provided of child %u", childblkno); parent = *parentblkno; } @@ -1545,7 +1545,7 @@ gistGetParent(GISTBuildState *buildstate, BlockNumber child) HASH_FIND, &found); if (!found) - elog(ERROR, "could not find parent of block %d in lookup table", child); + elog(ERROR, "could not find parent of block %u in lookup table", child); return entry->parentblkno; } diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 1585861a02..a6ff4711c1 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -3932,7 +3932,7 @@ get_mxact_status_for_lock(LockTupleMode mode, bool is_update) retval = tupleLockExtraInfo[mode].lockstatus; if (retval == -1) - elog(ERROR, "invalid lock tuple mode %d/%s", mode, + elog(ERROR, "invalid lock tuple mode %u/%s", mode, is_update ? 
"true" : "false"); return (MultiXactStatus) retval; diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c index dcaea7135f..dafba87c0a 100644 --- a/src/backend/access/heap/heapam_handler.c +++ b/src/backend/access/heap/heapam_handler.c @@ -1825,7 +1825,7 @@ heapam_index_validate_scan(Relation heapRelation, if (!OffsetNumberIsValid(root_offnum)) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg_internal("failed to find parent tuple for heap-only tuple at (%u,%u) in table \"%s\"", + errmsg_internal("failed to find parent tuple for heap-only tuple at (%u,%d) in table \"%s\"", ItemPointerGetBlockNumber(heapcursor), ItemPointerGetOffsetNumber(heapcursor), RelationGetRelationName(heapRelation)))); diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c index 39e33763df..23fce2b58f 100644 --- a/src/backend/access/heap/rewriteheap.c +++ b/src/backend/access/heap/rewriteheap.c @@ -918,7 +918,7 @@ logical_heap_rewrite_flush_mappings(RewriteState state) if (written != len) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not write to file \"%s\", wrote %d of %d: %m", src->path, + errmsg("could not write to file \"%s\", wrote %d of %u: %m", src->path, written, len))); src->off += len; diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index 7f392480ac..a0f991bf41 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -155,7 +155,7 @@ _bt_getmeta(Relation rel, Buffer metabuf) metad->btm_version > BTREE_VERSION) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), - errmsg("version mismatch in index \"%s\": file version %d, " + errmsg("version mismatch in index \"%s\": file version %u, " "current version %d, minimal supported version %d", RelationGetRelationName(rel), metad->btm_version, BTREE_VERSION, BTREE_MIN_VERSION))); @@ -543,7 +543,7 @@ _bt_gettrueroot(Relation rel) metad->btm_version > BTREE_VERSION) ereport(ERROR, (errcode(ERRCODE_INDEX_CORRUPTED), - errmsg("version mismatch in index \"%s\": file version %d, " + errmsg("version mismatch in index \"%s\": file version %u, " "current version %d, minimal supported version %d", RelationGetRelationName(rel), metad->btm_version, BTREE_VERSION, BTREE_MIN_VERSION))); diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c index 81589b9056..f84e514a32 100644 --- a/src/backend/access/nbtree/nbtutils.c +++ b/src/backend/access/nbtree/nbtutils.c @@ -2661,13 +2661,13 @@ _bt_check_third_page(Relation rel, Relation heap, bool needheaptidspace, ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("index row size %zu exceeds btree version %u maximum %zu for index \"%s\"", + errmsg("index row size %zu exceeds btree version %d maximum %zu for index \"%s\"", itemsz, needheaptidspace ? BTREE_VERSION : BTREE_NOVAC_VERSION, needheaptidspace ? 
BTMaxItemSize(page) : BTMaxItemSizeNoHeapTid(page), RelationGetRelationName(rel)), - errdetail("Index row references tuple (%u,%u) in relation \"%s\".", + errdetail("Index row references tuple (%u,%d) in relation \"%s\".", ItemPointerGetBlockNumber(BTreeTupleGetHeapTID(newtup)), ItemPointerGetOffsetNumber(BTreeTupleGetHeapTID(newtup)), RelationGetRelationName(heap)), diff --git a/src/backend/access/rmgrdesc/gindesc.c b/src/backend/access/rmgrdesc/gindesc.c index 9ab0d8e1f7..366b3c1c83 100644 --- a/src/backend/access/rmgrdesc/gindesc.c +++ b/src/backend/access/rmgrdesc/gindesc.c @@ -122,7 +122,7 @@ gin_desc(StringInfo buf, XLogReaderState *record) ginxlogInsertDataInternal *insertData = (ginxlogInsertDataInternal *) payload; - appendStringInfo(buf, " pitem: %u-%u/%u", + appendStringInfo(buf, " pitem: %u-%u/%d", PostingItemGetBlockNumber(&insertData->newitem), ItemPointerGetBlockNumber(&insertData->newitem.key), ItemPointerGetOffsetNumber(&insertData->newitem.key)); diff --git a/src/backend/access/rmgrdesc/heapdesc.c b/src/backend/access/rmgrdesc/heapdesc.c index 3c16e6ef1f..b0c7cff1f4 100644 --- a/src/backend/access/rmgrdesc/heapdesc.c +++ b/src/backend/access/rmgrdesc/heapdesc.c @@ -166,7 +166,7 @@ heap2_desc(StringInfo buf, XLogReaderState *record) { xl_heap_new_cid *xlrec = (xl_heap_new_cid *) rec; - appendStringInfo(buf, "rel %u/%u/%u; tid %u/%u", + appendStringInfo(buf, "rel %u/%u/%u; tid %u/%d", xlrec->target_node.spcNode, xlrec->target_node.dbNode, xlrec->target_node.relNode, diff --git a/src/backend/access/rmgrdesc/relmapdesc.c b/src/backend/access/rmgrdesc/relmapdesc.c index 8a8d594956..9c88d53300 100644 --- a/src/backend/access/rmgrdesc/relmapdesc.c +++ b/src/backend/access/rmgrdesc/relmapdesc.c @@ -26,7 +26,7 @@ relmap_desc(StringInfo buf, XLogReaderState *record) { xl_relmap_update *xlrec = (xl_relmap_update *) rec; - appendStringInfo(buf, "database %u tablespace %u size %u", + appendStringInfo(buf, "database %u tablespace %u size %d", xlrec->dbid, xlrec->tsid, xlrec->nbytes); } } diff --git a/src/backend/access/spgist/spgdoinsert.c b/src/backend/access/spgist/spgdoinsert.c index 934d65b89f..8a3985dd4a 100644 --- a/src/backend/access/spgist/spgdoinsert.c +++ b/src/backend/access/spgist/spgdoinsert.c @@ -167,7 +167,7 @@ spgPageIndexMultiDelete(SpGistState *state, Page page, if (PageAddItem(page, (Item) tuple, tuple->size, itemno, false, false) != itemno) - elog(ERROR, "failed to add item of size %u to SPGiST index page", + elog(ERROR, "failed to add item of size %d to SPGiST index page", tuple->size); if (tupstate == SPGIST_REDIRECT) @@ -276,7 +276,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple, if (PageAddItem(current->page, (Item) leafTuple, leafTuple->size, current->offnum, false, false) != current->offnum) - elog(ERROR, "failed to add item of size %u to SPGiST index page", + elog(ERROR, "failed to add item of size %d to SPGiST index page", leafTuple->size); /* WAL replay distinguishes this case by equal offnums */ @@ -1634,7 +1634,7 @@ spgAddNodeAction(Relation index, SpGistState *state, if (PageAddItem(saveCurrent.page, (Item) dt, dt->size, saveCurrent.offnum, false, false) != saveCurrent.offnum) - elog(ERROR, "failed to add item of size %u to SPGiST index page", + elog(ERROR, "failed to add item of size %d to SPGiST index page", dt->size); if (state->isBuild) diff --git a/src/backend/access/spgist/spgutils.c b/src/backend/access/spgist/spgutils.c index 64d3ba8288..b37ba962b8 100644 --- a/src/backend/access/spgist/spgutils.c +++ 
b/src/backend/access/spgist/spgutils.c @@ -951,8 +951,8 @@ SpGistPageAddNewItem(SpGistState *state, Page page, Item item, Size size, *startOffset = offnum + 1; } else - elog(PANIC, "failed to add item of size %u to SPGiST index page", - (int) size); + elog(PANIC, "failed to add item of size %zu to SPGiST index page", + size); return offnum; } @@ -963,8 +963,8 @@ SpGistPageAddNewItem(SpGistState *state, Page page, Item item, Size size, InvalidOffsetNumber, false, false); if (offnum == InvalidOffsetNumber && !errorOK) - elog(ERROR, "failed to add item of size %u to SPGiST index page", - (int) size); + elog(ERROR, "failed to add item of size %zu to SPGiST index page", + size); return offnum; } diff --git a/src/backend/access/spgist/spgxlog.c b/src/backend/access/spgist/spgxlog.c index 999d0ca15d..33ad6a9ec2 100644 --- a/src/backend/access/spgist/spgxlog.c +++ b/src/backend/access/spgist/spgxlog.c @@ -68,7 +68,7 @@ addOrReplaceTuple(Page page, Item tuple, int size, OffsetNumber offset) Assert(offset <= PageGetMaxOffsetNumber(page) + 1); if (PageAddItem(page, tuple, size, offset, false, false) != offset) - elog(ERROR, "failed to add item of size %u to SPGiST index page", + elog(ERROR, "failed to add item of size %d to SPGiST index page", size); } @@ -133,7 +133,7 @@ spgRedoAddLeaf(XLogReaderState *record) if (PageAddItem(page, (Item) leafTuple, leafTupleHdr.size, xldata->offnumLeaf, false, false) != xldata->offnumLeaf) - elog(ERROR, "failed to add item of size %u to SPGiST index page", + elog(ERROR, "failed to add item of size %d to SPGiST index page", leafTupleHdr.size); } @@ -395,7 +395,7 @@ spgRedoAddNode(XLogReaderState *record) if (PageAddItem(page, (Item) dt, dt->size, xldata->offnum, false, false) != xldata->offnum) - elog(ERROR, "failed to add item of size %u to SPGiST index page", + elog(ERROR, "failed to add item of size %d to SPGiST index page", dt->size); if (state.isBuild) diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index f885ac9548..44698a139e 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -1117,8 +1117,8 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("multixact \"members\" limit exceeded"), - errdetail_plural("This command would create a multixact with %u members, but the remaining space is only enough for %u member.", - "This command would create a multixact with %u members, but the remaining space is only enough for %u members.", + errdetail_plural("This command would create a multixact with %d members, but the remaining space is only enough for %u member.", + "This command would create a multixact with %d members, but the remaining space is only enough for %u members.", MultiXactState->offsetStopLimit - nextOffset - 1, nmembers, MultiXactState->offsetStopLimit - nextOffset - 1), @@ -1153,8 +1153,8 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) nmembers + MULTIXACT_MEMBERS_PER_PAGE * SLRU_PAGES_PER_SEGMENT * OFFSET_WARN_SEGMENTS)) ereport(WARNING, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg_plural("database with OID %u must be vacuumed before %d more multixact member is used", - "database with OID %u must be vacuumed before %d more multixact members are used", + errmsg_plural("database with OID %u must be vacuumed before %u more multixact member is used", + "database with OID %u must be vacuumed before %u more multixact members are used", MultiXactState->offsetStopLimit - nextOffset 
+ nmembers, MultiXactState->oldestMultiXactDB, MultiXactState->offsetStopLimit - nextOffset + nmembers), @@ -2896,7 +2896,7 @@ PerformMembersTruncation(MultiXactOffset oldestOffset, MultiXactOffset newOldest */ while (segment != endsegment) { - elog(DEBUG2, "truncating multixact members segment %x", segment); + elog(DEBUG2, "truncating multixact members segment %x", (unsigned) segment); SlruDeleteSegment(MultiXactMemberCtl, segment); /* move to next segment, handling wraparound correctly */ diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c index cec17cb2ae..27a937bc88 100644 --- a/src/backend/access/transam/slru.c +++ b/src/backend/access/transam/slru.c @@ -60,7 +60,7 @@ #include "storage/shmem.h" #define SlruFileName(ctl, path, seg) \ - snprintf(path, MAXPGPATH, "%s/%04X", (ctl)->Dir, seg) + snprintf(path, MAXPGPATH, "%s/%04X", (ctl)->Dir, (unsigned) (seg)) /* * During SimpleLruWriteAll(), we will usually not need to write more than one @@ -948,7 +948,7 @@ SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid) ereport(ERROR, (errcode_for_file_access(), errmsg("could not access status of transaction %u", xid), - errdetail("Could not seek in file \"%s\" to offset %u: %m.", + errdetail("Could not seek in file \"%s\" to offset %d: %m.", path, offset))); break; case SLRU_READ_FAILED: @@ -956,24 +956,24 @@ SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid) ereport(ERROR, (errcode_for_file_access(), errmsg("could not access status of transaction %u", xid), - errdetail("Could not read from file \"%s\" at offset %u: %m.", + errdetail("Could not read from file \"%s\" at offset %d: %m.", path, offset))); else ereport(ERROR, (errmsg("could not access status of transaction %u", xid), - errdetail("Could not read from file \"%s\" at offset %u: read too few bytes.", path, offset))); + errdetail("Could not read from file \"%s\" at offset %d: read too few bytes.", path, offset))); break; case SLRU_WRITE_FAILED: if (errno) ereport(ERROR, (errcode_for_file_access(), errmsg("could not access status of transaction %u", xid), - errdetail("Could not write to file \"%s\" at offset %u: %m.", + errdetail("Could not write to file \"%s\" at offset %d: %m.", path, offset))); else ereport(ERROR, (errmsg("could not access status of transaction %u", xid), - errdetail("Could not write to file \"%s\" at offset %u: wrote too few bytes.", + errdetail("Could not write to file \"%s\" at offset %d: wrote too few bytes.", path, offset))); break; case SLRU_FSYNC_FAILED: diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c index 7940060443..d3e3801ce8 100644 --- a/src/backend/access/transam/twophase.c +++ b/src/backend/access/transam/twophase.c @@ -1691,7 +1691,7 @@ void CheckPointTwoPhase(XLogRecPtr redo_horizon) { int i; - int serialized_xacts = 0; + unsigned int serialized_xacts = 0; if (max_prepared_xacts <= 0) return; /* nothing to do */ diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index a71b55341f..b4a31be0c8 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -1495,7 +1495,7 @@ checkXLogConsistency(XLogReaderState *record) if (memcmp(replay_image_masked, primary_image_masked, BLCKSZ) != 0) { elog(FATAL, - "inconsistent page found, rel %u/%u/%u, forknum %u, blkno %u", + "inconsistent page found, rel %u/%u/%u, forknum %d, blkno %u", rnode.spcNode, rnode.dbNode, rnode.relNode, forknum, blkno); } @@ -4034,7 +4034,7 @@ RemoveOldXlogFiles(XLogSegNo segno, XLogRecPtr 
lastredoptr, XLogRecPtr endptr) * doesn't matter, we ignore that in the comparison. (During recovery, * ThisTimeLineID isn't set, so we can't use that.) */ - XLogFileName(lastoff, 0, segno, wal_segment_size); + XLogFileName(lastoff, 0U, segno, wal_segment_size); elog(DEBUG2, "attempting to remove WAL segments older than log file %s", lastoff); @@ -4381,7 +4381,7 @@ ReadRecord(XLogReaderState *xlogreader, int emode, XLogFileName(fname, xlogreader->seg.ws_tli, segno, wal_segment_size); ereport(emode_for_corrupt_record(emode, EndRecPtr), - (errmsg("unexpected timeline ID %u in log segment %s, offset %u", + (errmsg("unexpected timeline ID %u in log segment %s, offset %d", xlogreader->latestPageTLI, fname, offset))); @@ -4748,8 +4748,8 @@ ReadControlFile(void) if (ControlFile->pg_control_version != PG_CONTROL_VERSION && ControlFile->pg_control_version % 65536 == 0 && ControlFile->pg_control_version / 65536 != 0) ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with PG_CONTROL_VERSION %d (0x%08x)," - " but the server was compiled with PG_CONTROL_VERSION %d (0x%08x).", + errdetail("The database cluster was initialized with PG_CONTROL_VERSION %u (0x%08x)," + " but the server was compiled with PG_CONTROL_VERSION %u (0x%08x).", ControlFile->pg_control_version, ControlFile->pg_control_version, PG_CONTROL_VERSION, PG_CONTROL_VERSION), errhint("This could be a problem of mismatched byte ordering. It looks like you need to initdb."))); @@ -4757,8 +4757,8 @@ ReadControlFile(void) if (ControlFile->pg_control_version != PG_CONTROL_VERSION) ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with PG_CONTROL_VERSION %d," - " but the server was compiled with PG_CONTROL_VERSION %d.", + errdetail("The database cluster was initialized with PG_CONTROL_VERSION %u," + " but the server was compiled with PG_CONTROL_VERSION %u.", ControlFile->pg_control_version, PG_CONTROL_VERSION), errhint("It looks like you need to initdb."))); @@ -4781,14 +4781,14 @@ ReadControlFile(void) if (ControlFile->catalog_version_no != CATALOG_VERSION_NO) ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with CATALOG_VERSION_NO %d," + errdetail("The database cluster was initialized with CATALOG_VERSION_NO %u," " but the server was compiled with CATALOG_VERSION_NO %d.", ControlFile->catalog_version_no, CATALOG_VERSION_NO), errhint("It looks like you need to initdb."))); if (ControlFile->maxAlign != MAXIMUM_ALIGNOF) ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with MAXALIGN %d," + errdetail("The database cluster was initialized with MAXALIGN %u," " but the server was compiled with MAXALIGN %d.", ControlFile->maxAlign, MAXIMUM_ALIGNOF), errhint("It looks like you need to initdb."))); @@ -4800,49 +4800,49 @@ ReadControlFile(void) if (ControlFile->blcksz != BLCKSZ) ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with BLCKSZ %d," + errdetail("The database cluster was initialized with BLCKSZ %u," " but the server was compiled with BLCKSZ %d.", ControlFile->blcksz, BLCKSZ), errhint("It looks like you need to recompile or initdb."))); if (ControlFile->relseg_size != RELSEG_SIZE) ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was 
initialized with RELSEG_SIZE %d," + errdetail("The database cluster was initialized with RELSEG_SIZE %u," " but the server was compiled with RELSEG_SIZE %d.", ControlFile->relseg_size, RELSEG_SIZE), errhint("It looks like you need to recompile or initdb."))); if (ControlFile->xlog_blcksz != XLOG_BLCKSZ) ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with XLOG_BLCKSZ %d," + errdetail("The database cluster was initialized with XLOG_BLCKSZ %u," " but the server was compiled with XLOG_BLCKSZ %d.", ControlFile->xlog_blcksz, XLOG_BLCKSZ), errhint("It looks like you need to recompile or initdb."))); if (ControlFile->nameDataLen != NAMEDATALEN) ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with NAMEDATALEN %d," + errdetail("The database cluster was initialized with NAMEDATALEN %u," " but the server was compiled with NAMEDATALEN %d.", ControlFile->nameDataLen, NAMEDATALEN), errhint("It looks like you need to recompile or initdb."))); if (ControlFile->indexMaxKeys != INDEX_MAX_KEYS) ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with INDEX_MAX_KEYS %d," + errdetail("The database cluster was initialized with INDEX_MAX_KEYS %u," " but the server was compiled with INDEX_MAX_KEYS %d.", ControlFile->indexMaxKeys, INDEX_MAX_KEYS), errhint("It looks like you need to recompile or initdb."))); if (ControlFile->toast_max_chunk_size != TOAST_MAX_CHUNK_SIZE) ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d," + errdetail("The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %u," " but the server was compiled with TOAST_MAX_CHUNK_SIZE %d.", ControlFile->toast_max_chunk_size, (int) TOAST_MAX_CHUNK_SIZE), errhint("It looks like you need to recompile or initdb."))); if (ControlFile->loblksize != LOBLKSIZE) ereport(FATAL, (errmsg("database files are incompatible with server"), - errdetail("The database cluster was initialized with LOBLKSIZE %d," + errdetail("The database cluster was initialized with LOBLKSIZE %u," " but the server was compiled with LOBLKSIZE %d.", ControlFile->loblksize, (int) LOBLKSIZE), errhint("It looks like you need to recompile or initdb."))); @@ -10282,13 +10282,13 @@ xlog_block_info(StringInfo buf, XLogReaderState *record) XLogRecGetBlockTag(record, block_id, &rnode, &forknum, &blk); if (forknum != MAIN_FORKNUM) - appendStringInfo(buf, "; blkref #%u: rel %u/%u/%u, fork %u, blk %u", + appendStringInfo(buf, "; blkref #%d: rel %u/%u/%u, fork %d, blk %u", block_id, rnode.spcNode, rnode.dbNode, rnode.relNode, forknum, blk); else - appendStringInfo(buf, "; blkref #%u: rel %u/%u/%u, blk %u", + appendStringInfo(buf, "; blkref #%d: rel %u/%u/%u, blk %u", block_id, rnode.spcNode, rnode.dbNode, rnode.relNode, blk); @@ -10313,7 +10313,7 @@ xlog_outdesc(StringInfo buf, XLogReaderState *record) id = RmgrTable[rmid].rm_identify(info); if (id == NULL) - appendStringInfo(buf, "UNKNOWN (%X): ", info & ~XLR_INFO_MASK); + appendStringInfo(buf, "UNKNOWN (%X): ", (unsigned) (info & ~XLR_INFO_MASK)); else appendStringInfo(buf, "%s: ", id); diff --git a/src/backend/access/transam/xlogarchive.c b/src/backend/access/transam/xlogarchive.c index cae93ab69d..812a52996c 100644 --- a/src/backend/access/transam/xlogarchive.c +++ b/src/backend/access/transam/xlogarchive.c @@ -146,7 +146,7 @@ 
RestoreArchivedFile(char *path, const char *xlogfname, Assert(strcmp(lastRestartPointFname, xlogfname) <= 0); } else - XLogFileName(lastRestartPointFname, 0, 0L, wal_segment_size); + XLogFileName(lastRestartPointFname, 0U, 0L, wal_segment_size); /* Build the restore command to execute */ xlogRestoreCmd = BuildRestoreCommand(recoveryRestoreCommand, diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c index a63ad8cfd0..c451ac9f4b 100644 --- a/src/backend/access/transam/xlogreader.c +++ b/src/backend/access/transam/xlogreader.c @@ -789,7 +789,7 @@ XLogReaderValidatePageHeader(XLogReaderState *state, XLogRecPtr recptr, { XLogRecPtr recaddr; XLogSegNo segno; - int32 offset; + uint32 offset; XLogPageHeader hdr = (XLogPageHeader) phdr; Assert((recptr % XLOG_BLCKSZ) == 0); diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c index 7e915bcadf..ea19ba4777 100644 --- a/src/backend/access/transam/xlogutils.c +++ b/src/backend/access/transam/xlogutils.c @@ -967,15 +967,15 @@ WALReadRaiseError(WALReadError *errinfo) errno = errinfo->wre_errno; ereport(ERROR, (errcode_for_file_access(), - errmsg("could not read from log segment %s, offset %u: %m", + errmsg("could not read from log segment %s, offset %d: %m", fname, errinfo->wre_off))); } else if (errinfo->wre_read == 0) { ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("could not read from log segment %s, offset %u: read %d of %zu", + errmsg("could not read from log segment %s, offset %d: read %d of %d", fname, errinfo->wre_off, errinfo->wre_read, - (Size) errinfo->wre_req))); + errinfo->wre_req))); } } diff --git a/src/backend/catalog/Catalog.pm b/src/backend/catalog/Catalog.pm index dd39a086ce..8d1b6fc326 100644 --- a/src/backend/catalog/Catalog.pm +++ b/src/backend/catalog/Catalog.pm @@ -459,7 +459,7 @@ sub FindDefinedSymbol open(my $find_defined_symbol, '<', $file) || die "$file: $!"; while (<$find_defined_symbol>) { - if (/^#define\s+\Q$symbol\E\s+(\S+)/) + if (/^#define\s+\Q$symbol\E\s+(\d+)/) { $value = $1; last; diff --git a/src/backend/catalog/Makefile b/src/backend/catalog/Makefile index 2519771210..deffce5b6c 100644 --- a/src/backend/catalog/Makefile +++ b/src/backend/catalog/Makefile @@ -101,7 +101,7 @@ generated-header-symlinks: $(top_builddir)/src/include/catalog/header-stamp # configure run, even in distribution tarballs. So depending on configure.ac # instead is cheating a bit, but it will achieve the goal of updating the # version number when it changes. 
-bki-stamp: genbki.pl Catalog.pm $(POSTGRES_BKI_SRCS) $(POSTGRES_BKI_DATA) $(top_srcdir)/configure.ac +bki-stamp: genbki.pl Catalog.pm $(POSTGRES_BKI_SRCS) $(POSTGRES_BKI_DATA) $(top_srcdir)/src/include/access/transam.h $(top_srcdir)/configure.ac $(PERL) $< --include-path=$(top_srcdir)/src/include/ \ --set-version=$(MAJORVERSION) $(POSTGRES_BKI_SRCS) touch $@ diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c index b0d037600e..d2ee4bd0a5 100644 --- a/src/backend/catalog/dependency.c +++ b/src/backend/catalog/dependency.c @@ -1858,10 +1858,10 @@ find_expr_references_walker(Node *node, /* Find matching rtable entry, or complain if not found */ if (var->varlevelsup >= list_length(context->rtables)) - elog(ERROR, "invalid varlevelsup %d", var->varlevelsup); + elog(ERROR, "invalid varlevelsup %u", var->varlevelsup); rtable = (List *) list_nth(context->rtables, var->varlevelsup); if (var->varno <= 0 || var->varno > list_length(rtable)) - elog(ERROR, "invalid varno %d", var->varno); + elog(ERROR, "invalid varno %u", var->varno); rte = rt_fetch(var->varno, rtable); /* diff --git a/src/backend/catalog/genbki.pl b/src/backend/catalog/genbki.pl index 66fdaf67b1..a9ea5c7a5f 100644 --- a/src/backend/catalog/genbki.pl +++ b/src/backend/catalog/genbki.pl @@ -433,10 +433,10 @@ EOM # Emit OID macros for catalog's OID and rowtype OID, if wanted - printf $def "#define %s %s\n", + printf $def "#define %s %sU\n", $catalog->{relation_oid_macro}, $catalog->{relation_oid} if $catalog->{relation_oid_macro}; - printf $def "#define %s %s\n", + printf $def "#define %s %sU\n", $catalog->{rowtype_oid_macro}, $catalog->{rowtype_oid} if $catalog->{rowtype_oid_macro}; print $def "\n"; @@ -613,7 +613,7 @@ $bki_values{oid_symbol} if $catname eq 'pg_proc'; - printf $def "#define %s %s\n", + printf $def "#define %s %sU\n", $bki_values{oid_symbol}, $bki_values{oid}; } } diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c index 7751a23a60..237224bbc8 100644 --- a/src/backend/catalog/pg_shdepend.c +++ b/src/backend/catalog/pg_shdepend.c @@ -245,7 +245,7 @@ shdepChangeDep(Relation sdepRel, if (oldtup) elog(ERROR, "multiple pg_shdepend entries for object %u/%u/%d deptype %c", - classid, objid, objsubid, deptype); + classid, objid, objsubid, (int) deptype); oldtup = heap_copytuple(scantup); } diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index 115860a9d4..238697c98e 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -699,7 +699,7 @@ CopyGetData(CopyState cstate, void *databuf, int minread, int maxread) default: ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), - errmsg("unexpected message type 0x%02X during COPY from stdin", + errmsg("unexpected message type %d during COPY from stdin", mtype))); break; } diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index e3cfaf8b07..48298599d3 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -14688,7 +14688,7 @@ ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode /* fallthrough */ ; } else - elog(ERROR, "unexpected identity type %u", stmt->identity_type); + elog(ERROR, "unexpected identity type %d", stmt->identity_type); /* Check that the index exists */ diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c index de58df3d3f..7d811769eb 100644 --- a/src/backend/executor/nodeWindowAgg.c +++ b/src/backend/executor/nodeWindowAgg.c @@ -3330,7 +3330,7 @@ 
WinGetFuncArgInFrame(WindowObject winobj, int argno, break; default: elog(ERROR, "unrecognized frame option state: 0x%x", - winstate->frameOptions); + (unsigned int) winstate->frameOptions); break; } break; @@ -3400,7 +3400,7 @@ WinGetFuncArgInFrame(WindowObject winobj, int argno, break; default: elog(ERROR, "unrecognized frame option state: 0x%x", - winstate->frameOptions); + (unsigned int) winstate->frameOptions); mark_pos = 0; /* keep compiler quiet */ break; } diff --git a/src/backend/libpq/auth-scram.c b/src/backend/libpq/auth-scram.c index 0f79b28bb5..7a91ae42fc 100644 --- a/src/backend/libpq/auth-scram.c +++ b/src/backend/libpq/auth-scram.c @@ -1180,7 +1180,7 @@ build_server_first_message(scram_state *state) state->server_nonce[encoded_len] = '\0'; state->server_first_message = - psprintf("r=%s%s,s=%s,i=%u", + psprintf("r=%s%s,s=%s,i=%d", state->client_nonce, state->server_nonce, state->salt, state->iterations); diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c index d132c5cb48..8c393ef663 100644 --- a/src/backend/libpq/auth.c +++ b/src/backend/libpq/auth.c @@ -1017,7 +1017,7 @@ CheckSCRAMAuth(Port *port, char *shadow_pass, char **logdetail) /* * Negotiation generated data to be sent to the client. */ - elog(DEBUG4, "sending SASL challenge of length %u", outputlen); + elog(DEBUG4, "sending SASL challenge of length %d", outputlen); if (result == SASL_EXCHANGE_SUCCESS) sendAuthRequest(port, AUTH_REQ_SASL_FIN, output, outputlen); @@ -1161,8 +1161,8 @@ pg_GSS_recvauth(Port *port) /* gbuf no longer used */ pfree(buf.data); - elog(DEBUG5, "gss_accept_sec_context major: %d, " - "minor: %d, outlen: %u, outflags: %x", + elog(DEBUG5, "gss_accept_sec_context major: %u, " + "minor: %u, outlen: %u, outflags: %x", maj_stat, min_stat, (unsigned int) port->gss->outbuf.length, gflags); diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c index e10260051f..fae91a2f6a 100644 --- a/src/backend/libpq/be-secure-openssl.c +++ b/src/backend/libpq/be-secure-openssl.c @@ -1027,11 +1027,11 @@ info_cb(const SSL *ssl, int type, int args) break; case SSL_CB_READ_ALERT: ereport(DEBUG4, - (errmsg_internal("SSL: read alert (0x%04x)", args))); + (errmsg_internal("SSL: read alert (0x%04x)", (unsigned int) args))); break; case SSL_CB_WRITE_ALERT: ereport(DEBUG4, - (errmsg_internal("SSL: write alert (0x%04x)", args))); + (errmsg_internal("SSL: write alert (0x%04x)", (unsigned int) args))); break; } } diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c index 4504b1503b..4b703e3a75 100644 --- a/src/backend/nodes/outfuncs.c +++ b/src/backend/nodes/outfuncs.c @@ -695,7 +695,7 @@ _outForeignScan(StringInfo str, const ForeignScan *node) WRITE_NODE_FIELD(fdw_recheck_quals); WRITE_BITMAPSET_FIELD(fs_relids); WRITE_BOOL_FIELD(fsSystemCol); - WRITE_INT_FIELD(resultRelation); + WRITE_UINT_FIELD(resultRelation); } static void diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c index ab7b535caa..81f7205ca3 100644 --- a/src/backend/nodes/readfuncs.c +++ b/src/backend/nodes/readfuncs.c @@ -2014,7 +2014,7 @@ _readForeignScan(void) READ_NODE_FIELD(fdw_recheck_quals); READ_BITMAPSET_FIELD(fs_relids); READ_BOOL_FIELD(fsSystemCol); - READ_INT_FIELD(resultRelation); + READ_UINT_FIELD(resultRelation); READ_DONE(); } diff --git a/src/backend/port/sysv_sema.c b/src/backend/port/sysv_sema.c index 88c2862d58..12957fb9fd 100644 --- a/src/backend/port/sysv_sema.c +++ b/src/backend/port/sysv_sema.c @@ -124,7 +124,7 @@ 
InternalIpcSemaphoreCreate(IpcSemaphoreKey semKey, int numSems) (errmsg("could not create semaphores: %m"), errdetail("Failed system call was semget(%lu, %d, 0%o).", (unsigned long) semKey, numSems, - IPC_CREAT | IPC_EXCL | IPCProtection), + (unsigned int) (IPC_CREAT | IPC_EXCL | IPCProtection)), (saved_errno == ENOSPC) ? errhint("This error does *not* mean that you have run out of disk space. " "It occurs when either the system limit for the maximum number of " diff --git a/src/backend/port/sysv_shmem.c b/src/backend/port/sysv_shmem.c index 203555822d..4cde34fcd0 100644 --- a/src/backend/port/sysv_shmem.c +++ b/src/backend/port/sysv_shmem.c @@ -217,7 +217,7 @@ InternalIpcMemoryCreate(IpcMemoryKey memKey, Size size) (errmsg("could not create shared memory segment: %m"), errdetail("Failed system call was shmget(key=%lu, size=%zu, 0%o).", (unsigned long) memKey, size, - IPC_CREAT | IPC_EXCL | IPCProtection), + (unsigned int) (IPC_CREAT | IPC_EXCL | IPCProtection)), (shmget_errno == EINVAL) ? errhint("This error usually means that PostgreSQL's request for a shared memory " "segment exceeded your kernel's SHMMAX parameter, or possibly that " @@ -249,7 +249,7 @@ InternalIpcMemoryCreate(IpcMemoryKey memKey, Size size) if (memAddress == (void *) -1) elog(FATAL, "shmat(id=%d, addr=%p, flags=0x%x) failed: %m", - shmid, requestedAddress, PG_SHMAT_FLAGS); + shmid, requestedAddress, (unsigned int) PG_SHMAT_FLAGS); /* Register on-exit routine to detach new segment before deleting */ on_shmem_exit(IpcMemoryDetach, PointerGetDatum(memAddress)); diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c index 18957cece8..b98a14930c 100644 --- a/src/backend/postmaster/autovacuum.c +++ b/src/backend/postmaster/autovacuum.c @@ -1855,7 +1855,7 @@ autovac_balance_cost(void) } if (worker->wi_proc != NULL) - elog(DEBUG2, "autovac_balance_cost(pid=%u db=%u, rel=%u, dobalance=%s cost_limit=%d, cost_limit_base=%d, cost_delay=%g)", + elog(DEBUG2, "autovac_balance_cost(pid=%d db=%u, rel=%u, dobalance=%s cost_limit=%d, cost_limit_base=%d, cost_delay=%f)", worker->wi_proc->pid, worker->wi_dboid, worker->wi_tableoid, worker->wi_dobalance ? 
"yes" : "no", worker->wi_cost_limit, worker->wi_cost_limit_base, diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c index 5a9a0e3435..1efb0f5915 100644 --- a/src/backend/postmaster/bgworker.c +++ b/src/backend/postmaster/bgworker.c @@ -376,7 +376,7 @@ BackgroundWorkerStateChange(void) rw->rw_worker.bgw_notify_pid = slot->worker.bgw_notify_pid; if (!PostmasterMarkPIDForWorkerNotify(rw->rw_worker.bgw_notify_pid)) { - elog(DEBUG1, "worker notification PID %lu is not valid", + elog(DEBUG1, "worker notification PID %ld is not valid", (long) rw->rw_worker.bgw_notify_pid); rw->rw_worker.bgw_notify_pid = 0; } diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c index 959e3b8873..2f9939e41e 100644 --- a/src/backend/postmaster/postmaster.c +++ b/src/backend/postmaster/postmaster.c @@ -2106,7 +2106,7 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done) PG_PROTOCOL_MAJOR(proto) > PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST)) ereport(FATAL, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("unsupported frontend protocol %u.%u: server supports %u.0 to %u.%u", + errmsg("unsupported frontend protocol %u.%u: server supports %d.0 to %d.%d", PG_PROTOCOL_MAJOR(proto), PG_PROTOCOL_MINOR(proto), PG_PROTOCOL_MAJOR(PG_PROTOCOL_EARLIEST), PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST), diff --git a/src/backend/regex/regerror.c b/src/backend/regex/regerror.c index 4a27c2552c..fb657e1ada 100644 --- a/src/backend/regex/regerror.c +++ b/src/backend/regex/regerror.c @@ -34,7 +34,7 @@ #include "regex/regguts.h" /* unknown-error explanation */ -static const char unk[] = "*** unknown regex error code 0x%x ***"; +static const char unk[] = "*** unknown regex error code %d ***"; /* struct to map among codes, code names, and explanations */ static const struct rerr diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c index d67ddf7e54..f3af113c68 100644 --- a/src/backend/replication/basebackup.c +++ b/src/backend/replication/basebackup.c @@ -1662,7 +1662,7 @@ sendFile(const char *readfilename, const char *tarfilename, { ereport(WARNING, (errmsg("could not verify checksum in file \"%s\", block " - "%d: read buffer size %d and page size %d " + "%u: read buffer size %d and page size %d " "differ", readfilename, blkno, (int) cnt, BLCKSZ))); verify_checksum = false; @@ -1735,7 +1735,7 @@ sendFile(const char *readfilename, const char *tarfilename, if (checksum_failures <= 5) ereport(WARNING, (errmsg("checksum verification failed in " - "file \"%s\", block %d: calculated " + "file \"%s\", block %u: calculated " "%X but expected %X", readfilename, blkno, checksum, phdr->pd_checksum))); diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c index d5cfbeaa4a..08793cb871 100644 --- a/src/backend/replication/logical/logical.c +++ b/src/backend/replication/logical/logical.c @@ -1425,7 +1425,7 @@ LogicalConfirmReceivedLocation(XLogRecPtr lsn) { ReplicationSlotMarkDirty(); ReplicationSlotSave(); - elog(DEBUG1, "updated xmin: %u restart: %u", updated_xmin, updated_restart); + elog(DEBUG1, "updated xmin: %d restart: %d", updated_xmin, updated_restart); } /* diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c index 9d5d68f3fa..6f7e747896 100644 --- a/src/backend/replication/logical/snapbuild.c +++ b/src/backend/replication/logical/snapbuild.c @@ -1550,7 +1550,7 @@ SnapBuildSerialize(SnapBuild *builder, XLogRecPtr lsn) elog(DEBUG1, "serializing snapshot 
to %s", path); /* to make sure only we will write to this tempfile, include pid */ - sprintf(tmppath, "pg_logical/snapshots/%X-%X.snap.%u.tmp", + sprintf(tmppath, "pg_logical/snapshots/%X-%X.snap.%d.tmp", (uint32) (lsn >> 32), (uint32) lsn, MyProcPid); /* @@ -1752,13 +1752,13 @@ SnapBuildRestore(SnapBuild *builder, XLogRecPtr lsn) if (ondisk.magic != SNAPBUILD_MAGIC) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("snapbuild state file \"%s\" has wrong magic number: %u instead of %u", + errmsg("snapbuild state file \"%s\" has wrong magic number: %u instead of %d", path, ondisk.magic, SNAPBUILD_MAGIC))); if (ondisk.version != SNAPBUILD_VERSION) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("snapbuild state file \"%s\" has unsupported version: %u instead of %u", + errmsg("snapbuild state file \"%s\" has unsupported version: %u instead of %d", path, ondisk.version, SNAPBUILD_VERSION))); INIT_CRC32C(checksum); diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c index 04684912de..2a5d4bf475 100644 --- a/src/backend/replication/logical/worker.c +++ b/src/backend/replication/logical/worker.c @@ -1955,7 +1955,7 @@ apply_dispatch(StringInfo s) ereport(ERROR, (errcode(ERRCODE_PROTOCOL_VIOLATION), - errmsg("invalid logical replication message type \"%c\"", action))); + errmsg("invalid logical replication message type \"%c\"", (char) action))); } /* diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c index 9c997aed83..b07462a620 100644 --- a/src/backend/replication/pgoutput/pgoutput.c +++ b/src/backend/replication/pgoutput/pgoutput.c @@ -275,13 +275,13 @@ pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt, if (data->protocol_version > LOGICALREP_PROTO_MAX_VERSION_NUM) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("client sent proto_version=%d but we only support protocol %d or lower", + errmsg("client sent proto_version=%u but we only support protocol %d or lower", data->protocol_version, LOGICALREP_PROTO_MAX_VERSION_NUM))); if (data->protocol_version < LOGICALREP_PROTO_MIN_VERSION_NUM) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("client sent proto_version=%d but we only support protocol %d or higher", + errmsg("client sent proto_version=%u but we only support protocol %d or higher", data->protocol_version, LOGICALREP_PROTO_MIN_VERSION_NUM))); if (list_length(data->publication_names) < 1) @@ -300,7 +300,7 @@ pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt, else if (data->protocol_version < LOGICALREP_PROTO_STREAM_VERSION_NUM) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("requested proto_version=%d does not support streaming, need %d or higher", + errmsg("requested proto_version=%u does not support streaming, need %d or higher", data->protocol_version, LOGICALREP_PROTO_STREAM_VERSION_NUM))); else if (!ctx->streaming) ereport(ERROR, diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c index 09be1d8c48..beeab1b56a 100644 --- a/src/backend/replication/slot.c +++ b/src/backend/replication/slot.c @@ -86,7 +86,7 @@ typedef struct ReplicationSlotOnDisk #define ReplicationSlotOnDiskV2Size \ sizeof(ReplicationSlotOnDisk) - ReplicationSlotOnDiskConstantSize -#define SLOT_MAGIC 0x1051CA1 /* format identifier */ +#define SLOT_MAGIC 0x1051CA1U /* format identifier */ #define SLOT_VERSION 2 /* version for new files */ /* Control array for replication slot management */ diff 
--git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c index 6e8c76537a..6135e9b3ae 100644 --- a/src/backend/replication/syncrep.c +++ b/src/backend/replication/syncrep.c @@ -116,7 +116,7 @@ static void SyncRepGetNthLatestSyncRecPtr(XLogRecPtr *writePtr, SyncRepStandbyData *sync_standbys, int num_standbys, uint8 nth); -static int SyncRepGetStandbyPriority(void); +static unsigned int SyncRepGetStandbyPriority(void); static int standby_priority_comparator(const void *a, const void *b); static int cmp_lsn(const void *a, const void *b); @@ -411,7 +411,7 @@ SyncRepCleanupAtProcExit(void) void SyncRepInitConfig(void) { - int priority; + unsigned int priority; /* * Determine if we are a potential sync standby and remember the result @@ -828,7 +828,7 @@ standby_priority_comparator(const void *a, const void *b) * Compare the parameter SyncRepStandbyNames against the application_name * for this WALSender, or allow any name if we find a wildcard "*". */ -static int +static unsigned int SyncRepGetStandbyPriority(void) { const char *standby_name; diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c index bb1d44ccb7..d19d47b33f 100644 --- a/src/backend/replication/walreceiver.c +++ b/src/backend/replication/walreceiver.c @@ -901,7 +901,7 @@ XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len) static void XLogWalRcvWrite(char *buf, Size nbytes, XLogRecPtr recptr) { - int startoff; + unsigned int startoff; int byteswritten; while (nbytes > 0) diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c index 41dd670572..9f07767ce4 100644 --- a/src/backend/rewrite/rewriteHandler.c +++ b/src/backend/rewrite/rewriteHandler.c @@ -237,7 +237,7 @@ AcquireRewriteLocks(Query *parsetree, { curinputvarno = aliasvar->varno; if (curinputvarno >= rt_index) - elog(ERROR, "unexpected varno %d in JOIN RTE %d", + elog(ERROR, "unexpected varno %u in JOIN RTE %d", curinputvarno, rt_index); curinputrte = rt_fetch(curinputvarno, parsetree->rtable); diff --git a/src/backend/statistics/dependencies.c b/src/backend/statistics/dependencies.c index d950b4eabe..f3becb8f30 100644 --- a/src/backend/statistics/dependencies.c +++ b/src/backend/statistics/dependencies.c @@ -515,7 +515,7 @@ statext_dependencies_deserialize(bytea *data) return NULL; if (VARSIZE_ANY_EXHDR(data) < SizeOfHeader) - elog(ERROR, "invalid MVDependencies size %zd (expected at least %zd)", + elog(ERROR, "invalid MVDependencies size %lu (expected at least %lu)", VARSIZE_ANY_EXHDR(data), SizeOfHeader); /* read the MVDependencies header */ @@ -533,11 +533,11 @@ statext_dependencies_deserialize(bytea *data) tmp += sizeof(uint32); if (dependencies->magic != STATS_DEPS_MAGIC) - elog(ERROR, "invalid dependency magic %d (expected %d)", + elog(ERROR, "invalid dependency magic %u (expected %u)", dependencies->magic, STATS_DEPS_MAGIC); if (dependencies->type != STATS_DEPS_TYPE_BASIC) - elog(ERROR, "invalid dependency type %d (expected %d)", + elog(ERROR, "invalid dependency type %u (expected %d)", dependencies->type, STATS_DEPS_TYPE_BASIC); if (dependencies->ndeps == 0) @@ -547,7 +547,7 @@ statext_dependencies_deserialize(bytea *data) min_expected_size = SizeOfItem(dependencies->ndeps); if (VARSIZE_ANY_EXHDR(data) < min_expected_size) - elog(ERROR, "invalid dependencies size %zd (expected at least %zd)", + elog(ERROR, "invalid dependencies size %lu (expected at least %lu)", VARSIZE_ANY_EXHDR(data), min_expected_size); /* allocate space for the MCV items */ diff --git 
a/src/backend/statistics/mcv.c b/src/backend/statistics/mcv.c index bec14fbc57..8fd08f8ceb 100644 --- a/src/backend/statistics/mcv.c +++ b/src/backend/statistics/mcv.c @@ -1025,7 +1025,7 @@ statext_mcv_deserialize(bytea *data) * header fields one by one, so we need to ignore struct alignment. */ if (VARSIZE_ANY(data) < MinSizeOfMCVList) - elog(ERROR, "invalid MCV size %zd (expected at least %zu)", + elog(ERROR, "invalid MCV size %lu (expected at least %zu)", VARSIZE_ANY(data), MinSizeOfMCVList); /* read the MCV list header */ @@ -1054,7 +1054,7 @@ statext_mcv_deserialize(bytea *data) mcvlist->magic, STATS_MCV_MAGIC); if (mcvlist->type != STATS_MCV_TYPE_BASIC) - elog(ERROR, "invalid MCV type %u (expected %u)", + elog(ERROR, "invalid MCV type %u (expected %d)", mcvlist->type, STATS_MCV_TYPE_BASIC); if (mcvlist->ndimensions == 0) @@ -1086,7 +1086,7 @@ statext_mcv_deserialize(bytea *data) * to do this check first, before accessing the dimension info. */ if (VARSIZE_ANY(data) < expected_size) - elog(ERROR, "invalid MCV size %zd (expected %zu)", + elog(ERROR, "invalid MCV size %lu (expected %zu)", VARSIZE_ANY(data), expected_size); /* Now copy the array of type Oids. */ @@ -1118,7 +1118,7 @@ statext_mcv_deserialize(bytea *data) * check on size. */ if (VARSIZE_ANY(data) != expected_size) - elog(ERROR, "invalid MCV size %zd (expected %zu)", + elog(ERROR, "invalid MCV size %lu (expected %zu)", VARSIZE_ANY(data), expected_size); /* diff --git a/src/backend/statistics/mvdistinct.c b/src/backend/statistics/mvdistinct.c index 4b86f0ab2d..15477e61cf 100644 --- a/src/backend/statistics/mvdistinct.c +++ b/src/backend/statistics/mvdistinct.c @@ -259,7 +259,7 @@ statext_ndistinct_deserialize(bytea *data) /* we expect at least the basic fields of MVNDistinct struct */ if (VARSIZE_ANY_EXHDR(data) < SizeOfHeader) - elog(ERROR, "invalid MVNDistinct size %zd (expected at least %zd)", + elog(ERROR, "invalid MVNDistinct size %lu (expected at least %lu)", VARSIZE_ANY_EXHDR(data), SizeOfHeader); /* initialize pointer to the data part (skip the varlena header) */ @@ -277,7 +277,7 @@ statext_ndistinct_deserialize(bytea *data) elog(ERROR, "invalid ndistinct magic %08x (expected %08x)", ndist.magic, STATS_NDISTINCT_MAGIC); if (ndist.type != STATS_NDISTINCT_TYPE_BASIC) - elog(ERROR, "invalid ndistinct type %d (expected %d)", + elog(ERROR, "invalid ndistinct type %u (expected %d)", ndist.type, STATS_NDISTINCT_TYPE_BASIC); if (ndist.nitems == 0) elog(ERROR, "invalid zero-length item array in MVNDistinct"); @@ -285,7 +285,7 @@ statext_ndistinct_deserialize(bytea *data) /* what minimum bytea size do we expect for those parameters */ minimum_size = MinSizeOfItems(ndist.nitems); if (VARSIZE_ANY_EXHDR(data) < minimum_size) - elog(ERROR, "invalid MVNDistinct size %zd (expected at least %zd)", + elog(ERROR, "invalid MVNDistinct size %lu (expected at least %lu)", VARSIZE_ANY_EXHDR(data), minimum_size); /* diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c index 6ffd7b3306..088a8c3f83 100644 --- a/src/backend/storage/buffer/localbuf.c +++ b/src/backend/storage/buffer/localbuf.c @@ -342,7 +342,7 @@ DropRelFileNodeLocalBuffers(RelFileNode rnode, ForkNumber forkNum, bufHdr->tag.blockNum >= firstDelBlock) { if (LocalRefCount[i] != 0) - elog(ERROR, "block %u of %s is still referenced (local %u)", + elog(ERROR, "block %u of %s is still referenced (local %d)", bufHdr->tag.blockNum, relpathbackend(bufHdr->tag.rnode, MyBackendId, bufHdr->tag.forkNum), @@ -386,7 +386,7 @@ 
DropRelFileNodeAllLocalBuffers(RelFileNode rnode) RelFileNodeEquals(bufHdr->tag.rnode, rnode)) { if (LocalRefCount[i] != 0) - elog(ERROR, "block %u of %s is still referenced (local %u)", + elog(ERROR, "block %u of %s is still referenced (local %d)", bufHdr->tag.blockNum, relpathbackend(bufHdr->tag.rnode, MyBackendId, bufHdr->tag.forkNum), diff --git a/src/backend/storage/ipc/ipc.c b/src/backend/storage/ipc/ipc.c index 36a067c924..b2923cd05c 100644 --- a/src/backend/storage/ipc/ipc.c +++ b/src/backend/storage/ipc/ipc.c @@ -396,7 +396,7 @@ cancel_before_shmem_exit(pg_on_exit_callback function, Datum arg) --before_shmem_exit_index; else elog(ERROR, "before_shmem_exit callback (%p,0x%llx) is not the latest entry", - function, (long long) arg); + function, (unsigned long long) arg); } /* ---------------------------------------------------------------- diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c index 52b2809dac..afa9c96fbf 100644 --- a/src/backend/storage/ipc/standby.c +++ b/src/backend/storage/ipc/standby.c @@ -1006,7 +1006,7 @@ LogCurrentRunningXacts(RunningTransactions CurrRunningXacts) if (CurrRunningXacts->subxid_overflow) elog(trace_recovery(DEBUG2), - "snapshot of %u running transactions overflowed (lsn %X/%X oldest xid %u latest complete %u next xid %u)", + "snapshot of %d running transactions overflowed (lsn %X/%X oldest xid %u latest complete %u next xid %u)", CurrRunningXacts->xcnt, (uint32) (recptr >> 32), (uint32) recptr, CurrRunningXacts->oldestRunningXid, @@ -1014,7 +1014,7 @@ LogCurrentRunningXacts(RunningTransactions CurrRunningXacts) CurrRunningXacts->nextXid); else elog(trace_recovery(DEBUG2), - "snapshot of %u+%u running transaction ids (lsn %X/%X oldest xid %u latest complete %u next xid %u)", + "snapshot of %d+%d running transaction ids (lsn %X/%X oldest xid %u latest complete %u next xid %u)", CurrRunningXacts->xcnt, CurrRunningXacts->subxcnt, (uint32) (recptr >> 32), (uint32) recptr, CurrRunningXacts->oldestRunningXid, diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c index 7409de9405..bde550c2e5 100644 --- a/src/backend/storage/lmgr/lmgr.c +++ b/src/backend/storage/lmgr/lmgr.c @@ -1140,7 +1140,7 @@ DescribeLockTag(StringInfo buf, const LOCKTAG *tag) break; case LOCKTAG_VIRTUALTRANSACTION: appendStringInfo(buf, - _("virtual transaction %d/%u"), + _("virtual transaction %u/%u"), tag->locktag_field1, tag->locktag_field2); break; diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c index ddf18079e2..4fe17aac50 100644 --- a/src/backend/storage/page/bufpage.c +++ b/src/backend/storage/page/bufpage.c @@ -709,7 +709,7 @@ PageRepairFragmentation(Page page) pd_special != MAXALIGN(pd_special)) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u", + errmsg("corrupted page pointers: lower = %d, upper = %d, special = %d", pd_lower, pd_upper, pd_special))); /* @@ -738,7 +738,7 @@ PageRepairFragmentation(Page page) itemidptr->itemoff >= (int) pd_special)) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("corrupted line pointer: %u", + errmsg("corrupted line pointer: %d", itemidptr->itemoff))); itemidptr->alignedlen = MAXALIGN(ItemIdGetLength(lp)); totallen += itemidptr->alignedlen; @@ -765,7 +765,7 @@ PageRepairFragmentation(Page page) if (totallen > (Size) (pd_special - pd_lower)) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("corrupted item lengths: total %u, available space %u", + 
errmsg("corrupted item lengths: total %u, available space %d", (unsigned int) totallen, pd_special - pd_lower))); compactify_tuples(itemidbase, nstorage, page, presorted); @@ -1088,7 +1088,7 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems) pd_special != MAXALIGN(pd_special)) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u", + errmsg("corrupted page pointers: lower = %d, upper = %d, special = %d", pd_lower, pd_upper, pd_special))); /* @@ -1146,7 +1146,7 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems) if (totallen > (Size) (pd_special - pd_lower)) ereport(ERROR, (errcode(ERRCODE_DATA_CORRUPTED), - errmsg("corrupted item lengths: total %u, available space %u", + errmsg("corrupted item lengths: total %u, available space %d", (unsigned int) totallen, pd_special - pd_lower))); /* diff --git a/src/backend/tsearch/dict_thesaurus.c b/src/backend/tsearch/dict_thesaurus.c index 64c979086d..0d8a9ef759 100644 --- a/src/backend/tsearch/dict_thesaurus.c +++ b/src/backend/tsearch/dict_thesaurus.c @@ -413,13 +413,13 @@ compileTheLexeme(DictThesaurus *d) if (!ptr) ereport(ERROR, (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("thesaurus sample word \"%s\" isn't recognized by subdictionary (rule %d)", + errmsg("thesaurus sample word \"%s\" isn't recognized by subdictionary (rule %u)", d->wrds[i].lexeme, d->wrds[i].entries->idsubst + 1))); else if (!(ptr->lexeme)) ereport(ERROR, (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("thesaurus sample word \"%s\" is a stop word (rule %d)", + errmsg("thesaurus sample word \"%s\" is a stop word (rule %u)", d->wrds[i].lexeme, d->wrds[i].entries->idsubst + 1), errhint("Use \"?\" to represent a stop word within a sample phrase."))); diff --git a/src/backend/utils/adt/inet_cidr_ntop.c b/src/backend/utils/adt/inet_cidr_ntop.c index 5f74c05a65..88ca8077e1 100644 --- a/src/backend/utils/adt/inet_cidr_ntop.c +++ b/src/backend/utils/adt/inet_cidr_ntop.c @@ -136,7 +136,7 @@ inet_cidr_ntop_ipv4(const u_char *src, int bits, char *dst, size_t size) /* Format CIDR /width. */ if (size <= sizeof "/32") goto emsgsize; - dst += SPRINTF((dst, "/%u", bits)); + dst += SPRINTF((dst, "/%d", bits)); return odst; emsgsize: @@ -275,13 +275,13 @@ inet_cidr_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size) { if (cp != outbuf) *cp++ = ':'; - cp += SPRINTF((cp, "%x", *s * 256 + s[1])); + cp += SPRINTF((cp, "%x", (unsigned int) (*s * 256 + s[1]))); s += 2; } } } /* Format CIDR /width. 
*/ - (void) SPRINTF((cp, "/%u", bits)); + (void) SPRINTF((cp, "/%d", bits)); if (strlen(outbuf) + 1 > size) goto emsgsize; strcpy(dst, outbuf); diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c index cbeaa0a482..5906359b01 100644 --- a/src/backend/utils/adt/json.c +++ b/src/backend/utils/adt/json.c @@ -1308,7 +1308,7 @@ escape_json(StringInfo buf, const char *str) break; default: if ((unsigned char) *p < ' ') - appendStringInfo(buf, "\\u%04x", (int) *p); + appendStringInfo(buf, "\\u%04x", (unsigned int) *p); else appendStringInfoCharMacro(buf, *p); break; diff --git a/src/backend/utils/adt/jsonb_util.c b/src/backend/utils/adt/jsonb_util.c index 4eeffa1424..292f85562b 100644 --- a/src/backend/utils/adt/jsonb_util.c +++ b/src/backend/utils/adt/jsonb_util.c @@ -1611,7 +1611,7 @@ convertJsonbArray(StringInfo buffer, JEntry *pheader, JsonbValue *val, int level if (totallen > JENTRY_OFFLENMASK) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("total size of jsonb array elements exceeds the maximum of %u bytes", + errmsg("total size of jsonb array elements exceeds the maximum of %d bytes", JENTRY_OFFLENMASK))); /* @@ -1631,7 +1631,7 @@ convertJsonbArray(StringInfo buffer, JEntry *pheader, JsonbValue *val, int level if (totallen > JENTRY_OFFLENMASK) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("total size of jsonb array elements exceeds the maximum of %u bytes", + errmsg("total size of jsonb array elements exceeds the maximum of %d bytes", JENTRY_OFFLENMASK))); /* Initialize the header of this node in the container's JEntry array */ @@ -1692,7 +1692,7 @@ convertJsonbObject(StringInfo buffer, JEntry *pheader, JsonbValue *val, int leve if (totallen > JENTRY_OFFLENMASK) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("total size of jsonb object elements exceeds the maximum of %u bytes", + errmsg("total size of jsonb object elements exceeds the maximum of %d bytes", JENTRY_OFFLENMASK))); /* @@ -1727,7 +1727,7 @@ convertJsonbObject(StringInfo buffer, JEntry *pheader, JsonbValue *val, int leve if (totallen > JENTRY_OFFLENMASK) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("total size of jsonb object elements exceeds the maximum of %u bytes", + errmsg("total size of jsonb object elements exceeds the maximum of %d bytes", JENTRY_OFFLENMASK))); /* @@ -1747,7 +1747,7 @@ convertJsonbObject(StringInfo buffer, JEntry *pheader, JsonbValue *val, int leve if (totallen > JENTRY_OFFLENMASK) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("total size of jsonb object elements exceeds the maximum of %u bytes", + errmsg("total size of jsonb object elements exceeds the maximum of %d bytes", JENTRY_OFFLENMASK))); /* Initialize the header of this node in the container's JEntry array */ diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c index d370348a1c..ea0ef70910 100644 --- a/src/backend/utils/adt/jsonfuncs.c +++ b/src/backend/utils/adt/jsonfuncs.c @@ -3099,7 +3099,7 @@ populate_record_field(ColumnIOData *col, jsv, *isnull); default: - elog(ERROR, "unrecognized type category '%c'", typcat); + elog(ERROR, "unrecognized type category '%c'", (int) typcat); return (Datum) 0; } } diff --git a/src/backend/utils/adt/mac.c b/src/backend/utils/adt/mac.c index 8aeddc6863..e45f616a45 100644 --- a/src/backend/utils/adt/mac.c +++ b/src/backend/utils/adt/mac.c @@ -57,7 +57,7 @@ macaddr_in(PG_FUNCTION_ARGS) { char *str = PG_GETARG_CSTRING(0); macaddr *result; - int a, + unsigned int a, b, 
c, d, diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c index 20c9cac2fa..3539537dc7 100644 --- a/src/backend/utils/adt/numeric.c +++ b/src/backend/utils/adt/numeric.c @@ -7272,7 +7272,7 @@ make_result_opt_error(const NumericVar *var, bool *have_error) if (!(sign == NUMERIC_NAN || sign == NUMERIC_PINF || sign == NUMERIC_NINF)) - elog(ERROR, "invalid numeric sign value 0x%x", sign); + elog(ERROR, "invalid numeric sign value 0x%x", (unsigned) sign); result = (Numeric) palloc(NUMERIC_HDRSZ_SHORT); diff --git a/src/backend/utils/adt/oracle_compat.c b/src/backend/utils/adt/oracle_compat.c index 76e666474e..1224f86255 100644 --- a/src/backend/utils/adt/oracle_compat.c +++ b/src/backend/utils/adt/oracle_compat.c @@ -925,7 +925,7 @@ ascii(PG_FUNCTION_ARGS) Datum chr (PG_FUNCTION_ARGS) { - uint32 cvalue = PG_GETARG_UINT32(0); + uint32 cvalue = PG_GETARG_UINT32(0); // XXX text *result; int encoding = GetDatabaseEncoding(); @@ -943,7 +943,7 @@ chr (PG_FUNCTION_ARGS) if (cvalue > 0x0010ffff) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("requested character too large for encoding: %d", + errmsg("requested character too large for encoding: %u", cvalue))); if (cvalue > 0xffff) @@ -984,7 +984,7 @@ chr (PG_FUNCTION_ARGS) if (!pg_utf8_islegal(wch, bytes)) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("requested character not valid for encoding: %d", + errmsg("requested character not valid for encoding: %u", cvalue))); } else @@ -1005,7 +1005,7 @@ chr (PG_FUNCTION_ARGS) if ((is_mb && (cvalue > 127)) || (!is_mb && (cvalue > 255))) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("requested character too large for encoding: %d", + errmsg("requested character too large for encoding: %u", cvalue))); result = (text *) palloc(VARHDRSZ + 1); diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index 0681cc14e0..a203509a6c 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -6623,7 +6623,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context) /* Find appropriate nesting depth */ netlevelsup = var->varlevelsup + levelsup; if (netlevelsup >= list_length(context->namespaces)) - elog(ERROR, "bogus varlevelsup: %d offset %d", + elog(ERROR, "bogus varlevelsup: %u offset %d", var->varlevelsup, levelsup); dpns = (deparse_namespace *) list_nth(context->namespaces, netlevelsup); @@ -6939,7 +6939,7 @@ resolve_special_varno(Node *node, deparse_context *context, return; } else if (var->varno < 1 || var->varno > list_length(dpns->rtable)) - elog(ERROR, "bogus varno: %d", var->varno); + elog(ERROR, "bogus varno: %u", var->varno); /* Not special. Just invoke the callback. 
*/ (*callback) (node, context, callback_arg); @@ -7026,7 +7026,7 @@ get_name_for_var_field(Var *var, int fieldno, /* Find appropriate nesting depth */ netlevelsup = var->varlevelsup + levelsup; if (netlevelsup >= list_length(context->namespaces)) - elog(ERROR, "bogus varlevelsup: %d offset %d", + elog(ERROR, "bogus varlevelsup: %u offset %d", var->varlevelsup, levelsup); dpns = (deparse_namespace *) list_nth(context->namespaces, netlevelsup); @@ -7117,7 +7117,7 @@ get_name_for_var_field(Var *var, int fieldno, } else { - elog(ERROR, "bogus varno: %d", varno); + elog(ERROR, "bogus varno: %u", varno); return NULL; /* keep compiler quiet */ } diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c index ea0ada704f..05d89d4ef0 100644 --- a/src/backend/utils/adt/timestamp.c +++ b/src/backend/utils/adt/timestamp.c @@ -1166,7 +1166,7 @@ intervaltypmodout(PG_FUNCTION_ARGS) fieldstr = ""; break; default: - elog(ERROR, "invalid INTERVAL typmod: 0x%x", typmod); + elog(ERROR, "invalid INTERVAL typmod: 0x%x", (unsigned) typmod); fieldstr = ""; break; } @@ -1226,7 +1226,7 @@ intervaltypmodleastfield(int32 typmod) case INTERVAL_FULL_RANGE: return 0; /* SECOND */ default: - elog(ERROR, "invalid INTERVAL typmod: 0x%x", typmod); + elog(ERROR, "invalid INTERVAL typmod: 0x%x", (unsigned) typmod); break; } return 0; /* can't get here, but keep compiler quiet */ diff --git a/src/backend/utils/adt/tsvector_op.c b/src/backend/utils/adt/tsvector_op.c index 756a48a167..e9fa94d387 100644 --- a/src/backend/utils/adt/tsvector_op.c +++ b/src/backend/utils/adt/tsvector_op.c @@ -2367,9 +2367,9 @@ ts_process_call(FuncCallContext *funcctx) values[0] = palloc(entry->lenlexeme + 1); memcpy(values[0], entry->lexeme, entry->lenlexeme); (values[0])[entry->lenlexeme] = '\0'; - sprintf(ndoc, "%d", entry->ndoc); + sprintf(ndoc, "%u", entry->ndoc); values[1] = ndoc; - sprintf(nentry, "%d", entry->nentry); + sprintf(nentry, "%u", entry->nentry); values[2] = nentry; tuple = BuildTupleFromCStrings(funcctx->attinmeta, values); diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c index 5447786336..4a4554318e 100644 --- a/src/backend/utils/adt/xml.c +++ b/src/backend/utils/adt/xml.c @@ -959,8 +959,8 @@ pg_xml_init_library(void) if (sizeof(char) != sizeof(xmlChar)) ereport(ERROR, (errmsg("could not initialize XML library"), - errdetail("libxml2 has incompatible char type: sizeof(char)=%u, sizeof(xmlChar)=%u.", - (int) sizeof(char), (int) sizeof(xmlChar)))); + errdetail("libxml2 has incompatible char type: sizeof(char)=%zu, sizeof(xmlChar)=%zu.", + sizeof(char), sizeof(xmlChar)))); #ifdef USE_LIBXMLCONTEXT /* Set up libxml's memory allocation our way */ @@ -3637,8 +3637,8 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod) " \n" " \n" " \n", - (((uint64) 1) << (sizeof(int64) * 8 - 1)) - 1, - (((uint64) 1) << (sizeof(int64) * 8 - 1))); + PG_INT64_MAX, + PG_INT64_MIN); break; case FLOAT4OID: diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c index 3613ae5f44..8abb3c7f09 100644 --- a/src/backend/utils/cache/catcache.c +++ b/src/backend/utils/cache/catcache.c @@ -2076,7 +2076,7 @@ PrintCatCacheLeakWarning(HeapTuple tuple) /* Safety check to ensure we were handed a cache entry */ Assert(ct->ct_magic == CT_MAGIC); - elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%u has count %d", + elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%d has count %d", ct->my_cache->cc_relname, ct->my_cache->id, ItemPointerGetBlockNumber(&(tuple->t_self)), 
ItemPointerGetOffsetNumber(&(tuple->t_self)), diff --git a/src/backend/utils/cache/relmapper.c b/src/backend/utils/cache/relmapper.c index 671fbb0ed5..21a5a6b5fc 100644 --- a/src/backend/utils/cache/relmapper.c +++ b/src/backend/utils/cache/relmapper.c @@ -1004,7 +1004,7 @@ relmap_redo(XLogReaderState *record) char *dbpath; if (xlrec->nbytes != sizeof(RelMapFile)) - elog(PANIC, "relmap_redo: wrong size %u in relmap update record", + elog(PANIC, "relmap_redo: wrong size %d in relmap update record", xlrec->nbytes); memcpy(&newmap, xlrec->data, sizeof(newmap)); diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c index 1ba47c194b..d7db16bf3c 100644 --- a/src/backend/utils/error/elog.c +++ b/src/backend/utils/error/elog.c @@ -2433,11 +2433,11 @@ log_line_prefix(StringInfo buf, ErrorData *edata) char strfbuf[128]; snprintf(strfbuf, sizeof(strfbuf) - 1, "%lx.%x", - (long) (MyStartTime), MyProcPid); + (unsigned long) (MyStartTime), (unsigned) MyProcPid); appendStringInfo(buf, "%*s", padding, strfbuf); } else - appendStringInfo(buf, "%lx.%x", (long) (MyStartTime), MyProcPid); + appendStringInfo(buf, "%lx.%x", (unsigned long) (MyStartTime), (unsigned) MyProcPid); break; case 'p': if (padding != 0) @@ -2734,7 +2734,7 @@ write_csvlog(ErrorData *edata) appendStringInfoChar(&buf, ','); /* session id */ - appendStringInfo(&buf, "%lx.%x", (long) MyStartTime, MyProcPid); + appendStringInfo(&buf, "%lx.%x", (unsigned long) MyStartTime, (unsigned) MyProcPid); appendStringInfoChar(&buf, ','); /* Line number */ diff --git a/src/backend/utils/misc/guc-file.l b/src/backend/utils/misc/guc-file.l index c98e220295..2f7fdef684 100644 --- a/src/backend/utils/misc/guc-file.l +++ b/src/backend/utils/misc/guc-file.l @@ -311,7 +311,7 @@ ProcessConfigFileInternal(GucContext context, bool applySettings, int elevel) /* Invalid non-custom variable, so complain */ ereport(elevel, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("unrecognized configuration parameter \"%s\" in file \"%s\" line %u", + errmsg("unrecognized configuration parameter \"%s\" in file \"%s\" line %d", item->name, item->filename, item->sourceline))); item->errmsg = pstrdup("unrecognized configuration parameter"); diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index bb5d473cdb..78608791c9 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -11695,7 +11695,7 @@ show_unix_socket_permissions(void) { static char buf[12]; - snprintf(buf, sizeof(buf), "%04o", Unix_socket_permissions); + snprintf(buf, sizeof(buf), "%04o", (unsigned) Unix_socket_permissions); return buf; } @@ -11704,7 +11704,7 @@ show_log_file_mode(void) { static char buf[12]; - snprintf(buf, sizeof(buf), "%04o", Log_file_mode); + snprintf(buf, sizeof(buf), "%04o", (unsigned) Log_file_mode); return buf; } @@ -11713,7 +11713,7 @@ show_data_directory_mode(void) { static char buf[12]; - snprintf(buf, sizeof(buf), "%04o", data_directory_mode); + snprintf(buf, sizeof(buf), "%04o", (unsigned) data_directory_mode); return buf; } diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c index 60a761caba..27ca834cc6 100644 --- a/src/backend/utils/mmgr/aset.c +++ b/src/backend/utils/mmgr/aset.c @@ -1376,7 +1376,7 @@ AllocSetStats(MemoryContext context, char stats_string[200]; snprintf(stats_string, sizeof(stats_string), - "%zu total in %zd blocks; %zu free (%zd chunks); %zu used", + "%zu total in %zu blocks; %zu free (%zu chunks); %zu used", totalspace, nblocks, freespace, freechunks, totalspace - freespace); 
printfunc(context, passthru, stats_string); diff --git a/src/backend/utils/mmgr/generation.c b/src/backend/utils/mmgr/generation.c index af52616e57..0c84a3bc8f 100644 --- a/src/backend/utils/mmgr/generation.c +++ b/src/backend/utils/mmgr/generation.c @@ -701,7 +701,7 @@ GenerationStats(MemoryContext context, char stats_string[200]; snprintf(stats_string, sizeof(stats_string), - "%zu total in %zd blocks (%zd chunks); %zu free (%zd chunks); %zu used", + "%zu total in %zu blocks (%zu chunks); %zu free (%zu chunks); %zu used", totalspace, nblocks, nchunks, freespace, nfreechunks, totalspace - freespace); printfunc(context, passthru, stats_string); diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c index dda70ef9f3..c775f4122b 100644 --- a/src/backend/utils/mmgr/mcxt.c +++ b/src/backend/utils/mmgr/mcxt.c @@ -517,7 +517,7 @@ MemoryContextStatsDetail(MemoryContext context, int max_children) MemoryContextStatsInternal(context, 0, true, max_children, &grand_totals); fprintf(stderr, - "Grand total: %zu bytes in %zd blocks; %zu free (%zd chunks); %zu used\n", + "Grand total: %zu bytes in %zu blocks; %zu free (%zu chunks); %zu used\n", grand_totals.totalspace, grand_totals.nblocks, grand_totals.freespace, grand_totals.freechunks, grand_totals.totalspace - grand_totals.freespace); @@ -577,7 +577,7 @@ MemoryContextStatsInternal(MemoryContext context, int level, for (i = 0; i <= level; i++) fprintf(stderr, " "); fprintf(stderr, - "%d more child contexts containing %zu total in %zd blocks; %zu free (%zd chunks); %zu used\n", + "%d more child contexts containing %zu total in %zu blocks; %zu free (%zu chunks); %zu used\n", ichild - max_children, local_totals.totalspace, local_totals.nblocks, diff --git a/src/backend/utils/mmgr/slab.c b/src/backend/utils/mmgr/slab.c index f8d801c419..a1f12e0031 100644 --- a/src/backend/utils/mmgr/slab.c +++ b/src/backend/utils/mmgr/slab.c @@ -668,7 +668,7 @@ SlabStats(MemoryContext context, char stats_string[200]; snprintf(stats_string, sizeof(stats_string), - "%zu total in %zd blocks; %zu free (%zd chunks); %zu used", + "%zu total in %zu blocks; %zu free (%zu chunks); %zu used", totalspace, nblocks, freespace, freechunks, totalspace - freespace); printfunc(context, passthru, stats_string); diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c index 8c41483e87..b76c4a5628 100644 --- a/src/backend/utils/time/snapmgr.c +++ b/src/backend/utils/time/snapmgr.c @@ -1158,7 +1158,7 @@ ExportSnapshot(Snapshot snapshot) * inside the transaction from 1. */ snprintf(path, sizeof(path), SNAPSHOT_EXPORT_DIR "/%08X-%08X-%d", - MyProc->backendId, MyProc->lxid, list_length(exportedSnapshots) + 1); + (unsigned) MyProc->backendId, MyProc->lxid, list_length(exportedSnapshots) + 1); /* * Copy the snapshot into TopTransactionContext, add it to the @@ -1207,7 +1207,7 @@ ExportSnapshot(Snapshot snapshot) */ addTopXid = (TransactionIdIsValid(topXid) && TransactionIdPrecedes(topXid, snapshot->xmax)) ? 
1 : 0; - appendStringInfo(&buf, "xcnt:%d\n", snapshot->xcnt + addTopXid); + appendStringInfo(&buf, "xcnt:%u\n", snapshot->xcnt + addTopXid); for (i = 0; i < snapshot->xcnt; i++) appendStringInfo(&buf, "xip:%u\n", snapshot->xip[i]); if (addTopXid) @@ -1229,7 +1229,7 @@ ExportSnapshot(Snapshot snapshot) for (i = 0; i < nchildren; i++) appendStringInfo(&buf, "sxp:%u\n", children[i]); } - appendStringInfo(&buf, "rec:%u\n", snapshot->takenDuringRecovery); + appendStringInfo(&buf, "rec:%d\n", snapshot->takenDuringRecovery); /* * Now write the text representation into a file. We first write to a diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c index ee3bfa82f4..cea5c20dd2 100644 --- a/src/bin/initdb/initdb.c +++ b/src/bin/initdb/initdb.c @@ -1395,7 +1395,7 @@ bootstrap_template1(void) unsetenv("PGCLIENTENCODING"); snprintf(cmd, sizeof(cmd), - "\"%s\" --boot -x1 -X %u %s %s %s", + "\"%s\" --boot -x1 -X %d %s %s %s", backend_exec, wal_segment_size_mb * (1024 * 1024), data_checksums ? "-k" : "", @@ -1711,7 +1711,7 @@ setup_privileges(FILE *cmdfd) " (SELECT E'=r/\"$POSTGRES_SUPERUSERNAME\"' as acl " " UNION SELECT unnest(pg_catalog.acldefault(" " CASE WHEN relkind = " CppAsString2(RELKIND_SEQUENCE) " THEN 's' " - " ELSE 'r' END::\"char\"," CppAsString2(BOOTSTRAP_SUPERUSERID) "::oid))" + " ELSE 'r' END::\"char\",$BOOTSTRAP_SUPERUSERID::oid))" " ) as a) " " WHERE relkind IN (" CppAsString2(RELKIND_RELATION) ", " CppAsString2(RELKIND_VIEW) ", " CppAsString2(RELKIND_MATVIEW) ", " @@ -1843,6 +1843,8 @@ setup_privileges(FILE *cmdfd) priv_lines = replace_token(privileges_setup, "$POSTGRES_SUPERUSERNAME", escape_quotes(username)); + priv_lines = replace_token(priv_lines, "$BOOTSTRAP_SUPERUSERID", + psprintf("%u", BOOTSTRAP_SUPERUSERID)); for (line = priv_lines; *line != NULL; line++) PG_CMD_PUTS(*line); } diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c index 7a5d4562f9..d0271277aa 100644 --- a/src/bin/pg_basebackup/pg_basebackup.c +++ b/src/bin/pg_basebackup/pg_basebackup.c @@ -807,9 +807,9 @@ progress_report(int tablespacenum, const char *filename, * translatable strings. And we only test for INT64_FORMAT availability * in snprintf, not fprintf. 
*/ - snprintf(totaldone_str, sizeof(totaldone_str), INT64_FORMAT, + snprintf(totaldone_str, sizeof(totaldone_str), UINT64_FORMAT, totaldone / 1024); - snprintf(totalsize_str, sizeof(totalsize_str), INT64_FORMAT, totalsize_kb); + snprintf(totalsize_str, sizeof(totalsize_str), UINT64_FORMAT, totalsize_kb); #define VERBOSE_FILENAME_LENGTH 35 if (verbose) @@ -1865,7 +1865,7 @@ BaseBackup(void) PQescapeStringConn(conn, escaped_label, label, sizeof(escaped_label), &i); if (maxrate > 0) - maxrate_clause = psprintf("MAX_RATE %u", maxrate); + maxrate_clause = psprintf("MAX_RATE %d", maxrate); if (manifest) { diff --git a/src/bin/pg_basebackup/pg_recvlogical.c b/src/bin/pg_basebackup/pg_recvlogical.c index a4e0d6aeb2..5bef7dce19 100644 --- a/src/bin/pg_basebackup/pg_recvlogical.c +++ b/src/bin/pg_basebackup/pg_recvlogical.c @@ -549,7 +549,7 @@ StreamLogicalLog(void) if (ret < 0) { - pg_log_error("could not write %u bytes to log file \"%s\": %m", + pg_log_error("could not write %d bytes to log file \"%s\": %m", bytes_left, outfile); goto error; } @@ -561,7 +561,7 @@ StreamLogicalLog(void) if (write(outfd, "\n", 1) != 1) { - pg_log_error("could not write %u bytes to log file \"%s\": %m", + pg_log_error("could not write %d bytes to log file \"%s\": %m", 1, outfile); goto error; } diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c index dc97c7e89c..fd5eb381f0 100644 --- a/src/bin/pg_basebackup/receivelog.c +++ b/src/bin/pg_basebackup/receivelog.c @@ -144,10 +144,10 @@ open_walfile(StreamCtl *stream, XLogRecPtr startpoint) /* if write didn't set errno, assume problem is no disk space */ if (errno == 0) errno = ENOSPC; - pg_log_error(ngettext("write-ahead log file \"%s\" has %d byte, should be 0 or %d", - "write-ahead log file \"%s\" has %d bytes, should be 0 or %d", + pg_log_error(ngettext("write-ahead log file \"%s\" has %zd byte, should be 0 or %u", + "write-ahead log file \"%s\" has %zd bytes, should be 0 or %u", size), - fn, (int) size, WalSegSz); + fn, size, WalSegSz); return false; } /* File existed and was empty, so fall through and open */ @@ -1075,7 +1075,7 @@ ProcessXLogDataMsg(PGconn *conn, StreamCtl *stream, char *copybuf, int len, if (xlogoff != 0) { pg_log_error("received write-ahead log record for offset %u with no file open", - xlogoff); + (unsigned int) xlogoff); return false; } } @@ -1085,7 +1085,7 @@ ProcessXLogDataMsg(PGconn *conn, StreamCtl *stream, char *copybuf, int len, if (stream->walmethod->get_current_pos(walfile) != xlogoff) { pg_log_error("got WAL data offset %08x, expected %08x", - xlogoff, (int) stream->walmethod->get_current_pos(walfile)); + (unsigned int) xlogoff, (unsigned int) stream->walmethod->get_current_pos(walfile)); return false; } } @@ -1118,7 +1118,7 @@ ProcessXLogDataMsg(PGconn *conn, StreamCtl *stream, char *copybuf, int len, if (stream->walmethod->write(walfile, copybuf + hdr_len + bytes_written, bytes_to_write) != bytes_to_write) { - pg_log_error("could not write %u bytes to WAL file \"%s\": %s", + pg_log_error("could not write %d bytes to WAL file \"%s\": %s", bytes_to_write, current_walfile_name, stream->walmethod->getlasterror()); return false; diff --git a/src/bin/pg_basebackup/streamutil.c b/src/bin/pg_basebackup/streamutil.c index da577a7f8f..2311077864 100644 --- a/src/bin/pg_basebackup/streamutil.c +++ b/src/bin/pg_basebackup/streamutil.c @@ -330,8 +330,8 @@ RetrieveWalSegSize(PGconn *conn) if (!IsValidWalSegSize(WalSegSz)) { - pg_log_error(ngettext("WAL segment size must be a power of two between 1 MB and 1 GB, but 
the remote server reported a value of %d byte", - "WAL segment size must be a power of two between 1 MB and 1 GB, but the remote server reported a value of %d bytes", + pg_log_error(ngettext("WAL segment size must be a power of two between 1 MB and 1 GB, but the remote server reported a value of %u byte", + "WAL segment size must be a power of two between 1 MB and 1 GB, but the remote server reported a value of %u bytes", WalSegSz), WalSegSz); return false; @@ -355,7 +355,7 @@ static bool RetrieveDataDirCreatePerm(PGconn *conn) { PGresult *res; - int data_directory_mode; + unsigned int data_directory_mode; /* check connection existence */ Assert(conn != NULL); diff --git a/src/bin/pg_checksums/pg_checksums.c b/src/bin/pg_checksums/pg_checksums.c index ffdc23945c..cad1c5090b 100644 --- a/src/bin/pg_checksums/pg_checksums.c +++ b/src/bin/pg_checksums/pg_checksums.c @@ -361,7 +361,7 @@ scan_directory(const char *basedir, const char *subdir, bool sizeonly) segmentno = atoi(segmentpath); if (segmentno == 0) { - pg_log_error("invalid segment number %d in file name \"%s\"", + pg_log_error("invalid segment number %u in file name \"%s\"", segmentno, fn); exit(1); } @@ -569,7 +569,7 @@ main(int argc, char *argv[]) if (ControlFile->blcksz != BLCKSZ) { pg_log_error("database cluster is not compatible"); - fprintf(stderr, _("The database cluster was initialized with block size %u, but pg_checksums was compiled with block size %u.\n"), + fprintf(stderr, _("The database cluster was initialized with block size %u, but pg_checksums was compiled with block size %d.\n"), ControlFile->blcksz, BLCKSZ); exit(1); } @@ -635,7 +635,7 @@ main(int argc, char *argv[]) if (mode == PG_MODE_CHECK) { printf(_("Bad checksums: %s\n"), psprintf(INT64_FORMAT, badblocks)); - printf(_("Data checksum version: %d\n"), ControlFile->data_checksum_version); + printf(_("Data checksum version: %u\n"), ControlFile->data_checksum_version); if (badblocks > 0) exit(1); @@ -662,7 +662,7 @@ main(int argc, char *argv[]) update_controlfile(DataDir, ControlFile, do_sync); if (verbose) - printf(_("Data checksum version: %d\n"), ControlFile->data_checksum_version); + printf(_("Data checksum version: %u\n"), ControlFile->data_checksum_version); if (mode == PG_MODE_ENABLE) printf(_("Checksums enabled in cluster\n")); else diff --git a/src/bin/pg_resetwal/pg_resetwal.c b/src/bin/pg_resetwal/pg_resetwal.c index cb6ef19182..1318a0f72c 100644 --- a/src/bin/pg_resetwal/pg_resetwal.c +++ b/src/bin/pg_resetwal/pg_resetwal.c @@ -635,8 +635,8 @@ read_controlfile(void) /* return false if WAL segment size is not valid */ if (!IsValidWalSegSize(ControlFile.xlog_seg_size)) { - pg_log_warning(ngettext("pg_control specifies invalid WAL segment size (%d byte); proceed with caution", - "pg_control specifies invalid WAL segment size (%d bytes); proceed with caution", + pg_log_warning(ngettext("pg_control specifies invalid WAL segment size (%u byte); proceed with caution", + "pg_control specifies invalid WAL segment size (%u bytes); proceed with caution", ControlFile.xlog_seg_size), ControlFile.xlog_seg_size); return false; diff --git a/src/bin/pg_rewind/pg_rewind.c b/src/bin/pg_rewind/pg_rewind.c index 52e3fc40e8..bed88245e8 100644 --- a/src/bin/pg_rewind/pg_rewind.c +++ b/src/bin/pg_rewind/pg_rewind.c @@ -686,9 +686,9 @@ progress_report(bool finished) * translatable strings. And we only test for INT64_FORMAT availability * in snprintf, not fprintf. 
  */
-	snprintf(fetch_done_str, sizeof(fetch_done_str), INT64_FORMAT,
+	snprintf(fetch_done_str, sizeof(fetch_done_str), UINT64_FORMAT,
 			 fetch_done / 1024);
-	snprintf(fetch_size_str, sizeof(fetch_size_str), INT64_FORMAT,
+	snprintf(fetch_size_str, sizeof(fetch_size_str), UINT64_FORMAT,
 			 fetch_size / 1024);
 
 	fprintf(stderr, _("%*s/%s kB (%d%%) copied"),
@@ -779,7 +779,7 @@ getTimelineHistory(ControlFileData *controlFile, int *nentries)
 			TimeLineHistoryEntry *entry;
 
 			entry = &history[i];
-			pg_log_debug("%d: %X/%X - %X/%X", entry->tli,
+			pg_log_debug("%u: %X/%X - %X/%X", entry->tli,
 						 (uint32) (entry->begin >> 32), (uint32) (entry->begin),
 						 (uint32) (entry->end >> 32), (uint32) (entry->end));
 		}
diff --git a/src/bin/pg_test_fsync/pg_test_fsync.c b/src/bin/pg_test_fsync/pg_test_fsync.c
index 3eddd983c6..87b09a55f2 100644
--- a/src/bin/pg_test_fsync/pg_test_fsync.c
+++ b/src/bin/pg_test_fsync/pg_test_fsync.c
@@ -191,7 +191,7 @@ handle_args(int argc, char *argv[])
 				if (secs_per_test == 0)
 				{
 					pg_log_error("%s must be in range %u..%u",
-								 "--secs-per-test", 1, UINT_MAX);
+								 "--secs-per-test", 1U, UINT_MAX);
 					exit(1);
 				}
 				break;
diff --git a/src/bin/pg_test_timing/pg_test_timing.c b/src/bin/pg_test_timing/pg_test_timing.c
index c29d6f8762..a284e1e4c1 100644
--- a/src/bin/pg_test_timing/pg_test_timing.c
+++ b/src/bin/pg_test_timing/pg_test_timing.c
@@ -88,7 +88,7 @@ handle_args(int argc, char *argv[])
 				if (test_duration == 0)
 				{
 					fprintf(stderr, _("%s: %s must be in range %u..%u\n"),
-							progname, "--duration", 1, UINT_MAX);
+							progname, "--duration", 1U, UINT_MAX);
 					exit(1);
 				}
 				break;
diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c
index 6685d517ff..f4a0306936 100644
--- a/src/bin/pg_upgrade/check.c
+++ b/src/bin/pg_upgrade/check.c
@@ -590,7 +590,7 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
 					PATH_SEPARATOR);
 
 			for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
-				fprintf(script, RMDIR_CMD " %c%s%c%d%c\n", PATH_QUOTE,
+				fprintf(script, RMDIR_CMD " %c%s%c%u%c\n", PATH_QUOTE,
 						fix_path_separator(os_info.old_tablespaces[tblnum]),
 						PATH_SEPARATOR, old_cluster.dbarr.dbs[dbnum].db_oid,
 						PATH_QUOTE);
diff --git a/src/bin/pg_upgrade/tablespace.c b/src/bin/pg_upgrade/tablespace.c
index 11a2429738..7724cc892a 100644
--- a/src/bin/pg_upgrade/tablespace.c
+++ b/src/bin/pg_upgrade/tablespace.c
@@ -112,7 +112,7 @@ set_tablespace_directory_suffix(ClusterInfo *cluster)
 		/* This cluster has a version-specific subdirectory */
 
 		/* The leading slash is needed to start a new directory. */
-		cluster->tablespace_suffix = psprintf("/PG_%s_%d",
+		cluster->tablespace_suffix = psprintf("/PG_%s_%u",
 											  cluster->major_version_str,
 											  cluster->controldata.cat_ver);
 	}
diff --git a/src/bin/pg_waldump/pg_waldump.c b/src/bin/pg_waldump/pg_waldump.c
index 31e99c2a6d..20c0a1d9ca 100644
--- a/src/bin/pg_waldump/pg_waldump.c
+++ b/src/bin/pg_waldump/pg_waldump.c
@@ -364,13 +364,13 @@ WALDumpReadPage(XLogReaderState *state, XLogRecPtr targetPagePtr, int reqLen,
 		if (errinfo.wre_errno != 0)
 		{
 			errno = errinfo.wre_errno;
-			fatal_error("could not read from file %s, offset %u: %m",
+			fatal_error("could not read from file %s, offset %d: %m",
 						fname, errinfo.wre_off);
 		}
 		else
-			fatal_error("could not read from file %s, offset %u: read %d of %zu",
+			fatal_error("could not read from file %s, offset %d: read %d of %d",
 						fname, errinfo.wre_off, errinfo.wre_read,
-						(Size) errinfo.wre_req);
+						errinfo.wre_req);
 	}
 
 	return count;
@@ -472,7 +472,7 @@ XLogDumpDisplayRecord(XLogDumpConfig *config, XLogReaderState *record)
 
 	id = desc->rm_identify(info);
 	if (id == NULL)
-		printf("desc: UNKNOWN (%x) ", info & ~XLR_INFO_MASK);
+		printf("desc: UNKNOWN (%x) ", (unsigned int) info & ~XLR_INFO_MASK);
 	else
 		printf("desc: %s ", id);
 
@@ -491,13 +491,13 @@ XLogDumpDisplayRecord(XLogDumpConfig *config, XLogReaderState *record)
 
 			XLogRecGetBlockTag(record, block_id, &rnode, &forknum, &blk);
 			if (forknum != MAIN_FORKNUM)
-				printf(", blkref #%u: rel %u/%u/%u fork %s blk %u",
+				printf(", blkref #%d: rel %u/%u/%u fork %s blk %u",
 					   block_id,
 					   rnode.spcNode, rnode.dbNode, rnode.relNode,
 					   forkNames[forknum],
 					   blk);
 			else
-				printf(", blkref #%u: rel %u/%u/%u blk %u",
+				printf(", blkref #%d: rel %u/%u/%u blk %u",
 					   block_id,
 					   rnode.spcNode, rnode.dbNode, rnode.relNode,
 					   blk);
@@ -521,7 +521,7 @@ XLogDumpDisplayRecord(XLogDumpConfig *config, XLogReaderState *record)
 				continue;
 
 			XLogRecGetBlockTag(record, block_id, &rnode, &forknum, &blk);
-			printf("\tblkref #%u: rel %u/%u/%u fork %s blk %u",
+			printf("\tblkref #%d: rel %u/%u/%u fork %s blk %u",
 				   block_id,
 				   rnode.spcNode, rnode.dbNode, rnode.relNode,
 				   forkNames[forknum],
@@ -532,7 +532,7 @@ XLogDumpDisplayRecord(XLogDumpConfig *config, XLogReaderState *record)
 					BKPIMAGE_IS_COMPRESSED)
 				{
 					printf(" (FPW%s); hole: offset: %u, length: %u, "
-						   "compression saved: %u",
+						   "compression saved: %d",
 						   XLogRecBlockImageApply(record, block_id) ?
 						   "" : " for WAL verification",
 						   record->blocks[block_id].hole_offset,
@@ -671,7 +671,7 @@ XLogDumpDisplayStats(XLogDumpConfig *config, XLogDumpStats *stats)
 			/* the upper four bits in xl_info are the rmgr's */
 			id = desc->rm_identify(rj << 4);
 			if (id == NULL)
-				id = psprintf("UNKNOWN (%x)", rj << 4);
+				id = psprintf("UNKNOWN (%x)", (unsigned int) (rj << 4));
 
 			XLogDumpStatsRow(psprintf("%s/%s", desc->rm_name, id),
 							 count, total_count, rec_len, total_rec_len,
@@ -888,7 +888,7 @@ main(int argc, char **argv)
 					private.startptr = (uint64) xlogid << 32 | xrecoff;
 				break;
 			case 't':
-				if (sscanf(optarg, "%d", &private.timeline) != 1)
+				if (sscanf(optarg, "%u", &private.timeline) != 1)
 				{
 					pg_log_error("could not parse timeline \"%s\"", optarg);
 					goto bad_argument;
diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c
index 82fad85773..d580a2e88a 100644
--- a/src/bin/pgbench/pgbench.c
+++ b/src/bin/pgbench/pgbench.c
@@ -5043,7 +5043,7 @@ parseScriptWeight(const char *option, char **script)
 		}
 		if (wtmp > INT_MAX || wtmp < 0)
 		{
-			pg_log_fatal("weight specification out of range (0 .. %u): " INT64_FORMAT,
+			pg_log_fatal("weight specification out of range (0 .. %d): " INT64_FORMAT,
 						 INT_MAX, (int64) wtmp);
 			exit(1);
 		}
diff --git a/src/fe_utils/print.c b/src/fe_utils/print.c
index 470f5cec50..ca694f8dc2 100644
--- a/src/fe_utils/print.c
+++ b/src/fe_utils/print.c
@@ -520,7 +520,7 @@ print_unaligned_vertical(const printTableContent *cont, FILE *fout)
 
 /* draw "line" */
 static void
-_print_horizontal_line(const unsigned int ncolumns, const unsigned int *widths,
+_print_horizontal_line(const unsigned int ncolumns, const int *widths,
 					   unsigned short border, printTextRule pos,
 					   const printTextFormat *format,
 					   FILE *fout)
@@ -576,7 +576,7 @@ print_aligned_text(const printTableContent *cont, FILE *fout, bool is_pager)
 	unsigned int i,
 				j;
 
-	unsigned int *width_header,
+	int		   *width_header,
 			   *max_width,
 			   *width_wrap,
 			   *width_average;
@@ -584,7 +584,7 @@ print_aligned_text(const printTableContent *cont, FILE *fout, bool is_pager)
 			   *curr_nl_line,
 			   *max_bytes;
 	unsigned char **format_buf;
-	unsigned int width_total;
+	int			width_total;
 	unsigned int total_header_width;
 	unsigned int extra_row_output_lines = 0;
 	unsigned int extra_output_lines = 0;
@@ -905,7 +905,7 @@ print_aligned_text(const printTableContent *cont, FILE *fout, bool is_pager)
 			for (i = 0; i < cont->ncolumns; i++)
 			{
 				struct lineptr *this_line = col_lineptrs[i] + curr_nl_line;
-				unsigned int nbspace;
+				int			nbspace;
 
 				if (opt_border != 0 ||
 					(!format->wrap_right_border && i > 0))
@@ -1218,7 +1218,7 @@ print_aligned_vertical(const printTableContent *cont,
 	int			encoding = cont->opt->encoding;
 	unsigned long record = cont->opt->prior_records + 1;
 	const char *const *ptr;
-	unsigned int i,
+	int			i,
 				hwidth = 0,
 				dwidth = 0,
 				hheight = 1,
@@ -1349,7 +1349,7 @@ print_aligned_vertical(const printTableContent *cont,
 	 */
 	if (cont->opt->format == PRINT_WRAPPED)
 	{
-		unsigned int swidth,
+		int			swidth,
 					rwidth = 0,
 					newdwidth;
 
@@ -1578,7 +1578,7 @@ print_aligned_vertical(const printTableContent *cont,
 		}
 		else
 		{
-			unsigned int swidth = hwidth + opt_border;
+			int			swidth = hwidth + opt_border;
 
 			if ((opt_border < 2) &&
 				(hmultiline) &&
diff --git a/src/include/access/transam.h b/src/include/access/transam.h
index 2f1f144db4..478603c008 100644
--- a/src/include/access/transam.h
+++ b/src/include/access/transam.h
@@ -185,9 +185,9 @@ FullTransactionIdAdvance(FullTransactionId *dest)
  * reassigning OIDs that might have been assigned during initdb.
  * ----------
  */
-#define FirstGenbkiObjectId		10000
-#define FirstBootstrapObjectId	12000
-#define FirstNormalObjectId		16384
+#define FirstGenbkiObjectId		10000U
+#define FirstBootstrapObjectId	12000U
+#define FirstNormalObjectId		16384U
 
 /*
  * VariableCache is a data structure in shared memory that is used to track
diff --git a/src/include/catalog/pg_control.h b/src/include/catalog/pg_control.h
index 06bed90c5e..750f2956e4 100644
--- a/src/include/catalog/pg_control.h
+++ b/src/include/catalog/pg_control.h
@@ -22,7 +22,7 @@
 
 
 /* Version identifier for this pg_control format */
-#define PG_CONTROL_VERSION	1300
+#define PG_CONTROL_VERSION	1300U
 
 /* Nonce key length, see below */
 #define MOCK_AUTH_NONCE_LEN		32
diff --git a/src/include/lib/simplehash.h b/src/include/lib/simplehash.h
index 395be1ca9a..82928f41b6 100644
--- a/src/include/lib/simplehash.h
+++ b/src/include/lib/simplehash.h
@@ -1056,7 +1056,7 @@ SH_STAT(SH_TYPE * tb)
 		avg_collisions = 0;
 	}
 
-	sh_log("size: " UINT64_FORMAT ", members: %u, filled: %f, total chain: %u, max chain: %u, avg chain: %f, total_collisions: %u, max_collisions: %i, avg_collisions: %f",
+	sh_log("size: " UINT64_FORMAT ", members: %u, filled: %f, total chain: %u, max chain: %u, avg chain: %f, total_collisions: %u, max_collisions: %u, avg_collisions: %f",
 		   tb->size, tb->members, fillfactor, total_chain_length, max_chain_length,
 		   avg_chain_length, total_collisions, max_collisions, avg_collisions);
 }
diff --git a/src/include/replication/walsender_private.h b/src/include/replication/walsender_private.h
index 509856c057..1b77319ce3 100644
--- a/src/include/replication/walsender_private.h
+++ b/src/include/replication/walsender_private.h
@@ -63,7 +63,7 @@ typedef struct WalSnd
 	 * The priority order of the standby managed by this WALSender, as listed
 	 * in synchronous_standby_names, or 0 if not-listed.
 	 */
-	int			sync_standby_priority;
+	unsigned int sync_standby_priority;
 
 	/* Protects shared variables shown above. */
 	slock_t		mutex;
diff --git a/src/interfaces/ecpg/test/expected/sql-bytea.c b/src/interfaces/ecpg/test/expected/sql-bytea.c
index 8338c6008d..686639e9d0 100644
--- a/src/interfaces/ecpg/test/expected/sql-bytea.c
+++ b/src/interfaces/ecpg/test/expected/sql-bytea.c
@@ -33,7 +33,7 @@ dump_binary(char *buf, int len, int ind)
 
 	printf("len=%d, ind=%d, data=0x", len, ind);
 	for (i = 0; i < len; ++i)
-		printf("%02x", 0xff & buf[i]);
+		printf("%02x", (unsigned) (0xff & buf[i]));
 	printf("\n");
 }
 
diff --git a/src/interfaces/ecpg/test/sql/bytea.pgc b/src/interfaces/ecpg/test/sql/bytea.pgc
index e874123119..b8095aa19a 100644
--- a/src/interfaces/ecpg/test/sql/bytea.pgc
+++ b/src/interfaces/ecpg/test/sql/bytea.pgc
@@ -13,7 +13,7 @@ dump_binary(char *buf, int len, int ind)
 
 	printf("len=%d, ind=%d, data=0x", len, ind);
 	for (i = 0; i < len; ++i)
-		printf("%02x", 0xff & buf[i]);
+		printf("%02x", (unsigned) (0xff & buf[i]));
 	printf("\n");
 }
 
diff --git a/src/interfaces/libpq/fe-misc.c b/src/interfaces/libpq/fe-misc.c
index 4ffc7f33fb..a1f585bd51 100644
--- a/src/interfaces/libpq/fe-misc.c
+++ b/src/interfaces/libpq/fe-misc.c
@@ -573,7 +573,7 @@ int
 pqPutMsgEnd(PGconn *conn)
 {
 	if (conn->Pfdebug)
-		fprintf(conn->Pfdebug, "To backend> Msg complete, length %u\n",
+		fprintf(conn->Pfdebug, "To backend> Msg complete, length %d\n",
 				conn->outMsgEnd - conn->outCount);
 
 	/* Fill in length word if needed */
diff --git a/src/interfaces/libpq/fe-protocol2.c b/src/interfaces/libpq/fe-protocol2.c
index bfe9603fd4..f05b9efcd8 100644
--- a/src/interfaces/libpq/fe-protocol2.c
+++ b/src/interfaces/libpq/fe-protocol2.c
@@ -471,7 +471,7 @@ pqParseInput2(PGconn *conn)
 			{
 				pqInternalNotice(&conn->noticeHooks,
 								 "message type 0x%02x arrived from server while idle",
-								 id);
+								 (unsigned) id);
 				/* Discard the unexpected message; good idea?? */
 				conn->inStart = conn->inEnd;
 				break;
@@ -1528,7 +1528,7 @@ pqFunctionCall2(PGconn *conn, Oid fnid,
 				/* The backend violates the protocol. */
 				printfPQExpBuffer(&conn->errorMessage,
 								  libpq_gettext("protocol error: id=0x%x\n"),
-								  id);
+								  (unsigned) id);
 				pqSaveErrorResult(conn);
 				conn->inStart = conn->inCursor;
 				return pqPrepareAsyncResult(conn);
@@ -1560,7 +1560,7 @@ pqFunctionCall2(PGconn *conn, Oid fnid,
 				/* The backend violates the protocol. */
 				printfPQExpBuffer(&conn->errorMessage,
 								  libpq_gettext("protocol error: id=0x%x\n"),
-								  id);
+								  (unsigned) id);
 				pqSaveErrorResult(conn);
 				conn->inStart = conn->inCursor;
 				return pqPrepareAsyncResult(conn);
diff --git a/src/interfaces/libpq/fe-protocol3.c b/src/interfaces/libpq/fe-protocol3.c
index 1696525475..a67e7ac815 100644
--- a/src/interfaces/libpq/fe-protocol3.c
+++ b/src/interfaces/libpq/fe-protocol3.c
@@ -181,7 +181,7 @@ pqParseInput3(PGconn *conn)
 			{
 				pqInternalNotice(&conn->noticeHooks,
 								 "message type 0x%02x arrived from server while idle",
-								 id);
+								 (unsigned) id);
 				/* Discard the unexpected message */
 				conn->inCursor += msgLength;
 			}
@@ -2093,7 +2093,7 @@ pqFunctionCall3(PGconn *conn, Oid fnid,
 			/* The backend violates the protocol. */
 			printfPQExpBuffer(&conn->errorMessage,
 							  libpq_gettext("protocol error: id=0x%x\n"),
-							  id);
+							  (unsigned) id);
 			pqSaveErrorResult(conn);
 			/* trust the specified message length as what to skip */
 			conn->inStart += 5 + msgLength;
diff --git a/src/port/inet_net_ntop.c b/src/port/inet_net_ntop.c
index b8ad69c390..92a2713351 100644
--- a/src/port/inet_net_ntop.c
+++ b/src/port/inet_net_ntop.c
@@ -142,7 +142,7 @@ inet_net_ntop_ipv4(const u_char *src, int bits, char *dst, size_t size)
 	{
 		if (size <= sizeof "/32")
 			goto emsgsize;
-		dst += SPRINTF((dst, "/%u", bits));
+		dst += SPRINTF((dst, "/%d", bits));
 	}
 
 	return (odst);
@@ -282,7 +282,7 @@ inet_net_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
 	*tp = '\0';
 
 	if (bits != -1 && bits != 128)
-		tp += SPRINTF((tp, "/%u", bits));
+		tp += SPRINTF((tp, "/%d", bits));
 
 	/*
 	 * Check for overflow, copy, and we're done.
diff --git a/src/test/modules/test_ginpostinglist/test_ginpostinglist.c b/src/test/modules/test_ginpostinglist/test_ginpostinglist.c
index 4a8451e659..ce68ad7105 100644
--- a/src/test/modules/test_ginpostinglist/test_ginpostinglist.c
+++ b/src/test/modules/test_ginpostinglist/test_ginpostinglist.c
@@ -47,7 +47,7 @@ test_itemptr_pair(BlockNumber blk, OffsetNumber off, int maxsize)
 	int			ndecoded;
 
 	elog(NOTICE, "testing with (%u, %d), (%u, %d), max %d bytes",
-		 0, 1, blk, off, maxsize);
+		 0U, 1, blk, off, maxsize);
 
 	ItemPointerSet(&orig_itemptrs[0], 0, 1);
 	ItemPointerSet(&orig_itemptrs[1], blk, off);
@@ -67,7 +67,7 @@ test_itemptr_pair(BlockNumber blk, OffsetNumber off, int maxsize)
 	/* Check the result */
 	if (!ItemPointerEquals(&orig_itemptrs[0], &decoded_itemptrs[0]))
 		elog(ERROR, "mismatch on first itemptr: (%u, %d) vs (%u, %d)",
-			 0, 1,
+			 0U, 1,
 			 ItemPointerGetBlockNumber(&decoded_itemptrs[0]),
 			 ItemPointerGetOffsetNumber(&decoded_itemptrs[0]));
 
@@ -75,7 +75,7 @@ test_itemptr_pair(BlockNumber blk, OffsetNumber off, int maxsize)
 		!ItemPointerEquals(&orig_itemptrs[0], &decoded_itemptrs[0]))
 	{
 		elog(ERROR, "mismatch on second itemptr: (%u, %d) vs (%u, %d)",
-			 0, 1,
+			 0U, 1,
 			 ItemPointerGetBlockNumber(&decoded_itemptrs[0]),
 			 ItemPointerGetOffsetNumber(&decoded_itemptrs[0]));
 	}
diff --git a/src/test/modules/worker_spi/worker_spi.c b/src/test/modules/worker_spi/worker_spi.c
index 258237f9bf..02368a18a4 100644
--- a/src/test/modules/worker_spi/worker_spi.c
+++ b/src/test/modules/worker_spi/worker_spi.c
@@ -316,7 +316,6 @@ void
 _PG_init(void)
 {
 	BackgroundWorker worker;
-	unsigned int i;
 
 	/* get the configuration */
 	DefineCustomIntVariable("worker_spi.naptime",
@@ -370,7 +369,7 @@ _PG_init(void)
 	/*
 	 * Now fill in worker-specific data, and do the actual registrations.
 	 */
-	for (i = 1; i <= worker_spi_total_workers; i++)
+	for (int i = 1; i <= worker_spi_total_workers; i++)
 	{
 		snprintf(worker.bgw_name, BGW_MAXLEN, "worker_spi worker %d", i);
 		snprintf(worker.bgw_type, BGW_MAXLEN, "worker_spi");
diff --git a/src/test/regress/regress.c b/src/test/regress/regress.c
index 09bc42a8c0..226881022e 100644
--- a/src/test/regress/regress.c
+++ b/src/test/regress/regress.c
@@ -45,7 +45,7 @@
 	do { \
 		if (!(expr)) \
 			elog(ERROR, \
-				 "%s was unexpectedly false in file \"%s\" line %u", \
+				 "%s was unexpectedly false in file \"%s\" line %d", \
 				 #expr, __FILE__, __LINE__); \
 	} while (0)
 
@@ -55,7 +55,7 @@
 		uint32		expected = (expected_expr); \
 		if (result != expected) \
 			elog(ERROR, \
-				 "%s yielded %u, expected %s in file \"%s\" line %u", \
+				 "%s yielded %u, expected %s in file \"%s\" line %d", \
 				 #result_expr, result, #expected_expr, __FILE__, __LINE__); \
 	} while (0)
 
@@ -65,7 +65,7 @@
 		uint64		expected = (expected_expr); \
 		if (result != expected) \
 			elog(ERROR, \
-				 "%s yielded " UINT64_FORMAT ", expected %s in file \"%s\" line %u", \
+				 "%s yielded " UINT64_FORMAT ", expected %s in file \"%s\" line %d", \
 				 #result_expr, result, #expected_expr, __FILE__, __LINE__); \
 	} while (0)
-- 
2.29.1
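
Not part of the patch itself: for context, here is a minimal, self-contained sketch of the pattern the hunks above apply. It assumes a GCC build with -Wall -Wformat -Wformat-signedness; the variable names and the file name in the comment are purely illustrative. The idea is simply that each printf-style conversion is made to agree in signedness with its argument, whether by changing the specifier, the argument's declared type or cast, or a literal's suffix.

/* cc -Wall -Wformat -Wformat-signedness signedness_demo.c */
#include <inttypes.h>
#include <limits.h>
#include <stdio.h>

int
main(void)
{
	unsigned int db_oid = 16384;	/* stand-in for an unsigned Oid-like value */
	uint64_t	copied_kb = 4096;	/* stand-in for an unsigned 64-bit counter */
	int			offset = -1;		/* genuinely signed, so %d stays correct */

	printf("oid %u\n", db_oid);						/* %u matches unsigned int, not %d */
	printf("copied %" PRIu64 " kB\n", copied_kb);	/* unsigned 64-bit conversion */
	printf("offset %d\n", offset);					/* signed argument keeps %d */
	printf("range %u..%u\n", 1U, UINT_MAX);			/* literal gets a U suffix to match %u */

	return 0;
}

With -Wformat-signedness, GCC warns whenever a signed conversion is paired with an unsigned argument or vice versa, which is also why some hunks change declarations or add casts (for example the width arrays in src/fe_utils/print.c) instead of touching the format strings.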