diff --git a/contrib/pg_overexplain/pg_overexplain.c b/contrib/pg_overexplain/pg_overexplain.c
index fb277e02308..a93a4dcfed6 100644
--- a/contrib/pg_overexplain/pg_overexplain.c
+++ b/contrib/pg_overexplain/pg_overexplain.c
@@ -459,7 +459,7 @@ overexplain_range_table(PlannedStmt *plannedstmt, ExplainState *es)
 		char	   *relkind;
 		SubPlanRTInfo *next_rtinfo;
 
-		/* Advance to next SubRTInfo, if it's time. */
+		/* Advance to next SubPlanRTInfo, if it's time. */
 		if (lc_subrtinfo != NULL)
 		{
 			next_rtinfo = lfirst(lc_subrtinfo);
@@ -512,8 +512,8 @@ overexplain_range_table(PlannedStmt *plannedstmt, ExplainState *es)
 				/*
 				 * We should not see RTE of this kind here since property
 				 * graph RTE gets converted to subquery RTE in
-				 * RewriteGraphTable(). In case we decide not to do the
-				 * conversion and leave RTEkind unchanged in future, print
+				 * rewriteGraphTable(). In case we decide not to do the
+				 * conversion and leave RTE kind unchanged in future, print
 				 * correct name of RTE kind.
 				 */
 				kind = "graph_table";
diff --git a/contrib/pg_plan_advice/pgpa_ast.c b/contrib/pg_plan_advice/pgpa_ast.c
index 3c340c6ae7a..01db8d24cd0 100644
--- a/contrib/pg_plan_advice/pgpa_ast.c
+++ b/contrib/pg_plan_advice/pgpa_ast.c
@@ -321,7 +321,7 @@ pgpa_identifiers_match_target(int nrids, pgpa_identifier *rids,
  * Returns true if every target or sub-target is matched by at least one
  * identifier, and otherwise false.
  *
- * Also sets rids_used[i] = true for each idenifier that matches at least one
+ * Also sets rids_used[i] = true for each identifier that matches at least one
  * target.
  */
 static bool
diff --git a/contrib/pg_plan_advice/pgpa_ast.h b/contrib/pg_plan_advice/pgpa_ast.h
index a89f1251929..4bd6ffa5e3a 100644
--- a/contrib/pg_plan_advice/pgpa_ast.h
+++ b/contrib/pg_plan_advice/pgpa_ast.h
@@ -116,7 +116,7 @@ typedef struct pgpa_advice_item
 } pgpa_advice_item;
 
 /*
- * Result of comparing an array of pgpa_relation_identifier objects to a
+ * Result of comparing an array of pgpa_identifier objects to a
  * pgpa_advice_target.
  *
  * PGPA_ITM_EQUAL means all targets are matched by some identifier, and
diff --git a/contrib/pg_plan_advice/pgpa_identifier.c b/contrib/pg_plan_advice/pgpa_identifier.c
index 0cfc4aa4f7e..9d620c70bb9 100644
--- a/contrib/pg_plan_advice/pgpa_identifier.c
+++ b/contrib/pg_plan_advice/pgpa_identifier.c
@@ -211,7 +211,7 @@ pgpa_compute_identifier_by_rti(PlannerInfo *root, Index rti,
  * RTE_JOIN entries are excluded because they cannot be mentioned by plan
  * advice.
  *
- * The caller is responsible for making sure that the tkeys array is large
+ * The caller is responsible for making sure that the rids array is large
  * enough to store the results.
  *
  * The return value is the number of identifiers computed.
diff --git a/contrib/pg_plan_advice/pgpa_join.c b/contrib/pg_plan_advice/pgpa_join.c
index 38e7b91ed7e..067321081e7 100644
--- a/contrib/pg_plan_advice/pgpa_join.c
+++ b/contrib/pg_plan_advice/pgpa_join.c
@@ -93,7 +93,7 @@ pgpa_create_join_unroller(void)
  *
  * pgpa_plan_walker creates a "top level" join unroller object when it
  * encounters a join in a portion of the plan tree in which no join unroller
- * is already active. From there, this function is responsible for determing
+ * is already active. From there, this function is responsible for determining
  * to what portion of the plan tree that join unroller applies, and for
  * creating any subordinate join unroller objects that are needed as a result
  * of non-outer-deep join trees. We do this by returning the join unroller
diff --git a/contrib/pg_plan_advice/pgpa_planner.c b/contrib/pg_plan_advice/pgpa_planner.c
index 72ef3230abc..4b44a98fcbc 100644
--- a/contrib/pg_plan_advice/pgpa_planner.c
+++ b/contrib/pg_plan_advice/pgpa_planner.c
@@ -1648,13 +1648,14 @@ pgpa_planner_apply_scan_advice(RelOptInfo *rel,
 			/*
 			 * Currently, PGS_CONSIDER_INDEXONLY can suppress Bitmap Heap
 			 * Scans, so don't clear it when such a scan is requested. This
-			 * happens because build_index_scan() thinks that the possibility
-			 * of an index-only scan is a sufficient reason to consider using
-			 * an otherwise-useless index, and get_index_paths() thinks that
-			 * the same paths that are useful for index or index-only scans
-			 * should also be considered for bitmap scans. Perhaps that logic
-			 * should be tightened up, but until then we need to include
-			 * PGS_CONSIDER_INDEXONLY in my_scan_type here.
+			 * happens because build_index_scankeys() thinks that the
+			 * possibility of an index-only scan is a sufficient reason to
+			 * consider using an otherwise-useless index, and
+			 * get_index_paths() thinks that the same paths that are useful
+			 * for index or index-only scans should also be considered for
+			 * bitmap scans. Perhaps that logic should be tightened up, but
+			 * until then we need to include PGS_CONSIDER_INDEXONLY in
+			 * my_scan_type here.
 			 */
 			my_scan_type = PGS_BITMAPSCAN | PGS_CONSIDER_INDEXONLY;
 		}
@@ -2083,7 +2084,7 @@ pgpa_compute_rt_offsets(pgpa_planner_state *pps, PlannedStmt *pstmt)
 
 		/*
 		 * It's not guaranteed that every plan name we saw during planning has
-		 * a SubPlanInfo, but any that do not certainly don't appear in the
+		 * a SubPlanRTInfo, but any that do not certainly don't appear in the
 		 * final range table.
 		 */
 		foreach_node(SubPlanRTInfo, rtinfo, pstmt->subrtinfos)
diff --git a/contrib/pg_stash_advice/stashfuncs.c b/contrib/pg_stash_advice/stashfuncs.c
index 77f8e19e867..d7aa9f2223f 100644
--- a/contrib/pg_stash_advice/stashfuncs.c
+++ b/contrib/pg_stash_advice/stashfuncs.c
@@ -286,7 +286,7 @@ pg_set_stashed_advice(PG_FUNCTION_ARGS)
 	/*
 	 * Get and check query ID.
 	 *
-	 * queryID 0 means no query ID was computed, so reject that.
+	 * Query ID 0 means no query ID was computed, so reject that.
 	 */
 	queryId = PG_GETARG_INT64(1);
 	if (queryId == 0)
diff --git a/contrib/pg_stash_advice/stashpersist.c b/contrib/pg_stash_advice/stashpersist.c
index 07a4da65b7e..00a0a74f04d 100644
--- a/contrib/pg_stash_advice/stashpersist.c
+++ b/contrib/pg_stash_advice/stashpersist.c
@@ -85,7 +85,7 @@ static void pgsa_write_to_disk(void);
 /*
  * Background worker entry point for pg_stash_advice persistence.
  *
- * On startup, if load_from_disk_pending is set, we load previously saved
+ * On startup, if stashes_ready is set, we load previously saved
  * stash data from disk.  Then we enter a loop, periodically checking whether
  * any changes have been made (via the change_count atomic counter) and
  * writing them to disk.  On shutdown, we perform a final write.
diff --git a/contrib/pg_trgm/trgm_op.c b/contrib/pg_trgm/trgm_op.c
index 0aca9b5826f..3fd087852e7 100644
--- a/contrib/pg_trgm/trgm_op.c
+++ b/contrib/pg_trgm/trgm_op.c
@@ -242,7 +242,7 @@ CMPTRGM_CHOOSE(const void *a, const void *b)
 #define ST_DECLARE
 #include "lib/sort_template.h"
 
-/* Sort an array of trigrams, handling signedess correctly */
+/* Sort an array of trigrams, handling signedness correctly */
 static void
 trigram_qsort(trgm *array, size_t n)
 {
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index 0f20f38c83e..c42cb690c7b 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -5798,7 +5798,7 @@ fetch_remote_statistics(Relation relation,
 													  remote_relname,
 													  column_list.data);
 
-			/* If any attribute statsare missing, fallback to sampling. */
+			/* If any attribute stats are missing, fall back to sampling. */
 			if (!match_attrmap(attstats,
 							   local_schemaname, local_relname,
 							   remote_schemaname, remote_relname,
diff --git a/doc/src/sgml/pgstashadvice.sgml b/doc/src/sgml/pgstashadvice.sgml
index c9b1e078382..7813d63d91e 100644
--- a/doc/src/sgml/pgstashadvice.sgml
+++ b/doc/src/sgml/pgstashadvice.sgml
@@ -84,7 +84,7 @@
   <literal>pg_stash_advice.stash_name</literal> for their session, and this
   may reveal the contents of any advice stash with that name. Users should
   assume that information embedded in stashed advice strings may become visible
-  to nonprivileged users.
+  to non-privileged users.
  </para>
 
  <sect2 id="pgstashadvice-functions">
diff --git a/doc/src/sgml/ref/alter_property_graph.sgml b/doc/src/sgml/ref/alter_property_graph.sgml
index 19352c06305..f517f2b2d7a 100644
--- a/doc/src/sgml/ref/alter_property_graph.sgml
+++ b/doc/src/sgml/ref/alter_property_graph.sgml
@@ -78,7 +78,7 @@ ALTER PROPERTY GRAPH [ IF EXISTS ] <replaceable class="parameter">name</replacea
       <para>
        This form removes vertex or edge tables from the property graph.  (Only
        the association of the tables with the graph is removed.  The tables
-       themself are not dropped.)
+       themselves are not dropped.)
       </para>
      </listitem>
     </varlistentry>
diff --git a/doc/src/sgml/release-19.sgml b/doc/src/sgml/release-19.sgml
index 6a8dfa0526f..c1bf18abfd3 100644
--- a/doc/src/sgml/release-19.sgml
+++ b/doc/src/sgml/release-19.sgml
@@ -3231,7 +3231,7 @@ Add functions to pg_buffercache to mark buffers as dirty (Nazir Bilal Yavuz)
 </para>
 
 <para>
-The functions are pg_buffercache_mark_dirty(), pg_buffercache_mark_dirt_relation(), and pg_buffercache_mark_dirty_all().
+The functions are pg_buffercache_mark_dirty(), pg_buffercache_mark_dirty_relation(), and pg_buffercache_mark_dirty_all().
 </para>
 </listitem>
 
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index a2db55e9b73..60bba0a2145 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -222,7 +222,7 @@ index_form_tuple_context(TupleDesc tupleDescriptor,
  *		nocache_index_getattr
  *
  *		This gets called from index_getattr() macro, and only in cases
- *		where we can't use cacheoffset and the value is not null.
+ *		where we can't use attcacheoff and the value is not null.
  * ----------------
  */
 Datum
diff --git a/src/backend/access/heap/heapam_visibility.c b/src/backend/access/heap/heapam_visibility.c
index 3a6a1e5a084..361b76e5065 100644
--- a/src/backend/access/heap/heapam_visibility.c
+++ b/src/backend/access/heap/heapam_visibility.c
@@ -192,7 +192,7 @@ SetHintBitsExt(HeapTupleHeader tuple, Buffer buffer,
 }
 
 /*
- * Simple wrapper around SetHintBitExt(), use when operating on a single
+ * Simple wrapper around SetHintBitsExt(), use when operating on a single
  * tuple.
  */
 static inline void
@@ -1671,7 +1671,7 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
 }
 
 /*
- * Perform HeaptupleSatisfiesMVCC() on each passed in tuple. This is more
+ * Perform HeapTupleSatisfiesMVCC() on each passed in tuple. This is more
  * efficient than doing HeapTupleSatisfiesMVCC() one-by-one.
  *
  * To be checked tuples are passed via BatchMVCCState->tuples. Each tuple's
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index cb78ba0842d..10cbc0d76bd 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -341,8 +341,8 @@ static void ExtendMultiXactMember(MultiXactOffset offset, int nmembers);
 static void SetOldestOffset(void);
 static bool find_multixact_start(MultiXactId multi, MultiXactOffset *result);
 static void WriteMTruncateXlogRec(Oid oldestMultiDB,
-								  MultiXactId endTruncOff,
-								  MultiXactOffset endTruncMemb);
+								  MultiXactId oldestMulti,
+								  MultiXactOffset oldestOffset);
 
 
 /*
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index f85b5286086..e39af79c03b 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -668,7 +668,7 @@ static TimeLineID LocalMinRecoveryPointTLI;
 static bool updateMinRecoveryPoint = true;
 
 /*
- * Local state for Controlfile data_checksum_version.  After initialization
+ * Local state for ControlFile data_checksum_version.  After initialization
  * this is only updated when absorbing a procsignal barrier during interrupt
  * processing.  The reason for keeping a copy in backend-private memory is to
  * avoid locking for interrogating the data checksum state.  Possible values
@@ -5045,7 +5045,7 @@ check_wal_buffers(int *newval, void **extra, GucSource source)
 	{
 		/*
 		 * If we haven't yet changed the boot_val default of -1, just let it
-		 * be.  We'll fix it when XLOGShmemSize is called.
+		 * be.  We'll fix it when XLOGShmemRequest is called.
 		 */
 		if (XLOGbuffers == -1)
 			return true;
diff --git a/src/backend/commands/explain_state.c b/src/backend/commands/explain_state.c
index 0e07a63fca6..a0ee0a664be 100644
--- a/src/backend/commands/explain_state.c
+++ b/src/backend/commands/explain_state.c
@@ -435,7 +435,7 @@ GUCCheckExplainExtensionOption(const char *option_name,
  * for an EXPLAIN extension option, the caller is entitled to assume that
  * a suitably constructed DefElem passed to the main option handler will
  * not cause an error. To construct this DefElem, the caller should set
- * the DefElem's defname to option_name. If option_values is NULL, arg
+ * the DefElem's defname to option_name. If option_value is NULL, arg
  * should be NULL. Otherwise, arg should be of the type given by
  * option_type, with option_value as the associated value. The only option
  * types that should be passed are T_String, T_Float, and T_Integer; in
diff --git a/src/backend/commands/repack.c b/src/backend/commands/repack.c
index 67364cc60e3..4a9dc7b164d 100644
--- a/src/backend/commands/repack.c
+++ b/src/backend/commands/repack.c
@@ -104,7 +104,7 @@ typedef struct ChangeContext
 	/* The relation the changes are applied to. */
 	Relation	cc_rel;
 
-	/* Needed to update indexes of rel_dst. */
+	/* Needed to update indexes of cc_rel. */
 	ResultRelInfo *cc_rri;
 	EState	   *cc_estate;
 
diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c
index 979c2be4abd..41cefcfde54 100644
--- a/src/backend/commands/vacuumparallel.c
+++ b/src/backend/commands/vacuumparallel.c
@@ -545,7 +545,7 @@ parallel_vacuum_end(ParallelVacuumState *pvs, IndexBulkDeleteResult **istats)
 /*
  * DSM detach callback. This is invoked when an autovacuum worker detaches
  * from the DSM segment holding PVShared. It ensures to reset the local pointer
- * to the shared state even if paralell vacuum raises an error and doesn't
+ * to the shared state even if parallel vacuum raises an error and doesn't
  * call parallel_vacuum_end().
  */
 static void
diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c
index f08982a43cc..9810a62e6ce 100644
--- a/src/backend/executor/execTuples.c
+++ b/src/backend/executor/execTuples.c
@@ -1403,7 +1403,7 @@ MakeTupleTableSlot(TupleDesc tupleDesc,
 		 * Precalculate the maximum guaranteed attribute that has to exist in
 		 * every tuple which gets deformed into this slot.  When the
 		 * TTS_FLAG_OBEYS_NOT_NULL_CONSTRAINTS flag is enabled, we simply take
-		 * the precalculated value from the tupleDesc, otherwise the
+		 * the pre-calculated value from the tupleDesc, otherwise the
 		 * optimization is disabled, and we set the value to 0.
 		 */
 		if ((flags & TTS_FLAG_OBEYS_NOT_NULL_CONSTRAINTS) != 0)
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index ef2a6bc6e9d..b013f8356ee 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -1430,7 +1430,7 @@ ExecForPortionOfLeftovers(ModifyTableContext *context,
 		/*
 		 * If we don't have a ForPortionOfState yet, we must be a partition
 		 * child being hit for the first time. Make a copy from the root, with
-		 * our own tupleTableSlot. We do this lazily so that we don't pay the
+		 * our own TupleTableSlot. We do this lazily so that we don't pay the
 		 * price of unused partitions.
 		 */
 		ForPortionOfState *leafState = makeNode(ForPortionOfState);
diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c
index a3e222f3a3d..f64b2787f66 100644
--- a/src/backend/libpq/be-secure-openssl.c
+++ b/src/backend/libpq/be-secure-openssl.c
@@ -200,7 +200,7 @@ be_tls_init(bool isServerStart)
 	 *
 	 * The reason for not doing everything in this if-else conditional is that
 	 * we want to use the same processing of postgresql.conf for when ssl_sni
-	 * is off as well as when it's on but the hostsfile is missing etc.  Thus
+	 * is off as well as when it's on but the hosts file is missing etc.  Thus
 	 * we set res to the state and continue with a new conditional instead of
 	 * duplicating logic and risk it diverging over time.
 	 */
@@ -208,7 +208,7 @@ be_tls_init(bool isServerStart)
 	{
 		/*
 		 * The GUC check hook should have already blocked this but to be on
-		 * the safe side we doublecheck here.
+		 * the safe side we double-check here.
 		 */
 #ifndef HAVE_SSL_CTX_SET_CLIENT_HELLO_CB
 		ereport(isServerStart ? FATAL : LOG,
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index 84deed9aaa6..cb4e5019c2f 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -80,7 +80,7 @@ static OnConflictExpr *transformOnConflictClause(ParseState *pstate,
 												 OnConflictClause *onConflictClause);
 static ForPortionOfExpr *transformForPortionOfClause(ParseState *pstate,
 													 int rtindex,
-													 const ForPortionOfClause *forPortionOfClause,
+													 const ForPortionOfClause *forPortionOf,
 													 bool isUpdate);
 static int	count_rowexpr_columns(ParseState *pstate, Node *expr);
 static Query *transformSelectStmt(ParseState *pstate, SelectStmt *stmt,
diff --git a/src/backend/parser/parse_graphtable.c b/src/backend/parser/parse_graphtable.c
index 30ddce5aa9f..f889c8df4e3 100644
--- a/src/backend/parser/parse_graphtable.c
+++ b/src/backend/parser/parse_graphtable.c
@@ -157,7 +157,7 @@ transformGraphTablePropertyRef(ParseState *pstate, ColumnRef *cref)
  * A label expression is parsed as either a ColumnRef with a single field or a
  * label expression like label disjunction. The single field in the ColumnRef is
  * treated as a label name and transformed to a GraphLabelRef node. The label
- * expression is recursively transformed into an expression tree containg
+ * expression is recursively transformed into an expression tree containing
  * GraphLabelRef nodes corresponding to the names of the labels appearing in the
  * expression. If any label name cannot be resolved to a label in the property
  * graph, an error is raised.
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index 680db664be4..a5a8db2ff88 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -3047,7 +3047,7 @@ table_recheck_autovac(Oid relid, HTAB *table_toast_map,
  *
  * One exception to the previous paragraph is for tables nearing wraparound,
  * i.e., those that have surpassed the effective failsafe ages.  In that case,
- * the relfrozen/relminmxid-based score is scaled aggressively so that the
+ * the relfrozenxid/relminmxid-based score is scaled aggressively so that the
  * table has a decent chance of sorting to the front of the list.
  *
  * To adjust how strongly each component contributes to the score, the
diff --git a/src/backend/postmaster/datachecksum_state.c b/src/backend/postmaster/datachecksum_state.c
index 18797a8ee3d..3a060cbcceb 100644
--- a/src/backend/postmaster/datachecksum_state.c
+++ b/src/backend/postmaster/datachecksum_state.c
@@ -99,12 +99,12 @@
  * state will also be set to "off".
  *
  * Backends transition Bd -> Bi via a procsignalbarrier which is emitted by the
- * DataChecksumsLauncher.  When all backends have acknowledged the barrier then
- * Bd will be empty and the next phase can begin: calculating and writing data
- * checksums with DataChecksumsWorkers.  When the DataChecksumsWorker processes
- * have finished writing checksums on all pages, data checksums are enabled
- * cluster-wide via another procsignalbarrier. There are four sets of backends
- * where Bd shall be an empty set:
+ * DataChecksumsWorkerLauncherMain.  When all backends have acknowledged the
+ * barrier then Bd will be empty and the next phase can begin: calculating and
+ * writing data checksums with DataChecksumsWorkers.  When the
+ * DataChecksumsWorker processes have finished writing checksums on all pages,
+ * data checksums are enabled cluster-wide via another procsignalbarrier.
+ * There are four sets of backends where Bd shall be an empty set:
  *
  * Bg: Backend updating the global state and emitting the procsignalbarrier
  * Bd: Backends in "off" state
@@ -634,7 +634,7 @@ ProcessSingleRelationFork(Relation reln, ForkNumber forkNum, BufferAccessStrateg
 
 	relns = get_namespace_name(RelationGetNamespace(reln));
 
-	/* Report the current relation to pgstat_activity */
+	/* Report the current relation to pg_stat_activity */
 	snprintf(activity, sizeof(activity) - 1, "processing: %s.%s (%s, %u blocks)",
 			 (relns ? relns : ""), RelationGetRelationName(reln), forkNames[forkNum], numblocks);
 	pgstat_report_activity(STATE_RUNNING, activity);
@@ -659,7 +659,7 @@ ProcessSingleRelationFork(Relation reln, ForkNumber forkNum, BufferAccessStrateg
 		 * re-write the page to WAL even if the checksum hasn't changed,
 		 * because if there is a replica it might have a slightly different
 		 * version of the page with an invalid checksum, caused by unlogged
-		 * changes (e.g. hintbits) on the primary happening while checksums
+		 * changes (e.g. hint bits) on the primary happening while checksums
 		 * were off. This can happen if there was a valid checksum on the page
 		 * at one point in the past, so only when checksums are first on, then
 		 * off, and then turned on again.  TODO: investigate if this could be
@@ -1262,7 +1262,7 @@ ProcessAllDatabases(void)
 }
 
 /*
- * DataChecksumShmemRequest
+ * DataChecksumsShmemRequest
  *		Request datachecksumsworker-related shared memory
  */
 static void
diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c
index 372d77c475e..c9dfb094c2b 100644
--- a/src/backend/replication/logical/origin.c
+++ b/src/backend/replication/logical/origin.c
@@ -1371,7 +1371,7 @@ replorigin_session_get_progress(bool flush)
 /*
  * Clear the per-transaction replication origin state.
  *
- * replorigin_session_origin is also cleared if clear_origin is set.
+ * replorigin_xact_state.origin is also cleared if clear_origin is set.
  */
 void
 replorigin_xact_clear(bool clear_origin)
diff --git a/src/backend/rewrite/rewriteGraphTable.c b/src/backend/rewrite/rewriteGraphTable.c
index 2c3199d3230..7b64818e369 100644
--- a/src/backend/rewrite/rewriteGraphTable.c
+++ b/src/backend/rewrite/rewriteGraphTable.c
@@ -91,10 +91,10 @@ struct path_element
 
 static Node *replace_property_refs(Oid propgraphid, Node *node, const List *mappings);
 static List *build_edge_vertex_link_quals(HeapTuple edgetup, int edgerti, int refrti, Oid refid, AttrNumber catalog_key_attnum, AttrNumber catalog_ref_attnum, AttrNumber catalog_eqop_attnum);
-static List *generate_queries_for_path_pattern(RangeTblEntry *rte, List *element_patterns);
+static List *generate_queries_for_path_pattern(RangeTblEntry *rte, List *path_pattern);
 static Query *generate_query_for_graph_path(RangeTblEntry *rte, List *path);
 static Node *generate_setop_from_pathqueries(List *pathqueries, List **rtable, List **targetlist);
-static List *generate_queries_for_path_pattern_recurse(RangeTblEntry *rte, List *pathqueries, List *cur_path, List *path_pattern_lists, int elempos);
+static List *generate_queries_for_path_pattern_recurse(RangeTblEntry *rte, List *pathqueries, List *cur_path, List *path_elem_lists, int elempos);
 static Query *generate_query_for_empty_path_pattern(RangeTblEntry *rte);
 static Query *generate_union_from_pathqueries(List **pathqueries);
 static List *get_path_elements_for_path_factor(Oid propgraphid, struct path_factor *pf);
diff --git a/src/backend/statistics/extended_stats_funcs.c b/src/backend/statistics/extended_stats_funcs.c
index 9279904b465..9108187477c 100644
--- a/src/backend/statistics/extended_stats_funcs.c
+++ b/src/backend/statistics/extended_stats_funcs.c
@@ -571,7 +571,7 @@ extended_statistics_update(FunctionCallInfo fcinfo)
 
 	/*
 	 * Either of these statistic types requires that we supply a semi-filled
-	 * VacAttrStatP array.
+	 * VacAttrStatsP array.
 	 *
 	 * It is not possible to use the existing lookup_var_attr_stats() and
 	 * examine_attribute() because these functions will skip attributes where
@@ -586,7 +586,7 @@ extended_statistics_update(FunctionCallInfo fcinfo)
 
 		/*
 		 * The leading stxkeys are attribute numbers up through numattnums.
-		 * These keys must be in ascending AttNumber order, but we do not rely
+		 * These keys must be in ascending AttrNumber order, but we do not rely
 		 * on that.
 		 */
 		for (int i = 0; i < numattnums; i++)
@@ -724,7 +724,7 @@ extended_statistics_update(FunctionCallInfo fcinfo)
 		/*
 		 * Generate the expressions array.
 		 *
-		 * The attytypids, attytypmods, and atttypcolls arrays have all the
+		 * The atttypids, atttypmods, and atttypcolls arrays have all the
 		 * regular attributes listed first, so we can pass those arrays with a
 		 * start point after the last regular attribute.  There are numexprs
 		 * elements remaining.
@@ -1091,7 +1091,7 @@ array_in_safe(FmgrInfo *array_in, const char *s, Oid typid, int32 typmod,
  * still return a legit tuple datum.
  *
  * Set pg_statistic_ok to true if all of the values found in the container
- * were imported without issue.  pg_statistic_ok is swicthed to "true" once
+ * were imported without issue.  pg_statistic_ok is switched to "true" once
  * the full pg_statistic tuple has been built and validated.
  */
 static Datum
@@ -1307,10 +1307,6 @@ import_pg_statistic(Relation pgsd, JsonbContainer *cont,
 	 * if they aren't then we need to reject that stakind completely.
 	 * Currently we go a step further and reject the expression array
 	 * completely.
-	 *
-	 * Once it is established that the pairs are in NULL/NOT-NULL alignment,
-	 * we can test either expr_nulls[] value to see if the stakind has
-	 * value(s) that we can set or not.
 	 */
 
 	if (found[MOST_COMMON_VALS_ELEM])
diff --git a/src/backend/storage/aio/method_worker.c b/src/backend/storage/aio/method_worker.c
index 061a93d90d4..63e34d66690 100644
--- a/src/backend/storage/aio/method_worker.c
+++ b/src/backend/storage/aio/method_worker.c
@@ -814,7 +814,7 @@ IoWorkerMain(const void *startup_data, size_t startup_data_len)
 			 * it.  Crossing the current worker count is a useful signal
 			 * because it's clearly too deep to avoid queuing latency already,
 			 * but still leaves a small window of opportunity to improve the
-			 * situation before the queue oveflows.
+			 * situation before the queue overflows.
 			 *
 			 * 2. The worker pool is keeping up, no latency is being
 			 * introduced and an extra worker would be a waste of resources.
@@ -830,10 +830,10 @@ IoWorkerMain(const void *startup_data, size_t startup_data_len)
 			 *
 			 * On its own, this is an extremely crude signal.  When combined
 			 * with the wakeup propagation test that precedes it (but on its
-			 * own tends to overshoot) and io_worker_launch_delay, the result
-			 * is that we gradually test each pool size until we find one that
-			 * doesn't trigger further expansion, and then hold it for at
-			 * least io_worker_idle_timeout.
+			 * own tends to overshoot) and io_worker_launch_interval, the
+			 * result is that we gradually test each pool size until we find
+			 * one that doesn't trigger further expansion, and then hold it
+			 * for at least io_worker_idle_timeout.
 			 *
 			 * XXX Perhaps ideas from queueing theory or control theory could
 			 * do a better job of this.
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 3cc0b0bdd92..32f501bbd21 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -5844,7 +5844,7 @@ MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
  * Used to clean up after errors.
  *
  * Currently, we can expect that resource owner cleanup, via
- * ResOwnerReleaseBufferPin(), took care of releasing buffer content locks per
+ * ResOwnerReleaseBuffer(), took care of releasing buffer content locks per
  * se; the only thing we need to deal with here is clearing any PIN_COUNT
  * request that was in progress.
  */
@@ -5993,7 +5993,7 @@ BufferLockAcquire(Buffer buffer, BufferDesc *buf_hdr, BufferLockMode mode)
 
 		pgstat_report_wait_end();
 
-		/* Retrying, allow BufferLockRelease to release waiters again. */
+		/* Retrying, allow BufferLockReleaseSub to release waiters again. */
 		pg_atomic_fetch_and_u64(&buf_hdr->state, ~BM_LOCK_WAKE_IN_PROGRESS);
 	}
 
diff --git a/src/backend/storage/ipc/ipci.c b/src/backend/storage/ipc/ipci.c
index bf6b81e621b..e149a738c8d 100644
--- a/src/backend/storage/ipc/ipci.c
+++ b/src/backend/storage/ipc/ipci.c
@@ -169,7 +169,7 @@ RegisterBuiltinShmemCallbacks(void)
 {
 	/*
 	 * Call RegisterShmemCallbacks(...) on each subsystem listed in
-	 * subsystemslist.h
+	 * subsystemlist.h
 	 */
 #define PG_SHMEM_SUBSYSTEM(subsystem_callbacks) \
 	RegisterShmemCallbacks(&(subsystem_callbacks));
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index c221fe96889..8d246ed5a4e 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -453,8 +453,7 @@ LockManagerShmemRequest(void *arg)
 	int64		max_table_size;
 
 	/*
-	 * Compute sizes for lock hashtables.  Note that these calculations must
-	 * agree with LockManagerShmemSize!
+	 * Compute sizes for lock hashtables.
 	 */
 	max_table_size = NLOCKENTS();
 
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index 899a4ef06e4..0ae85b7d5b4 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -1156,8 +1156,7 @@ PredicateLockShmemRequest(void *arg)
 		);
 
 	/*
-	 * Compute size for serializable transaction hashtable. Note these
-	 * calculations must agree with PredicateLockShmemSize!
+	 * Compute size for serializable transaction hashtable.
 	 *
 	 * Assume an average of 10 predicate locking transactions per backend.
 	 * This allows aggressive cleanup while detail is present before data must
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 090e8cc28c1..78587d223cb 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -13352,7 +13352,7 @@ get_for_portion_of(ForPortionOfExpr *forPortionOf, deparse_context *context)
 
 		/*
 		 * Try to write it as FROM ... TO ... if we received it that way,
-		 * otherwise (targetExpr).
+		 * otherwise (targetRange).
 		 */
 		if (forPortionOf->targetFrom && forPortionOf->targetTo)
 		{
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index ad960336e8d..1b8a73f589a 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -2353,7 +2353,7 @@ DebugFileOpen(void)
  * GUC check_hook for log_min_messages
  *
  * This value is parsed as a comma-separated list of zero or more TYPE:LEVEL
- * elements.  For each element, TYPE corresponds to a bk_category value (see
+ * elements.  For each element, TYPE corresponds to a bkcategory value (see
  * postmaster/proctypelist.h); LEVEL is one of server_message_level_options.
  *
  * In addition, there must be a single LEVEL element (with no TYPE part)
diff --git a/src/bin/pg_combinebackup/t/011_ib_truncation.pl b/src/bin/pg_combinebackup/t/011_ib_truncation.pl
index c5e0124c04d..e1ce4521558 100644
--- a/src/bin/pg_combinebackup/t/011_ib_truncation.pl
+++ b/src/bin/pg_combinebackup/t/011_ib_truncation.pl
@@ -102,7 +102,8 @@ is($vm_limits, '1',
 	'WAL summary has correct VM fork truncation limit');
 
 # Combine full and incremental backups.  Before the fix, this failed because
-# the INCREMENTAL file header contained an incorrect truncation_block value.
+# the INCREMENTAL file header contained an incorrect truncation_block_length
+# value.
 my $restored = PostgreSQL::Test::Cluster->new('node2');
 $restored->init_from_backup($primary, 'incr', combine_with_prior => ['full']);
 $restored->start();
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 1ca03d6b278..d56dcc701ce 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -18861,7 +18861,7 @@ dumpStatisticsExtStats(Archive *fout, const StatsExtInfo *statsextinfo)
 		{
 			/*
 			 * There is no ordering column in pg_stats_ext_exprs.  However, we
-			 * can rely on the unnesting of pg_statistic.ext_data.stxdexpr to
+			 * can rely on the unnesting of pg_statistic_ext_data.stxdexpr to
 			 * maintain the desired order of expression elements.
 			 */
 			appendPQExpBufferStr(pq,
diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c
index f016b336308..95f4ac110b9 100644
--- a/src/bin/pg_dump/pg_restore.c
+++ b/src/bin/pg_dump/pg_restore.c
@@ -704,7 +704,7 @@ restore_one_database(const char *inputFileSpec, RestoreOptions *opts,
 	 * We don't have a connection yet but that doesn't matter. The connection
 	 * is initialized to NULL and if we terminate through exit_nicely() while
 	 * it's still NULL, the cleanup function will just be a no-op. If we are
-	 * restoring multiple databases, then only update AX handle for cleanup as
+	 * restoring multiple databases, then only update AH handle for cleanup as
 	 * the previous entry was already in the array and we had closed previous
 	 * connection, so we can use the same array slot.
 	 */
diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c
index 06134cf5d2e..5a7afe62eab 100644
--- a/src/bin/pg_upgrade/check.c
+++ b/src/bin/pg_upgrade/check.c
@@ -2601,7 +2601,7 @@ check_old_cluster_global_names(ClusterInfo *cluster)
 	conn_template1 = connectToServer(cluster, "template1");
 
 	/*
-	 * Get database, user/role and tablespacenames from cluster.  Can't use
+	 * Get database, user/role and tablespace names from cluster.  Can't use
 	 * pg_authid because only superusers can view it.
 	 */
 	res = executeQueryOrDie(conn_template1,
diff --git a/src/common/wchar.c b/src/common/wchar.c
index a44ee73accf..4c77e3e1dc8 100644
--- a/src/common/wchar.c
+++ b/src/common/wchar.c
@@ -26,9 +26,9 @@
  * this pair specifically.  Byte pair range constraints, in encoding
  * originator documentation, always excluded this pair.  No core conversion
  * could translate it.  However, longstanding verifychar implementations
- * accepted any non-NUL byte.  big5_to_euc_tw and big5_to_mic even translate
- * pairs not valid per encoding originator documentation.  To avoid tightening
- * core or non-core conversions in a security patch, we sought this one pair.
+ * accepted any non-NUL byte.  big5_to_euc_tw even translates pairs not
+ * valid per encoding originator documentation.  To avoid tightening core
+ * or non-core conversions in a security patch, we sought this one pair.
  *
  * PQescapeString() historically used spaces for BYTE1; many other values
  * could suffice for BYTE1.
diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h
index c13f05d39db..f2c36696bca 100644
--- a/src/include/access/tableam.h
+++ b/src/include/access/tableam.h
@@ -1570,8 +1570,6 @@ table_tuple_delete(Relation rel, ItemPointer tid, CommandId cid,
  *	crosscheck - if not InvalidSnapshot, also check old tuple against this
  *	options - These allow the caller to specify options that may change the
  *	behavior of the AM. The AM will ignore options that it does not support.
- *		TABLE_UPDATE_WAIT -- set if should wait for any conflicting update to
- *		commit/abort
  *		TABLE_UPDATE_NO_LOGICAL -- force-disables the emitting of logical
  *		decoding information for the tuple.
  *
diff --git a/src/include/postmaster/datachecksum_state.h b/src/include/postmaster/datachecksum_state.h
index 05625539604..7acb3b43ab8 100644
--- a/src/include/postmaster/datachecksum_state.h
+++ b/src/include/postmaster/datachecksum_state.h
@@ -17,7 +17,7 @@
 
 #include "storage/procsignal.h"
 
-/* Possible operations the Datachecksumsworker can perform */
+/* Possible operations the DataChecksumsWorker can perform */
 typedef enum DataChecksumsWorkerOperation
 {
 	ENABLE_DATACHECKSUMS,
diff --git a/src/test/modules/test_checksums/t/003_standby_restarts.pl b/src/test/modules/test_checksums/t/003_standby_restarts.pl
index 7ad11417ca6..11e15c9d734 100644
--- a/src/test/modules/test_checksums/t/003_standby_restarts.pl
+++ b/src/test/modules/test_checksums/t/003_standby_restarts.pl
@@ -110,7 +110,7 @@ $node_primary->wait_for_catchup($node_standby, 'replay');
 # Ensure that the primary and standby has switched to off
 wait_for_checksum_state($node_primary, 'off');
 wait_for_checksum_state($node_standby, 'off');
-# Doublecheck reading data without errors
+# Double-check reading data without errors
 $result =
   $node_primary->safe_psql('postgres', "SELECT count(a) FROM t WHERE a > 1");
 is($result, "19998", 'ensure we can safely read all data without checksums');
diff --git a/src/test/modules/test_checksums/t/005_injection.pl b/src/test/modules/test_checksums/t/005_injection.pl
index a37a24dbbad..7240b93bdd1 100644
--- a/src/test/modules/test_checksums/t/005_injection.pl
+++ b/src/test/modules/test_checksums/t/005_injection.pl
@@ -25,7 +25,7 @@ if ($ENV{enable_injection_points} ne 'yes')
 # Test cluster setup
 #
 
-# Initiate testcluster
+# Initialize test cluster
 my $node = PostgreSQL::Test::Cluster->new('injection_node');
 $node->init(no_data_checksums => 1);
 $node->start;
diff --git a/src/test/modules/test_checksums/test_checksums.c b/src/test/modules/test_checksums/test_checksums.c
index c2eabc2821c..621cf788dad 100644
--- a/src/test/modules/test_checksums/test_checksums.c
+++ b/src/test/modules/test_checksums/test_checksums.c
@@ -22,8 +22,6 @@
 PG_MODULE_MAGIC;
 
 extern PGDLLEXPORT void dc_delay_barrier(const char *name, const void *private_data, void *arg);
-extern PGDLLEXPORT void dc_modify_db_result(const char *name, const void *private_data, void *arg);
-extern PGDLLEXPORT void dc_fake_temptable(const char *name, const void *private_data, void *arg);
 
 /*
  * Test for delaying emission of procsignalbarriers.
