From e4307810980d4874153ab53142a31ddce8d85298 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Wed, 30 Nov 2022 07:52:19 +0100 Subject: [PATCH] pg_dump: Remove "blob" terminology For historical reasons, pg_dump refers to large objects as "BLOBs". This term is not used anywhere else in PostgreSQL, and it also means something different in the SQL standard and other SQL systems. This patch renames internal functions, code comments, documentation, etc. to use the "large object" or "LO" terminology instead. There is no functionality change, so the archive format still uses the name "BLOB" for the archive entry. Additional long command-line options are added with the new naming. --- doc/src/sgml/ref/pg_dump.sgml | 14 +- src/bin/pg_dump/pg_backup.h | 12 +- src/bin/pg_dump/pg_backup_archiver.c | 76 ++++----- src/bin/pg_dump/pg_backup_archiver.h | 32 ++-- src/bin/pg_dump/pg_backup_custom.c | 52 +++---- src/bin/pg_dump/pg_backup_db.c | 4 +- src/bin/pg_dump/pg_backup_directory.c | 104 ++++++------- src/bin/pg_dump/pg_backup_null.c | 50 +++--- src/bin/pg_dump/pg_backup_tar.c | 68 ++++----- src/bin/pg_dump/pg_dump.c | 212 +++++++++++++------------- src/bin/pg_dump/pg_dump.h | 8 +- src/bin/pg_dump/pg_dump_sort.c | 16 +- src/bin/pg_dump/t/002_pg_dump.pl | 46 +++--- 13 files changed, 348 insertions(+), 346 deletions(-) diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml index 8b9d9f4cad43..989815939bf1 100644 --- a/doc/src/sgml/ref/pg_dump.sgml +++ b/doc/src/sgml/ref/pg_dump.sgml @@ -132,7 +132,7 @@ Options - --blobs + --large-objects Include large objects in the dump. This is the default behavior except when --schema, --table, or --schema-only is specified. The -b switch is therefore only useful to add large objects to dumps where a specific schema or table has been requested. Note that - blobs are considered data and therefore will be included when + large objects are considered data and therefore will be included when --data-only is used, but not when --schema-only is. @@ -149,7 +149,7 @@ Options - --no-blobs + --no-large-objects Exclude large objects in the dump. @@ -323,7 +323,7 @@ Options Output a directory-format archive suitable for input into pg_restore. This will create a directory - with one file for each table and blob being dumped, plus a + with one file for each table and large object being dumped, plus a so-called Table of Contents file describing the dumped objects in a machine-readable format that pg_restore can read. A directory format archive can be manipulated with @@ -434,9 +434,9 @@ Options - Non-schema objects such as blobs are not dumped when --schema-only is - specified. You can add blobs back to the dump with the --blobs - switch. + Non-schema objects such as large objects are not dumped when --schema-only is + specified. You can add large objects back to the dump with the --large-objects + switch. 
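The backward compatibility promised in the commit message (and visible in the pg_dump.c long_options hunk further down) relies on plain getopt_long() table aliasing: the old and the new long option name map to the same short-option code, so the rest of the program cannot tell them apart. Below is a minimal self-contained sketch of that technique; it is illustrative code under that assumption, not pg_dump source.

/*
 * Sketch of long-option aliasing via getopt_long(): the deprecated and
 * the new spelling share one short-option code.  Standalone example,
 * not pg_dump source.
 */
#include <getopt.h>
#include <stdbool.h>
#include <stdio.h>

int
main(int argc, char **argv)
{
	static struct option long_options[] = {
		{"blobs", no_argument, NULL, 'b'},
		{"large-objects", no_argument, NULL, 'b'},	/* new alias for -b */
		{"no-blobs", no_argument, NULL, 'B'},
		{"no-large-objects", no_argument, NULL, 'B'},	/* new alias for -B */
		{NULL, 0, NULL, 0}
	};
	bool		outputLOs = false;
	bool		dontOutputLOs = false;
	int			c;

	while ((c = getopt_long(argc, argv, "bB", long_options, NULL)) != -1)
	{
		switch (c)
		{
			case 'b':			/* --blobs or --large-objects */
				outputLOs = true;
				break;
			case 'B':			/* --no-blobs or --no-large-objects */
				dontOutputLOs = true;
				break;
			default:
				return 1;
		}
	}
	printf("outputLOs=%d dontOutputLOs=%d\n", outputLOs, dontOutputLOs);
	return 0;
}

Invoked with either --blobs or --large-objects, this sets the same flag, which is why the patch can add the new spellings without any behavior change.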
diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h index e8b78982971e..a39b8e6c9bb3 100644 --- a/src/bin/pg_dump/pg_backup.h +++ b/src/bin/pg_dump/pg_backup.h @@ -52,9 +52,9 @@ typedef enum _archiveMode typedef enum _teSection { - SECTION_NONE = 1, /* COMMENTs, ACLs, etc; can be anywhere */ + SECTION_NONE = 1, /* comments, ACLs, etc; can be anywhere */ SECTION_PRE_DATA, /* stuff to be processed before data */ - SECTION_DATA, /* TABLE DATA, BLOBS, BLOB COMMENTS */ + SECTION_DATA, /* table data, large objects, LO comments */ SECTION_POST_DATA /* stuff to be processed after data */ } teSection; @@ -191,8 +191,8 @@ typedef struct _dumpOptions int outputClean; int outputCreateDB; - bool outputBlobs; - bool dontOutputBlobs; + bool outputLOs; + bool dontOutputLOs; int outputNoOwner; char *outputSuperuser; @@ -287,8 +287,8 @@ extern PGconn *GetConnection(Archive *AHX); /* Called to write *data* to the archive */ extern void WriteData(Archive *AHX, const void *data, size_t dLen); -extern int StartBlob(Archive *AHX, Oid oid); -extern int EndBlob(Archive *AHX, Oid oid); +extern int StartLO(Archive *AHX, Oid oid); +extern int EndLO(Archive *AHX, Oid oid); extern void CloseArchive(Archive *AHX); diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c index f39c0fa36fdc..f48a6f2b08fa 100644 --- a/src/bin/pg_dump/pg_backup_archiver.c +++ b/src/bin/pg_dump/pg_backup_archiver.c @@ -545,7 +545,7 @@ RestoreArchive(Archive *AHX) */ if (strncmp(te->desc, "BLOB", 4) == 0) { - DropBlobIfExists(AH, te->catalogId.oid); + DropLOIfExists(AH, te->catalogId.oid); } else { @@ -1209,44 +1209,44 @@ PrintTOCSummary(Archive *AHX) } /*********** - * BLOB Archival + * Large Object Archival ***********/ -/* Called by a dumper to signal start of a BLOB */ +/* Called by a dumper to signal start of a LO */ int -StartBlob(Archive *AHX, Oid oid) +StartLO(Archive *AHX, Oid oid) { ArchiveHandle *AH = (ArchiveHandle *) AHX; - if (!AH->StartBlobPtr) + if (!AH->StartLOPtr) pg_fatal("large-object output not supported in chosen format"); - AH->StartBlobPtr(AH, AH->currToc, oid); + AH->StartLOPtr(AH, AH->currToc, oid); return 1; } -/* Called by a dumper to signal end of a BLOB */ +/* Called by a dumper to signal end of a LO */ int -EndBlob(Archive *AHX, Oid oid) +EndLO(Archive *AHX, Oid oid) { ArchiveHandle *AH = (ArchiveHandle *) AHX; - if (AH->EndBlobPtr) - AH->EndBlobPtr(AH, AH->currToc, oid); + if (AH->EndLOPtr) + AH->EndLOPtr(AH, AH->currToc, oid); return 1; } /********** - * BLOB Restoration + * Large Object Restoration **********/ /* - * Called by a format handler before any blobs are restored + * Called by a format handler before any LOs are restored */ void -StartRestoreBlobs(ArchiveHandle *AH) +StartRestoreLOs(ArchiveHandle *AH) { RestoreOptions *ropt = AH->public.ropt; @@ -1258,14 +1258,14 @@ StartRestoreBlobs(ArchiveHandle *AH) ahprintf(AH, "BEGIN;\n\n"); } - AH->blobCount = 0; + AH->loCount = 0; } /* - * Called by a format handler after all blobs are restored + * Called by a format handler after all LOs are restored */ void -EndRestoreBlobs(ArchiveHandle *AH) +EndRestoreLOs(ArchiveHandle *AH) { RestoreOptions *ropt = AH->public.ropt; @@ -1279,21 +1279,21 @@ EndRestoreBlobs(ArchiveHandle *AH) pg_log_info(ngettext("restored %d large object", "restored %d large objects", - AH->blobCount), - AH->blobCount); + AH->loCount), + AH->loCount); } /* - * Called by a format handler to initiate restoration of a blob + * Called by a format handler to initiate restoration of a LO */ void 
-StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool drop) +StartRestoreLO(ArchiveHandle *AH, Oid oid, bool drop) { - bool old_blob_style = (AH->version < K_VERS_1_12); + bool old_lo_style = (AH->version < K_VERS_1_12); Oid loOid; - AH->blobCount++; + AH->loCount++; /* Initialize the LO Buffer */ AH->lo_buf_used = 0; @@ -1301,12 +1301,12 @@ StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool drop) pg_log_info("restoring large object with OID %u", oid); /* With an old archive we must do drop and create logic here */ - if (old_blob_style && drop) - DropBlobIfExists(AH, oid); + if (old_lo_style && drop) + DropLOIfExists(AH, oid); if (AH->connection) { - if (old_blob_style) + if (old_lo_style) { loOid = lo_create(AH->connection, oid); if (loOid == 0 || loOid != oid) @@ -1320,7 +1320,7 @@ StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool drop) } else { - if (old_blob_style) + if (old_lo_style) ahprintf(AH, "SELECT pg_catalog.lo_open(pg_catalog.lo_create('%u'), %d);\n", oid, INV_WRITE); else @@ -1328,11 +1328,11 @@ StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool drop) oid, INV_WRITE); } - AH->writingBlob = 1; + AH->writingLO = true; } void -EndRestoreBlob(ArchiveHandle *AH, Oid oid) +EndRestoreLO(ArchiveHandle *AH, Oid oid) { if (AH->lo_buf_used > 0) { @@ -1340,7 +1340,7 @@ EndRestoreBlob(ArchiveHandle *AH, Oid oid) dump_lo_buf(AH); } - AH->writingBlob = 0; + AH->writingLO = false; if (AH->connection) { @@ -1629,7 +1629,7 @@ RestoringToDB(ArchiveHandle *AH) } /* - * Dump the current contents of the LO data buffer while writing a BLOB + * Dump the current contents of the LO data buffer while writing a LO */ static void dump_lo_buf(ArchiveHandle *AH) @@ -1657,10 +1657,10 @@ dump_lo_buf(ArchiveHandle *AH) AH->lo_buf_used, AH); - /* Hack: turn off writingBlob so ahwrite doesn't recurse to here */ - AH->writingBlob = 0; + /* Hack: turn off writingLO so ahwrite doesn't recurse to here */ + AH->writingLO = false; ahprintf(AH, "SELECT pg_catalog.lowrite(0, %s);\n", buf->data); - AH->writingBlob = 1; + AH->writingLO = true; destroyPQExpBuffer(buf); } @@ -1679,7 +1679,7 @@ ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH) { int bytes_written = 0; - if (AH->writingBlob) + if (AH->writingLO) { size_t remaining = size * nmemb; @@ -2307,7 +2307,7 @@ _allocAH(const char *FileSpec, const ArchiveFormat fmt, } /* - * Write out all data (tables & blobs) + * Write out all data (tables & LOs) */ void WriteDataChunks(ArchiveHandle *AH, ParallelState *pstate) @@ -2401,8 +2401,8 @@ WriteDataChunksForTocEntry(ArchiveHandle *AH, TocEntry *te) if (strcmp(te->desc, "BLOBS") == 0) { - startPtr = AH->StartBlobsPtr; - endPtr = AH->EndBlobsPtr; + startPtr = AH->StartLOsPtr; + endPtr = AH->EndLOsPtr; } else { @@ -3434,7 +3434,7 @@ _getObjectDescription(PQExpBuffer buf, const TocEntry *te) appendPQExpBuffer(buf, "%s.", fmtId(te->namespace)); appendPQExpBufferStr(buf, fmtId(te->tag)); } - /* BLOBs just have a name, but it's numeric so must not use fmtId */ + /* LOs just have a name, but it's numeric so must not use fmtId */ else if (strcmp(type, "BLOB") == 0) { appendPQExpBuffer(buf, "LARGE OBJECT %s", te->tag); diff --git a/src/bin/pg_dump/pg_backup_archiver.h b/src/bin/pg_dump/pg_backup_archiver.h index 42687c4ec835..c489dc27a2cd 100644 --- a/src/bin/pg_dump/pg_backup_archiver.h +++ b/src/bin/pg_dump/pg_backup_archiver.h @@ -145,10 +145,10 @@ typedef void (*StartDataPtrType) (ArchiveHandle *AH, TocEntry *te); typedef void (*WriteDataPtrType) (ArchiveHandle *AH, const void *data, size_t dLen); typedef void 
(*EndDataPtrType) (ArchiveHandle *AH, TocEntry *te); -typedef void (*StartBlobsPtrType) (ArchiveHandle *AH, TocEntry *te); -typedef void (*StartBlobPtrType) (ArchiveHandle *AH, TocEntry *te, Oid oid); -typedef void (*EndBlobPtrType) (ArchiveHandle *AH, TocEntry *te, Oid oid); -typedef void (*EndBlobsPtrType) (ArchiveHandle *AH, TocEntry *te); +typedef void (*StartLOsPtrType) (ArchiveHandle *AH, TocEntry *te); +typedef void (*StartLOPtrType) (ArchiveHandle *AH, TocEntry *te, Oid oid); +typedef void (*EndLOPtrType) (ArchiveHandle *AH, TocEntry *te, Oid oid); +typedef void (*EndLOsPtrType) (ArchiveHandle *AH, TocEntry *te); typedef int (*WriteBytePtrType) (ArchiveHandle *AH, const int i); typedef int (*ReadBytePtrType) (ArchiveHandle *AH); @@ -285,10 +285,10 @@ struct _archiveHandle PrintExtraTocPtrType PrintExtraTocPtr; /* Extra TOC info for format */ PrintTocDataPtrType PrintTocDataPtr; - StartBlobsPtrType StartBlobsPtr; - EndBlobsPtrType EndBlobsPtr; - StartBlobPtrType StartBlobPtr; - EndBlobPtrType EndBlobPtr; + StartLOsPtrType StartLOsPtr; + EndLOsPtrType EndLOsPtr; + StartLOPtrType StartLOPtr; + EndLOPtrType EndLOPtr; SetupWorkerPtrType SetupWorkerPtr; WorkerJobDumpPtrType WorkerJobDumpPtr; @@ -313,9 +313,9 @@ struct _archiveHandle ArchiverOutput outputKind; /* Flag for what we're currently writing */ bool pgCopyIn; /* Currently in libpq 'COPY IN' mode. */ - int loFd; /* BLOB fd */ - int writingBlob; /* Flag */ - int blobCount; /* # of blobs restored */ + int loFd; + bool writingLO; + int loCount; /* # of LOs restored */ char *fSpec; /* Archive File Spec */ FILE *FH; /* General purpose file handle */ @@ -462,10 +462,10 @@ extern size_t WriteStr(ArchiveHandle *AH, const char *c); int ReadOffset(ArchiveHandle *, pgoff_t *); size_t WriteOffset(ArchiveHandle *, pgoff_t, int); -extern void StartRestoreBlobs(ArchiveHandle *AH); -extern void StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool drop); -extern void EndRestoreBlob(ArchiveHandle *AH, Oid oid); -extern void EndRestoreBlobs(ArchiveHandle *AH); +extern void StartRestoreLOs(ArchiveHandle *AH); +extern void StartRestoreLO(ArchiveHandle *AH, Oid oid, bool drop); +extern void EndRestoreLO(ArchiveHandle *AH, Oid oid); +extern void EndRestoreLOs(ArchiveHandle *AH); extern void InitArchiveFmt_Custom(ArchiveHandle *AH); extern void InitArchiveFmt_Null(ArchiveHandle *AH); @@ -475,7 +475,7 @@ extern void InitArchiveFmt_Tar(ArchiveHandle *AH); extern bool isValidTarHeader(char *header); extern void ReconnectToServer(ArchiveHandle *AH, const char *dbname); -extern void DropBlobIfExists(ArchiveHandle *AH, Oid oid); +extern void DropLOIfExists(ArchiveHandle *AH, Oid oid); void ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH); int ahprintf(ArchiveHandle *AH, const char *fmt,...) 
pg_attribute_printf(2, 3); diff --git a/src/bin/pg_dump/pg_backup_custom.c b/src/bin/pg_dump/pg_backup_custom.c index a0a55a1edd09..94507bea0176 100644 --- a/src/bin/pg_dump/pg_backup_custom.c +++ b/src/bin/pg_dump/pg_backup_custom.c @@ -52,13 +52,13 @@ static void _PrintExtraToc(ArchiveHandle *AH, TocEntry *te); static void _PrintData(ArchiveHandle *AH); static void _skipData(ArchiveHandle *AH); -static void _skipBlobs(ArchiveHandle *AH); +static void _skipLOs(ArchiveHandle *AH); -static void _StartBlobs(ArchiveHandle *AH, TocEntry *te); -static void _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid); -static void _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid); -static void _EndBlobs(ArchiveHandle *AH, TocEntry *te); -static void _LoadBlobs(ArchiveHandle *AH, bool drop); +static void _StartLOs(ArchiveHandle *AH, TocEntry *te); +static void _StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid); +static void _EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid); +static void _EndLOs(ArchiveHandle *AH, TocEntry *te); +static void _LoadLOs(ArchiveHandle *AH, bool drop); static void _PrepParallelRestore(ArchiveHandle *AH); static void _Clone(ArchiveHandle *AH); @@ -123,10 +123,10 @@ InitArchiveFmt_Custom(ArchiveHandle *AH) AH->WriteExtraTocPtr = _WriteExtraToc; AH->PrintExtraTocPtr = _PrintExtraToc; - AH->StartBlobsPtr = _StartBlobs; - AH->StartBlobPtr = _StartBlob; - AH->EndBlobPtr = _EndBlob; - AH->EndBlobsPtr = _EndBlobs; + AH->StartLOsPtr = _StartLOs; + AH->StartLOPtr = _StartLO; + AH->EndLOPtr = _EndLO; + AH->EndLOsPtr = _EndLOs; AH->PrepParallelRestorePtr = _PrepParallelRestore; AH->ClonePtr = _Clone; @@ -304,7 +304,7 @@ _StartData(ArchiveHandle *AH, TocEntry *te) /* * Called by archiver when dumper calls WriteData. This routine is * called for both BLOB and TABLE data; it is the responsibility of - * the format to manage each kind of data using StartBlob/StartData. + * the format to manage each kind of data using StartLO/StartData. * * It should only be called from within a DataDumper routine. * @@ -347,7 +347,7 @@ _EndData(ArchiveHandle *AH, TocEntry *te) * Optional, but strongly recommended. */ static void -_StartBlobs(ArchiveHandle *AH, TocEntry *te) +_StartLOs(ArchiveHandle *AH, TocEntry *te) { lclContext *ctx = (lclContext *) AH->formatData; lclTocEntry *tctx = (lclTocEntry *) te->formatData; @@ -361,14 +361,14 @@ _StartBlobs(ArchiveHandle *AH, TocEntry *te) } /* - * Called by the archiver when the dumper calls StartBlob. + * Called by the archiver when the dumper calls StartLO. * * Mandatory. * * Must save the passed OID for retrieval at restore-time. */ static void -_StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid) +_StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid) { lclContext *ctx = (lclContext *) AH->formatData; @@ -381,12 +381,12 @@ _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid) } /* - * Called by the archiver when the dumper calls EndBlob. + * Called by the archiver when the dumper calls EndLO. * * Optional. */ static void -_EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid) +_EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid) { lclContext *ctx = (lclContext *) AH->formatData; @@ -401,9 +401,9 @@ _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid) * Optional. */ static void -_EndBlobs(ArchiveHandle *AH, TocEntry *te) +_EndLOs(ArchiveHandle *AH, TocEntry *te) { - /* Write out a fake zero OID to mark end-of-blobs. */ + /* Write out a fake zero OID to mark end-of-LOs. 
*/ WriteInt(AH, 0); } @@ -488,7 +488,7 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te) break; case BLK_BLOBS: - _skipBlobs(AH); + _skipLOs(AH); break; default: /* Always have a default */ @@ -536,7 +536,7 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te) break; case BLK_BLOBS: - _LoadBlobs(AH, AH->public.ropt->dropSchema); + _LoadLOs(AH, AH->public.ropt->dropSchema); break; default: /* Always have a default */ @@ -570,22 +570,22 @@ _PrintData(ArchiveHandle *AH) } static void -_LoadBlobs(ArchiveHandle *AH, bool drop) +_LoadLOs(ArchiveHandle *AH, bool drop) { Oid oid; - StartRestoreBlobs(AH); + StartRestoreLOs(AH); oid = ReadInt(AH); while (oid != 0) { - StartRestoreBlob(AH, oid, drop); + StartRestoreLO(AH, oid, drop); _PrintData(AH); - EndRestoreBlob(AH, oid); + EndRestoreLO(AH, oid); oid = ReadInt(AH); } - EndRestoreBlobs(AH); + EndRestoreLOs(AH); } /* @@ -595,7 +595,7 @@ _LoadBlobs(ArchiveHandle *AH, bool drop) * A zero OID indicates the end of the BLOBS. */ static void -_skipBlobs(ArchiveHandle *AH) +_skipLOs(ArchiveHandle *AH) { Oid oid; diff --git a/src/bin/pg_dump/pg_backup_db.c b/src/bin/pg_dump/pg_backup_db.c index 28baa68fd4e9..f766b65059da 100644 --- a/src/bin/pg_dump/pg_backup_db.c +++ b/src/bin/pg_dump/pg_backup_db.c @@ -542,11 +542,11 @@ CommitTransaction(Archive *AHX) } void -DropBlobIfExists(ArchiveHandle *AH, Oid oid) +DropLOIfExists(ArchiveHandle *AH, Oid oid) { /* * If we are not restoring to a direct database connection, we have to - * guess about how to detect whether the blob exists. Assume new-style. + * guess about how to detect whether the LO exists. Assume new-style. */ if (AH->connection == NULL || PQserverVersion(AH->connection) >= 90000) diff --git a/src/bin/pg_dump/pg_backup_directory.c b/src/bin/pg_dump/pg_backup_directory.c index 798182b6f7eb..e38b784f422f 100644 --- a/src/bin/pg_dump/pg_backup_directory.c +++ b/src/bin/pg_dump/pg_backup_directory.c @@ -52,7 +52,7 @@ typedef struct cfp *dataFH; /* currently open data file */ - cfp *blobsTocFH; /* file handle for blobs.toc */ + cfp *LOsTocFH; /* file handle for blobs.toc */ ParallelState *pstate; /* for parallel backup / restore */ } lclContext; @@ -78,11 +78,11 @@ static void _WriteExtraToc(ArchiveHandle *AH, TocEntry *te); static void _ReadExtraToc(ArchiveHandle *AH, TocEntry *te); static void _PrintExtraToc(ArchiveHandle *AH, TocEntry *te); -static void _StartBlobs(ArchiveHandle *AH, TocEntry *te); -static void _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid); -static void _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid); -static void _EndBlobs(ArchiveHandle *AH, TocEntry *te); -static void _LoadBlobs(ArchiveHandle *AH); +static void _StartLOs(ArchiveHandle *AH, TocEntry *te); +static void _StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid); +static void _EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid); +static void _EndLOs(ArchiveHandle *AH, TocEntry *te); +static void _LoadLOs(ArchiveHandle *AH); static void _PrepParallelRestore(ArchiveHandle *AH); static void _Clone(ArchiveHandle *AH); @@ -125,10 +125,10 @@ InitArchiveFmt_Directory(ArchiveHandle *AH) AH->WriteExtraTocPtr = _WriteExtraToc; AH->PrintExtraTocPtr = _PrintExtraToc; - AH->StartBlobsPtr = _StartBlobs; - AH->StartBlobPtr = _StartBlob; - AH->EndBlobPtr = _EndBlob; - AH->EndBlobsPtr = _EndBlobs; + AH->StartLOsPtr = _StartLOs; + AH->StartLOPtr = _StartLO; + AH->EndLOPtr = _EndLO; + AH->EndLOsPtr = _EndLOs; AH->PrepParallelRestorePtr = _PrepParallelRestore; AH->ClonePtr = _Clone; @@ -142,7 +142,7 @@ InitArchiveFmt_Directory(ArchiveHandle *AH) 
AH->formatData = (void *) ctx; ctx->dataFH = NULL; - ctx->blobsTocFH = NULL; + ctx->LOsTocFH = NULL; /* Initialize LO buffering */ AH->lo_buf_size = LOBBUFSIZE; @@ -335,7 +335,7 @@ _StartData(ArchiveHandle *AH, TocEntry *te) /* * Called by archiver when dumper calls WriteData. This routine is * called for both BLOB and TABLE data; it is the responsibility of - * the format to manage each kind of data using StartBlob/StartData. + * the format to manage each kind of data using StartLO/StartData. * * It should only be called from within a DataDumper routine. * @@ -419,7 +419,7 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te) return; if (strcmp(te->desc, "BLOBS") == 0) - _LoadBlobs(AH); + _LoadLOs(AH); else { char fname[MAXPGPATH]; @@ -430,50 +430,50 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te) } static void -_LoadBlobs(ArchiveHandle *AH) +_LoadLOs(ArchiveHandle *AH) { Oid oid; lclContext *ctx = (lclContext *) AH->formatData; char tocfname[MAXPGPATH]; char line[MAXPGPATH]; - StartRestoreBlobs(AH); + StartRestoreLOs(AH); setFilePath(AH, tocfname, "blobs.toc"); - ctx->blobsTocFH = cfopen_read(tocfname, PG_BINARY_R); + ctx->LOsTocFH = cfopen_read(tocfname, PG_BINARY_R); - if (ctx->blobsTocFH == NULL) + if (ctx->LOsTocFH == NULL) pg_fatal("could not open large object TOC file \"%s\" for input: %m", tocfname); - /* Read the blobs TOC file line-by-line, and process each blob */ - while ((cfgets(ctx->blobsTocFH, line, MAXPGPATH)) != NULL) + /* Read the LOs TOC file line-by-line, and process each LO */ + while ((cfgets(ctx->LOsTocFH, line, MAXPGPATH)) != NULL) { - char blobfname[MAXPGPATH + 1]; + char lofname[MAXPGPATH + 1]; char path[MAXPGPATH]; - /* Can't overflow because line and blobfname are the same length */ - if (sscanf(line, "%u %" CppAsString2(MAXPGPATH) "s\n", &oid, blobfname) != 2) + /* Can't overflow because line and lofname are the same length */ + if (sscanf(line, "%u %" CppAsString2(MAXPGPATH) "s\n", &oid, lofname) != 2) pg_fatal("invalid line in large object TOC file \"%s\": \"%s\"", tocfname, line); - StartRestoreBlob(AH, oid, AH->public.ropt->dropSchema); - snprintf(path, MAXPGPATH, "%s/%s", ctx->directory, blobfname); + StartRestoreLO(AH, oid, AH->public.ropt->dropSchema); + snprintf(path, MAXPGPATH, "%s/%s", ctx->directory, lofname); _PrintFileData(AH, path); - EndRestoreBlob(AH, oid); + EndRestoreLO(AH, oid); } - if (!cfeof(ctx->blobsTocFH)) + if (!cfeof(ctx->LOsTocFH)) pg_fatal("error reading large object TOC file \"%s\"", tocfname); - if (cfclose(ctx->blobsTocFH) != 0) + if (cfclose(ctx->LOsTocFH) != 0) pg_fatal("could not close large object TOC file \"%s\": %m", tocfname); - ctx->blobsTocFH = NULL; + ctx->LOsTocFH = NULL; - EndRestoreBlobs(AH); + EndRestoreLOs(AH); } @@ -625,7 +625,7 @@ _ReopenArchive(ArchiveHandle *AH) } /* - * BLOB support + * LO support */ /* @@ -633,29 +633,29 @@ _ReopenArchive(ArchiveHandle *AH) * It is called just prior to the dumper's DataDumper routine. * * We open the large object TOC file here, so that we can append a line to - * it for each blob. + * it for each LO. 
*/ static void -_StartBlobs(ArchiveHandle *AH, TocEntry *te) +_StartLOs(ArchiveHandle *AH, TocEntry *te) { lclContext *ctx = (lclContext *) AH->formatData; char fname[MAXPGPATH]; setFilePath(AH, fname, "blobs.toc"); - /* The blob TOC file is never compressed */ - ctx->blobsTocFH = cfopen_write(fname, "ab", 0); - if (ctx->blobsTocFH == NULL) + /* The LO TOC file is never compressed */ + ctx->LOsTocFH = cfopen_write(fname, "ab", 0); + if (ctx->LOsTocFH == NULL) pg_fatal("could not open output file \"%s\": %m", fname); } /* - * Called by the archiver when we're about to start dumping a blob. + * Called by the archiver when we're about to start dumping a LO. * - * We create a file to write the blob to. + * We create a file to write the LO to. */ static void -_StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid) +_StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid) { lclContext *ctx = (lclContext *) AH->formatData; char fname[MAXPGPATH]; @@ -669,41 +669,41 @@ _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid) } /* - * Called by the archiver when the dumper is finished writing a blob. + * Called by the archiver when the dumper is finished writing a LO. * - * We close the blob file and write an entry to the blob TOC file for it. + * We close the LO file and write an entry to the LO TOC file for it. */ static void -_EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid) +_EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid) { lclContext *ctx = (lclContext *) AH->formatData; char buf[50]; int len; - /* Close the BLOB data file itself */ + /* Close the LO data file itself */ if (cfclose(ctx->dataFH) != 0) - pg_fatal("could not close blob data file: %m"); + pg_fatal("could not close LO data file: %m"); ctx->dataFH = NULL; - /* register the blob in blobs.toc */ + /* register the LO in blobs.toc */ len = snprintf(buf, sizeof(buf), "%u blob_%u.dat\n", oid, oid); - if (cfwrite(buf, len, ctx->blobsTocFH) != len) - pg_fatal("could not write to blobs TOC file"); + if (cfwrite(buf, len, ctx->LOsTocFH) != len) + pg_fatal("could not write to LOs TOC file"); } /* * Called by the archiver when finishing saving all BLOB DATA. * - * We close the blobs TOC file. + * We close the LOs TOC file. */ static void -_EndBlobs(ArchiveHandle *AH, TocEntry *te) +_EndLOs(ArchiveHandle *AH, TocEntry *te) { lclContext *ctx = (lclContext *) AH->formatData; - if (cfclose(ctx->blobsTocFH) != 0) - pg_fatal("could not close blobs TOC file: %m"); - ctx->blobsTocFH = NULL; + if (cfclose(ctx->LOsTocFH) != 0) + pg_fatal("could not close LOs TOC file: %m"); + ctx->LOsTocFH = NULL; } /* @@ -782,7 +782,7 @@ _PrepParallelRestore(ArchiveHandle *AH) * If this is the BLOBS entry, what we stat'd was blobs.toc, which * most likely is a lot smaller than the actual blob data. We don't * have a cheap way to estimate how much smaller, but fortunately it - * doesn't matter too much as long as we get the blobs processed + * doesn't matter too much as long as we get the LOs processed * reasonably early. Arbitrarily scale up by a factor of 1K. 
*/ if (strcmp(te->desc, "BLOBS") == 0) diff --git a/src/bin/pg_dump/pg_backup_null.c b/src/bin/pg_dump/pg_backup_null.c index 541306d99158..08f096251b68 100644 --- a/src/bin/pg_dump/pg_backup_null.c +++ b/src/bin/pg_dump/pg_backup_null.c @@ -29,16 +29,16 @@ #include "pg_backup_utils.h" static void _WriteData(ArchiveHandle *AH, const void *data, size_t dLen); -static void _WriteBlobData(ArchiveHandle *AH, const void *data, size_t dLen); +static void _WriteLOData(ArchiveHandle *AH, const void *data, size_t dLen); static void _EndData(ArchiveHandle *AH, TocEntry *te); static int _WriteByte(ArchiveHandle *AH, const int i); static void _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len); static void _CloseArchive(ArchiveHandle *AH); static void _PrintTocData(ArchiveHandle *AH, TocEntry *te); -static void _StartBlobs(ArchiveHandle *AH, TocEntry *te); -static void _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid); -static void _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid); -static void _EndBlobs(ArchiveHandle *AH, TocEntry *te); +static void _StartLOs(ArchiveHandle *AH, TocEntry *te); +static void _StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid); +static void _EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid); +static void _EndLOs(ArchiveHandle *AH, TocEntry *te); /* @@ -56,10 +56,10 @@ InitArchiveFmt_Null(ArchiveHandle *AH) AH->ReopenPtr = NULL; AH->PrintTocDataPtr = _PrintTocData; - AH->StartBlobsPtr = _StartBlobs; - AH->StartBlobPtr = _StartBlob; - AH->EndBlobPtr = _EndBlob; - AH->EndBlobsPtr = _EndBlobs; + AH->StartLOsPtr = _StartLOs; + AH->StartLOPtr = _StartLO; + AH->EndLOPtr = _EndLO; + AH->EndLOsPtr = _EndLOs; AH->ClonePtr = NULL; AH->DeClonePtr = NULL; @@ -90,10 +90,10 @@ _WriteData(ArchiveHandle *AH, const void *data, size_t dLen) /* * Called by dumper via archiver from within a data dump routine - * We substitute this for _WriteData while emitting a BLOB + * We substitute this for _WriteData while emitting a LO */ static void -_WriteBlobData(ArchiveHandle *AH, const void *data, size_t dLen) +_WriteLOData(ArchiveHandle *AH, const void *data, size_t dLen) { if (dLen > 0) { @@ -119,54 +119,54 @@ _EndData(ArchiveHandle *AH, TocEntry *te) /* * Called by the archiver when starting to save all BLOB DATA (not schema). * This routine should save whatever format-specific information is needed - * to read the BLOBs back into memory. + * to read the LOs back into memory. * * It is called just prior to the dumper's DataDumper routine. * * Optional, but strongly recommended. */ static void -_StartBlobs(ArchiveHandle *AH, TocEntry *te) +_StartLOs(ArchiveHandle *AH, TocEntry *te) { ahprintf(AH, "BEGIN;\n\n"); } /* - * Called by the archiver when the dumper calls StartBlob. + * Called by the archiver when the dumper calls StartLO. * * Mandatory. * * Must save the passed OID for retrieval at restore-time. 
*/ static void -_StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid) +_StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid) { - bool old_blob_style = (AH->version < K_VERS_1_12); + bool old_lo_style = (AH->version < K_VERS_1_12); if (oid == 0) pg_fatal("invalid OID for large object"); /* With an old archive we must do drop and create logic here */ - if (old_blob_style && AH->public.ropt->dropSchema) - DropBlobIfExists(AH, oid); + if (old_lo_style && AH->public.ropt->dropSchema) + DropLOIfExists(AH, oid); - if (old_blob_style) + if (old_lo_style) ahprintf(AH, "SELECT pg_catalog.lo_open(pg_catalog.lo_create('%u'), %d);\n", oid, INV_WRITE); else ahprintf(AH, "SELECT pg_catalog.lo_open('%u', %d);\n", oid, INV_WRITE); - AH->WriteDataPtr = _WriteBlobData; + AH->WriteDataPtr = _WriteLOData; } /* - * Called by the archiver when the dumper calls EndBlob. + * Called by the archiver when the dumper calls EndLO. * * Optional. */ static void -_EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid) +_EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid) { AH->WriteDataPtr = _WriteData; @@ -179,7 +179,7 @@ _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid) * Optional. */ static void -_EndBlobs(ArchiveHandle *AH, TocEntry *te) +_EndLOs(ArchiveHandle *AH, TocEntry *te) { ahprintf(AH, "COMMIT;\n\n"); } @@ -197,12 +197,12 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te) AH->currToc = te; if (strcmp(te->desc, "BLOBS") == 0) - _StartBlobs(AH, te); + _StartLOs(AH, te); te->dataDumper((Archive *) AH, te->dataDumperArg); if (strcmp(te->desc, "BLOBS") == 0) - _EndBlobs(AH, te); + _EndLOs(AH, te); AH->currToc = NULL; } diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c index 402b93c610ae..d95d5ce3a187 100644 --- a/src/bin/pg_dump/pg_backup_tar.c +++ b/src/bin/pg_dump/pg_backup_tar.c @@ -55,10 +55,10 @@ static void _WriteExtraToc(ArchiveHandle *AH, TocEntry *te); static void _ReadExtraToc(ArchiveHandle *AH, TocEntry *te); static void _PrintExtraToc(ArchiveHandle *AH, TocEntry *te); -static void _StartBlobs(ArchiveHandle *AH, TocEntry *te); -static void _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid); -static void _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid); -static void _EndBlobs(ArchiveHandle *AH, TocEntry *te); +static void _StartLOs(ArchiveHandle *AH, TocEntry *te); +static void _StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid); +static void _EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid); +static void _EndLOs(ArchiveHandle *AH, TocEntry *te); #define K_STD_BUF_SIZE 1024 @@ -79,7 +79,7 @@ typedef struct { int hasSeek; pgoff_t filePos; - TAR_MEMBER *blobToc; + TAR_MEMBER *loToc; FILE *tarFH; pgoff_t tarFHpos; pgoff_t tarNextMember; @@ -94,7 +94,7 @@ typedef struct char *filename; } lclTocEntry; -static void _LoadBlobs(ArchiveHandle *AH); +static void _LoadLOs(ArchiveHandle *AH); static TAR_MEMBER *tarOpen(ArchiveHandle *AH, const char *filename, char mode); static void tarClose(ArchiveHandle *AH, TAR_MEMBER *th); @@ -138,10 +138,10 @@ InitArchiveFmt_Tar(ArchiveHandle *AH) AH->WriteExtraTocPtr = _WriteExtraToc; AH->PrintExtraTocPtr = _PrintExtraToc; - AH->StartBlobsPtr = _StartBlobs; - AH->StartBlobPtr = _StartBlob; - AH->EndBlobPtr = _EndBlob; - AH->EndBlobsPtr = _EndBlobs; + AH->StartLOsPtr = _StartLOs; + AH->StartLOPtr = _StartLO; + AH->EndLOPtr = _EndLO; + AH->EndLOsPtr = _EndLOs; AH->ClonePtr = NULL; AH->DeClonePtr = NULL; @@ -638,22 +638,22 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te) } if (strcmp(te->desc, "BLOBS") == 0) - _LoadBlobs(AH); + _LoadLOs(AH); else 
_PrintFileData(AH, tctx->filename); } static void -_LoadBlobs(ArchiveHandle *AH) +_LoadLOs(ArchiveHandle *AH) { Oid oid; lclContext *ctx = (lclContext *) AH->formatData; TAR_MEMBER *th; size_t cnt; - bool foundBlob = false; + bool foundLO = false; char buf[4096]; - StartRestoreBlobs(AH); + StartRestoreLOs(AH); th = tarOpen(AH, NULL, 'r'); /* Open next file */ while (th != NULL) @@ -667,15 +667,15 @@ _LoadBlobs(ArchiveHandle *AH) { pg_log_info("restoring large object with OID %u", oid); - StartRestoreBlob(AH, oid, AH->public.ropt->dropSchema); + StartRestoreLO(AH, oid, AH->public.ropt->dropSchema); while ((cnt = tarRead(buf, 4095, th)) > 0) { buf[cnt] = '\0'; ahwrite(buf, 1, cnt, AH); } - EndRestoreBlob(AH, oid); - foundBlob = true; + EndRestoreLO(AH, oid); + foundLO = true; } tarClose(AH, th); } @@ -684,18 +684,18 @@ _LoadBlobs(ArchiveHandle *AH) tarClose(AH, th); /* - * Once we have found the first blob, stop at the first non-blob + * Once we have found the first LO, stop at the first non-LO * entry (which will be 'blobs.toc'). This coding would eat all - * the rest of the archive if there are no blobs ... but this + * the rest of the archive if there are no LOs ... but this * function shouldn't be called at all in that case. */ - if (foundBlob) + if (foundLO) break; } th = tarOpen(AH, NULL, 'r'); } - EndRestoreBlobs(AH); + EndRestoreLOs(AH); } @@ -773,7 +773,7 @@ _CloseArchive(ArchiveHandle *AH) tarClose(AH, th); /* Not needed any more */ /* - * Now send the data (tables & blobs) + * Now send the data (tables & LOs) */ WriteDataChunks(AH, NULL); @@ -848,13 +848,13 @@ _scriptOut(ArchiveHandle *AH, const void *buf, size_t len) } /* - * BLOB support + * Large Object support */ /* * Called by the archiver when starting to save all BLOB DATA (not schema). * This routine should save whatever format-specific information is needed - * to read the BLOBs back into memory. + * to read the LOs back into memory. * * It is called just prior to the dumper's DataDumper routine. * @@ -862,24 +862,24 @@ _scriptOut(ArchiveHandle *AH, const void *buf, size_t len) * */ static void -_StartBlobs(ArchiveHandle *AH, TocEntry *te) +_StartLOs(ArchiveHandle *AH, TocEntry *te) { lclContext *ctx = (lclContext *) AH->formatData; char fname[K_STD_BUF_SIZE]; sprintf(fname, "blobs.toc"); - ctx->blobToc = tarOpen(AH, fname, 'w'); + ctx->loToc = tarOpen(AH, fname, 'w'); } /* - * Called by the archiver when the dumper calls StartBlob. + * Called by the archiver when the dumper calls StartLO. * * Mandatory. * * Must save the passed OID for retrieval at restore-time. */ static void -_StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid) +_StartLO(ArchiveHandle *AH, TocEntry *te, Oid oid) { lclContext *ctx = (lclContext *) AH->formatData; lclTocEntry *tctx = (lclTocEntry *) te->formatData; @@ -893,19 +893,19 @@ _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid) sprintf(fname, "blob_%u.dat", oid); - tarPrintf(ctx->blobToc, "%u %s\n", oid, fname); + tarPrintf(ctx->loToc, "%u %s\n", oid, fname); tctx->TH = tarOpen(AH, fname, 'w'); } /* - * Called by the archiver when the dumper calls EndBlob. + * Called by the archiver when the dumper calls EndLO. * * Optional. 
* */ static void -_EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid) +_EndLO(ArchiveHandle *AH, TocEntry *te, Oid oid) { lclTocEntry *tctx = (lclTocEntry *) te->formatData; @@ -919,14 +919,14 @@ _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid) * */ static void -_EndBlobs(ArchiveHandle *AH, TocEntry *te) +_EndLOs(ArchiveHandle *AH, TocEntry *te) { lclContext *ctx = (lclContext *) AH->formatData; - /* Write out a fake zero OID to mark end-of-blobs. */ + /* Write out a fake zero OID to mark end-of-LOs. */ /* WriteInt(AH, 0); */ - tarClose(AH, ctx->blobToc); + tarClose(AH, ctx->loToc); } diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index da427f4d4a17..a36ad8de46c6 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -281,9 +281,9 @@ static char *convertRegProcReference(const char *proc); static char *getFormattedOperatorName(const char *oproid); static char *convertTSFunction(Archive *fout, Oid funcOid); static const char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts); -static void getBlobs(Archive *fout); -static void dumpBlob(Archive *fout, const BlobInfo *binfo); -static int dumpBlobs(Archive *fout, const void *arg); +static void getLOs(Archive *fout); +static void dumpLO(Archive *fout, const LoInfo *loinfo); +static int dumpLOs(Archive *fout, const void *arg); static void dumpPolicy(Archive *fout, const PolicyInfo *polinfo); static void dumpPublication(Archive *fout, const PublicationInfo *pubinfo); static void dumpPublicationTable(Archive *fout, const PublicationRelInfo *pubrinfo); @@ -350,7 +350,9 @@ main(int argc, char **argv) static struct option long_options[] = { {"data-only", no_argument, NULL, 'a'}, {"blobs", no_argument, NULL, 'b'}, + {"large-objects", no_argument, NULL, 'b'}, {"no-blobs", no_argument, NULL, 'B'}, + {"no-large-objects", no_argument, NULL, 'B'}, {"clean", no_argument, NULL, 'c'}, {"create", no_argument, NULL, 'C'}, {"dbname", required_argument, NULL, 'd'}, @@ -454,12 +456,12 @@ main(int argc, char **argv) dopt.dataOnly = true; break; - case 'b': /* Dump blobs */ - dopt.outputBlobs = true; + case 'b': /* Dump LOs */ + dopt.outputLOs = true; break; - case 'B': /* Don't dump blobs */ - dopt.dontOutputBlobs = true; + case 'B': /* Don't dump LOs */ + dopt.dontOutputLOs = true; break; case 'c': /* clean (i.e., drop) schema prior to create */ @@ -808,16 +810,16 @@ main(int argc, char **argv) } /* - * Dumping blobs is the default for dumps where an inclusion switch is not - * used (an "include everything" dump). -B can be used to exclude blobs - * from those dumps. -b can be used to include blobs even when an + * Dumping LOs is the default for dumps where an inclusion switch is not + * used (an "include everything" dump). -B can be used to exclude LOs + * from those dumps. -b can be used to include LOs even when an * inclusion switch is used. * - * -s means "schema only" and blobs are data, not schema, so we never - * include blobs when -s is used. + * -s means "schema only" and LOs are data, not schema, so we never + * include LOs when -s is used. */ - if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputBlobs) - dopt.outputBlobs = true; + if (dopt.include_everything && !dopt.schemaOnly && !dopt.dontOutputLOs) + dopt.outputLOs = true; /* * Collect role names so we can map object owner OIDs to names. 
@@ -842,15 +844,15 @@ main(int argc, char **argv) getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE); /* - * In binary-upgrade mode, we do not have to worry about the actual blob + * In binary-upgrade mode, we do not have to worry about the actual LO * data or the associated metadata that resides in the pg_largeobject and * pg_largeobject_metadata tables, respectively. * - * However, we do need to collect blob information as there may be - * comments or other information on blobs that we do need to dump out. + * However, we do need to collect LO information as there may be + * comments or other information on LOs that we do need to dump out. */ - if (dopt.outputBlobs || dopt.binary_upgrade) - getBlobs(fout); + if (dopt.outputLOs || dopt.binary_upgrade) + getLOs(fout); /* * Collect dependency data to assist in ordering the objects. @@ -1005,8 +1007,8 @@ help(const char *progname) printf(_("\nOptions controlling the output content:\n")); printf(_(" -a, --data-only dump only the data, not the schema\n")); - printf(_(" -b, --blobs include large objects in dump\n")); - printf(_(" -B, --no-blobs exclude large objects in dump\n")); + printf(_(" -b, --large-objects include large objects in dump\n")); + printf(_(" -B, --no-large-objects exclude large objects in dump\n")); printf(_(" -c, --clean clean (drop) database objects before recreating\n")); printf(_(" -C, --create include commands to create database in dump\n")); printf(_(" -e, --extension=PATTERN dump the specified extension(s) only\n")); @@ -3378,16 +3380,16 @@ dumpSearchPath(Archive *AH) /* - * getBlobs: + * getLOs: * Collect schema-level data about large objects */ static void -getBlobs(Archive *fout) +getLOs(Archive *fout) { DumpOptions *dopt = fout->dopt; - PQExpBuffer blobQry = createPQExpBuffer(); - BlobInfo *binfo; - DumpableObject *bdata; + PQExpBuffer loQry = createPQExpBuffer(); + LoInfo *loinfo; + DumpableObject *lodata; PGresult *res; int ntups; int i; @@ -3398,13 +3400,13 @@ getBlobs(Archive *fout) pg_log_info("reading large objects"); - /* Fetch BLOB OIDs, and owner/ACL data */ - appendPQExpBufferStr(blobQry, + /* Fetch LO OIDs, and owner/ACL data */ + appendPQExpBufferStr(loQry, "SELECT oid, lomowner, lomacl, " "acldefault('L', lomowner) AS acldefault " "FROM pg_largeobject_metadata"); - res = ExecuteSqlQuery(fout, blobQry->data, PGRES_TUPLES_OK); + res = ExecuteSqlQuery(fout, loQry->data, PGRES_TUPLES_OK); i_oid = PQfnumber(res, "oid"); i_lomowner = PQfnumber(res, "lomowner"); @@ -3416,38 +3418,38 @@ getBlobs(Archive *fout) /* * Each large object has its own BLOB archive entry. 
*/ - binfo = (BlobInfo *) pg_malloc(ntups * sizeof(BlobInfo)); + loinfo = (LoInfo *) pg_malloc(ntups * sizeof(LoInfo)); for (i = 0; i < ntups; i++) { - binfo[i].dobj.objType = DO_BLOB; - binfo[i].dobj.catId.tableoid = LargeObjectRelationId; - binfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid)); - AssignDumpId(&binfo[i].dobj); + loinfo[i].dobj.objType = DO_LARGE_OBJECT; + loinfo[i].dobj.catId.tableoid = LargeObjectRelationId; + loinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid)); + AssignDumpId(&loinfo[i].dobj); - binfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid)); - binfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_lomacl)); - binfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault)); - binfo[i].dacl.privtype = 0; - binfo[i].dacl.initprivs = NULL; - binfo[i].rolname = getRoleName(PQgetvalue(res, i, i_lomowner)); + loinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_oid)); + loinfo[i].dacl.acl = pg_strdup(PQgetvalue(res, i, i_lomacl)); + loinfo[i].dacl.acldefault = pg_strdup(PQgetvalue(res, i, i_acldefault)); + loinfo[i].dacl.privtype = 0; + loinfo[i].dacl.initprivs = NULL; + loinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_lomowner)); - /* Blobs have data */ - binfo[i].dobj.components |= DUMP_COMPONENT_DATA; + /* LOs have data */ + loinfo[i].dobj.components |= DUMP_COMPONENT_DATA; - /* Mark whether blob has an ACL */ + /* Mark whether LO has an ACL */ if (!PQgetisnull(res, i, i_lomacl)) - binfo[i].dobj.components |= DUMP_COMPONENT_ACL; + loinfo[i].dobj.components |= DUMP_COMPONENT_ACL; /* - * In binary-upgrade mode for blobs, we do *not* dump out the blob + * In binary-upgrade mode for LOs, we do *not* dump out the LO * data, as it will be copied by pg_upgrade, which simply copies the * pg_largeobject table. We *do* however dump out anything but the * data, as pg_upgrade copies just pg_largeobject, but not * pg_largeobject_metadata, after the dump is restored. 
*/ if (dopt->binary_upgrade) - binfo[i].dobj.dump &= ~DUMP_COMPONENT_DATA; + loinfo[i].dobj.dump &= ~DUMP_COMPONENT_DATA; } /* @@ -3456,77 +3458,77 @@ getBlobs(Archive *fout) */ if (ntups > 0) { - bdata = (DumpableObject *) pg_malloc(sizeof(DumpableObject)); - bdata->objType = DO_BLOB_DATA; - bdata->catId = nilCatalogId; - AssignDumpId(bdata); - bdata->name = pg_strdup("BLOBS"); - bdata->components |= DUMP_COMPONENT_DATA; + lodata = (DumpableObject *) pg_malloc(sizeof(DumpableObject)); + lodata->objType = DO_LARGE_OBJECT_DATA; + lodata->catId = nilCatalogId; + AssignDumpId(lodata); + lodata->name = pg_strdup("BLOBS"); + lodata->components |= DUMP_COMPONENT_DATA; } PQclear(res); - destroyPQExpBuffer(blobQry); + destroyPQExpBuffer(loQry); } /* - * dumpBlob + * dumpLO * * dump the definition (metadata) of the given large object */ static void -dumpBlob(Archive *fout, const BlobInfo *binfo) +dumpLO(Archive *fout, const LoInfo *loinfo) { PQExpBuffer cquery = createPQExpBuffer(); PQExpBuffer dquery = createPQExpBuffer(); appendPQExpBuffer(cquery, "SELECT pg_catalog.lo_create('%s');\n", - binfo->dobj.name); + loinfo->dobj.name); appendPQExpBuffer(dquery, "SELECT pg_catalog.lo_unlink('%s');\n", - binfo->dobj.name); + loinfo->dobj.name); - if (binfo->dobj.dump & DUMP_COMPONENT_DEFINITION) - ArchiveEntry(fout, binfo->dobj.catId, binfo->dobj.dumpId, - ARCHIVE_OPTS(.tag = binfo->dobj.name, - .owner = binfo->rolname, + if (loinfo->dobj.dump & DUMP_COMPONENT_DEFINITION) + ArchiveEntry(fout, loinfo->dobj.catId, loinfo->dobj.dumpId, + ARCHIVE_OPTS(.tag = loinfo->dobj.name, + .owner = loinfo->rolname, .description = "BLOB", .section = SECTION_PRE_DATA, .createStmt = cquery->data, .dropStmt = dquery->data)); /* Dump comment if any */ - if (binfo->dobj.dump & DUMP_COMPONENT_COMMENT) - dumpComment(fout, "LARGE OBJECT", binfo->dobj.name, - NULL, binfo->rolname, - binfo->dobj.catId, 0, binfo->dobj.dumpId); + if (loinfo->dobj.dump & DUMP_COMPONENT_COMMENT) + dumpComment(fout, "LARGE OBJECT", loinfo->dobj.name, + NULL, loinfo->rolname, + loinfo->dobj.catId, 0, loinfo->dobj.dumpId); /* Dump security label if any */ - if (binfo->dobj.dump & DUMP_COMPONENT_SECLABEL) - dumpSecLabel(fout, "LARGE OBJECT", binfo->dobj.name, - NULL, binfo->rolname, - binfo->dobj.catId, 0, binfo->dobj.dumpId); + if (loinfo->dobj.dump & DUMP_COMPONENT_SECLABEL) + dumpSecLabel(fout, "LARGE OBJECT", loinfo->dobj.name, + NULL, loinfo->rolname, + loinfo->dobj.catId, 0, loinfo->dobj.dumpId); /* Dump ACL if any */ - if (binfo->dobj.dump & DUMP_COMPONENT_ACL) - dumpACL(fout, binfo->dobj.dumpId, InvalidDumpId, "LARGE OBJECT", - binfo->dobj.name, NULL, - NULL, binfo->rolname, &binfo->dacl); + if (loinfo->dobj.dump & DUMP_COMPONENT_ACL) + dumpACL(fout, loinfo->dobj.dumpId, InvalidDumpId, "LARGE OBJECT", + loinfo->dobj.name, NULL, + NULL, loinfo->rolname, &loinfo->dacl); destroyPQExpBuffer(cquery); destroyPQExpBuffer(dquery); } /* - * dumpBlobs: + * dumpLOs: * dump the data contents of all large objects */ static int -dumpBlobs(Archive *fout, const void *arg) +dumpLOs(Archive *fout, const void *arg) { - const char *blobQry; - const char *blobFetchQry; + const char *loQry; + const char *loFetchQry; PGconn *conn = GetConnection(fout); PGresult *res; char buf[LOBBUFSIZE]; @@ -3537,38 +3539,38 @@ dumpBlobs(Archive *fout, const void *arg) pg_log_info("saving large objects"); /* - * Currently, we re-fetch all BLOB OIDs using a cursor. Consider scanning + * Currently, we re-fetch all LO OIDs using a cursor. 
Consider scanning * the already-in-memory dumpable objects instead... */ - blobQry = - "DECLARE bloboid CURSOR FOR " + loQry = + "DECLARE looid CURSOR FOR " "SELECT oid FROM pg_largeobject_metadata ORDER BY 1"; - ExecuteSqlStatement(fout, blobQry); + ExecuteSqlStatement(fout, loQry); /* Command to fetch from cursor */ - blobFetchQry = "FETCH 1000 IN bloboid"; + loFetchQry = "FETCH 1000 IN looid"; do { /* Do a fetch */ - res = ExecuteSqlQuery(fout, blobFetchQry, PGRES_TUPLES_OK); + res = ExecuteSqlQuery(fout, loFetchQry, PGRES_TUPLES_OK); /* Process the tuples, if any */ ntups = PQntuples(res); for (i = 0; i < ntups; i++) { - Oid blobOid; + Oid loOid; int loFd; - blobOid = atooid(PQgetvalue(res, i, 0)); - /* Open the BLOB */ - loFd = lo_open(conn, blobOid, INV_READ); + loOid = atooid(PQgetvalue(res, i, 0)); + /* Open the LO */ + loFd = lo_open(conn, loOid, INV_READ); if (loFd == -1) pg_fatal("could not open large object %u: %s", - blobOid, PQerrorMessage(conn)); + loOid, PQerrorMessage(conn)); - StartBlob(fout, blobOid); + StartLO(fout, loOid); /* Now read it in chunks, sending data to archive */ do @@ -3576,14 +3578,14 @@ dumpBlobs(Archive *fout, const void *arg) cnt = lo_read(conn, loFd, buf, LOBBUFSIZE); if (cnt < 0) pg_fatal("error reading large object %u: %s", - blobOid, PQerrorMessage(conn)); + loOid, PQerrorMessage(conn)); WriteData(fout, buf, cnt); } while (cnt > 0); lo_close(conn, loFd); - EndBlob(fout, blobOid); + EndLO(fout, loOid); } PQclear(res); @@ -9452,7 +9454,7 @@ dumpCommentExtended(Archive *fout, const char *type, if (dopt->no_comments) return; - /* Comments are schema not data ... except blob comments are data */ + /* Comments are schema not data ... except LO comments are data */ if (strcmp(type, "LARGE OBJECT") != 0) { if (dopt->dataOnly) @@ -9460,7 +9462,7 @@ dumpCommentExtended(Archive *fout, const char *type, } else { - /* We do dump blob comments in binary-upgrade mode */ + /* We do dump LO comments in binary-upgrade mode */ if (dopt->schemaOnly && !dopt->binary_upgrade) return; } @@ -9940,10 +9942,10 @@ dumpDumpableObject(Archive *fout, DumpableObject *dobj) case DO_DEFAULT_ACL: dumpDefaultACL(fout, (const DefaultACLInfo *) dobj); break; - case DO_BLOB: - dumpBlob(fout, (const BlobInfo *) dobj); + case DO_LARGE_OBJECT: + dumpLO(fout, (const LoInfo *) dobj); break; - case DO_BLOB_DATA: + case DO_LARGE_OBJECT_DATA: if (dobj->dump & DUMP_COMPONENT_DATA) { TocEntry *te; @@ -9952,19 +9954,19 @@ dumpDumpableObject(Archive *fout, DumpableObject *dobj) ARCHIVE_OPTS(.tag = dobj->name, .description = "BLOBS", .section = SECTION_DATA, - .dumpFn = dumpBlobs)); + .dumpFn = dumpLOs)); /* * Set the TocEntry's dataLength in case we are doing a * parallel dump and want to order dump jobs by table size. * (We need some size estimate for every TocEntry with a * DataDumper function.) We don't currently have any cheap - * way to estimate the size of blobs, but it doesn't matter; + * way to estimate the size of LOs, but it doesn't matter; * let's just set the size to a large value so parallel dumps - * will launch this job first. If there's lots of blobs, we + * will launch this job first. If there's lots of LOs, we * win, and if there aren't, we don't lose much. (If you want * to improve on this, really what you should be thinking - * about is allowing blob dumping to be parallelized, not just + * about is allowing LO dumping to be parallelized, not just * getting a smarter estimate for the single TOC entry.) 
*/ te->dataLength = INT_MAX; @@ -14436,7 +14438,7 @@ dumpACL(Archive *fout, DumpId objDumpId, DumpId altDumpId, if (dopt->aclsSkip) return InvalidDumpId; - /* --data-only skips ACLs *except* BLOB ACLs */ + /* --data-only skips ACLs *except* large object ACLs */ if (dopt->dataOnly && strcmp(type, "LARGE OBJECT") != 0) return InvalidDumpId; @@ -14558,7 +14560,7 @@ dumpSecLabel(Archive *fout, const char *type, const char *name, if (dopt->no_security_labels) return; - /* Security labels are schema not data ... except blob labels are data */ + /* Security labels are schema not data ... except large object labels are data */ if (strcmp(type, "LARGE OBJECT") != 0) { if (dopt->dataOnly) @@ -14566,7 +14568,7 @@ dumpSecLabel(Archive *fout, const char *type, const char *name, } else { - /* We do dump blob security labels in binary-upgrade mode */ + /* We do dump large object security labels in binary-upgrade mode */ if (dopt->schemaOnly && !dopt->binary_upgrade) return; } @@ -17914,13 +17916,13 @@ addBoundaryDependencies(DumpableObject **dobjs, int numObjs, case DO_FDW: case DO_FOREIGN_SERVER: case DO_TRANSFORM: - case DO_BLOB: + case DO_LARGE_OBJECT: /* Pre-data objects: must come before the pre-data boundary */ addObjectDependency(preDataBound, dobj->dumpId); break; case DO_TABLE_DATA: case DO_SEQUENCE_SET: - case DO_BLOB_DATA: + case DO_LARGE_OBJECT_DATA: /* Data objects: must come between the boundaries */ addObjectDependency(dobj, preDataBound->dumpId); addObjectDependency(postDataBound, dobj->dumpId); diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h index 427f5d45f65b..436ac5bb9860 100644 --- a/src/bin/pg_dump/pg_dump.h +++ b/src/bin/pg_dump/pg_dump.h @@ -72,8 +72,8 @@ typedef enum DO_FOREIGN_SERVER, DO_DEFAULT_ACL, DO_TRANSFORM, - DO_BLOB, - DO_BLOB_DATA, + DO_LARGE_OBJECT, + DO_LARGE_OBJECT_DATA, DO_PRE_DATA_BOUNDARY, DO_POST_DATA_BOUNDARY, DO_EVENT_TRIGGER, @@ -582,12 +582,12 @@ typedef struct _defaultACLInfo char defaclobjtype; } DefaultACLInfo; -typedef struct _blobInfo +typedef struct _loInfo { DumpableObject dobj; DumpableAcl dacl; const char *rolname; -} BlobInfo; +} LoInfo; /* * The PolicyInfo struct is used to represent policies on a table and diff --git a/src/bin/pg_dump/pg_dump_sort.c b/src/bin/pg_dump/pg_dump_sort.c index 5de3241eb496..31cee46f3eba 100644 --- a/src/bin/pg_dump/pg_dump_sort.c +++ b/src/bin/pg_dump/pg_dump_sort.c @@ -75,11 +75,11 @@ enum dbObjectTypePriorities PRIO_TABLE_ATTACH, PRIO_DUMMY_TYPE, PRIO_ATTRDEF, - PRIO_BLOB, + PRIO_LARGE_OBJECT, PRIO_PRE_DATA_BOUNDARY, /* boundary! */ PRIO_TABLE_DATA, PRIO_SEQUENCE_SET, - PRIO_BLOB_DATA, + PRIO_LARGE_OBJECT_DATA, PRIO_POST_DATA_BOUNDARY, /* boundary! 
*/ PRIO_CONSTRAINT, PRIO_INDEX, @@ -136,8 +136,8 @@ static const int dbObjectTypePriority[] = PRIO_FOREIGN_SERVER, /* DO_FOREIGN_SERVER */ PRIO_DEFAULT_ACL, /* DO_DEFAULT_ACL */ PRIO_TRANSFORM, /* DO_TRANSFORM */ - PRIO_BLOB, /* DO_BLOB */ - PRIO_BLOB_DATA, /* DO_BLOB_DATA */ + PRIO_LARGE_OBJECT, /* DO_LARGE_OBJECT */ + PRIO_LARGE_OBJECT_DATA, /* DO_LARGE_OBJECT_DATA */ PRIO_PRE_DATA_BOUNDARY, /* DO_PRE_DATA_BOUNDARY */ PRIO_POST_DATA_BOUNDARY, /* DO_POST_DATA_BOUNDARY */ PRIO_EVENT_TRIGGER, /* DO_EVENT_TRIGGER */ @@ -1463,14 +1463,14 @@ describeDumpableObject(DumpableObject *obj, char *buf, int bufsize) "DEFAULT ACL %s (ID %d OID %u)", obj->name, obj->dumpId, obj->catId.oid); return; - case DO_BLOB: + case DO_LARGE_OBJECT: snprintf(buf, bufsize, - "BLOB (ID %d OID %u)", + "LARGE OBJECT (ID %d OID %u)", obj->dumpId, obj->catId.oid); return; - case DO_BLOB_DATA: + case DO_LARGE_OBJECT_DATA: snprintf(buf, bufsize, - "BLOB DATA (ID %d)", + "LARGE OBJECT DATA (ID %d)", obj->dumpId); return; case DO_POLICY: diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl index fe53ed0f89eb..63fc2a8687aa 100644 --- a/src/bin/pg_dump/t/002_pg_dump.pl +++ b/src/bin/pg_dump/t/002_pg_dump.pl @@ -315,10 +315,10 @@ '--no-toast-compression', 'postgres', ], }, - no_blobs => { + no_large_objects => { dump_cmd => [ 'pg_dump', '--no-sync', - "--file=$tempdir/no_blobs.sql", '-B', + "--file=$tempdir/no_large_objects.sql", '-B', 'postgres', ], }, @@ -427,9 +427,9 @@ '--section=post-data', '--no-sync', 'postgres', ], }, - test_schema_plus_blobs => { + test_schema_plus_large_objects => { dump_cmd => [ - 'pg_dump', "--file=$tempdir/test_schema_plus_blobs.sql", + 'pg_dump', "--file=$tempdir/test_schema_plus_large_objects.sql", '--schema=dump_test', '-b', '--no-sync', 'postgres', ], @@ -478,10 +478,10 @@ # Tests which target the 'dump_test' schema, specifically. my %dump_test_schema_runs = ( only_dump_test_schema => 1, - test_schema_plus_blobs => 1,); + test_schema_plus_large_objects => 1,); # Tests which are considered 'full' dumps by pg_dump, but there # are flags used to exclude specific items (ACLs, LOs, etc). 
my %full_runs = ( binary_upgrade => 1, clean => 1, @@ -493,7 +493,7 @@ exclude_test_table => 1, exclude_test_table_data => 1, no_toast_compression => 1, - no_blobs => 1, + no_large_objects => 1, no_owner => 1, no_privs => 1, no_table_access_method => 1, @@ -690,10 +690,10 @@ data_only => 1, inserts => 1, section_pre_data => 1, - test_schema_plus_blobs => 1, + test_schema_plus_large_objects => 1, }, unlike => { - no_blobs => 1, + no_large_objects => 1, no_owner => 1, schema_only => 1, }, @@ -1027,7 +1027,7 @@ }, }, - 'BLOB create (using lo_from_bytea)' => { + 'LO create (using lo_from_bytea)' => { create_order => 50, create_sql => 'SELECT pg_catalog.lo_from_bytea(0, \'\\x310a320a330a340a350a360a370a380a390a\');', @@ -1038,15 +1038,15 @@ data_only => 1, inserts => 1, section_pre_data => 1, - test_schema_plus_blobs => 1, + test_schema_plus_large_objects => 1, }, unlike => { schema_only => 1, - no_blobs => 1, + no_large_objects => 1, }, }, - 'BLOB load (using lo_from_bytea)' => { + 'LO load (using lo_from_bytea)' => { regexp => qr/^ \QSELECT pg_catalog.lo_open\E \('\d+',\ \d+\);\n \QSELECT pg_catalog.lowrite(0, \E @@ -1059,11 +1059,11 @@ data_only => 1, inserts => 1, section_data => 1, - test_schema_plus_blobs => 1, + test_schema_plus_large_objects => 1, }, unlike => { binary_upgrade => 1, - no_blobs => 1, + no_large_objects => 1, schema_only => 1, }, }, @@ -1211,10 +1211,10 @@ data_only => 1, inserts => 1, section_pre_data => 1, - test_schema_plus_blobs => 1, + test_schema_plus_large_objects => 1, }, unlike => { - no_blobs => 1, + no_large_objects => 1, schema_only => 1, }, }, @@ -3188,7 +3188,7 @@ exclude_test_table => 1, exclude_test_table_data => 1, no_toast_compression => 1, - no_blobs => 1, + no_large_objects => 1, no_privs => 1, no_owner => 1, no_table_access_method => 1, @@ -3197,7 +3197,7 @@ pg_dumpall_exclude => 1, schema_only => 1, section_post_data => 1, - test_schema_plus_blobs => 1, + test_schema_plus_large_objects => 1, }, unlike => { exclude_dump_test_schema => 1, @@ -3263,7 +3263,7 @@ exclude_test_table => 1, exclude_test_table_data => 1, no_toast_compression => 1, - no_blobs => 1, + no_large_objects => 1, no_privs => 1, no_owner => 1, no_table_access_method => 1, @@ -3279,7 +3279,7 @@ pg_dumpall_globals => 1, pg_dumpall_globals_clean => 1, section_pre_data => 1, - test_schema_plus_blobs => 1, + test_schema_plus_large_objects => 1, }, }, @@ -3606,11 +3606,11 @@ data_only => 1, inserts => 1, section_pre_data => 1, - test_schema_plus_blobs => 1, + test_schema_plus_large_objects => 1, binary_upgrade => 1, }, unlike => { - no_blobs => 1, + no_large_objects => 1, no_privs => 1, schema_only => 1, }, base-commit: d2a4490401f2ddc878134a1fd75ef4482e403e47 -- 2.38.1
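As the pg_backup_directory.c hunks above show, the on-disk names blobs.toc and blob_<oid>.dat are deliberately left unchanged, so directory-format archives stay compatible in both directions; each blobs.toc line is "<oid> <filename>". Below is a self-contained sketch of that line convention as parsed by _LoadLOs(), with an invented sample OID; it mirrors, but is not, the pg_dump source.

/*
 * Sketch of the blobs.toc line format kept by the patch: one large
 * object per line, "<oid> <filename>", parsed with sscanf() as in
 * pg_backup_directory.c's _LoadLOs().  Standalone example; the sample
 * line below is hypothetical.
 */
#include <stdio.h>

#define MAXPGPATH 1024			/* matches PostgreSQL's definition */
#define CppAsString(x) #x
#define CppAsString2(x) CppAsString(x)

int
main(void)
{
	const char *line = "16394 blob_16394.dat\n";	/* hypothetical entry */
	unsigned int oid;			/* Oid is an unsigned int in PostgreSQL */
	char		lofname[MAXPGPATH + 1];

	/* Can't overflow because the scanned field width is capped at MAXPGPATH */
	if (sscanf(line, "%u %" CppAsString2(MAXPGPATH) "s\n", &oid, lofname) != 2)
	{
		fprintf(stderr, "invalid blobs.toc line: \"%s\"\n", line);
		return 1;
	}
	printf("would restore large object %u from file \"%s\"\n", oid, lofname);
	return 0;
}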