From 005a1217fbae15f3d84206a7b42b715c80f96a47 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sat, 6 Dec 2003 03:00:16 +0000 Subject: [PATCH] Massive overhaul of pg_dump: make use of dependency information from pg_depend to determine a safe dump order. Defaults and check constraints can be emitted either as part of a table or domain definition, or separately if that's needed to break a dependency loop. Lots of old half-baked code for controlling dump order removed. --- doc/src/sgml/ref/pg_restore.sgml | 56 +- src/bin/pg_dump/Makefile | 8 +- src/bin/pg_dump/common.c | 605 +-- src/bin/pg_dump/pg_backup.h | 38 +- src/bin/pg_dump/pg_backup_archiver.c | 548 +-- src/bin/pg_dump/pg_backup_archiver.h | 32 +- src/bin/pg_dump/pg_backup_custom.c | 14 +- src/bin/pg_dump/pg_backup_files.c | 8 +- src/bin/pg_dump/pg_backup_null.c | 4 +- src/bin/pg_dump/pg_backup_tar.c | 8 +- src/bin/pg_dump/pg_dump.c | 5372 ++++++++++++++------------ src/bin/pg_dump/pg_dump.h | 276 +- src/bin/pg_dump/pg_dump_sort.c | 727 ++++ src/bin/pg_dump/pg_restore.c | 32 +- 14 files changed, 4427 insertions(+), 3301 deletions(-) create mode 100644 src/bin/pg_dump/pg_dump_sort.c diff --git a/doc/src/sgml/ref/pg_restore.sgml b/doc/src/sgml/ref/pg_restore.sgml index 49dcf1ee65..20486ccca1 100644 --- a/doc/src/sgml/ref/pg_restore.sgml +++ b/doc/src/sgml/ref/pg_restore.sgml @@ -1,4 +1,4 @@ - + @@ -227,35 +227,6 @@ - - - - - - Restore items in the order they were originally generated within - pg_dump. This option has no known - practical use, since pg_dump generates - the items in an order convenient to it, which is unlikely to be a - safe order for restoring them. (This is not the order - in which the items are ultimately listed in the archive's table of - contents.) See also - - - - - - - - - Restore items in order by OID. This option is of limited usefulness, - since OID is only an approximate indication of original creation - order. This option overrides - - - @@ -287,31 +258,6 @@ - - - - - - Rearrange items by object type (this occurs after the sorting - specified by or , if - given). The rearrangement is intended to give the best possible - restore performance. 
- - - - When none of , , and - - - - diff --git a/src/bin/pg_dump/Makefile b/src/bin/pg_dump/Makefile index 05f06a7ff3..5e36f7f082 100644 --- a/src/bin/pg_dump/Makefile +++ b/src/bin/pg_dump/Makefile @@ -5,7 +5,7 @@ # Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group # Portions Copyright (c) 1994, Regents of the University of California # -# $PostgreSQL: pgsql/src/bin/pg_dump/Makefile,v 1.41 2003/11/29 19:52:04 pgsql Exp $ +# $PostgreSQL: pgsql/src/bin/pg_dump/Makefile,v 1.42 2003/12/06 03:00:11 tgl Exp $ # #------------------------------------------------------------------------- @@ -24,8 +24,8 @@ override CPPFLAGS := -I$(libpq_srcdir) $(CPPFLAGS) -DBINDIR=\"$(bindir)\" all: submake-libpq submake-libpgport submake-backend pg_dump pg_restore pg_dumpall -pg_dump: pg_dump.o common.o $(OBJS) $(libpq_builddir)/libpq.a - $(CC) $(CFLAGS) pg_dump.o common.o $(OBJS) $(EXTRA_OBJS) $(libpq) $(LDFLAGS) $(LIBS) -o $@ +pg_dump: pg_dump.o common.o pg_dump_sort.o $(OBJS) $(libpq_builddir)/libpq.a + $(CC) $(CFLAGS) pg_dump.o common.o pg_dump_sort.o $(OBJS) $(EXTRA_OBJS) $(libpq) $(LDFLAGS) $(LIBS) -o $@ pg_restore: pg_restore.o $(OBJS) $(libpq_builddir)/libpq.a $(CC) $(CFLAGS) pg_restore.o $(OBJS) $(EXTRA_OBJS) $(libpq) $(LDFLAGS) $(LIBS) -o $@ @@ -50,4 +50,4 @@ uninstall: rm -f $(addprefix $(DESTDIR)$(bindir)/, pg_dump$(X) pg_restore$(X) pg_dumpall$(X)) clean distclean maintainer-clean: - rm -f pg_dump$(X) pg_restore$(X) pg_dumpall$(X) $(OBJS) pg_dump.o common.o pg_restore.o pg_dumpall.o + rm -f pg_dump$(X) pg_restore$(X) pg_dumpall$(X) $(OBJS) pg_dump.o common.o pg_dump_sort.o pg_restore.o pg_dumpall.o diff --git a/src/bin/pg_dump/common.c b/src/bin/pg_dump/common.c index 21777958c6..81d9cc284c 100644 --- a/src/bin/pg_dump/common.c +++ b/src/bin/pg_dump/common.c @@ -11,7 +11,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/bin/pg_dump/common.c,v 1.77 2003/11/29 19:52:04 pgsql Exp $ + * $PostgreSQL: pgsql/src/bin/pg_dump/common.c,v 1.78 2003/12/06 03:00:11 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -29,10 +29,30 @@ #include "strdup.h" #endif -static void findParentsByOid(TableInfo *tblinfo, int numTables, - InhInfo *inhinfo, int numInherits, - const char *oid, - int *numParentsPtr, int **parentIndexes); + +/* + * Variables for mapping DumpId to DumpableObject + */ +static DumpableObject **dumpIdMap = NULL; +static int allocedDumpIds = 0; +static DumpId lastDumpId = 0; + +/* + * These variables are static to avoid the notational cruft of having to pass + * them into findTableByOid() and friends. 
+ */ +static TableInfo *tblinfo; +static TypeInfo *tinfo; +static FuncInfo *finfo; +static OprInfo *oprinfo; +static int numTables; +static int numTypes; +static int numFuncs; +static int numOperators; + + +static void findParentsByOid(TableInfo *self, + InhInfo *inhinfo, int numInherits); static void flagInhTables(TableInfo *tbinfo, int numTables, InhInfo *inhinfo, int numInherits); static void flagInhAttrs(TableInfo *tbinfo, int numTables, @@ -41,48 +61,48 @@ static int strInArray(const char *pattern, char **arr, int arr_size); /* - * dumpSchema: - * we have a valid connection, we are now going to dump the schema - * into the file + * getSchemaData + * Collect information about all potentially dumpable objects */ - TableInfo * -dumpSchema(Archive *fout, - int *numTablesPtr, - const bool aclsSkip, - const bool schemaOnly, - const bool dataOnly) +getSchemaData(int *numTablesPtr, + const bool schemaOnly, + const bool dataOnly) { - int numNamespaces; - int numTypes; - int numFuncs; - int numTables; - int numInherits; - int numAggregates; - int numOperators; - int numOpclasses; - int numConversions; NamespaceInfo *nsinfo; - TypeInfo *tinfo; - FuncInfo *finfo; AggInfo *agginfo; - TableInfo *tblinfo; InhInfo *inhinfo; - OprInfo *oprinfo; + RuleInfo *ruleinfo; + ProcLangInfo *proclanginfo; + CastInfo *castinfo; OpclassInfo *opcinfo; - ConvInfo *convinfo; + ConvInfo *convinfo; + int numNamespaces; + int numAggregates; + int numInherits; + int numRules; + int numProcLangs; + int numCasts; + int numOpclasses; + int numConversions; if (g_verbose) write_msg(NULL, "reading schemas\n"); nsinfo = getNamespaces(&numNamespaces); + if (g_verbose) + write_msg(NULL, "reading user-defined functions\n"); + finfo = getFuncs(&numFuncs); + + /* this must be after getFuncs */ if (g_verbose) write_msg(NULL, "reading user-defined types\n"); tinfo = getTypes(&numTypes); + /* this must be after getFuncs, too */ if (g_verbose) - write_msg(NULL, "reading user-defined functions\n"); - finfo = getFuncs(&numFuncs); + write_msg(NULL, "reading procedural languages\n"); + proclanginfo = getProcLangs(&numProcLangs); if (g_verbose) write_msg(NULL, "reading user-defined aggregate functions\n"); @@ -108,6 +128,14 @@ dumpSchema(Archive *fout, write_msg(NULL, "reading table inheritance information\n"); inhinfo = getInherits(&numInherits); + if (g_verbose) + write_msg(NULL, "reading rewrite rules\n"); + ruleinfo = getRules(&numRules); + + if (g_verbose) + write_msg(NULL, "reading type casts\n"); + castinfo = getCasts(&numCasts); + /* Link tables to parents, mark parents of target tables interesting */ if (g_verbose) write_msg(NULL, "finding inheritance relationships\n"); @@ -121,94 +149,24 @@ dumpSchema(Archive *fout, write_msg(NULL, "flagging inherited columns in subtables\n"); flagInhAttrs(tblinfo, numTables, inhinfo, numInherits); - if (!dataOnly) - { - if (g_verbose) - write_msg(NULL, "dumping out database comment\n"); - dumpDBComment(fout); - } - - if (!dataOnly) - { - if (g_verbose) - write_msg(NULL, "dumping out user-defined schemas\n"); - dumpNamespaces(fout, nsinfo, numNamespaces); - } - - if (!dataOnly) - { - if (g_verbose) - write_msg(NULL, "dumping out user-defined types\n"); - dumpTypes(fout, finfo, numFuncs, tinfo, numTypes); - } - if (g_verbose) - write_msg(NULL, "dumping out tables\n"); - dumpTables(fout, tblinfo, numTables, - aclsSkip, schemaOnly, dataOnly); - - if (!dataOnly) - { - if (g_verbose) - write_msg(NULL, "dumping out indexes\n"); - dumpIndexes(fout, tblinfo, numTables); - } - - if (!dataOnly) - { - if 
(g_verbose) - write_msg(NULL, "dumping out user-defined procedural languages\n"); - dumpProcLangs(fout, finfo, numFuncs); - } - - if (!dataOnly) - { - if (g_verbose) - write_msg(NULL, "dumping out user-defined functions\n"); - dumpFuncs(fout, finfo, numFuncs); - } - - if (!dataOnly) - { - if (g_verbose) - write_msg(NULL, "dumping out user-defined aggregate functions\n"); - dumpAggs(fout, agginfo, numAggregates); - } + write_msg(NULL, "reading indexes\n"); + getIndexes(tblinfo, numTables); - if (!dataOnly) - { - if (g_verbose) - write_msg(NULL, "dumping out user-defined operators\n"); - dumpOprs(fout, oprinfo, numOperators); - } - - if (!dataOnly) - { - if (g_verbose) - write_msg(NULL, "dumping out user-defined operator classes\n"); - dumpOpclasses(fout, opcinfo, numOpclasses); - } - - if (!dataOnly) - { - if (g_verbose) - write_msg(NULL, "dumping out user-defined casts\n"); - dumpCasts(fout, finfo, numFuncs, tinfo, numTypes); - } + if (g_verbose) + write_msg(NULL, "reading constraints\n"); + getConstraints(tblinfo, numTables); - if (!dataOnly) - { - if (g_verbose) - write_msg(NULL, "dumping out user-defined conversions\n"); - dumpConversions(fout, convinfo, numConversions); - } + if (g_verbose) + write_msg(NULL, "reading triggers\n"); + getTriggers(tblinfo, numTables); *numTablesPtr = numTables; return tblinfo; } /* flagInhTables - - * Fill in parentIndexes fields of every target table, and mark + * Fill in parent link fields of every target table, and mark * parents of target tables as interesting * * Note that only direct ancestors of targets are marked interesting. @@ -224,7 +182,7 @@ flagInhTables(TableInfo *tblinfo, int numTables, int i, j; int numParents; - int *parentIndexes; + TableInfo **parents; for (i = 0; i < numTables; i++) { @@ -238,21 +196,13 @@ flagInhTables(TableInfo *tblinfo, int numTables, continue; /* Find all the immediate parent tables */ - findParentsByOid(tblinfo, numTables, - inhinfo, numInherits, - tblinfo[i].oid, - &tblinfo[i].numParents, - &tblinfo[i].parentIndexes); - numParents = tblinfo[i].numParents; - parentIndexes = tblinfo[i].parentIndexes; + findParentsByOid(&tblinfo[i], inhinfo, numInherits); /* Mark the parents as interesting for getTableAttrs */ + numParents = tblinfo[i].numParents; + parents = tblinfo[i].parents; for (j = 0; j < numParents; j++) - { - int parentInd = parentIndexes[j]; - - tblinfo[parentInd].interesting = true; - } + parents[j]->interesting = true; } } @@ -269,30 +219,25 @@ flagInhAttrs(TableInfo *tblinfo, int numTables, int i, j, k; - int parentInd; - int inhAttrInd; - int numParents; - int *parentIndexes; - bool foundAttr; /* Attr was found in a parent */ - bool foundNotNull; /* Attr was NOT NULL in a parent */ - bool defaultsMatch; /* All non-empty defaults match */ - bool defaultsFound; /* Found a default in a parent */ - char *attrDef; - char *inhDef; for (i = 0; i < numTables; i++) { + TableInfo *tbinfo = &(tblinfo[i]); + int numParents; + TableInfo **parents; + TableInfo *parent; + /* Sequences and views never have parents */ - if (tblinfo[i].relkind == RELKIND_SEQUENCE || - tblinfo[i].relkind == RELKIND_VIEW) + if (tbinfo->relkind == RELKIND_SEQUENCE || + tbinfo->relkind == RELKIND_VIEW) continue; /* Don't bother computing anything for non-target tables, either */ - if (!tblinfo[i].dump) + if (!tbinfo->dump) continue; - numParents = tblinfo[i].numParents; - parentIndexes = tblinfo[i].parentIndexes; + numParents = tbinfo->numParents; + parents = tbinfo->parents; if (numParents == 0) continue; /* nothing to see here, move along 
*/ @@ -310,35 +255,45 @@ flagInhAttrs(TableInfo *tblinfo, int numTables, * See discussion on -hackers around 2-Apr-2001. *---------------------------------------------------------------- */ - for (j = 0; j < tblinfo[i].numatts; j++) + for (j = 0; j < tbinfo->numatts; j++) { + bool foundAttr; /* Attr was found in a parent */ + bool foundNotNull; /* Attr was NOT NULL in a parent */ + bool defaultsMatch; /* All non-empty defaults match */ + bool defaultsFound; /* Found a default in a parent */ + AttrDefInfo *attrDef; + foundAttr = false; foundNotNull = false; defaultsMatch = true; defaultsFound = false; - attrDef = tblinfo[i].adef_expr[j]; + attrDef = tbinfo->attrdefs[j]; for (k = 0; k < numParents; k++) { - parentInd = parentIndexes[k]; - inhAttrInd = strInArray(tblinfo[i].attnames[j], - tblinfo[parentInd].attnames, - tblinfo[parentInd].numatts); + int inhAttrInd; + + parent = parents[k]; + inhAttrInd = strInArray(tbinfo->attnames[j], + parent->attnames, + parent->numatts); if (inhAttrInd != -1) { foundAttr = true; - foundNotNull |= tblinfo[parentInd].notnull[inhAttrInd]; + foundNotNull |= parent->notnull[inhAttrInd]; if (attrDef != NULL) /* If we have a default, * check parent */ { - inhDef = tblinfo[parentInd].adef_expr[inhAttrInd]; + AttrDefInfo *inhDef; + inhDef = parent->attrdefs[inhAttrInd]; if (inhDef != NULL) { defaultsFound = true; - defaultsMatch &= (strcmp(attrDef, inhDef) == 0); + defaultsMatch &= (strcmp(attrDef->adef_expr, + inhDef->adef_expr) == 0); } } } @@ -351,9 +306,9 @@ flagInhAttrs(TableInfo *tblinfo, int numTables, if (foundAttr) /* Attr was inherited */ { /* Set inherited flag by default */ - tblinfo[i].inhAttrs[j] = true; - tblinfo[i].inhAttrDef[j] = true; - tblinfo[i].inhNotNull[j] = true; + tbinfo->inhAttrs[j] = true; + tbinfo->inhAttrDef[j] = true; + tbinfo->inhNotNull[j] = true; /* * Clear it if attr had a default, but parents did not, or @@ -361,181 +316,377 @@ flagInhAttrs(TableInfo *tblinfo, int numTables, */ if ((attrDef != NULL) && (!defaultsFound || !defaultsMatch)) { - tblinfo[i].inhAttrs[j] = false; - tblinfo[i].inhAttrDef[j] = false; + tbinfo->inhAttrs[j] = false; + tbinfo->inhAttrDef[j] = false; } /* * Clear it if NOT NULL and none of the parents were NOT * NULL */ - if (tblinfo[i].notnull[j] && !foundNotNull) + if (tbinfo->notnull[j] && !foundNotNull) { - tblinfo[i].inhAttrs[j] = false; - tblinfo[i].inhNotNull[j] = false; + tbinfo->inhAttrs[j] = false; + tbinfo->inhNotNull[j] = false; } /* Clear it if attr has local definition */ - if (g_fout->remoteVersion >= 70300 && tblinfo[i].attislocal[j]) - tblinfo[i].inhAttrs[j] = false; + if (tbinfo->attislocal[j]) + tbinfo->inhAttrs[j] = false; + } + } + + /* + * Check for inherited CHECK constraints. We assume a constraint + * is inherited if its expression matches the parent and the name + * is the same, *or* both names start with '$'. 
+ */ + for (j = 0; j < tbinfo->ncheck; j++) + { + ConstraintInfo *constr; + + constr = &(tbinfo->checkexprs[j]); + + for (k = 0; k < numParents; k++) + { + int l; + + parent = parents[k]; + for (l = 0; l < parent->ncheck; l++) + { + ConstraintInfo *pconstr; + + pconstr = &(parent->checkexprs[l]); + if (strcmp(pconstr->condef, constr->condef) != 0) + continue; + if (strcmp(pconstr->conname, constr->conname) == 0 || + (pconstr->conname[0] == '$' && + constr->conname[0] == '$')) + { + constr->coninherited = true; + break; + } + } + if (constr->coninherited) + break; } } } } +/* + * AssignDumpId + * Given a newly-created dumpable object, assign a dump ID, + * and enter the object into the lookup table. + * + * The caller is expected to have filled in objType and catalogId, + * but not any of the other standard fields of a DumpableObject. + */ +void +AssignDumpId(DumpableObject *dobj) +{ + dobj->dumpId = ++lastDumpId; + dobj->dependencies = NULL; + dobj->nDeps = 0; + dobj->allocDeps = 0; + + while (dobj->dumpId >= allocedDumpIds) + { + int newAlloc; + + if (allocedDumpIds <= 0) + { + newAlloc = 256; + dumpIdMap = (DumpableObject **) + malloc(newAlloc * sizeof(DumpableObject *)); + } + else + { + newAlloc = allocedDumpIds * 2; + dumpIdMap = (DumpableObject **) + realloc(dumpIdMap, newAlloc * sizeof(DumpableObject *)); + } + if (dumpIdMap == NULL) + exit_horribly(NULL, NULL, "out of memory\n"); + memset(dumpIdMap + allocedDumpIds, 0, + (newAlloc - allocedDumpIds) * sizeof(DumpableObject *)); + allocedDumpIds = newAlloc; + } + dumpIdMap[dobj->dumpId] = dobj; +} /* - * findTableByOid - * finds the index (in tblinfo) of the table with the given oid - * returns -1 if not found + * Assign a DumpId that's not tied to a DumpableObject. + * + * This is used when creating a "fixed" ArchiveEntry that doesn't need to + * participate in the sorting logic. + */ +DumpId +createDumpId(void) +{ + return ++lastDumpId; +} + +/* + * Return the largest DumpId so far assigned + */ +DumpId +getMaxDumpId(void) +{ + return lastDumpId; +} + +/* + * Find a DumpableObject by dump ID + * + * Returns NULL for invalid ID + */ +DumpableObject * +findObjectByDumpId(DumpId dumpId) +{ + if (dumpId <= 0 || dumpId >= allocedDumpIds) + return NULL; /* out of range? */ + return dumpIdMap[dumpId]; +} + +/* + * Find a DumpableObject by catalog ID + * + * Returns NULL for unknown ID * * NOTE: should hash this, but just do linear search for now */ -int -findTableByOid(TableInfo *tblinfo, int numTables, const char *oid) +DumpableObject * +findObjectByCatalogId(CatalogId catalogId) +{ + DumpId i; + + for (i = 1; i < allocedDumpIds; i++) + { + DumpableObject *dobj = dumpIdMap[i]; + + if (dobj && + dobj->catId.tableoid == catalogId.tableoid && + dobj->catId.oid == catalogId.oid) + return dobj; + } + return NULL; +} + +/* + * Build an array of pointers to all known dumpable objects + * + * This simply creates a modifiable copy of the internal map. 
+ */ +void +getDumpableObjects(DumpableObject ***objs, int *numObjs) +{ + int i, + j; + + *objs = (DumpableObject **) + malloc(allocedDumpIds * sizeof(DumpableObject *)); + if (*objs == NULL) + exit_horribly(NULL, NULL, "out of memory\n"); + j = 0; + for (i = 1; i < allocedDumpIds; i++) + { + if (dumpIdMap[i]) + (*objs)[j++] = dumpIdMap[i]; + } + *numObjs = j; +} + +/* + * Add a dependency link to a DumpableObject + * + * Note: duplicate dependencies are currently not eliminated + */ +void +addObjectDependency(DumpableObject *dobj, DumpId refId) +{ + if (dobj->nDeps >= dobj->allocDeps) + { + if (dobj->allocDeps <= 0) + { + dobj->allocDeps = 16; + dobj->dependencies = (DumpId *) + malloc(dobj->allocDeps * sizeof(DumpId)); + } + else + { + dobj->allocDeps *= 2; + dobj->dependencies = (DumpId *) + realloc(dobj->dependencies, + dobj->allocDeps * sizeof(DumpId)); + } + if (dobj->dependencies == NULL) + exit_horribly(NULL, NULL, "out of memory\n"); + } + dobj->dependencies[dobj->nDeps++] = refId; +} + +/* + * Remove a dependency link from a DumpableObject + * + * If there are multiple links, all are removed + */ +void +removeObjectDependency(DumpableObject *dobj, DumpId refId) { int i; + int j = 0; - for (i = 0; i < numTables; i++) + for (i = 0; i < dobj->nDeps; i++) { - if (strcmp(tblinfo[i].oid, oid) == 0) - return i; + if (dobj->dependencies[i] != refId) + dobj->dependencies[j++] = dobj->dependencies[i]; } - return -1; + dobj->nDeps = j; } /* - * findFuncByOid - * finds the index (in finfo) of the function with the given OID - * returns -1 if not found + * findTableByOid + * finds the entry (in tblinfo) of the table with the given oid + * returns NULL if not found * * NOTE: should hash this, but just do linear search for now */ -int -findFuncByOid(FuncInfo *finfo, int numFuncs, const char *oid) +TableInfo * +findTableByOid(Oid oid) { int i; - for (i = 0; i < numFuncs; i++) + for (i = 0; i < numTables; i++) { - if (strcmp(finfo[i].oid, oid) == 0) - return i; + if (tblinfo[i].dobj.catId.oid == oid) + return &tblinfo[i]; } - return -1; + return NULL; } /* - * Finds the index (in tinfo) of the type with the given OID. Returns - * -1 if not found. 
+ * findTypeByOid + * finds the entry (in tinfo) of the type with the given oid + * returns NULL if not found + * + * NOTE: should hash this, but just do linear search for now */ -int -findTypeByOid(TypeInfo *tinfo, int numTypes, const char *oid) +TypeInfo * +findTypeByOid(Oid oid) { int i; for (i = 0; i < numTypes; i++) { - if (strcmp(tinfo[i].oid, oid) == 0) - return i; + if (tinfo[i].dobj.catId.oid == oid) + return &tinfo[i]; } - return -1; + return NULL; } /* - * findOprByOid - * given the oid of an operator, return the name of the operator + * findFuncByOid + * finds the entry (in finfo) of the function with the given oid + * returns NULL if not found * * NOTE: should hash this, but just do linear search for now */ -char * -findOprByOid(OprInfo *oprinfo, int numOprs, const char *oid) +FuncInfo * +findFuncByOid(Oid oid) { int i; - for (i = 0; i < numOprs; i++) + for (i = 0; i < numFuncs; i++) { - if (strcmp(oprinfo[i].oid, oid) == 0) - return oprinfo[i].oprname; + if (finfo[i].dobj.catId.oid == oid) + return &finfo[i]; } + return NULL; +} - /* should never get here */ - write_msg(NULL, "failed sanity check, operator with OID %s not found\n", oid); +/* + * findOprByOid + * finds the entry (in oprinfo) of the operator with the given oid + * returns NULL if not found + * + * NOTE: should hash this, but just do linear search for now + */ +OprInfo * +findOprByOid(Oid oid) +{ + int i; - /* no suitable operator name was found */ - return (NULL); + for (i = 0; i < numOperators; i++) + { + if (oprinfo[i].dobj.catId.oid == oid) + return &oprinfo[i]; + } + return NULL; } /* * findParentsByOid - * given the oid of a class, find its parent classes in tblinfo[] - * - * Returns the number of parents and their array indexes into the - * last two arguments. + * find a table's parents in tblinfo[] */ - static void -findParentsByOid(TableInfo *tblinfo, int numTables, - InhInfo *inhinfo, int numInherits, - const char *oid, - int *numParentsPtr, int **parentIndexes) +findParentsByOid(TableInfo *self, + InhInfo *inhinfo, int numInherits) { + Oid oid = self->dobj.catId.oid; int i, j; - int parentInd, - selfInd; int numParents; numParents = 0; for (i = 0; i < numInherits; i++) { - if (strcmp(inhinfo[i].inhrelid, oid) == 0) + if (inhinfo[i].inhrelid == oid) numParents++; } - *numParentsPtr = numParents; + self->numParents = numParents; if (numParents > 0) { - *parentIndexes = (int *) malloc(sizeof(int) * numParents); + self->parents = (TableInfo **) malloc(sizeof(TableInfo *) * numParents); j = 0; for (i = 0; i < numInherits; i++) { - if (strcmp(inhinfo[i].inhrelid, oid) == 0) + if (inhinfo[i].inhrelid == oid) { - parentInd = findTableByOid(tblinfo, numTables, - inhinfo[i].inhparent); - if (parentInd < 0) - { - selfInd = findTableByOid(tblinfo, numTables, oid); - if (selfInd >= 0) - write_msg(NULL, "failed sanity check, parent OID %s of table \"%s\" (OID %s) not found\n", - inhinfo[i].inhparent, - tblinfo[selfInd].relname, - oid); - else - write_msg(NULL, "failed sanity check, parent OID %s of table (OID %s) not found\n", - inhinfo[i].inhparent, - oid); + TableInfo *parent; + parent = findTableByOid(inhinfo[i].inhparent); + if (parent == NULL) + { + write_msg(NULL, "failed sanity check, parent OID %u of table \"%s\" (OID %u) not found\n", + inhinfo[i].inhparent, + self->relname, + oid); exit_nicely(); } - (*parentIndexes)[j++] = parentInd; + self->parents[j++] = parent; } } } else - *parentIndexes = NULL; + self->parents = NULL; } /* - * parseNumericArray + * parseOidArray * parse a string of numbers delimited 
by spaces into a character array + * + * Note: actually this is used for both Oids and potentially-signed + * attribute numbers. This should cause no trouble, but we could split + * the function into two functions with different argument types if it does. */ void -parseNumericArray(const char *str, char **array, int arraysize) +parseOidArray(const char *str, Oid *array, int arraysize) { int j, argNum; @@ -557,7 +708,7 @@ parseNumericArray(const char *str, char **array, int arraysize) exit_nicely(); } temp[j] = '\0'; - array[argNum++] = strdup(temp); + array[argNum++] = atooid(temp); j = 0; } if (s == '\0') @@ -576,7 +727,7 @@ parseNumericArray(const char *str, char **array, int arraysize) } while (argNum < arraysize) - array[argNum++] = strdup("0"); + array[argNum++] = InvalidOid; } diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h index 881fef5afa..e59ef846e0 100644 --- a/src/bin/pg_dump/pg_backup.h +++ b/src/bin/pg_dump/pg_backup.h @@ -15,19 +15,22 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup.h,v 1.27 2003/11/29 19:52:05 pgsql Exp $ + * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup.h,v 1.28 2003/12/06 03:00:11 tgl Exp $ * *------------------------------------------------------------------------- */ -#ifndef PG_BACKUP__ -#define PG_BACKUP__ +#ifndef PG_BACKUP_H +#define PG_BACKUP_H #include "postgres_fe.h" +#include "pg_dump.h" + #include "libpq-fe.h" #include "pqexpbuffer.h" + #define atooid(x) ((Oid) strtoul((x), NULL, 10)) #define oidcmp(x,y) ( ((x) < (y) ? -1 : ((x) > (y)) ? 1 : 0) ) #define oideq(x,y) ( (x) == (y) ) @@ -45,7 +48,7 @@ typedef enum _archiveFormat } ArchiveFormat; /* - * We may want to have so user-readbale data, but in the mean + * We may want to have some more user-readable data, but in the mean * time this gives us some abstraction and type checking. 
*/ typedef struct _Archive @@ -57,7 +60,7 @@ typedef struct _Archive /* The rest is private */ } Archive; -typedef int (*DataDumperPtr) (Archive *AH, char *oid, void *userArg); +typedef int (*DataDumperPtr) (Archive *AH, void *userArg); typedef struct _restoreOptions { @@ -74,9 +77,6 @@ typedef struct _restoreOptions int aclsSkip; int tocSummary; char *tocFile; - int oidOrder; - int origOrder; - int rearrange; int format; char *formatName; @@ -98,8 +98,8 @@ typedef struct _restoreOptions int ignoreVersion; int requirePassword; - int *idWanted; - int limitToList; + bool *idWanted; + bool limitToList; int compression; int suppressDumpWarnings; /* Suppress output of WARNING @@ -127,11 +127,13 @@ PGconn *ConnectDatabase(Archive *AH, /* Called to add a TOC entry */ -extern void ArchiveEntry(Archive *AHX, const char *oid, const char *tag, +extern void ArchiveEntry(Archive *AHX, + CatalogId catalogId, DumpId dumpId, + const char *tag, const char *namespace, const char *owner, - const char *desc, const char *((*deps)[]), - const char *defn, const char *dropStmt, - const char *copyStmt, + const char *desc, const char *defn, + const char *dropStmt, const char *copyStmt, + const DumpId *deps, int nDeps, DataDumperPtr dumpFn, void *dumpArg); /* Called to write *data* to the archive */ @@ -161,19 +163,13 @@ extern void PrintTOCSummary(Archive *AH, RestoreOptions *ropt); extern RestoreOptions *NewRestoreOptions(void); /* Rearrange TOC entries */ -extern void MoveToStart(Archive *AH, const char *oType); -extern void MoveToEnd(Archive *AH, const char *oType); -extern void SortTocByObjectType(Archive *AH); -extern void SortTocByOID(Archive *AH); -extern void SortTocByID(Archive *AH); extern void SortTocFromFile(Archive *AH, RestoreOptions *ropt); /* Convenience functions used only when writing DATA */ extern int archputs(const char *s, Archive *AH); -extern int archputc(const char c, Archive *AH); extern int archprintf(Archive *AH, const char *fmt,...) 
/* This extension allows gcc to check the format string */ __attribute__((format(printf, 2, 3))); -#endif +#endif /* PG_BACKUP_H */ diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c index 698c1c86d0..ffb07484a4 100644 --- a/src/bin/pg_dump/pg_backup_archiver.c +++ b/src/bin/pg_dump/pg_backup_archiver.c @@ -15,7 +15,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.80 2003/11/29 19:52:05 pgsql Exp $ + * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.81 2003/12/06 03:00:11 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -41,9 +41,10 @@ typedef enum _teReqs_ REQ_ALL = REQ_SCHEMA + REQ_DATA } teReqs; -static void _SortToc(ArchiveHandle *AH, TocSortCompareFn fn); -static int _tocSortCompareByOIDNum(const void *p1, const void *p2); -static int _tocSortCompareByIDNum(const void *p1, const void *p2); +const char *progname; +static char *modulename = gettext_noop("archiver"); + + static ArchiveHandle *_allocAH(const char *FileSpec, const ArchiveFormat fmt, const int compression, ArchiveMode mode); static int _printTocEntry(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt, bool isData); @@ -57,15 +58,9 @@ static void _selectOutputSchema(ArchiveHandle *AH, const char *schemaName); static teReqs _tocEntryRequired(TocEntry *te, RestoreOptions *ropt); static void _disableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt); static void _enableTriggersIfNecessary(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt); -static TocEntry *_getTocEntry(ArchiveHandle *AH, int id); +static TocEntry *getTocEntryByDumpId(ArchiveHandle *AH, DumpId id); static void _moveAfter(ArchiveHandle *AH, TocEntry *pos, TocEntry *te); -static void _moveBefore(ArchiveHandle *AH, TocEntry *pos, TocEntry *te); static int _discoverArchiveFormat(ArchiveHandle *AH); -static void _fixupOidInfo(TocEntry *te); -static Oid _findMaxOID(const char *((*deps)[])); - -const char *progname; -static char *modulename = gettext_noop("archiver"); static void _write_msg(const char *modulename, const char *fmt, va_list ap); static void _die_horribly(ArchiveHandle *AH, const char *modulename, const char *fmt, va_list ap); @@ -73,6 +68,7 @@ static void _die_horribly(ArchiveHandle *AH, const char *modulename, const char static int _canRestoreBlobs(ArchiveHandle *AH); static int _restoringToDB(ArchiveHandle *AH); + /* * Wrapper functions. 
* @@ -534,29 +530,33 @@ WriteData(Archive *AHX, const void *data, size_t dLen) /* Public */ void -ArchiveEntry(Archive *AHX, const char *oid, const char *tag, +ArchiveEntry(Archive *AHX, + CatalogId catalogId, DumpId dumpId, + const char *tag, const char *namespace, const char *owner, - const char *desc, const char *((*deps)[]), - const char *defn, const char *dropStmt, - const char *copyStmt, + const char *desc, const char *defn, + const char *dropStmt, const char *copyStmt, + const DumpId *deps, int nDeps, DataDumperPtr dumpFn, void *dumpArg) { ArchiveHandle *AH = (ArchiveHandle *) AHX; TocEntry *newToc; - AH->lastID++; - AH->tocCount++; - newToc = (TocEntry *) calloc(1, sizeof(TocEntry)); if (!newToc) die_horribly(AH, modulename, "out of memory\n"); + AH->tocCount++; + if (dumpId > AH->maxDumpId) + AH->maxDumpId = dumpId; + newToc->prev = AH->toc->prev; newToc->next = AH->toc; AH->toc->prev->next = newToc; AH->toc->prev = newToc; - newToc->id = AH->lastID; + newToc->catalogId = catalogId; + newToc->dumpId = dumpId; newToc->tag = strdup(tag); newToc->namespace = namespace ? strdup(namespace) : NULL; @@ -566,24 +566,26 @@ ArchiveEntry(Archive *AHX, const char *oid, const char *tag, newToc->dropStmt = strdup(dropStmt); newToc->copyStmt = copyStmt ? strdup(copyStmt) : NULL; - newToc->oid = strdup(oid); - newToc->depOid = deps; /* NB: not copied */ - _fixupOidInfo(newToc); + if (nDeps > 0) + { + newToc->dependencies = (DumpId *) malloc(nDeps * sizeof(DumpId)); + memcpy(newToc->dependencies, deps, nDeps * sizeof(DumpId)); + newToc->nDeps = nDeps; + } + else + { + newToc->dependencies = NULL; + newToc->nDeps = 0; + } - newToc->printed = 0; - newToc->formatData = NULL; newToc->dataDumper = dumpFn; newToc->dataDumperArg = dumpArg; + newToc->hadDumper = dumpFn ? true : false; - newToc->hadDumper = dumpFn ? 1 : 0; + newToc->formatData = NULL; - if (AH->ArchiveEntryPtr !=NULL) + if (AH->ArchiveEntryPtr != NULL) (*AH->ArchiveEntryPtr) (AH, newToc); - - /* - * printf("New toc owned by '%s', oid %u\n", newToc->owner, - * newToc->oidVal); - */ } /* Public */ @@ -627,7 +629,9 @@ PrintTOCSummary(Archive *AHX, RestoreOptions *ropt) while (te != AH->toc) { if (_tocEntryRequired(te, ropt) != 0) - ahprintf(AH, "%d; %d %s %s %s\n", te->id, te->oidVal, te->desc, te->tag, te->owner); + ahprintf(AH, "%d; %u %u %s %s %s\n", te->dumpId, + te->catalogId.tableoid, te->catalogId.oid, + te->desc, te->tag, te->owner); te = te->next; } @@ -781,127 +785,6 @@ EndRestoreBlob(ArchiveHandle *AH, Oid oid) * Sorting and Reordering ***********/ -/* - * Move TOC entries of the specified type to the START of the TOC. - * - * This is public, but if you use it anywhere but SortTocByObjectType, - * you are risking breaking things. - */ -void -MoveToStart(Archive *AHX, const char *oType) -{ - ArchiveHandle *AH = (ArchiveHandle *) AHX; - TocEntry *te = AH->toc->next; - TocEntry *newTe; - - while (te != AH->toc) - { - te->_moved = 0; - te = te->next; - } - - te = AH->toc->prev; - while (te != AH->toc && !te->_moved) - { - newTe = te->prev; - if (strcmp(te->desc, oType) == 0) - _moveAfter(AH, AH->toc, te); - te = newTe; - } -} - - -/* - * Move TOC entries of the specified type to the end of the TOC. - * - * This is public, but if you use it anywhere but SortTocByObjectType, - * you are risking breaking things. 
- */ -void -MoveToEnd(Archive *AHX, const char *oType) -{ - ArchiveHandle *AH = (ArchiveHandle *) AHX; - TocEntry *te = AH->toc->next; - TocEntry *newTe; - - while (te != AH->toc) - { - te->_moved = 0; - te = te->next; - } - - te = AH->toc->next; - while (te != AH->toc && !te->_moved) - { - newTe = te->next; - if (strcmp(te->desc, oType) == 0) - _moveBefore(AH, AH->toc, te); - te = newTe; - } -} - -/* - * Sort TOC by object type (items of same type keep same relative order) - * - * This is factored out to ensure that pg_dump and pg_restore stay in sync - * about the standard ordering. - */ -void -SortTocByObjectType(Archive *AH) -{ - /* - * Procedural languages have to be declared just after database and - * schema creation, before they are used. - */ - MoveToStart(AH, "ACL LANGUAGE"); - MoveToStart(AH, "PROCEDURAL LANGUAGE"); - MoveToStart(AH, "FUNC PROCEDURAL LANGUAGE"); - MoveToStart(AH, "SCHEMA"); - MoveToStart(AH, ""); - /* Database entries *must* be at front (see also pg_restore.c) */ - MoveToStart(AH, "DATABASE"); - - MoveToEnd(AH, "TABLE DATA"); - MoveToEnd(AH, "BLOBS"); - MoveToEnd(AH, "INDEX"); - MoveToEnd(AH, "CONSTRAINT"); - MoveToEnd(AH, "FK CONSTRAINT"); - MoveToEnd(AH, "TRIGGER"); - MoveToEnd(AH, "RULE"); - MoveToEnd(AH, "SEQUENCE SET"); - - /* - * Moving all comments to end is annoying, but must do it for comments - * on stuff we just moved, and we don't seem to have quite enough - * dependency structure to get it really right... - */ - MoveToEnd(AH, "COMMENT"); -} - -/* - * Sort TOC by OID - */ -/* Public */ -void -SortTocByOID(Archive *AHX) -{ - ArchiveHandle *AH = (ArchiveHandle *) AHX; - - _SortToc(AH, _tocSortCompareByOIDNum); -} - -/* - * Sort TOC by ID - */ -/* Public */ -void -SortTocByID(Archive *AHX) -{ - ArchiveHandle *AH = (ArchiveHandle *) AHX; - - _SortToc(AH, _tocSortCompareByIDNum); -} - void SortTocFromFile(Archive *AHX, RestoreOptions *ropt) { @@ -910,25 +793,14 @@ SortTocFromFile(Archive *AHX, RestoreOptions *ropt) char buf[1024]; char *cmnt; char *endptr; - int id; + DumpId id; TocEntry *te; TocEntry *tePrev; - int i; /* Allocate space for the 'wanted' array, and init it */ - ropt->idWanted = (int *) malloc(sizeof(int) * AH->tocCount); - for (i = 0; i < AH->tocCount; i++) - ropt->idWanted[i] = 0; - - ropt->limitToList = 1; - - /* Mark all entries as 'not moved' */ - te = AH->toc->next; - while (te != AH->toc) - { - te->_moved = 0; - te = te->next; - } + ropt->idWanted = (bool *) malloc(sizeof(bool) * AH->maxDumpId); + memset(ropt->idWanted, 0, sizeof(bool) * AH->maxDumpId); + ropt->limitToList = true; /* Set prev entry as head of list */ tePrev = AH->toc; @@ -955,25 +827,27 @@ SortTocFromFile(Archive *AHX, RestoreOptions *ropt) /* Get an ID */ id = strtol(buf, &endptr, 10); - if (endptr == buf) + if (endptr == buf || id <= 0 || id > AH->maxDumpId) { write_msg(modulename, "WARNING: line ignored: %s\n", buf); continue; } /* Find TOC entry */ - te = _getTocEntry(AH, id); + te = getTocEntryByDumpId(AH, id); if (!te) - die_horribly(AH, modulename, "could not find entry for ID %d\n", id); + die_horribly(AH, modulename, "could not find entry for ID %d\n", + id); - ropt->idWanted[id - 1] = 1; + ropt->idWanted[id - 1] = true; _moveAfter(AH, tePrev, te); tePrev = te; } if (fclose(fh) != 0) - die_horribly(AH, modulename, "could not close TOC file: %s\n", strerror(errno)); + die_horribly(AH, modulename, "could not close TOC file: %s\n", + strerror(errno)); } /********************** @@ -988,13 +862,6 @@ archputs(const char *s, Archive *AH) return WriteData(AH, s, 
strlen(s)); } -/* Public */ -int -archputc(const char c, Archive *AH) -{ - return WriteData(AH, &c, 1); -} - /* Public */ int archprintf(Archive *AH, const char *fmt,...) @@ -1007,9 +874,6 @@ archprintf(Archive *AH, const char *fmt,...) /* * This is paranoid: deal with the possibility that vsnprintf is * willing to ignore trailing null - */ - - /* * or returns > 0 even if string does not fit. It may be the case that * it returns cnt = bufsize */ @@ -1287,6 +1151,7 @@ exit_horribly(Archive *AH, const char *modulename, const char *fmt,...) va_start(ap, fmt); _die_horribly((ArchiveHandle *) AH, modulename, fmt, ap); + va_end(ap); } /* Archiver use (just different arg declaration) */ @@ -1297,6 +1162,7 @@ die_horribly(ArchiveHandle *AH, const char *modulename, const char *fmt,...) va_start(ap, fmt); _die_horribly(AH, modulename, fmt, ap); + va_end(ap); } @@ -1311,10 +1177,10 @@ _moveAfter(ArchiveHandle *AH, TocEntry *pos, TocEntry *te) pos->next->prev = te; pos->next = te; - - te->_moved = 1; } +#ifdef NOT_USED + static void _moveBefore(ArchiveHandle *AH, TocEntry *pos, TocEntry *te) { @@ -1325,19 +1191,19 @@ _moveBefore(ArchiveHandle *AH, TocEntry *pos, TocEntry *te) te->next = pos; pos->prev->next = te; pos->prev = te; - - te->_moved = 1; } +#endif + static TocEntry * -_getTocEntry(ArchiveHandle *AH, int id) +getTocEntryByDumpId(ArchiveHandle *AH, DumpId id) { TocEntry *te; te = AH->toc->next; while (te != AH->toc) { - if (te->id == id) + if (te->dumpId == id) return te; te = te->next; } @@ -1345,9 +1211,9 @@ _getTocEntry(ArchiveHandle *AH, int id) } int -TocIDRequired(ArchiveHandle *AH, int id, RestoreOptions *ropt) +TocIDRequired(ArchiveHandle *AH, DumpId id, RestoreOptions *ropt) { - TocEntry *te = _getTocEntry(AH, id); + TocEntry *te = getTocEntryByDumpId(AH, id); if (!te) return 0; @@ -1685,7 +1551,6 @@ _allocAH(const char *FileSpec, const ArchiveFormat fmt, AH->intSize = sizeof(int); AH->offSize = sizeof(off_t); - AH->lastID = 0; if (FileSpec) { AH->fSpec = strdup(FileSpec); @@ -1795,7 +1660,7 @@ WriteDataChunks(ArchiveHandle *AH) * The user-provided DataDumper routine needs to call * AH->WriteData */ - (*te->dataDumper) ((Archive *) AH, te->oid, te->dataDumperArg); + (*te->dataDumper) ((Archive *) AH, te->dataDumperArg); if (endPtr != NULL) (*endPtr) (AH, te); @@ -1808,18 +1673,24 @@ WriteDataChunks(ArchiveHandle *AH) void WriteToc(ArchiveHandle *AH) { - TocEntry *te = AH->toc->next; - const char *dep; + TocEntry *te; + char workbuf[32]; int i; /* printf("%d TOC Entries to save\n", AH->tocCount); */ WriteInt(AH, AH->tocCount); - while (te != AH->toc) + + for (te = AH->toc->next; te != AH->toc; te = te->next) { - WriteInt(AH, te->id); + WriteInt(AH, te->dumpId); WriteInt(AH, te->dataDumper ? 
1 : 0); - WriteStr(AH, te->oid); + + /* OID is recorded as a string for historical reasons */ + sprintf(workbuf, "%u", te->catalogId.tableoid); + WriteStr(AH, workbuf); + sprintf(workbuf, "%u", te->catalogId.oid); + WriteStr(AH, workbuf); WriteStr(AH, te->tag); WriteStr(AH, te->desc); @@ -1830,17 +1701,15 @@ WriteToc(ArchiveHandle *AH) WriteStr(AH, te->owner); /* Dump list of dependencies */ - if (te->depOid != NULL) + for (i = 0; i < te->nDeps; i++) { - i = 0; - while ((dep = (*te->depOid)[i++]) != NULL) - WriteStr(AH, dep); + sprintf(workbuf, "%d", te->dependencies[i]); + WriteStr(AH, workbuf); } WriteStr(AH, NULL); /* Terminate List */ if (AH->WriteExtraTocPtr) (*AH->WriteExtraTocPtr) (AH, te); - te = te->next; } } @@ -1848,27 +1717,43 @@ void ReadToc(ArchiveHandle *AH) { int i; - char *((*deps)[]); + char *tmp; + DumpId *deps; int depIdx; int depSize; TocEntry *te = AH->toc->next; AH->tocCount = ReadInt(AH); + AH->maxDumpId = 0; for (i = 0; i < AH->tocCount; i++) { - te = (TocEntry *) calloc(1, sizeof(TocEntry)); - te->id = ReadInt(AH); + te->dumpId = ReadInt(AH); + + if (te->dumpId > AH->maxDumpId) + AH->maxDumpId = te->dumpId; /* Sanity check */ - if (te->id <= 0 || te->id > AH->tocCount) - die_horribly(AH, modulename, "entry ID %d out of range -- perhaps a corrupt TOC\n", te->id); + if (te->dumpId <= 0) + die_horribly(AH, modulename, + "entry ID %d out of range -- perhaps a corrupt TOC\n", + te->dumpId); te->hadDumper = ReadInt(AH); - te->oid = ReadStr(AH); - te->oidVal = atooid(te->oid); + + if (AH->version >= K_VERS_1_8) + { + tmp = ReadStr(AH); + sscanf(tmp, "%u", &te->catalogId.tableoid); + free(tmp); + } + else + te->catalogId.tableoid = InvalidOid; + tmp = ReadStr(AH); + sscanf(tmp, "%u", &te->catalogId.oid); + free(tmp); te->tag = ReadStr(AH); te->desc = ReadStr(AH); @@ -1887,41 +1772,47 @@ ReadToc(ArchiveHandle *AH) if (AH->version >= K_VERS_1_5) { depSize = 100; - deps = malloc(sizeof(char *) * depSize); + deps = (DumpId *) malloc(sizeof(DumpId) * depSize); depIdx = 0; - do + for (;;) { + tmp = ReadStr(AH); + if (!tmp) + break; /* end of list */ if (depIdx >= depSize) { depSize *= 2; - deps = realloc(deps, sizeof(char *) * depSize); + deps = (DumpId *) realloc(deps, sizeof(DumpId) * depSize); } - (*deps)[depIdx] = ReadStr(AH); -#if 0 - if ((*deps)[depIdx]) - write_msg(modulename, "read dependency for %s -> %s\n", - te->tag, (*deps)[depIdx]); -#endif - } while ((*deps)[depIdx++] != NULL); + sscanf(tmp, "%d", &deps[depIdx]); + free(tmp); + depIdx++; + } - if (depIdx > 1) /* We have a non-null entry */ - te->depOid = realloc(deps, sizeof(char *) * depIdx); /* trim it */ + if (depIdx > 0) /* We have a non-null entry */ + { + deps = (DumpId *) realloc(deps, sizeof(DumpId) * depIdx); + te->dependencies = deps; + te->nDeps = depIdx; + } else { free(deps); - te->depOid = NULL; /* no deps */ + te->dependencies = NULL; + te->nDeps = 0; } } else - te->depOid = NULL; - - /* Set maxOidVal etc for use in sorting */ - _fixupOidInfo(te); + { + te->dependencies = NULL; + te->nDeps = 0; + } if (AH->ReadExtraTocPtr) (*AH->ReadExtraTocPtr) (AH, te); - ahlog(AH, 3, "read TOC entry %d (ID %d) for %s %s\n", i, te->id, te->desc, te->tag); + ahlog(AH, 3, "read TOC entry %d (ID %d) for %s %s\n", + i, te->dumpId, te->desc, te->tag); te->prev = AH->toc->prev; AH->toc->prev->next = te; @@ -2013,7 +1904,7 @@ _tocEntryRequired(TocEntry *te, RestoreOptions *ropt) res = res & ~REQ_SCHEMA; /* Finally, if we used a list, limit based on that as well */ - if (ropt->limitToList && !ropt->idWanted[te->id - 1]) + 
if (ropt->limitToList && !ropt->idWanted[te->dumpId - 1]) return 0; return res; @@ -2190,7 +2081,7 @@ _selectOutputSchema(ArchiveHandle *AH, const char *schemaName) static int _printTocEntry(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt, bool isData) { - char *pfx; + const char *pfx; /* Select owner and schema as necessary */ _becomeOwner(AH, te); @@ -2208,8 +2099,23 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt, bool isDat else pfx = ""; - ahprintf(AH, "--\n-- %sTOC entry %d (OID %s)\n-- Name: %s; Type: %s; Schema: %s; Owner: %s\n", - pfx, te->id, te->oid, te->tag, te->desc, + ahprintf(AH, "--\n"); + if (AH->public.verbose) + { + ahprintf(AH, "-- TOC entry %d (class %u OID %u)\n", + te->dumpId, te->catalogId.tableoid, te->catalogId.oid); + if (te->nDeps > 0) + { + int i; + + ahprintf(AH, "-- Dependencies:"); + for (i = 0; i < te->nDeps; i++) + ahprintf(AH, " %d", te->dependencies[i]); + ahprintf(AH, "\n"); + } + } + ahprintf(AH, "-- %sName: %s; Type: %s; Schema: %s; Owner: %s\n", + pfx, te->tag, te->desc, te->namespace ? te->namespace : "-", te->owner); if (AH->PrintExtraTocPtr !=NULL) @@ -2381,181 +2287,3 @@ checkSeek(FILE *fp) else return true; } - - -static void -_SortToc(ArchiveHandle *AH, TocSortCompareFn fn) -{ - TocEntry **tea; - TocEntry *te; - int i; - - /* Allocate an array for quicksort (TOC size + head & foot) */ - tea = (TocEntry **) malloc(sizeof(TocEntry *) * (AH->tocCount + 2)); - - /* Build array of toc entries, including header at start and end */ - te = AH->toc; - for (i = 0; i <= AH->tocCount + 1; i++) - { - /* - * printf("%d: %x (%x, %x) - %u\n", i, te, te->prev, te->next, - * te->oidVal); - */ - tea[i] = te; - te = te->next; - } - - /* Sort it, but ignore the header entries */ - qsort(&(tea[1]), AH->tocCount, sizeof(TocEntry *), fn); - - /* Rebuild list: this works because we have headers at each end */ - for (i = 1; i <= AH->tocCount; i++) - { - tea[i]->next = tea[i + 1]; - tea[i]->prev = tea[i - 1]; - } - - - te = AH->toc; - for (i = 0; i <= AH->tocCount + 1; i++) - { - /* - * printf("%d: %x (%x, %x) - %u\n", i, te, te->prev, te->next, - * te->oidVal); - */ - te = te->next; - } - - - AH->toc->next = tea[1]; - AH->toc->prev = tea[AH->tocCount]; -} - -static int -_tocSortCompareByOIDNum(const void *p1, const void *p2) -{ - TocEntry *te1 = *(TocEntry **) p1; - TocEntry *te2 = *(TocEntry **) p2; - Oid id1 = te1->maxOidVal; - Oid id2 = te2->maxOidVal; - int cmpval; - - /* printf("Comparing %u to %u\n", id1, id2); */ - - cmpval = oidcmp(id1, id2); - - /* If we have a deterministic answer, return it. */ - if (cmpval != 0) - return cmpval; - - /* More comparisons required */ - if (oideq(id1, te1->maxDepOidVal)) /* maxOid1 came from deps */ - { - if (oideq(id2, te2->maxDepOidVal)) /* maxOid2 also came from - * deps */ - { - cmpval = oidcmp(te1->oidVal, te2->oidVal); /* Just compare base - * OIDs */ - } - else -/* MaxOid2 was entry OID */ - { - return 1; /* entry1 > entry2 */ - }; - } - else -/* must have oideq(id1, te1->oidVal) => maxOid1 = Oid1 */ - { - if (oideq(id2, te2->maxDepOidVal)) /* maxOid2 came from deps */ - { - return -1; /* entry1 < entry2 */ - } - else -/* MaxOid2 was entry OID - deps don't matter */ - { - cmpval = 0; - }; - }; - - /* - * If we get here, then we've done another comparison Once again, a 0 - * result means we require even more - */ - if (cmpval != 0) - return cmpval; - - /* - * Entire OID details match, so use ID number (ie. 
original pg_dump - * order) - */ - return _tocSortCompareByIDNum(te1, te2); -} - -static int -_tocSortCompareByIDNum(const void *p1, const void *p2) -{ - TocEntry *te1 = *(TocEntry **) p1; - TocEntry *te2 = *(TocEntry **) p2; - int id1 = te1->id; - int id2 = te2->id; - - /* printf("Comparing %d to %d\n", id1, id2); */ - - if (id1 < id2) - return -1; - else if (id1 > id2) - return 1; - else - return 0; -} - -/* - * Assuming Oid and depOid are set, work out the various - * Oid values used in sorting. - */ -static void -_fixupOidInfo(TocEntry *te) -{ - te->oidVal = atooid(te->oid); - te->maxDepOidVal = _findMaxOID(te->depOid); - - /* For the purpose of sorting, find the max OID. */ - if (oidcmp(te->oidVal, te->maxDepOidVal) >= 0) - te->maxOidVal = te->oidVal; - else - te->maxOidVal = te->maxDepOidVal; -} - -/* - * Find the max OID value for a given list of string Oid values - */ -static Oid -_findMaxOID(const char *((*deps)[])) -{ - const char *dep; - int i; - Oid maxOid = (Oid) 0; - Oid currOid; - - if (!deps) - return maxOid; - - i = 0; - while ((dep = (*deps)[i++]) != NULL) - { - currOid = atooid(dep); - if (oidcmp(maxOid, currOid) < 0) - maxOid = currOid; - } - - return maxOid; -} - -/* - * Maybe I can use this somewhere... - * - *create table pgdump_blob_path(p text); - *insert into pgdump_blob_path values('/home/pjw/work/postgresql-cvs/pgsql/src/bin/pg_dump_140'); - * - *insert into dump_blob_xref select 12345,lo_import(p || '/q.q') from pgdump_blob_path; - */ diff --git a/src/bin/pg_dump/pg_backup_archiver.h b/src/bin/pg_dump/pg_backup_archiver.h index 2434f91c84..50bf263ed8 100644 --- a/src/bin/pg_dump/pg_backup_archiver.h +++ b/src/bin/pg_dump/pg_backup_archiver.h @@ -17,7 +17,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.h,v 1.53 2003/11/29 19:52:05 pgsql Exp $ + * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.h,v 1.54 2003/12/06 03:00:11 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -59,7 +59,7 @@ typedef z_stream *z_streamp; #include "libpq-fe.h" #define K_VERS_MAJOR 1 -#define K_VERS_MINOR 7 +#define K_VERS_MINOR 8 #define K_VERS_REV 0 /* Data block types */ @@ -76,7 +76,9 @@ typedef z_stream *z_streamp; #define K_VERS_1_6 (( (1 * 256 + 6) * 256 + 0) * 256 + 0) /* Schema field in TOCs */ #define K_VERS_1_7 (( (1 * 256 + 7) * 256 + 0) * 256 + 0) /* File Offset size in * header */ -#define K_VERS_MAX (( (1 * 256 + 7) * 256 + 255) * 256 + 0) +#define K_VERS_1_8 (( (1 * 256 + 8) * 256 + 0) * 256 + 0) /* change interpretation of ID numbers and dependencies */ + +#define K_VERS_MAX (( (1 * 256 + 8) * 256 + 255) * 256 + 0) /* No of BLOBs to restore in 1 TX */ #define BLOB_BATCH_SIZE 100 @@ -114,8 +116,6 @@ typedef void (*PrintTocDataPtr) (struct _archiveHandle * AH, struct _tocEntry * typedef size_t (*CustomOutPtr) (struct _archiveHandle * AH, const void *buf, size_t len); -typedef int (*TocSortCompareFn) (const void *te1, const void *te2); - typedef enum _archiveMode { archModeWrite, @@ -222,7 +222,6 @@ typedef struct _archiveHandle int createdBlobXref; /* Flag */ int blobCount; /* # of blobs restored */ - int lastID; /* Last internal ID for a TOC entry */ char *fSpec; /* Archive File Spec */ FILE *FH; /* General purpose file handle */ void *OF; @@ -230,6 +229,8 @@ typedef struct _archiveHandle struct _tocEntry *toc; /* List of TOC entries */ int tocCount; /* Number of TOC entries */ + DumpId maxDumpId; /* largest DumpId among all TOC entries */ + struct _tocEntry *currToc; /* Used when dumping 
data */ int compression; /* Compression requested on open */ ArchiveMode mode; /* File mode - r or w */ @@ -252,8 +253,9 @@ typedef struct _tocEntry { struct _tocEntry *prev; struct _tocEntry *next; - int id; - int hadDumper; /* Archiver was passed a dumper routine + CatalogId catalogId; + DumpId dumpId; + bool hadDumper; /* Archiver was passed a dumper routine * (used in restore) */ char *tag; /* index tag */ char *namespace; /* null or empty string if not in a schema */ @@ -262,23 +264,17 @@ typedef struct _tocEntry char *defn; char *dropStmt; char *copyStmt; - char *oid; /* Oid of source of entry */ - Oid oidVal; /* Value of above */ - const char *((*depOid)[]); - Oid maxDepOidVal; /* Value of largest OID in deps */ - Oid maxOidVal; /* Max of entry OID and max dep OID */ + DumpId *dependencies; /* dumpIds of objects this one depends on */ + int nDeps; /* number of dependencies */ - int printed; /* Indicates if entry defn has been dumped */ DataDumperPtr dataDumper; /* Routine to dump data for object */ void *dataDumperArg; /* Arg for above routine */ void *formatData; /* TOC Entry data specific to file format */ - - int _moved; /* Marker used when rearranging TOC */ - } TocEntry; /* Used everywhere */ extern const char *progname; + extern void die_horribly(ArchiveHandle *AH, const char *modulename, const char *fmt,...) __attribute__((format(printf, 3, 4))); extern void write_msg(const char *modulename, const char *fmt,...) __attribute__((format(printf, 2, 3))); @@ -290,7 +286,7 @@ extern void WriteToc(ArchiveHandle *AH); extern void ReadToc(ArchiveHandle *AH); extern void WriteDataChunks(ArchiveHandle *AH); -extern int TocIDRequired(ArchiveHandle *AH, int id, RestoreOptions *ropt); +extern int TocIDRequired(ArchiveHandle *AH, DumpId id, RestoreOptions *ropt); extern bool checkSeek(FILE *fp); /* diff --git a/src/bin/pg_dump/pg_backup_custom.c b/src/bin/pg_dump/pg_backup_custom.c index f68bcdcc92..76e1b0f403 100644 --- a/src/bin/pg_dump/pg_backup_custom.c +++ b/src/bin/pg_dump/pg_backup_custom.c @@ -19,7 +19,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_custom.c,v 1.27 2003/11/29 19:52:05 pgsql Exp $ + * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_custom.c,v 1.28 2003/12/06 03:00:11 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -303,7 +303,7 @@ _StartData(ArchiveHandle *AH, TocEntry *te) tctx->dataState = K_OFFSET_POS_SET; _WriteByte(AH, BLK_DATA); /* Block type */ - WriteInt(AH, te->id); /* For sanity check */ + WriteInt(AH, te->dumpId); /* For sanity check */ _StartDataCompressor(AH, te); } @@ -371,7 +371,7 @@ _StartBlobs(ArchiveHandle *AH, TocEntry *te) tctx->dataState = K_OFFSET_POS_SET; _WriteByte(AH, BLK_BLOBS); /* Block type */ - WriteInt(AH, te->id); /* For sanity check */ + WriteInt(AH, te->dumpId); /* For sanity check */ } /* @@ -439,7 +439,7 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt) _readBlockHeader(AH, &blkType, &id); - while (id != te->id) + while (id != te->dumpId) { if ((TocIDRequired(AH, id, ropt) & 2) != 0) die_horribly(AH, modulename, @@ -475,9 +475,9 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt) } /* Are we sane? 
*/ - if (id != te->id) + if (id != te->dumpId) die_horribly(AH, modulename, "found unexpected block ID (%d) when reading data -- expected %d\n", - id, te->id); + id, te->dumpId); switch (blkType) { @@ -863,7 +863,7 @@ _readBlockHeader(ArchiveHandle *AH, int *type, int *id) if (AH->version < K_VERS_1_3) *type = BLK_DATA; else - *type = _ReadByte(AH);; + *type = _ReadByte(AH); *id = ReadInt(AH); } diff --git a/src/bin/pg_dump/pg_backup_files.c b/src/bin/pg_dump/pg_backup_files.c index c79ca05443..9d34b5d4db 100644 --- a/src/bin/pg_dump/pg_backup_files.c +++ b/src/bin/pg_dump/pg_backup_files.c @@ -20,7 +20,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_files.c,v 1.23 2003/11/29 19:52:05 pgsql Exp $ + * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_files.c,v 1.24 2003/12/06 03:00:11 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -172,11 +172,11 @@ _ArchiveEntry(ArchiveHandle *AH, TocEntry *te) { #ifdef HAVE_LIBZ if (AH->compression == 0) - sprintf(fn, "%d.dat", te->id); + sprintf(fn, "%d.dat", te->dumpId); else - sprintf(fn, "%d.dat.gz", te->id); + sprintf(fn, "%d.dat.gz", te->dumpId); #else - sprintf(fn, "%d.dat", te->id); + sprintf(fn, "%d.dat", te->dumpId); #endif ctx->filename = strdup(fn); } diff --git a/src/bin/pg_dump/pg_backup_null.c b/src/bin/pg_dump/pg_backup_null.c index 6a09855e79..f7d7f49900 100644 --- a/src/bin/pg_dump/pg_backup_null.c +++ b/src/bin/pg_dump/pg_backup_null.c @@ -17,7 +17,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_null.c,v 1.12 2003/11/29 19:52:05 pgsql Exp $ + * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_null.c,v 1.13 2003/12/06 03:00:11 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -91,7 +91,7 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt) if (te->dataDumper) { AH->currToc = te; - (*te->dataDumper) ((Archive *) AH, te->oid, te->dataDumperArg); + (*te->dataDumper) ((Archive *) AH, te->dataDumperArg); AH->currToc = NULL; } } diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c index 2f2e4fc541..c4c028c04c 100644 --- a/src/bin/pg_dump/pg_backup_tar.c +++ b/src/bin/pg_dump/pg_backup_tar.c @@ -16,7 +16,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_tar.c,v 1.39 2003/11/29 19:52:05 pgsql Exp $ + * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_tar.c,v 1.40 2003/12/06 03:00:11 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -259,11 +259,11 @@ _ArchiveEntry(ArchiveHandle *AH, TocEntry *te) { #ifdef HAVE_LIBZ if (AH->compression == 0) - sprintf(fn, "%d.dat", te->id); + sprintf(fn, "%d.dat", te->dumpId); else - sprintf(fn, "%d.dat.gz", te->id); + sprintf(fn, "%d.dat.gz", te->dumpId); #else - sprintf(fn, "%d.dat", te->id); + sprintf(fn, "%d.dat", te->dumpId); #endif ctx->filename = strdup(fn); } diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index 029f30f664..7b21f65a23 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -12,7 +12,7 @@ * by PostgreSQL * * IDENTIFICATION - * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.359 2003/12/01 22:08:01 momjian Exp $ + * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.360 2003/12/06 03:00:12 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -62,67 +62,11 @@ int optreset; #define _(x) gettext((x)) -typedef struct _dumpContext -{ - TableInfo *tblinfo; - int tblidx; - bool oids; 
-} DumpContext; - -static void help(const char *progname); -static NamespaceInfo *findNamespace(const char *nsoid, const char *objoid); -static void dumpClasses(const TableInfo *tblinfo, const int numTables, - Archive *fout, const bool oids); -static void dumpComment(Archive *fout, const char *target, - const char *namespace, const char *owner, - const char *oid, const char *classname, int subid, - const char *((*deps)[])); -static void dumpOneBaseType(Archive *fout, TypeInfo *tinfo, - FuncInfo *g_finfo, int numFuncs, - TypeInfo *g_tinfo, int numTypes); -static void dumpOneDomain(Archive *fout, TypeInfo *tinfo); -static void dumpOneCompositeType(Archive *fout, TypeInfo *tinfo); -static void dumpOneTable(Archive *fout, TableInfo *tbinfo, - TableInfo *g_tblinfo); -static void dumpOneSequence(Archive *fout, TableInfo *tbinfo, - const bool schemaOnly, const bool dataOnly); - -static void dumpTableACL(Archive *fout, TableInfo *tbinfo); -static void dumpFuncACL(Archive *fout, FuncInfo *finfo); -static void dumpAggACL(Archive *fout, AggInfo *finfo); -static void dumpACL(Archive *fout, const char *type, const char *name, - const char *tag, const char *nspname, - const char *owner, const char *acl, const char *objoid); - -static void dumpConstraints(Archive *fout, TableInfo *tblinfo, int numTables); -static void dumpTriggers(Archive *fout, TableInfo *tblinfo, int numTables); -static void dumpRules(Archive *fout, TableInfo *tblinfo, int numTables); -static char *format_function_signature(FuncInfo *finfo, bool honor_quotes); -static void dumpOneFunc(Archive *fout, FuncInfo *finfo); -static void dumpOneOpr(Archive *fout, OprInfo *oprinfo, - OprInfo *g_oprinfo, int numOperators); -static const char *convertRegProcReference(const char *proc); -static const char *convertOperatorReference(const char *opr, - OprInfo *g_oprinfo, int numOperators); -static void dumpOneOpclass(Archive *fout, OpclassInfo *opcinfo); -static void dumpOneConversion(Archive *fout, ConvInfo *convinfo); -static void dumpOneAgg(Archive *fout, AggInfo *agginfo); -static Oid findLastBuiltinOid_V71(const char *); -static Oid findLastBuiltinOid_V70(void); -static void setMaxOid(Archive *fout); -static void selectSourceSchema(const char *schemaName); -static char *getFormattedTypeName(const char *oid, OidOptions opts); -static char *myFormatType(const char *typname, int32 typmod); -static const char *fmtQualifiedId(const char *schema, const char *id); -static int dumpBlobs(Archive *AH, char *, void *); -static int dumpDatabase(Archive *AH); -static const char *getAttrName(int attrnum, TableInfo *tblInfo); -static const char *fmtCopyColumnList(const TableInfo *ti); - extern char *optarg; extern int optind, opterr; + /* global decls */ bool g_verbose; /* User wants verbose narration of our * activities. 
*/ @@ -130,7 +74,7 @@ Archive *g_fout; /* the script file */ PGconn *g_conn; /* the database connection */ /* various user-settable parameters */ -bool dumpData; /* dump data using proper insert strings */ +bool dumpInserts; /* dump data using proper insert strings */ bool attrNames; /* put attr names into insert strings */ bool schemaOnly; bool dataOnly; @@ -148,11 +92,68 @@ char g_opaque_type[10]; /* name for the opaque type */ char g_comment_start[10]; char g_comment_end[10]; +static const CatalogId nilCatalogId = { 0, 0 }; + /* these are to avoid passing around info for findNamespace() */ static NamespaceInfo *g_namespaces; static int g_numNamespaces; +static void help(const char *progname); +static NamespaceInfo *findNamespace(Oid nsoid, Oid objoid); +static void dumpTableData(Archive *fout, TableDataInfo *tdinfo); +static void dumpComment(Archive *fout, const char *target, + const char *namespace, const char *owner, + CatalogId catalogId, int subid, DumpId dumpId); +static void dumpDumpableObject(Archive *fout, DumpableObject *dobj); +static void dumpNamespace(Archive *fout, NamespaceInfo *nspinfo); +static void dumpType(Archive *fout, TypeInfo *tinfo); +static void dumpBaseType(Archive *fout, TypeInfo *tinfo); +static void dumpDomain(Archive *fout, TypeInfo *tinfo); +static void dumpCompositeType(Archive *fout, TypeInfo *tinfo); +static void dumpProcLang(Archive *fout, ProcLangInfo *plang); +static void dumpFunc(Archive *fout, FuncInfo *finfo); +static void dumpCast(Archive *fout, CastInfo *cast); +static void dumpOpr(Archive *fout, OprInfo *oprinfo); +static void dumpOpclass(Archive *fout, OpclassInfo *opcinfo); +static void dumpConversion(Archive *fout, ConvInfo *convinfo); +static void dumpRule(Archive *fout, RuleInfo *rinfo); +static void dumpAgg(Archive *fout, AggInfo *agginfo); +static void dumpTrigger(Archive *fout, TriggerInfo *tginfo); +static void dumpTable(Archive *fout, TableInfo *tbinfo); +static void dumpTableSchema(Archive *fout, TableInfo *tbinfo); +static void dumpAttrDef(Archive *fout, AttrDefInfo *adinfo); +static void dumpSequence(Archive *fout, TableInfo *tbinfo); +static void dumpIndex(Archive *fout, IndxInfo *indxinfo); +static void dumpConstraint(Archive *fout, ConstraintInfo *coninfo); + +static void dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId, + const char *type, const char *name, + const char *tag, const char *nspname, const char *owner, + const char *acls); + +static void getDependencies(void); +static void getDomainConstraints(TypeInfo *tinfo); +static void getTableData(TableInfo *tblinfo, int numTables, bool oids); +static char *format_function_signature(FuncInfo *finfo, bool honor_quotes); +static const char *convertRegProcReference(const char *proc); +static const char *convertOperatorReference(const char *opr); +static Oid findLastBuiltinOid_V71(const char *); +static Oid findLastBuiltinOid_V70(void); +static void setMaxOid(Archive *fout); +static void selectSourceSchema(const char *schemaName); +static char *getFormattedTypeName(Oid oid, OidOptions opts); +static char *myFormatType(const char *typname, int32 typmod); +static const char *fmtQualifiedId(const char *schema, const char *id); +static int dumpBlobs(Archive *AH, void *arg); +static void dumpDatabase(Archive *AH); +static const char *getAttrName(int attrnum, TableInfo *tblInfo); +static const char *fmtCopyColumnList(const TableInfo *ti); +static void do_sql_command(PGconn *conn, const char *query); +static void check_sql_result(PGresult *res, PGconn *conn, const char *query, 
+ ExecStatusType expected); + + int main(int argc, char **argv) { @@ -164,9 +165,11 @@ main(int argc, char **argv) const char *pgport = NULL; const char *username = NULL; bool oids = false; - PGresult *res; TableInfo *tblinfo; int numTables; + DumpableObject **dobjs; + int numObjs; + int i; bool force_password = false; int compressLevel = -1; bool ignore_version = false; @@ -233,7 +236,7 @@ main(int argc, char **argv) g_comment_end[0] = '\0'; strcpy(g_opaque_type, "opaque"); - dataOnly = schemaOnly = dumpData = attrNames = false; + dataOnly = schemaOnly = dumpInserts = attrNames = false; progname = get_progname(argv[0]); @@ -281,12 +284,12 @@ main(int argc, char **argv) break; case 'd': /* dump data as proper insert strings */ - dumpData = true; + dumpInserts = true; break; case 'D': /* dump data as proper insert strings with * attr names */ - dumpData = true; + dumpInserts = true; attrNames = true; break; @@ -435,7 +438,7 @@ main(int argc, char **argv) exit(1); } - if (dumpData == true && oids == true) + if (dumpInserts == true && oids == true) { write_msg(NULL, "INSERT (-d, -D) and OID (-o) options cannot be used together\n"); write_msg(NULL, "(The INSERT command cannot set OIDs.)\n"); @@ -505,37 +508,19 @@ main(int argc, char **argv) /* * Start serializable transaction to dump consistent data. */ - res = PQexec(g_conn, "BEGIN"); - if (!res || PQresultStatus(res) != PGRES_COMMAND_OK) - exit_horribly(g_fout, NULL, "BEGIN command failed: %s", - PQerrorMessage(g_conn)); - PQclear(res); + do_sql_command(g_conn, "BEGIN"); - res = PQexec(g_conn, "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE"); - if (!res || PQresultStatus(res) != PGRES_COMMAND_OK) - exit_horribly(g_fout, NULL, "could not set transaction isolation level to serializable: %s", - PQerrorMessage(g_conn)); - PQclear(res); + do_sql_command(g_conn, "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE"); /* Set the datestyle to ISO to ensure the dump's portability */ - res = PQexec(g_conn, "SET DATESTYLE = ISO"); - if (!res || PQresultStatus(res) != PGRES_COMMAND_OK) - exit_horribly(g_fout, NULL, "could not set datestyle to ISO: %s", - PQerrorMessage(g_conn)); - PQclear(res); + do_sql_command(g_conn, "SET DATESTYLE = ISO"); /* * If supported, set extra_float_digits so that we can dump float data * exactly (given correctly implemented float I/O code, anyway) */ if (g_fout->remoteVersion >= 70400) - { - res = PQexec(g_conn, "SET extra_float_digits TO 2"); - if (!res || PQresultStatus(res) != PGRES_COMMAND_OK) - exit_horribly(g_fout, NULL, "could not set extra_float_digits: %s", - PQerrorMessage(g_conn)); - PQclear(res); - } + do_sql_command(g_conn, "SET extra_float_digits TO 2"); /* Find the last built-in OID, if needed */ if (g_fout->remoteVersion < 70300) @@ -548,34 +533,58 @@ main(int argc, char **argv) write_msg(NULL, "last built-in OID is %u\n", g_last_builtin_oid); } - /* Dump the database definition */ + /* + * Now scan the database and create DumpableObject structs for all the + * objects we intend to dump. + */ + tblinfo = getSchemaData(&numTables, schemaOnly, dataOnly); + + if (!schemaOnly) + getTableData(tblinfo, numTables, oids); + + /* + * Collect dependency data to assist in ordering the objects. + */ + getDependencies(); + + /* + * Sort the objects into a safe dump order (no forward references). + */ + getDumpableObjects(&dobjs, &numObjs); + + sortDumpableObjectsByType(dobjs, numObjs); + sortDumpableObjects(dobjs, numObjs); + + /* + * Create archive TOC entries for all the objects to be dumped, + * in a safe order. 
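The housekeeping queries in main() above, and essentially every catalog query below, now go through two small helpers instead of repeating the PQexec-and-check boilerplate. Their definitions sit elsewhere in pg_dump.c, outside this excerpt; the following is only a sketch consistent with the declarations and call sites, and the error wording is illustrative rather than the patch's own.

static void
check_sql_result(PGresult *res, PGconn *conn, const char *query,
				 ExecStatusType expected)
{
	if (res && PQresultStatus(res) == expected)
		return;					/* the expected status: nothing to do */

	write_msg(NULL, "query failed: %s", PQerrorMessage(conn));
	write_msg(NULL, "The query was: %s\n", query);
	exit_nicely();
}

static void
do_sql_command(PGconn *conn, const char *query)
{
	PGresult   *res = PQexec(conn, query);

	check_sql_result(res, conn, query, PGRES_COMMAND_OK);
	PQclear(res);
}

Centralizing the check keeps each caller to one line and guarantees a uniform error message and exit path.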
+ */ + + /* The database item is always first. */ if (!dataOnly) dumpDatabase(g_fout); + /* Max OID is second. */ if (oids == true) setMaxOid(g_fout); - tblinfo = dumpSchema(g_fout, &numTables, aclsSkip, schemaOnly, dataOnly); - - if (!schemaOnly) - dumpClasses(tblinfo, numTables, g_fout, oids); - - if (outputBlobs) - ArchiveEntry(g_fout, "0", "BLOBS", NULL, "", - "BLOBS", NULL, "", "", NULL, dumpBlobs, NULL); - - if (!dataOnly) /* dump indexes and triggers at the end - * for performance */ + /* Now the rearrangeable objects. */ + for (i = 0; i < numObjs; i++) { - dumpConstraints(g_fout, tblinfo, numTables); - dumpTriggers(g_fout, tblinfo, numTables); - dumpRules(g_fout, tblinfo, numTables); + dumpDumpableObject(g_fout, dobjs[i]); } - /* Now sort the output nicely: by OID within object types */ - SortTocByOID(g_fout); - SortTocByObjectType(g_fout); + /* BLOBs are always last. */ + if (outputBlobs) + ArchiveEntry(g_fout, nilCatalogId, createDumpId(), + "BLOBS", NULL, "", + "BLOBS", "", "", NULL, + NULL, 0, + dumpBlobs, NULL); + /* + * And finally we can do the actual output. + */ if (plainText) { ropt = NewRestoreOptions(); @@ -601,6 +610,7 @@ main(int argc, char **argv) CloseArchive(g_fout); PQfinish(g_conn); + exit(0); } @@ -724,13 +734,13 @@ selectDumpableTable(TableInfo *tbinfo) #define COPYBUFSIZ 8192 static int -dumpClasses_nodumpData(Archive *fout, char *oid, void *dctxv) +dumpTableData_copy(Archive *fout, void *dcontext) { - const DumpContext *dctx = (DumpContext *) dctxv; - TableInfo *tbinfo = &dctx->tblinfo[dctx->tblidx]; + TableDataInfo *tdinfo = (TableDataInfo *) dcontext; + TableInfo *tbinfo = tdinfo->tdtable; const char *classname = tbinfo->relname; const bool hasoids = tbinfo->hasoids; - const bool oids = dctx->oids; + const bool oids = tdinfo->oids; PQExpBuffer q = createPQExpBuffer(); PGresult *res; int ret; @@ -775,24 +785,7 @@ dumpClasses_nodumpData(Archive *fout, char *oid, void *dctxv) column_list); } res = PQexec(g_conn, q->data); - if (!res || - PQresultStatus(res) == PGRES_FATAL_ERROR) - { - write_msg(NULL, "SQL command to dump the contents of table \"%s\" failed\n", - classname); - write_msg(NULL, "Error message from server: %s", PQerrorMessage(g_conn)); - write_msg(NULL, "The command was: %s\n", q->data); - exit_nicely(); - } - if (PQresultStatus(res) != PGRES_COPY_OUT) - { - write_msg(NULL, "SQL command to dump the contents of table \"%s\" executed abnormally.\n", - classname); - write_msg(NULL, "The server returned status %d when %d was expected.\n", - PQresultStatus(res), PGRES_COPY_OUT); - write_msg(NULL, "The command was: %s\n", q->data); - exit_nicely(); - } + check_sql_result(res, g_conn, q->data, PGRES_COPY_OUT); copydone = false; @@ -815,7 +808,7 @@ dumpClasses_nodumpData(Archive *fout, char *oid, void *dctxv) copydone = true; /* FALLTHROUGH */ case 0: - archputc('\n', fout); + archputs("\n", fout); break; case 1: break; @@ -880,10 +873,10 @@ dumpClasses_nodumpData(Archive *fout, char *oid, void *dctxv) } static int -dumpClasses_dumpData(Archive *fout, char *oid, void *dctxv) +dumpTableData_insert(Archive *fout, void *dcontext) { - const DumpContext *dctx = (DumpContext *) dctxv; - TableInfo *tbinfo = &dctx->tblinfo[dctx->tblidx]; + TableDataInfo *tdinfo = (TableDataInfo *) dcontext; + TableInfo *tbinfo = tdinfo->tdtable; const char *classname = tbinfo->relname; PQExpBuffer q = createPQExpBuffer(); PGresult *res; @@ -915,29 +908,15 @@ dumpClasses_dumpData(Archive *fout, char *oid, void *dctxv) } res = PQexec(g_conn, q->data); - if (!res || - 
PQresultStatus(res) != PGRES_COMMAND_OK) - { - write_msg(NULL, "dumpClasses(): SQL command failed\n"); - write_msg(NULL, "Error message from server: %s", PQerrorMessage(g_conn)); - write_msg(NULL, "The command was: %s\n", q->data); - exit_nicely(); - } + check_sql_result(res, g_conn, q->data, PGRES_COMMAND_OK); do { PQclear(res); res = PQexec(g_conn, "FETCH 100 FROM _pg_dump_cursor"); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "dumpClasses(): SQL command failed\n"); - write_msg(NULL, "Error message from server: %s", PQerrorMessage(g_conn)); - write_msg(NULL, "The command was: FETCH 100 FROM _pg_dump_cursor\n"); - exit_nicely(); - } - + check_sql_result(res, g_conn, "FETCH 100 FROM _pg_dump_cursor", + PGRES_TUPLES_OK); nfields = PQnfields(res); for (tuple = 0; tuple < PQntuples(res); tuple++) { @@ -1029,19 +1008,11 @@ dumpClasses_dumpData(Archive *fout, char *oid, void *dctxv) } } while (PQntuples(res) > 0); - archprintf(fout, "\n\n"); PQclear(res); - res = PQexec(g_conn, "CLOSE _pg_dump_cursor"); - if (!res || - PQresultStatus(res) != PGRES_COMMAND_OK) - { - write_msg(NULL, "dumpClasses(): SQL command failed\n"); - write_msg(NULL, "Error message from server: %s", PQerrorMessage(g_conn)); - write_msg(NULL, "The command was: CLOSE _pg_dump_cursor\n"); - exit_nicely(); - } - PQclear(res); + archprintf(fout, "\n\n"); + + do_sql_command(g_conn, "CLOSE _pg_dump_cursor"); destroyPQExpBuffer(q); return 1; @@ -1049,70 +1020,86 @@ dumpClasses_dumpData(Archive *fout, char *oid, void *dctxv) /* - * DumpClasses - - * dump the contents of all the classes. + * dumpTableData - + * dump the contents of a single table + * + * Actually, this just makes an ArchiveEntry for the table contents. */ static void -dumpClasses(const TableInfo *tblinfo, const int numTables, Archive *fout, - const bool oids) +dumpTableData(Archive *fout, TableDataInfo *tdinfo) { + TableInfo *tbinfo = tdinfo->tdtable; PQExpBuffer copyBuf = createPQExpBuffer(); DataDumperPtr dumpFn; - DumpContext *dumpCtx; char *copyStmt; + + if (!dumpInserts) + { + /* Dump/restore using COPY */ + dumpFn = dumpTableData_copy; + /* must use 2 steps here 'cause fmtId is nonreentrant */ + appendPQExpBuffer(copyBuf, "COPY %s ", + fmtId(tbinfo->relname)); + appendPQExpBuffer(copyBuf, "%s %sFROM stdin;\n", + fmtCopyColumnList(tbinfo), + (tdinfo->oids && tbinfo->hasoids) ? 
"WITH OIDS " : ""); + copyStmt = copyBuf->data; + } + else + { + /* Restore using INSERT */ + dumpFn = dumpTableData_insert; + copyStmt = NULL; + } + + ArchiveEntry(fout, tdinfo->dobj.catId, tdinfo->dobj.dumpId, + tbinfo->relname, + tbinfo->relnamespace->nspname, + tbinfo->usename, + "TABLE DATA", "", "", copyStmt, + tdinfo->dobj.dependencies, tdinfo->dobj.nDeps, + dumpFn, tdinfo); + + destroyPQExpBuffer(copyBuf); +} + +/* + * getTableData - + * set up dumpable objects representing the contents of tables + */ +static void +getTableData(TableInfo *tblinfo, int numTables, bool oids) +{ int i; for (i = 0; i < numTables; i++) { - const char *classname = tblinfo[i].relname; - - /* Skip VIEW relations */ + /* Skip VIEWs (no data to dump) */ if (tblinfo[i].relkind == RELKIND_VIEW) continue; - - if (tblinfo[i].relkind == RELKIND_SEQUENCE) /* already dumped */ + /* Skip SEQUENCEs (handled elsewhere) */ + if (tblinfo[i].relkind == RELKIND_SEQUENCE) continue; if (tblinfo[i].dump) { - if (g_verbose) - write_msg(NULL, "preparing to dump the contents of table %s\n", - classname); - - dumpCtx = (DumpContext *) calloc(1, sizeof(DumpContext)); - dumpCtx->tblinfo = (TableInfo *) tblinfo; - dumpCtx->tblidx = i; - dumpCtx->oids = oids; + TableDataInfo *tdinfo; - if (!dumpData) - { - /* Dump/restore using COPY */ - dumpFn = dumpClasses_nodumpData; - resetPQExpBuffer(copyBuf); - /* must use 2 steps here 'cause fmtId is nonreentrant */ - appendPQExpBuffer(copyBuf, "COPY %s ", - fmtId(tblinfo[i].relname)); - appendPQExpBuffer(copyBuf, "%s %sFROM stdin;\n", - fmtCopyColumnList(&(tblinfo[i])), - (oids && tblinfo[i].hasoids) ? "WITH OIDS " : ""); - copyStmt = copyBuf->data; - } - else - { - /* Restore using INSERT */ - dumpFn = dumpClasses_dumpData; - copyStmt = NULL; - } + tdinfo = (TableDataInfo *) malloc(sizeof(TableDataInfo)); - ArchiveEntry(fout, tblinfo[i].oid, tblinfo[i].relname, - tblinfo[i].relnamespace->nspname, - tblinfo[i].usename, - "TABLE DATA", NULL, "", "", copyStmt, - dumpFn, dumpCtx); + tdinfo->dobj.objType = DO_TABLE_DATA; + /* + * Note: use tableoid 0 so that this object won't be mistaken + * for something that pg_depend entries apply to. 
+ */ + tdinfo->dobj.catId.tableoid = 0; + tdinfo->dobj.catId.oid = tblinfo[i].dobj.catId.oid; + AssignDumpId(&tdinfo->dobj); + tdinfo->tdtable = &(tblinfo[i]); + tdinfo->oids = oids; + addObjectDependency(&tdinfo->dobj, tblinfo[i].dobj.dumpId); } } - - destroyPQExpBuffer(copyBuf); } @@ -1120,7 +1107,7 @@ dumpClasses(const TableInfo *tblinfo, const int numTables, Archive *fout, * dumpDatabase: * dump the database definition */ -static int +static void dumpDatabase(Archive *AH) { PQExpBuffer dbQry = createPQExpBuffer(); @@ -1128,9 +1115,13 @@ dumpDatabase(Archive *AH) PQExpBuffer creaQry = createPQExpBuffer(); PGresult *res; int ntups; - int i_dba, + int i_tableoid, + i_oid, + i_dba, i_encoding, i_datpath; + CatalogId dbCatId; + DumpId dbDumpId; const char *datname, *dba, *encoding, @@ -1145,21 +1136,31 @@ dumpDatabase(Archive *AH) selectSourceSchema("pg_catalog"); /* Get the database owner and parameters from pg_database */ - appendPQExpBuffer(dbQry, "select (select usename from pg_user where usesysid = datdba) as dba," - " pg_encoding_to_char(encoding) as encoding," - " datpath from pg_database" - " where datname = "); - appendStringLiteral(dbQry, datname, true); - - res = PQexec(g_conn, dbQry->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) + if (g_fout->remoteVersion >= 70100) { - write_msg(NULL, "SQL command failed\n"); - write_msg(NULL, "Error message from server: %s", PQerrorMessage(g_conn)); - write_msg(NULL, "The command was: %s\n", dbQry->data); - exit_nicely(); + appendPQExpBuffer(dbQry, "SELECT tableoid, oid, " + "(SELECT usename FROM pg_user WHERE usesysid = datdba) as dba, " + "pg_encoding_to_char(encoding) as encoding, " + "datpath " + "FROM pg_database " + "WHERE datname = "); + appendStringLiteral(dbQry, datname, true); } + else + { + appendPQExpBuffer(dbQry, "SELECT " + "(SELECT oid FROM pg_class WHERE relname = 'pg_database') AS tableoid, " + "oid, " + "(SELECT usename FROM pg_user WHERE usesysid = datdba) as dba, " + "pg_encoding_to_char(encoding) as encoding, " + "datpath " + "FROM pg_database " + "WHERE datname = "); + appendStringLiteral(dbQry, datname, true); + } + + res = PQexec(g_conn, dbQry->data); + check_sql_result(res, g_conn, dbQry->data, PGRES_TUPLES_OK); ntups = PQntuples(res); @@ -1177,9 +1178,14 @@ dumpDatabase(Archive *AH) exit_nicely(); } + i_tableoid = PQfnumber(res, "tableoid"); + i_oid = PQfnumber(res, "oid"); i_dba = PQfnumber(res, "dba"); i_encoding = PQfnumber(res, "encoding"); i_datpath = PQfnumber(res, "datpath"); + + dbCatId.tableoid = atooid(PQgetvalue(res, 0, i_tableoid)); + dbCatId.oid = atooid(PQgetvalue(res, 0, i_oid)); dba = PQgetvalue(res, 0, i_dba); encoding = PQgetvalue(res, 0, i_encoding); datpath = PQgetvalue(res, 0, i_datpath); @@ -1201,25 +1207,34 @@ dumpDatabase(Archive *AH) appendPQExpBuffer(delQry, "DROP DATABASE %s;\n", fmtId(datname)); - ArchiveEntry(AH, "0", /* OID */ + dbDumpId = createDumpId(); + + ArchiveEntry(AH, + dbCatId, /* catalog ID */ + dbDumpId, /* dump ID */ datname, /* Name */ NULL, /* Namespace */ dba, /* Owner */ "DATABASE", /* Desc */ - NULL, /* Deps */ creaQry->data, /* Create */ delQry->data, /* Del */ NULL, /* Copy */ + NULL, /* Deps */ + 0, /* # Deps */ NULL, /* Dumper */ NULL); /* Dumper Arg */ + /* Dump DB comment if any */ + resetPQExpBuffer(dbQry); + appendPQExpBuffer(dbQry, "DATABASE %s", fmtId(datname)); + dumpComment(AH, dbQry->data, NULL, "", + dbCatId, 0, dbDumpId); + PQclear(res); destroyPQExpBuffer(dbQry); destroyPQExpBuffer(delQry); destroyPQExpBuffer(creaQry); - - return 1; } @@ 
-1233,7 +1248,7 @@ dumpDatabase(Archive *AH) #define loFetchSize 1000 static int -dumpBlobs(Archive *AH, char *junkOid, void *junkVal) +dumpBlobs(Archive *AH, void *arg) { PQExpBuffer oidQry = createPQExpBuffer(); PQExpBuffer oidFetchQry = createPQExpBuffer(); @@ -1257,11 +1272,7 @@ dumpBlobs(Archive *AH, char *junkOid, void *junkVal) appendPQExpBuffer(oidQry, "Declare blobOid Cursor for SELECT oid from pg_class where relkind = 'l'"); res = PQexec(g_conn, oidQry->data); - if (!res || PQresultStatus(res) != PGRES_COMMAND_OK) - { - write_msg(NULL, "dumpBlobs(): cursor declaration failed: %s", PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, oidQry->data, PGRES_COMMAND_OK); /* Fetch for cursor */ appendPQExpBuffer(oidFetchQry, "Fetch %d in blobOid", loFetchSize); @@ -1270,14 +1281,9 @@ dumpBlobs(Archive *AH, char *junkOid, void *junkVal) { /* Do a fetch */ PQclear(res); - res = PQexec(g_conn, oidFetchQry->data); - if (!res || PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "dumpBlobs(): fetch from cursor failed: %s", - PQerrorMessage(g_conn)); - exit_nicely(); - } + res = PQexec(g_conn, oidFetchQry->data); + check_sql_result(res, g_conn, oidFetchQry->data, PGRES_TUPLES_OK); /* Process the tuples, if any */ for (i = 0; i < PQntuples(res); i++) @@ -1337,6 +1343,7 @@ getNamespaces(int *numNamespaces) int i; PQExpBuffer query; NamespaceInfo *nsinfo; + int i_tableoid; int i_oid; int i_nspname; int i_usename; @@ -1350,14 +1357,20 @@ getNamespaces(int *numNamespaces) { nsinfo = (NamespaceInfo *) malloc(2 * sizeof(NamespaceInfo)); - nsinfo[0].oid = strdup("0"); + nsinfo[0].dobj.objType = DO_NAMESPACE; + nsinfo[0].dobj.catId.tableoid = 0; + nsinfo[0].dobj.catId.oid = 0; + AssignDumpId(&nsinfo[0].dobj); nsinfo[0].nspname = strdup(""); nsinfo[0].usename = strdup(""); nsinfo[0].nspacl = strdup(""); selectDumpableNamespace(&nsinfo[0]); - nsinfo[1].oid = strdup("1"); + nsinfo[1].dobj.objType = DO_NAMESPACE; + nsinfo[1].dobj.catId.tableoid = 0; + nsinfo[1].dobj.catId.oid = 1; + AssignDumpId(&nsinfo[1].dobj); nsinfo[1].nspname = strdup("pg_catalog"); nsinfo[1].usename = strdup(""); nsinfo[1].nspacl = strdup(""); @@ -1379,23 +1392,19 @@ getNamespaces(int *numNamespaces) * we fetch all namespaces including system ones, so that every object * we read in can be linked to a containing namespace. 
*/ - appendPQExpBuffer(query, "SELECT oid, nspname, " + appendPQExpBuffer(query, "SELECT tableoid, oid, nspname, " "(select usename from pg_user where nspowner = usesysid) as usename, " "nspacl " "FROM pg_namespace"); res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain list of schemas failed: %s", PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); ntups = PQntuples(res); nsinfo = (NamespaceInfo *) malloc(ntups * sizeof(NamespaceInfo)); + i_tableoid = PQfnumber(res, "tableoid"); i_oid = PQfnumber(res, "oid"); i_nspname = PQfnumber(res, "nspname"); i_usename = PQfnumber(res, "usename"); @@ -1403,7 +1412,10 @@ getNamespaces(int *numNamespaces) for (i = 0; i < ntups; i++) { - nsinfo[i].oid = strdup(PQgetvalue(res, i, i_oid)); + nsinfo[i].dobj.objType = DO_NAMESPACE; + nsinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid)); + nsinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid)); + AssignDumpId(&nsinfo[i].dobj); nsinfo[i].nspname = strdup(PQgetvalue(res, i, i_nspname)); nsinfo[i].usename = strdup(PQgetvalue(res, i, i_usename)); nsinfo[i].nspacl = strdup(PQgetvalue(res, i, i_nspacl)); @@ -1453,7 +1465,7 @@ getNamespaces(int *numNamespaces) * a system object or not. In 7.3 and later there is no guessing. */ static NamespaceInfo * -findNamespace(const char *nsoid, const char *objoid) +findNamespace(Oid nsoid, Oid objoid) { int i; @@ -1463,16 +1475,16 @@ findNamespace(const char *nsoid, const char *objoid) { NamespaceInfo *nsinfo = &g_namespaces[i]; - if (strcmp(nsoid, nsinfo->oid) == 0) + if (nsoid == nsinfo->dobj.catId.oid) return nsinfo; } - write_msg(NULL, "schema with OID %s does not exist\n", nsoid); + write_msg(NULL, "schema with OID %u does not exist\n", nsoid); exit_nicely(); } else { /* This code depends on the layout set up by getNamespaces. */ - if (atooid(objoid) > g_last_builtin_oid) + if (objoid > g_last_builtin_oid) i = 0; /* user object */ else i = 1; /* system object */ @@ -1488,6 +1500,9 @@ findNamespace(const char *nsoid, const char *objoid) * TypeInfo* structure * * numTypes is set to the number of types read in + * + * NB: this must run after getFuncs() because we assume we can do + * findFuncByOid(). 
*/ TypeInfo * getTypes(int *numTypes) @@ -1497,10 +1512,13 @@ getTypes(int *numTypes) int i; PQExpBuffer query = createPQExpBuffer(); TypeInfo *tinfo; + int i_tableoid; int i_oid; int i_typname; int i_typnamespace; int i_usename; + int i_typinput; + int i_typoutput; int i_typelem; int i_typrelid; int i_typrelkind; @@ -1521,10 +1539,23 @@ getTypes(int *numTypes) if (g_fout->remoteVersion >= 70300) { - appendPQExpBuffer(query, "SELECT pg_type.oid, typname, " + appendPQExpBuffer(query, "SELECT tableoid, oid, typname, " "typnamespace, " "(select usename from pg_user where typowner = usesysid) as usename, " - "typelem, typrelid, " + "typinput::oid as typinput, " + "typoutput::oid as typoutput, typelem, typrelid, " + "CASE WHEN typrelid = 0 THEN ' '::\"char\" " + "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END as typrelkind, " + "typtype, typisdefined " + "FROM pg_type"); + } + else if (g_fout->remoteVersion >= 70100) + { + appendPQExpBuffer(query, "SELECT tableoid, oid, typname, " + "0::oid as typnamespace, " + "(select usename from pg_user where typowner = usesysid) as usename, " + "typinput::oid as typinput, " + "typoutput::oid as typoutput, typelem, typrelid, " "CASE WHEN typrelid = 0 THEN ' '::\"char\" " "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END as typrelkind, " "typtype, typisdefined " @@ -1532,10 +1563,13 @@ getTypes(int *numTypes) } else { - appendPQExpBuffer(query, "SELECT pg_type.oid, typname, " + appendPQExpBuffer(query, "SELECT " + "(SELECT oid FROM pg_class WHERE relname = 'pg_type') AS tableoid, " + "oid, typname, " "0::oid as typnamespace, " "(select usename from pg_user where typowner = usesysid) as usename, " - "typelem, typrelid, " + "typinput::oid as typinput, " + "typoutput::oid as typoutput, typelem, typrelid, " "CASE WHEN typrelid = 0 THEN ' '::\"char\" " "ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END as typrelkind, " "typtype, typisdefined " @@ -1543,21 +1577,19 @@ getTypes(int *numTypes) } res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain list of data types failed: %s", PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); ntups = PQntuples(res); tinfo = (TypeInfo *) malloc(ntups * sizeof(TypeInfo)); + i_tableoid = PQfnumber(res, "tableoid"); i_oid = PQfnumber(res, "oid"); i_typname = PQfnumber(res, "typname"); i_typnamespace = PQfnumber(res, "typnamespace"); i_usename = PQfnumber(res, "usename"); + i_typinput = PQfnumber(res, "typinput"); + i_typoutput = PQfnumber(res, "typoutput"); i_typelem = PQfnumber(res, "typelem"); i_typrelid = PQfnumber(res, "typrelid"); i_typrelkind = PQfnumber(res, "typrelkind"); @@ -1566,20 +1598,28 @@ getTypes(int *numTypes) for (i = 0; i < ntups; i++) { - tinfo[i].oid = strdup(PQgetvalue(res, i, i_oid)); + Oid typoutput; + FuncInfo *funcInfo; + + tinfo[i].dobj.objType = DO_TYPE; + tinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid)); + tinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid)); + AssignDumpId(&tinfo[i].dobj); tinfo[i].typname = strdup(PQgetvalue(res, i, i_typname)); - tinfo[i].typnamespace = findNamespace(PQgetvalue(res, i, i_typnamespace), - tinfo[i].oid); + tinfo[i].typnamespace = findNamespace(atooid(PQgetvalue(res, i, i_typnamespace)), + tinfo[i].dobj.catId.oid); tinfo[i].usename = strdup(PQgetvalue(res, i, i_usename)); - tinfo[i].typelem = strdup(PQgetvalue(res, i, i_typelem)); - tinfo[i].typrelid = strdup(PQgetvalue(res, 
i, i_typrelid)); + tinfo[i].typinput = atooid(PQgetvalue(res, i, i_typinput)); + typoutput = atooid(PQgetvalue(res, i, i_typoutput)); + tinfo[i].typelem = atooid(PQgetvalue(res, i, i_typelem)); + tinfo[i].typrelid = atooid(PQgetvalue(res, i, i_typrelid)); tinfo[i].typrelkind = *PQgetvalue(res, i, i_typrelkind); tinfo[i].typtype = *PQgetvalue(res, i, i_typtype); /* * check for user-defined array types, omit system generated ones */ - if ((strcmp(tinfo[i].typelem, "0") != 0) && + if (OidIsValid(tinfo[i].typelem) && tinfo[i].typname[0] != '_') tinfo[i].isArray = true; else @@ -1590,6 +1630,29 @@ getTypes(int *numTypes) else tinfo[i].isDefined = false; + /* + * If it's a domain, fetch info about its constraints, if any + */ + tinfo[i].nDomChecks = 0; + tinfo[i].domChecks = NULL; + if (tinfo[i].typtype == 'd') + getDomainConstraints(&(tinfo[i])); + + /* + * Make sure there are dependencies from the type to its input and + * output functions. (We don't worry about typsend/typreceive since + * those are only valid in 7.4 and later, wherein the standard + * dependency mechanism will pick them up.) + */ + funcInfo = findFuncByOid(tinfo[i].typinput); + if (funcInfo) + addObjectDependency(&tinfo[i].dobj, + funcInfo->dobj.dumpId); + funcInfo = findFuncByOid(typoutput); + if (funcInfo) + addObjectDependency(&tinfo[i].dobj, + funcInfo->dobj.dumpId); + if (strlen(tinfo[i].usename) == 0 && tinfo[i].isDefined) write_msg(NULL, "WARNING: owner of data type \"%s\" appears to be invalid\n", tinfo[i].typname); @@ -1619,6 +1682,7 @@ getOperators(int *numOprs) int i; PQExpBuffer query = createPQExpBuffer(); OprInfo *oprinfo; + int i_tableoid; int i_oid; int i_oprname; int i_oprnamespace; @@ -1635,34 +1699,40 @@ getOperators(int *numOprs) if (g_fout->remoteVersion >= 70300) { - appendPQExpBuffer(query, "SELECT pg_operator.oid, oprname, " + appendPQExpBuffer(query, "SELECT tableoid, oid, oprname, " "oprnamespace, " "(select usename from pg_user where oprowner = usesysid) as usename, " "oprcode::oid as oprcode " - "from pg_operator"); + "FROM pg_operator"); + } + else if (g_fout->remoteVersion >= 70100) + { + appendPQExpBuffer(query, "SELECT tableoid, oid, oprname, " + "0::oid as oprnamespace, " + "(select usename from pg_user where oprowner = usesysid) as usename, " + "oprcode::oid as oprcode " + "FROM pg_operator"); } else { - appendPQExpBuffer(query, "SELECT pg_operator.oid, oprname, " + appendPQExpBuffer(query, "SELECT " + "(SELECT oid FROM pg_class WHERE relname = 'pg_operator') AS tableoid, " + "oid, oprname, " "0::oid as oprnamespace, " "(select usename from pg_user where oprowner = usesysid) as usename, " "oprcode::oid as oprcode " - "from pg_operator"); + "FROM pg_operator"); } res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain list of operators failed: %s", PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); ntups = PQntuples(res); *numOprs = ntups; oprinfo = (OprInfo *) malloc(ntups * sizeof(OprInfo)); + i_tableoid = PQfnumber(res, "tableoid"); i_oid = PQfnumber(res, "oid"); i_oprname = PQfnumber(res, "oprname"); i_oprnamespace = PQfnumber(res, "oprnamespace"); @@ -1671,12 +1741,15 @@ getOperators(int *numOprs) for (i = 0; i < ntups; i++) { - oprinfo[i].oid = strdup(PQgetvalue(res, i, i_oid)); + oprinfo[i].dobj.objType = DO_OPERATOR; + oprinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid)); + oprinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid)); 
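Each of these catalog readers now begins a row by filling in the embedded DumpableObject header: the object type, the catalog identity taken from the query's tableoid and oid columns, and a dump ID assigned by AssignDumpId() so the object can later be looked up and wired into the dependency graph. A condensed sketch of that pattern, where FooInfo and register_foo are hypothetical stand-ins for the real per-object structs and loops:

typedef struct FooInfo			/* stand-in for TypeInfo, OprInfo, etc. */
{
	DumpableObject dobj;		/* embedded header, per pg_dump.h */
	char	   *fooname;
} FooInfo;

static void
register_foo(FooInfo *foo, PGresult *res, int row,
			 int i_tableoid, int i_oid, Oid inputfnoid)
{
	FuncInfo   *funcInfo;

	foo->dobj.objType = DO_TYPE;	/* one of the DO_* codes */
	foo->dobj.catId.tableoid = atooid(PQgetvalue(res, row, i_tableoid));
	foo->dobj.catId.oid = atooid(PQgetvalue(res, row, i_oid));
	AssignDumpId(&foo->dobj);		/* fills dobj.dumpId and registers the object */

	/* ordering constraint: restore foo only after its input function */
	funcInfo = findFuncByOid(inputfnoid);
	if (funcInfo)
		addObjectDependency(&foo->dobj, funcInfo->dobj.dumpId);
}

The type loader above uses exactly this mechanism to make each data type depend on its input and output functions, so those functions are restored first.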
+ AssignDumpId(&oprinfo[i].dobj); oprinfo[i].oprname = strdup(PQgetvalue(res, i, i_oprname)); - oprinfo[i].oprnamespace = findNamespace(PQgetvalue(res, i, i_oprnamespace), - oprinfo[i].oid); + oprinfo[i].oprnamespace = findNamespace(atooid(PQgetvalue(res, i, i_oprnamespace)), + oprinfo[i].dobj.catId.oid); oprinfo[i].usename = strdup(PQgetvalue(res, i, i_usename)); - oprinfo[i].oprcode = strdup(PQgetvalue(res, i, i_oprcode)); + oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode)); if (strlen(oprinfo[i].usename) == 0) write_msg(NULL, "WARNING: owner of operator \"%s\" appears to be invalid\n", @@ -1705,6 +1778,7 @@ getConversions(int *numConversions) int i; PQExpBuffer query = createPQExpBuffer(); ConvInfo *convinfo; + int i_tableoid; int i_oid; int i_conname; int i_connamespace; @@ -1724,24 +1798,20 @@ getConversions(int *numConversions) /* Make sure we are in proper schema */ selectSourceSchema("pg_catalog"); - appendPQExpBuffer(query, "SELECT pg_conversion.oid, conname, " + appendPQExpBuffer(query, "SELECT tableoid, oid, conname, " "connamespace, " "(select usename from pg_user where conowner = usesysid) as usename " - "from pg_conversion"); + "FROM pg_conversion"); res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain list of conversions failed: %s", PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); ntups = PQntuples(res); *numConversions = ntups; convinfo = (ConvInfo *) malloc(ntups * sizeof(ConvInfo)); + i_tableoid = PQfnumber(res, "tableoid"); i_oid = PQfnumber(res, "oid"); i_conname = PQfnumber(res, "conname"); i_connamespace = PQfnumber(res, "connamespace"); @@ -1749,10 +1819,13 @@ getConversions(int *numConversions) for (i = 0; i < ntups; i++) { - convinfo[i].oid = strdup(PQgetvalue(res, i, i_oid)); + convinfo[i].dobj.objType = DO_CONVERSION; + convinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid)); + convinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid)); + AssignDumpId(&convinfo[i].dobj); convinfo[i].conname = strdup(PQgetvalue(res, i, i_conname)); - convinfo[i].connamespace = findNamespace(PQgetvalue(res, i, i_connamespace), - convinfo[i].oid); + convinfo[i].connamespace = findNamespace(atooid(PQgetvalue(res, i, i_connamespace)), + convinfo[i].dobj.catId.oid); convinfo[i].usename = strdup(PQgetvalue(res, i, i_usename)); } @@ -1778,6 +1851,7 @@ getOpclasses(int *numOpclasses) int i; PQExpBuffer query = createPQExpBuffer(); OpclassInfo *opcinfo; + int i_tableoid; int i_oid; int i_opcname; int i_opcnamespace; @@ -1793,32 +1867,37 @@ getOpclasses(int *numOpclasses) if (g_fout->remoteVersion >= 70300) { - appendPQExpBuffer(query, "SELECT pg_opclass.oid, opcname, " + appendPQExpBuffer(query, "SELECT tableoid, oid, opcname, " "opcnamespace, " "(select usename from pg_user where opcowner = usesysid) as usename " - "from pg_opclass"); + "FROM pg_opclass"); + } + else if (g_fout->remoteVersion >= 70100) + { + appendPQExpBuffer(query, "SELECT tableoid, oid, opcname, " + "0::oid as opcnamespace, " + "''::name as usename " + "FROM pg_opclass"); } else { - appendPQExpBuffer(query, "SELECT pg_opclass.oid, opcname, " + appendPQExpBuffer(query, "SELECT " + "(SELECT oid FROM pg_class WHERE relname = 'pg_opclass') AS tableoid, " + "oid, opcname, " "0::oid as opcnamespace, " "''::name as usename " - "from pg_opclass"); + "FROM pg_opclass"); } res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - 
{ - write_msg(NULL, "query to obtain list of operator classes failed: %s", PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); ntups = PQntuples(res); *numOpclasses = ntups; opcinfo = (OpclassInfo *) malloc(ntups * sizeof(OpclassInfo)); + i_tableoid = PQfnumber(res, "tableoid"); i_oid = PQfnumber(res, "oid"); i_opcname = PQfnumber(res, "opcname"); i_opcnamespace = PQfnumber(res, "opcnamespace"); @@ -1826,10 +1905,13 @@ getOpclasses(int *numOpclasses) for (i = 0; i < ntups; i++) { - opcinfo[i].oid = strdup(PQgetvalue(res, i, i_oid)); + opcinfo[i].dobj.objType = DO_OPCLASS; + opcinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid)); + opcinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid)); + AssignDumpId(&opcinfo[i].dobj); opcinfo[i].opcname = strdup(PQgetvalue(res, i, i_opcname)); - opcinfo[i].opcnamespace = findNamespace(PQgetvalue(res, i, i_opcnamespace), - opcinfo[i].oid); + opcinfo[i].opcnamespace = findNamespace(atooid(PQgetvalue(res, i, i_opcnamespace)), + opcinfo[i].dobj.catId.oid); opcinfo[i].usename = strdup(PQgetvalue(res, i, i_usename)); if (g_fout->remoteVersion >= 70300) @@ -1862,7 +1944,7 @@ getAggregates(int *numAggs) int i; PQExpBuffer query = createPQExpBuffer(); AggInfo *agginfo; - + int i_tableoid; int i_oid; int i_aggname; int i_aggnamespace; @@ -1877,7 +1959,7 @@ getAggregates(int *numAggs) if (g_fout->remoteVersion >= 70300) { - appendPQExpBuffer(query, "SELECT pg_proc.oid, proname as aggname, " + appendPQExpBuffer(query, "SELECT tableoid, oid, proname as aggname, " "pronamespace as aggnamespace, " "proargtypes[0] as aggbasetype, " "(select usename from pg_user where proowner = usesysid) as usename, " @@ -1887,32 +1969,40 @@ getAggregates(int *numAggs) "AND pronamespace != " "(select oid from pg_namespace where nspname = 'pg_catalog')"); } + else if (g_fout->remoteVersion >= 70100) + { + appendPQExpBuffer(query, "SELECT tableoid, oid, aggname, " + "0::oid as aggnamespace, " + "aggbasetype, " + "(select usename from pg_user where aggowner = usesysid) as usename, " + "'{=X}' as aggacl " + "FROM pg_aggregate " + "where oid > '%u'::oid", + g_last_builtin_oid); + } else { - appendPQExpBuffer(query, "SELECT pg_aggregate.oid, aggname, " + appendPQExpBuffer(query, "SELECT " + "(SELECT oid FROM pg_class WHERE relname = 'pg_aggregate') AS tableoid, " + "oid, aggname, " "0::oid as aggnamespace, " "aggbasetype, " "(select usename from pg_user where aggowner = usesysid) as usename, " "'{=X}' as aggacl " - "from pg_aggregate " + "FROM pg_aggregate " "where oid > '%u'::oid", g_last_builtin_oid); } res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain list of aggregate functions failed: %s", - PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); ntups = PQntuples(res); *numAggs = ntups; agginfo = (AggInfo *) malloc(ntups * sizeof(AggInfo)); + i_tableoid = PQfnumber(res, "tableoid"); i_oid = PQfnumber(res, "oid"); i_aggname = PQfnumber(res, "aggname"); i_aggnamespace = PQfnumber(res, "aggnamespace"); @@ -1922,16 +2012,23 @@ getAggregates(int *numAggs) for (i = 0; i < ntups; i++) { - agginfo[i].oid = strdup(PQgetvalue(res, i, i_oid)); - agginfo[i].aggname = strdup(PQgetvalue(res, i, i_aggname)); - agginfo[i].aggnamespace = findNamespace(PQgetvalue(res, i, i_aggnamespace), - agginfo[i].oid); - agginfo[i].aggbasetype = strdup(PQgetvalue(res, i, i_aggbasetype)); - agginfo[i].usename 
= strdup(PQgetvalue(res, i, i_usename)); - if (strlen(agginfo[i].usename) == 0) + agginfo[i].aggfn.dobj.objType = DO_AGG; + agginfo[i].aggfn.dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid)); + agginfo[i].aggfn.dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid)); + AssignDumpId(&agginfo[i].aggfn.dobj); + agginfo[i].aggfn.proname = strdup(PQgetvalue(res, i, i_aggname)); + agginfo[i].aggfn.pronamespace = findNamespace(atooid(PQgetvalue(res, i, i_aggnamespace)), + agginfo[i].aggfn.dobj.catId.oid); + agginfo[i].aggfn.usename = strdup(PQgetvalue(res, i, i_usename)); + if (strlen(agginfo[i].aggfn.usename) == 0) write_msg(NULL, "WARNING: owner of aggregate function \"%s\" appears to be invalid\n", - agginfo[i].aggname); - agginfo[i].aggacl = strdup(PQgetvalue(res, i, i_aggacl)); + agginfo[i].aggfn.proname); + agginfo[i].aggfn.lang = InvalidOid; /* not currently interesting */ + agginfo[i].aggfn.nargs = 1; + agginfo[i].aggfn.argtypes = (Oid *) malloc(sizeof(Oid)); + agginfo[i].aggfn.argtypes[0] = atooid(PQgetvalue(res, i, i_aggbasetype)); + agginfo[i].aggfn.prorettype = InvalidOid; /* not saved */ + agginfo[i].aggfn.proacl = strdup(PQgetvalue(res, i, i_aggacl)); agginfo[i].anybasetype = false; /* computed when it's dumped */ agginfo[i].fmtbasetype = NULL; /* computed when it's dumped */ } @@ -1958,7 +2055,7 @@ getFuncs(int *numFuncs) int i; PQExpBuffer query = createPQExpBuffer(); FuncInfo *finfo; - + int i_tableoid; int i_oid; int i_proname; int i_pronamespace; @@ -1977,7 +2074,7 @@ getFuncs(int *numFuncs) if (g_fout->remoteVersion >= 70300) { appendPQExpBuffer(query, - "SELECT pg_proc.oid, proname, prolang, " + "SELECT tableoid, oid, proname, prolang, " "pronargs, proargtypes, prorettype, proacl, " "pronamespace, " "(select usename from pg_user where proowner = usesysid) as usename " @@ -1986,10 +2083,24 @@ getFuncs(int *numFuncs) "AND pronamespace != " "(select oid from pg_namespace where nspname = 'pg_catalog')"); } + else if (g_fout->remoteVersion >= 70100) + { + appendPQExpBuffer(query, + "SELECT tableoid, oid, proname, prolang, " + "pronargs, proargtypes, prorettype, " + "'{=X}' as proacl, " + "0::oid as pronamespace, " + "(select usename from pg_user where proowner = usesysid) as usename " + "FROM pg_proc " + "where pg_proc.oid > '%u'::oid", + g_last_builtin_oid); + } else { appendPQExpBuffer(query, - "SELECT pg_proc.oid, proname, prolang, " + "SELECT " + "(SELECT oid FROM pg_class WHERE relname = 'pg_proc') AS tableoid, " + "oid, proname, prolang, " "pronargs, proargtypes, prorettype, " "'{=X}' as proacl, " "0::oid as pronamespace, " @@ -2000,13 +2111,7 @@ getFuncs(int *numFuncs) } res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain list of functions failed: %s", - PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); ntups = PQntuples(res); @@ -2014,6 +2119,7 @@ getFuncs(int *numFuncs) finfo = (FuncInfo *) calloc(ntups, sizeof(FuncInfo)); + i_tableoid = PQfnumber(res, "tableoid"); i_oid = PQfnumber(res, "oid"); i_proname = PQfnumber(res, "proname"); i_pronamespace = PQfnumber(res, "pronamespace"); @@ -2026,27 +2132,27 @@ getFuncs(int *numFuncs) for (i = 0; i < ntups; i++) { - finfo[i].oid = strdup(PQgetvalue(res, i, i_oid)); + finfo[i].dobj.objType = DO_FUNC; + finfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid)); + finfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid)); + AssignDumpId(&finfo[i].dobj); finfo[i].proname = 
strdup(PQgetvalue(res, i, i_proname)); - finfo[i].pronamespace = findNamespace(PQgetvalue(res, i, i_pronamespace), - finfo[i].oid); + finfo[i].pronamespace = findNamespace(atooid(PQgetvalue(res, i, i_pronamespace)), + finfo[i].dobj.catId.oid); finfo[i].usename = strdup(PQgetvalue(res, i, i_usename)); finfo[i].lang = atooid(PQgetvalue(res, i, i_prolang)); - finfo[i].prorettype = strdup(PQgetvalue(res, i, i_prorettype)); + finfo[i].prorettype = atooid(PQgetvalue(res, i, i_prorettype)); finfo[i].proacl = strdup(PQgetvalue(res, i, i_proacl)); finfo[i].nargs = atoi(PQgetvalue(res, i, i_pronargs)); if (finfo[i].nargs == 0) finfo[i].argtypes = NULL; else { - finfo[i].argtypes = malloc(finfo[i].nargs * sizeof(finfo[i].argtypes[0])); - parseNumericArray(PQgetvalue(res, i, i_proargtypes), - finfo[i].argtypes, - finfo[i].nargs); + finfo[i].argtypes = (Oid *) malloc(finfo[i].nargs * sizeof(Oid)); + parseOidArray(PQgetvalue(res, i, i_proargtypes), + finfo[i].argtypes, finfo[i].nargs); } - finfo[i].dumped = false; - if (strlen(finfo[i].usename) == 0) write_msg(NULL, "WARNING: owner of function \"%s\" appears to be invalid\n", finfo[i].proname); @@ -2076,7 +2182,7 @@ getTables(int *numTables) PQExpBuffer delqry = createPQExpBuffer(); PQExpBuffer lockquery = createPQExpBuffer(); TableInfo *tblinfo; - + int i_reltableoid; int i_reloid; int i_relname; int i_relnamespace; @@ -2117,8 +2223,8 @@ getTables(int *numTables) * serial column, if any */ appendPQExpBuffer(query, - "SELECT c.oid, relname, relacl, relkind, " - "relnamespace, " + "SELECT c.tableoid, c.oid, relname, " + "relacl, relkind, relnamespace, " "(select usename from pg_user where relowner = usesysid) as usename, " "relchecks, reltriggers, " "relhasindex, relhasrules, relhasoids, " @@ -2138,7 +2244,7 @@ getTables(int *numTables) else if (g_fout->remoteVersion >= 70200) { appendPQExpBuffer(query, - "SELECT pg_class.oid, relname, relacl, relkind, " + "SELECT tableoid, oid, relname, relacl, relkind, " "0::oid as relnamespace, " "(select usename from pg_user where relowner = usesysid) as usename, " "relchecks, reltriggers, " @@ -2154,7 +2260,7 @@ getTables(int *numTables) { /* all tables have oids in 7.1 */ appendPQExpBuffer(query, - "SELECT pg_class.oid, relname, relacl, relkind, " + "SELECT tableoid, oid, relname, relacl, relkind, " "0::oid as relnamespace, " "(select usename from pg_user where relowner = usesysid) as usename, " "relchecks, reltriggers, " @@ -2174,7 +2280,9 @@ getTables(int *numTables) * if we have a view by looking for a rule in pg_rewrite. 
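A pattern worth noting in the version branches of these catalog readers: servers older than 7.1 have no tableoid system column, so their queries synthesize one with a subselect against pg_class, while 7.1 and later can select tableoid directly. The patch simply open-codes that fallback in each query; a sketch of the same idea wrapped in a hypothetical helper:

/* hypothetical helper, not part of the patch */
static void
append_tableoid_column(PQExpBuffer query, const char *catalogname)
{
	if (g_fout->remoteVersion >= 70100)
		appendPQExpBuffer(query, "tableoid, ");
	else
		appendPQExpBuffer(query,
						  "(SELECT oid FROM pg_class WHERE relname = '%s') "
						  "AS tableoid, ",
						  catalogname);
}

/* e.g. append_tableoid_column(query, "pg_operator"); */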
*/ appendPQExpBuffer(query, - "SELECT c.oid, relname, relacl, " + "SELECT " + "(SELECT oid FROM pg_class WHERE relname = 'pg_class') AS tableoid, " + "oid, relname, relacl, " "CASE WHEN relhasrules and relkind = 'r' " " and EXISTS(SELECT rulename FROM pg_rewrite r WHERE " " r.ev_class = c.oid AND r.ev_type = '1') " @@ -2195,13 +2303,7 @@ getTables(int *numTables) } res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain list of tables failed: %s", - PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); ntups = PQntuples(res); @@ -2218,6 +2320,7 @@ getTables(int *numTables) */ tblinfo = (TableInfo *) calloc(ntups, sizeof(TableInfo)); + i_reltableoid = PQfnumber(res, "tableoid"); i_reloid = PQfnumber(res, "oid"); i_relname = PQfnumber(res, "relname"); i_relnamespace = PQfnumber(res, "relnamespace"); @@ -2234,10 +2337,13 @@ getTables(int *numTables) for (i = 0; i < ntups; i++) { - tblinfo[i].oid = strdup(PQgetvalue(res, i, i_reloid)); + tblinfo[i].dobj.objType = DO_TABLE; + tblinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_reltableoid)); + tblinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_reloid)); + AssignDumpId(&tblinfo[i].dobj); tblinfo[i].relname = strdup(PQgetvalue(res, i, i_relname)); - tblinfo[i].relnamespace = findNamespace(PQgetvalue(res, i, i_relnamespace), - tblinfo[i].oid); + tblinfo[i].relnamespace = findNamespace(atooid(PQgetvalue(res, i, i_relnamespace)), + tblinfo[i].dobj.catId.oid); tblinfo[i].usename = strdup(PQgetvalue(res, i, i_usename)); tblinfo[i].relacl = strdup(PQgetvalue(res, i, i_relacl)); tblinfo[i].relkind = *(PQgetvalue(res, i, i_relkind)); @@ -2248,12 +2354,12 @@ getTables(int *numTables) tblinfo[i].ntrig = atoi(PQgetvalue(res, i, i_reltriggers)); if (PQgetisnull(res, i, i_owning_tab)) { - tblinfo[i].owning_tab = NULL; + tblinfo[i].owning_tab = InvalidOid; tblinfo[i].owning_col = 0; } else { - tblinfo[i].owning_tab = strdup(PQgetvalue(res, i, i_owning_tab)); + tblinfo[i].owning_tab = atooid(PQgetvalue(res, i, i_owning_tab)); tblinfo[i].owning_col = atoi(PQgetvalue(res, i, i_owning_col)); } @@ -2264,10 +2370,10 @@ getTables(int *numTables) * serial columns are never dumpable on their own; we will * transpose their owning table's dump flag to them below. 
*/ - if (tblinfo[i].owning_tab == NULL) - selectDumpableTable(&tblinfo[i]); - else + if (OidIsValid(tblinfo[i].owning_tab)) tblinfo[i].dump = false; + else + selectDumpableTable(&tblinfo[i]); tblinfo[i].interesting = tblinfo[i].dump; /* @@ -2283,21 +2389,12 @@ getTables(int *numTables) */ if (tblinfo[i].dump && tblinfo[i].relkind == RELKIND_RELATION) { - PGresult *lres; - resetPQExpBuffer(lockquery); appendPQExpBuffer(lockquery, "LOCK TABLE %s IN ACCESS SHARE MODE", fmtQualifiedId(tblinfo[i].relnamespace->nspname, tblinfo[i].relname)); - lres = PQexec(g_conn, lockquery->data); - if (!lres || PQresultStatus(lres) != PGRES_COMMAND_OK) - { - write_msg(NULL, "attempt to lock table \"%s\" failed: %s", - tblinfo[i].relname, PQerrorMessage(g_conn)); - exit_nicely(); - } - PQclear(lres); + do_sql_command(g_conn, lockquery->data); } /* Emit notice if join for owner failed */ @@ -2361,13 +2458,7 @@ getInherits(int *numInherits) appendPQExpBuffer(query, "SELECT inhrelid, inhparent from pg_inherits"); res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain inheritance relationships failed: %s", - PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); ntups = PQntuples(res); @@ -2380,8 +2471,8 @@ getInherits(int *numInherits) for (i = 0; i < ntups; i++) { - inhinfo[i].inhrelid = strdup(PQgetvalue(res, i, i_inhrelid)); - inhinfo[i].inhparent = strdup(PQgetvalue(res, i, i_inhparent)); + inhinfo[i].inhrelid = atooid(PQgetvalue(res, i, i_inhrelid)); + inhinfo[i].inhparent = atooid(PQgetvalue(res, i, i_inhparent)); } PQclear(res); @@ -2392,29 +2483,885 @@ getInherits(int *numInherits) } /* - * getTableAttrs - - * for each interesting table, read its attributes types and names - * - * this is implemented in a very inefficient way right now, looping - * through the tblinfo and doing a join per table to find the attrs and their - * types + * getIndexes + * get information about every index on a dumpable table * - * modifies tblinfo + * Note: index data is not returned directly to the caller, but it + * does get entered into the DumpableObject tables. */ void -getTableAttrs(TableInfo *tblinfo, int numTables) +getIndexes(TableInfo tblinfo[], int numTables) { int i, - j, - k; - PQExpBuffer q = createPQExpBuffer(); - int i_attnum; - int i_attname; - int i_atttypname; - int i_atttypmod; - int i_attstattarget; - int i_attstorage; - int i_typstorage; + j; + PQExpBuffer query = createPQExpBuffer(); + PGresult *res; + IndxInfo *indxinfo; + ConstraintInfo *constrinfo; + int i_tableoid, + i_oid, + i_indexname, + i_indexdef, + i_indnkeys, + i_indkey, + i_indisclustered, + i_contype, + i_conname, + i_contableoid, + i_conoid; + int ntups; + + for (i = 0; i < numTables; i++) + { + TableInfo *tbinfo = &tblinfo[i]; + + /* Only plain tables have indexes */ + if (tbinfo->relkind != RELKIND_RELATION || !tbinfo->hasindex) + continue; + + if (!tbinfo->dump) + continue; + + if (g_verbose) + write_msg(NULL, "reading indexes for table \"%s\"\n", + tbinfo->relname); + + /* Make sure we are in proper schema so indexdef is right */ + selectSourceSchema(tbinfo->relnamespace->nspname); + + /* + * The point of the messy-looking outer join is to find a + * constraint that is related by an internal dependency link to + * the index. If we find one, create a CONSTRAINT entry linked + * to the INDEX entry. We assume an index won't have more than + * one internal dependency. 
+ */ + resetPQExpBuffer(query); + if (g_fout->remoteVersion >= 70300) + { + appendPQExpBuffer(query, + "SELECT t.tableoid, t.oid, " + "t.relname as indexname, " + "pg_catalog.pg_get_indexdef(i.indexrelid) as indexdef, " + "t.relnatts as indnkeys, " + "i.indkey, i.indisclustered, " + "c.contype, c.conname, " + "c.tableoid as contableoid, " + "c.oid as conoid " + "FROM pg_catalog.pg_index i " + "JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) " + "LEFT JOIN pg_catalog.pg_depend d " + "ON (d.classid = t.tableoid " + "AND d.objid = t.oid " + "AND d.deptype = 'i') " + "LEFT JOIN pg_catalog.pg_constraint c " + "ON (d.refclassid = c.tableoid " + "AND d.refobjid = c.oid) " + "WHERE i.indrelid = '%u'::pg_catalog.oid " + "ORDER BY indexname", + tbinfo->dobj.catId.oid); + } + else if (g_fout->remoteVersion >= 70100) + { + appendPQExpBuffer(query, + "SELECT t.tableoid, t.oid, " + "t.relname as indexname, " + "pg_get_indexdef(i.indexrelid) as indexdef, " + "t.relnatts as indnkeys, " + "i.indkey, false as indisclustered, " + "CASE WHEN i.indisprimary THEN 'p'::char " + "ELSE '0'::char END as contype, " + "t.relname as conname, " + "0::oid as contableoid, " + "t.oid as conoid " + "FROM pg_index i, pg_class t " + "WHERE t.oid = i.indexrelid " + "AND i.indrelid = '%u'::oid " + "ORDER BY indexname", + tbinfo->dobj.catId.oid); + } + else + { + appendPQExpBuffer(query, + "SELECT " + "(SELECT oid FROM pg_class WHERE relname = 'pg_class') AS tableoid, " + "t.oid, " + "t.relname as indexname, " + "pg_get_indexdef(i.indexrelid) as indexdef, " + "t.relnatts as indnkeys, " + "i.indkey, false as indisclustered, " + "CASE WHEN i.indisprimary THEN 'p'::char " + "ELSE '0'::char END as contype, " + "t.relname as conname, " + "0::oid as contableoid, " + "t.oid as conoid " + "FROM pg_index i, pg_class t " + "WHERE t.oid = i.indexrelid " + "AND i.indrelid = '%u'::oid " + "ORDER BY indexname", + tbinfo->dobj.catId.oid); + } + + res = PQexec(g_conn, query->data); + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); + + ntups = PQntuples(res); + + i_tableoid = PQfnumber(res, "tableoid"); + i_oid = PQfnumber(res, "oid"); + i_indexname = PQfnumber(res, "indexname"); + i_indexdef = PQfnumber(res, "indexdef"); + i_indnkeys = PQfnumber(res, "indnkeys"); + i_indkey = PQfnumber(res, "indkey"); + i_indisclustered = PQfnumber(res, "indisclustered"); + i_contype = PQfnumber(res, "contype"); + i_conname = PQfnumber(res, "conname"); + i_contableoid = PQfnumber(res, "contableoid"); + i_conoid = PQfnumber(res, "conoid"); + + indxinfo = (IndxInfo *) malloc(ntups * sizeof(IndxInfo)); + constrinfo = (ConstraintInfo *) malloc(ntups * sizeof(ConstraintInfo)); + + for (j = 0; j < ntups; j++) + { + char contype; + + indxinfo[j].dobj.objType = DO_INDEX; + indxinfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid)); + indxinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid)); + AssignDumpId(&indxinfo[j].dobj); + indxinfo[j].indexname = strdup(PQgetvalue(res, j, i_indexname)); + indxinfo[j].indextable = tbinfo; + indxinfo[j].indexdef = strdup(PQgetvalue(res, j, i_indexdef)); + indxinfo[j].indnkeys = atoi(PQgetvalue(res, j, i_indnkeys)); + /* + * In pre-7.4 releases, indkeys may contain more entries than + * indnkeys says (since indnkeys will be 1 for a functional + * index). We don't actually care about this case since we don't + * examine indkeys except for indexes associated with PRIMARY + * and UNIQUE constraints, which are never functional indexes. 
+ * But we have to allocate enough space to keep parseOidArray + * from complaining. + */ + indxinfo[j].indkeys = (Oid *) malloc(INDEX_MAX_KEYS * sizeof(Oid)); + parseOidArray(PQgetvalue(res, j, i_indkey), + indxinfo[j].indkeys, INDEX_MAX_KEYS); + indxinfo[j].indisclustered = (PQgetvalue(res, j, i_indisclustered)[0] == 't'); + contype = *(PQgetvalue(res, j, i_contype)); + + if (contype == 'p' || contype == 'u') + { + /* + * If we found a constraint matching the index, create an + * entry for it. + * + * In a pre-7.3 database, we take this path iff the index was + * marked indisprimary. + */ + constrinfo[j].dobj.objType = DO_CONSTRAINT; + constrinfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_contableoid)); + constrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_conoid)); + AssignDumpId(&constrinfo[j].dobj); + + constrinfo[j].conname = strdup(PQgetvalue(res, j, i_conname)); + constrinfo[j].contable = tbinfo; + constrinfo[j].condomain = NULL; + constrinfo[j].contype = contype; + constrinfo[j].condef = NULL; + constrinfo[j].conindex = indxinfo[j].dobj.dumpId; + constrinfo[j].coninherited = false; + constrinfo[j].separate = true; + + indxinfo[j].indexconstraint = constrinfo[j].dobj.dumpId; + + /* If pre-7.3 DB, better make sure table comes first */ + addObjectDependency(&constrinfo[j].dobj, + tbinfo->dobj.dumpId); + } + else + { + /* Plain secondary index */ + indxinfo[j].indexconstraint = 0; + } + } + + PQclear(res); + } + + destroyPQExpBuffer(query); +} + +/* + * getConstraints + * + * Get info about constraints on dumpable tables. + * + * Currently handles foreign keys only. + * Unique and primary key constraints are handled with indexes, + * while check constraints are processed in getTableAttrs(). + */ +void +getConstraints(TableInfo tblinfo[], int numTables) +{ + int i, + j; + ConstraintInfo *constrinfo; + PQExpBuffer query; + PGresult *res; + int i_condef, + i_contableoid, + i_conoid, + i_conname; + int ntups; + + /* pg_constraint was created in 7.3, so nothing to do if older */ + if (g_fout->remoteVersion < 70300) + return; + + query = createPQExpBuffer(); + + for (i = 0; i < numTables; i++) + { + TableInfo *tbinfo = &tblinfo[i]; + + if (tbinfo->ntrig == 0 || !tbinfo->dump) + continue; + + if (g_verbose) + write_msg(NULL, "reading foreign key constraints for table \"%s\"\n", + tbinfo->relname); + + /* + * select table schema to ensure constraint expr is qualified if + * needed + */ + selectSourceSchema(tbinfo->relnamespace->nspname); + + resetPQExpBuffer(query); + appendPQExpBuffer(query, + "SELECT tableoid, oid, conname, " + "pg_catalog.pg_get_constraintdef(oid) as condef " + "FROM pg_catalog.pg_constraint " + "WHERE conrelid = '%u'::pg_catalog.oid " + "AND contype = 'f'", + tbinfo->dobj.catId.oid); + res = PQexec(g_conn, query->data); + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); + + ntups = PQntuples(res); + + i_contableoid = PQfnumber(res, "tableoid"); + i_conoid = PQfnumber(res, "oid"); + i_conname = PQfnumber(res, "conname"); + i_condef = PQfnumber(res, "condef"); + + constrinfo = (ConstraintInfo *) malloc(ntups * sizeof(ConstraintInfo)); + + for (j = 0; j < ntups; j++) + { + constrinfo[j].dobj.objType = DO_FK_CONSTRAINT; + constrinfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_contableoid)); + constrinfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_conoid)); + AssignDumpId(&constrinfo[j].dobj); + constrinfo[j].conname = strdup(PQgetvalue(res, j, i_conname)); + constrinfo[j].contable = tbinfo; + constrinfo[j].condomain = NULL; + 
constrinfo[j].contype = 'f'; + constrinfo[j].condef = strdup(PQgetvalue(res, j, i_condef)); + constrinfo[j].conindex = 0; + constrinfo[j].coninherited = false; + constrinfo[j].separate = true; + } + + PQclear(res); + } + + destroyPQExpBuffer(query); +} + +/* + * getDomainConstraints + * + * Get info about constraints on a domain. + */ +static void +getDomainConstraints(TypeInfo *tinfo) +{ + int i; + ConstraintInfo *constrinfo; + PQExpBuffer query; + PGresult *res; + int i_tableoid, + i_oid, + i_conname, + i_consrc; + int ntups; + + /* pg_constraint was created in 7.3, so nothing to do if older */ + if (g_fout->remoteVersion < 70300) + return; + + /* + * select appropriate schema to ensure names in constraint are properly + * qualified + */ + selectSourceSchema(tinfo->typnamespace->nspname); + + query = createPQExpBuffer(); + + if (g_fout->remoteVersion >= 70400) + appendPQExpBuffer(query, "SELECT tableoid, oid, conname, " + "pg_catalog.pg_get_constraintdef(oid) AS consrc " + "FROM pg_catalog.pg_constraint " + "WHERE contypid = '%u'::pg_catalog.oid " + "ORDER BY conname", + tinfo->dobj.catId.oid); + else + appendPQExpBuffer(query, "SELECT tableoid, oid, conname, " + "'CHECK (' || consrc || ')' AS consrc " + "FROM pg_catalog.pg_constraint " + "WHERE contypid = '%u'::pg_catalog.oid " + "ORDER BY conname", + tinfo->dobj.catId.oid); + + res = PQexec(g_conn, query->data); + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); + + ntups = PQntuples(res); + + i_tableoid = PQfnumber(res, "tableoid"); + i_oid = PQfnumber(res, "oid"); + i_conname = PQfnumber(res, "conname"); + i_consrc = PQfnumber(res, "consrc"); + + constrinfo = (ConstraintInfo *) malloc(ntups * sizeof(ConstraintInfo)); + + tinfo->nDomChecks = ntups; + tinfo->domChecks = constrinfo; + + for (i = 0; i < ntups; i++) + { + constrinfo[i].dobj.objType = DO_CONSTRAINT; + constrinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid)); + constrinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid)); + AssignDumpId(&constrinfo[i].dobj); + constrinfo[i].conname = strdup(PQgetvalue(res, i, i_conname)); + constrinfo[i].contable = NULL; + constrinfo[i].condomain = tinfo; + constrinfo[i].contype = 'c'; + constrinfo[i].condef = strdup(PQgetvalue(res, i, i_consrc)); + constrinfo[i].conindex = 0; + constrinfo[i].coninherited = false; + constrinfo[i].separate = false; + /* + * Make the domain depend on the constraint, ensuring it won't + * be output till any constraint dependencies are OK. 
+ */ + addObjectDependency(&tinfo->dobj, + constrinfo[i].dobj.dumpId); + } + + PQclear(res); + + destroyPQExpBuffer(query); +} + +/* + * getRules + * get basic information about every rule in the system + * + * numRules is set to the number of rules read in + */ +RuleInfo * +getRules(int *numRules) +{ + PGresult *res; + int ntups; + int i; + PQExpBuffer query = createPQExpBuffer(); + RuleInfo *ruleinfo; + int i_tableoid; + int i_oid; + int i_rulename; + int i_ruletable; + int i_ev_type; + int i_is_instead; + + /* Make sure we are in proper schema */ + selectSourceSchema("pg_catalog"); + + if (g_fout->remoteVersion >= 70100) + { + appendPQExpBuffer(query, "SELECT " + "tableoid, oid, rulename, " + "ev_class as ruletable, ev_type, is_instead " + "FROM pg_rewrite " + "ORDER BY oid"); + } + else + { + appendPQExpBuffer(query, "SELECT " + "(SELECT oid FROM pg_class WHERE relname = 'pg_rewrite') AS tableoid, " + "oid, rulename, " + "ev_class as ruletable, ev_type, is_instead " + "FROM pg_rewrite " + "ORDER BY oid"); + } + + res = PQexec(g_conn, query->data); + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); + + ntups = PQntuples(res); + + *numRules = ntups; + + ruleinfo = (RuleInfo *) malloc(ntups * sizeof(RuleInfo)); + + i_tableoid = PQfnumber(res, "tableoid"); + i_oid = PQfnumber(res, "oid"); + i_rulename = PQfnumber(res, "rulename"); + i_ruletable = PQfnumber(res, "ruletable"); + i_ev_type = PQfnumber(res, "ev_type"); + i_is_instead = PQfnumber(res, "is_instead"); + + for (i = 0; i < ntups; i++) + { + Oid ruletableoid; + + ruleinfo[i].dobj.objType = DO_RULE; + ruleinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid)); + ruleinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid)); + AssignDumpId(&ruleinfo[i].dobj); + ruleinfo[i].rulename = strdup(PQgetvalue(res, i, i_rulename)); + ruletableoid = atooid(PQgetvalue(res, i, i_ruletable)); + ruleinfo[i].ruletable = findTableByOid(ruletableoid); + ruleinfo[i].ev_type = *(PQgetvalue(res, i, i_ev_type)); + ruleinfo[i].is_instead = *(PQgetvalue(res, i, i_is_instead)) == 't'; + if (ruleinfo[i].ruletable) + { + /* + * If the table is a view, force its ON SELECT rule to be sorted + * before the view itself --- this ensures that any dependencies + * for the rule affect the table's positioning. Other rules + * are forced to appear after their table. + */ + if (ruleinfo[i].ruletable->relkind == RELKIND_VIEW && + ruleinfo[i].ev_type == '1' && ruleinfo[i].is_instead) + addObjectDependency(&ruleinfo[i].ruletable->dobj, + ruleinfo[i].dobj.dumpId); + else + addObjectDependency(&ruleinfo[i].dobj, + ruleinfo[i].ruletable->dobj.dumpId); + } + } + + PQclear(res); + + destroyPQExpBuffer(query); + + return ruleinfo; +} + +/* + * getTriggers + * get information about every trigger on a dumpable table + * + * Note: trigger data is not returned directly to the caller, but it + * does get entered into the DumpableObject tables. 
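+ *
+ * In 7.3 and later databases, triggers that merely implement foreign-key
+ * constraints are skipped here; the constraints themselves are collected
+ * by getConstraints() instead.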
+ */ +void +getTriggers(TableInfo tblinfo[], int numTables) +{ + int i, + j; + PQExpBuffer query = createPQExpBuffer(); + PGresult *res; + TriggerInfo *tginfo; + int i_tableoid, + i_oid, + i_tgname, + i_tgfname, + i_tgtype, + i_tgnargs, + i_tgargs, + i_tgisconstraint, + i_tgconstrname, + i_tgconstrrelid, + i_tgconstrrelname, + i_tgdeferrable, + i_tginitdeferred; + int ntups; + + for (i = 0; i < numTables; i++) + { + TableInfo *tbinfo = &tblinfo[i]; + + if (tbinfo->ntrig == 0 || !tbinfo->dump) + continue; + + if (g_verbose) + write_msg(NULL, "reading triggers for table \"%s\"\n", + tbinfo->relname); + + /* + * select table schema to ensure regproc name is qualified if + * needed + */ + selectSourceSchema(tbinfo->relnamespace->nspname); + + resetPQExpBuffer(query); + if (g_fout->remoteVersion >= 70300) + { + /* + * We ignore triggers that are tied to a foreign-key + * constraint + */ + appendPQExpBuffer(query, + "SELECT tgname, " + "tgfoid::pg_catalog.regproc as tgfname, " + "tgtype, tgnargs, tgargs, " + "tgisconstraint, tgconstrname, tgdeferrable, " + "tgconstrrelid, tginitdeferred, tableoid, oid, " + "tgconstrrelid::pg_catalog.regclass as tgconstrrelname " + "from pg_catalog.pg_trigger t " + "where tgrelid = '%u'::pg_catalog.oid " + "and (not tgisconstraint " + " OR NOT EXISTS" + " (SELECT 1 FROM pg_catalog.pg_depend d " + " JOIN pg_catalog.pg_constraint c ON (d.refclassid = c.tableoid AND d.refobjid = c.oid) " + " WHERE d.classid = t.tableoid AND d.objid = t.oid AND d.deptype = 'i' AND c.contype = 'f'))", + tbinfo->dobj.catId.oid); + } + else if (g_fout->remoteVersion >= 70100) + { + appendPQExpBuffer(query, + "SELECT tgname, tgfoid::regproc as tgfname, " + "tgtype, tgnargs, tgargs, " + "tgisconstraint, tgconstrname, tgdeferrable, " + "tgconstrrelid, tginitdeferred, tableoid, oid, " + "(select relname from pg_class where oid = tgconstrrelid) " + " as tgconstrrelname " + "from pg_trigger " + "where tgrelid = '%u'::oid", + tbinfo->dobj.catId.oid); + } + else + { + appendPQExpBuffer(query, + "SELECT tgname, tgfoid::regproc as tgfname, " + "tgtype, tgnargs, tgargs, " + "tgisconstraint, tgconstrname, tgdeferrable, " + "tgconstrrelid, tginitdeferred, " + "(SELECT oid FROM pg_class WHERE relname = 'pg_trigger') AS tableoid, " + + "oid, " + "(select relname from pg_class where oid = tgconstrrelid) " + " as tgconstrrelname " + "from pg_trigger " + "where tgrelid = '%u'::oid", + tbinfo->dobj.catId.oid); + } + res = PQexec(g_conn, query->data); + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); + + ntups = PQntuples(res); + + /* + * We may have less triggers than recorded due to having ignored + * foreign-key triggers + */ + if (ntups > tbinfo->ntrig) + { + write_msg(NULL, "expected %d triggers on table \"%s\" but found %d\n", + tbinfo->ntrig, tbinfo->relname, ntups); + exit_nicely(); + } + i_tableoid = PQfnumber(res, "tableoid"); + i_oid = PQfnumber(res, "oid"); + i_tgname = PQfnumber(res, "tgname"); + i_tgfname = PQfnumber(res, "tgfname"); + i_tgtype = PQfnumber(res, "tgtype"); + i_tgnargs = PQfnumber(res, "tgnargs"); + i_tgargs = PQfnumber(res, "tgargs"); + i_tgisconstraint = PQfnumber(res, "tgisconstraint"); + i_tgconstrname = PQfnumber(res, "tgconstrname"); + i_tgconstrrelid = PQfnumber(res, "tgconstrrelid"); + i_tgconstrrelname = PQfnumber(res, "tgconstrrelname"); + i_tgdeferrable = PQfnumber(res, "tgdeferrable"); + i_tginitdeferred = PQfnumber(res, "tginitdeferred"); + + tginfo = (TriggerInfo *) malloc(ntups * sizeof(TriggerInfo)); + + for (j = 0; j < ntups; j++) + { + 
tginfo[j].dobj.objType = DO_TRIGGER; + tginfo[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, i_tableoid)); + tginfo[j].dobj.catId.oid = atooid(PQgetvalue(res, j, i_oid)); + AssignDumpId(&tginfo[j].dobj); + tginfo[j].tgtable = tbinfo; + tginfo[j].tgname = strdup(PQgetvalue(res, j, i_tgname)); + tginfo[j].tgfname = strdup(PQgetvalue(res, j, i_tgfname)); + tginfo[j].tgtype = atoi(PQgetvalue(res, j, i_tgtype)); + tginfo[j].tgnargs = atoi(PQgetvalue(res, j, i_tgnargs)); + tginfo[j].tgargs = strdup(PQgetvalue(res, j, i_tgargs)); + tginfo[j].tgisconstraint = *(PQgetvalue(res, j, i_tgisconstraint)) == 't'; + tginfo[j].tgdeferrable = *(PQgetvalue(res, j, i_tgdeferrable)) == 't'; + tginfo[j].tginitdeferred = *(PQgetvalue(res, j, i_tginitdeferred)) == 't'; + + if (tginfo[j].tgisconstraint) + { + tginfo[j].tgconstrname = strdup(PQgetvalue(res, j, i_tgconstrname)); + tginfo[j].tgconstrrelid = atooid(PQgetvalue(res, j, i_tgconstrrelid)); + if (OidIsValid(tginfo[j].tgconstrrelid)) + { + if (PQgetisnull(res, j, i_tgconstrrelname)) + { + write_msg(NULL, "query produced null referenced table name for foreign key trigger \"%s\" on table \"%s\" (OID of table: %u)\n", + tginfo[j].tgname, tbinfo->relname, + tginfo[j].tgconstrrelid); + exit_nicely(); + } + tginfo[j].tgconstrrelname = strdup(PQgetvalue(res, j, i_tgconstrrelname)); + } + else + tginfo[j].tgconstrrelname = NULL; + } + else + { + tginfo[j].tgconstrname = NULL; + tginfo[j].tgconstrrelid = InvalidOid; + tginfo[j].tgconstrrelname = NULL; + } + } + + PQclear(res); + } + + destroyPQExpBuffer(query); +} + +/* + * getProcLangs + * get basic information about every procedural language in the system + * + * numProcLangs is set to the number of langs read in + * + * NB: this must run after getFuncs() because we assume we can do + * findFuncByOid(). 
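+ * (getCasts() relies on findFuncByOid() in the same way.)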
+ */ +ProcLangInfo * +getProcLangs(int *numProcLangs) +{ + PGresult *res; + int ntups; + int i; + PQExpBuffer query = createPQExpBuffer(); + ProcLangInfo *planginfo; + int i_tableoid; + int i_oid; + int i_lanname; + int i_lanpltrusted; + int i_lanplcallfoid; + int i_lanvalidator = -1; + int i_lanacl = -1; + + /* Make sure we are in proper schema */ + selectSourceSchema("pg_catalog"); + + if (g_fout->remoteVersion >= 70100) + { + appendPQExpBuffer(query, "SELECT tableoid, oid, * FROM pg_language " + "WHERE lanispl " + "ORDER BY oid"); + } + else + { + appendPQExpBuffer(query, "SELECT " + "(SELECT oid FROM pg_class WHERE relname = 'pg_language') AS tableoid, " + "oid, * FROM pg_language " + "WHERE lanispl " + "ORDER BY oid"); + } + + res = PQexec(g_conn, query->data); + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); + + ntups = PQntuples(res); + + *numProcLangs = ntups; + + planginfo = (ProcLangInfo *) malloc(ntups * sizeof(ProcLangInfo)); + + i_tableoid = PQfnumber(res, "tableoid"); + i_oid = PQfnumber(res, "oid"); + i_lanname = PQfnumber(res, "lanname"); + i_lanpltrusted = PQfnumber(res, "lanpltrusted"); + i_lanplcallfoid = PQfnumber(res, "lanplcallfoid"); + if (g_fout->remoteVersion >= 70300) + { + i_lanvalidator = PQfnumber(res, "lanvalidator"); + i_lanacl = PQfnumber(res, "lanacl"); + } + + for (i = 0; i < ntups; i++) + { + planginfo[i].dobj.objType = DO_PROCLANG; + planginfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid)); + planginfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid)); + AssignDumpId(&planginfo[i].dobj); + + planginfo[i].lanname = strdup(PQgetvalue(res, i, i_lanname)); + planginfo[i].lanpltrusted = *(PQgetvalue(res, i, i_lanpltrusted)) == 't'; + planginfo[i].lanplcallfoid = atooid(PQgetvalue(res, i, i_lanplcallfoid)); + if (g_fout->remoteVersion >= 70300) + { + planginfo[i].lanvalidator = atooid(PQgetvalue(res, i, i_lanvalidator)); + planginfo[i].lanacl = strdup(PQgetvalue(res, i, i_lanacl)); + } + else + { + FuncInfo *funcInfo; + + planginfo[i].lanvalidator = InvalidOid; + planginfo[i].lanacl = strdup("{=U}"); + /* + * We need to make a dependency to ensure the function will + * be dumped first. (In 7.3 and later the regular dependency + * mechanism will handle this for us.) 
+ */ + funcInfo = findFuncByOid(planginfo[i].lanplcallfoid); + if (funcInfo) + addObjectDependency(&planginfo[i].dobj, + funcInfo->dobj.dumpId); + } + } + + PQclear(res); + + destroyPQExpBuffer(query); + + return planginfo; +} + +/* + * getCasts + * get basic information about every cast in the system + * + * numCasts is set to the number of casts read in + */ +CastInfo * +getCasts(int *numCasts) +{ + PGresult *res; + int ntups; + int i; + PQExpBuffer query = createPQExpBuffer(); + CastInfo *castinfo; + int i_tableoid; + int i_oid; + int i_castsource; + int i_casttarget; + int i_castfunc; + int i_castcontext; + + /* Make sure we are in proper schema */ + selectSourceSchema("pg_catalog"); + + if (g_fout->remoteVersion >= 70300) + { + appendPQExpBuffer(query, "SELECT tableoid, oid, " + "castsource, casttarget, castfunc, castcontext " + "FROM pg_cast ORDER BY 3,4"); + } + else + { + appendPQExpBuffer(query, "SELECT 0 as tableoid, p.oid, " + "t1.oid as castsource, t2.oid as casttarget, " + "p.oid as castfunc, 'e' as castcontext " + "FROM pg_type t1, pg_type t2, pg_proc p " + "WHERE p.pronargs = 1 AND " + "p.proargtypes[0] = t1.oid AND " + "p.prorettype = t2.oid AND p.proname = t2.typname " + "ORDER BY 3,4"); + } + + res = PQexec(g_conn, query->data); + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); + + ntups = PQntuples(res); + + *numCasts = ntups; + + castinfo = (CastInfo *) malloc(ntups * sizeof(CastInfo)); + + i_tableoid = PQfnumber(res, "tableoid"); + i_oid = PQfnumber(res, "oid"); + i_castsource = PQfnumber(res, "castsource"); + i_casttarget = PQfnumber(res, "casttarget"); + i_castfunc = PQfnumber(res, "castfunc"); + i_castcontext = PQfnumber(res, "castcontext"); + + for (i = 0; i < ntups; i++) + { + castinfo[i].dobj.objType = DO_CAST; + castinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid)); + castinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid)); + AssignDumpId(&castinfo[i].dobj); + castinfo[i].castsource = atooid(PQgetvalue(res, i, i_castsource)); + castinfo[i].casttarget = atooid(PQgetvalue(res, i, i_casttarget)); + castinfo[i].castfunc = atooid(PQgetvalue(res, i, i_castfunc)); + castinfo[i].castcontext = *(PQgetvalue(res, i, i_castcontext)); + + if (OidIsValid(castinfo[i].castfunc)) + { + /* + * We need to make a dependency to ensure the function will + * be dumped first. (In 7.3 and later the regular dependency + * mechanism will handle this for us.) + */ + FuncInfo *funcInfo; + + funcInfo = findFuncByOid(castinfo[i].castfunc); + if (funcInfo) + addObjectDependency(&castinfo[i].dobj, + funcInfo->dobj.dumpId); + } + } + + PQclear(res); + + destroyPQExpBuffer(query); + + return castinfo; +} + +/* + * getTableAttrs - + * for each interesting table, read info about its attributes + * (names, types, default values, CHECK constraints, etc) + * + * This is implemented in a very inefficient way right now, looping + * through the tblinfo and doing a join per table to find the attrs and their + * types. However, because we want type names and so forth to be named + * relative to the schema of each table, we couldn't do it in just one + * query. (Maybe one query per schema?) 
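+ *
+ * Besides the column descriptions themselves, this collects each table's
+ * column defaults (as AttrDefInfo objects) and CHECK constraints (as
+ * ConstraintInfo objects), setting up dependencies so that each can be
+ * folded into the CREATE TABLE or emitted separately as needed.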
+ * + * modifies tblinfo + */ +void +getTableAttrs(TableInfo *tblinfo, int numTables) +{ + int i, + j, + k; + PQExpBuffer q = createPQExpBuffer(); + int i_attnum; + int i_attname; + int i_atttypname; + int i_atttypmod; + int i_attstattarget; + int i_attstorage; + int i_typstorage; int i_attnotnull; int i_atthasdef; int i_attisdropped; @@ -2465,10 +3412,10 @@ getTableAttrs(TableInfo *tblinfo, int numTables) "pg_catalog.format_type(t.oid,a.atttypmod) as atttypname " "from pg_catalog.pg_attribute a left join pg_catalog.pg_type t " "on a.atttypid = t.oid " - "where a.attrelid = '%s'::pg_catalog.oid " + "where a.attrelid = '%u'::pg_catalog.oid " "and a.attnum > 0::pg_catalog.int2 " "order by a.attrelid, a.attnum", - tbinfo->oid); + tbinfo->dobj.catId.oid); } else if (g_fout->remoteVersion >= 70100) { @@ -2478,35 +3425,30 @@ getTableAttrs(TableInfo *tblinfo, int numTables) * been explicitly set or was just a default. */ appendPQExpBuffer(q, "SELECT a.attnum, a.attname, a.atttypmod, -1 as attstattarget, a.attstorage, t.typstorage, " - "a.attnotnull, a.atthasdef, false as attisdropped, null as attislocal, " + "a.attnotnull, a.atthasdef, false as attisdropped, false as attislocal, " "format_type(t.oid,a.atttypmod) as atttypname " "from pg_attribute a left join pg_type t " "on a.atttypid = t.oid " - "where a.attrelid = '%s'::oid " + "where a.attrelid = '%u'::oid " "and a.attnum > 0::int2 " "order by a.attrelid, a.attnum", - tbinfo->oid); + tbinfo->dobj.catId.oid); } else { /* format_type not available before 7.1 */ appendPQExpBuffer(q, "SELECT attnum, attname, atttypmod, -1 as attstattarget, attstorage, attstorage as typstorage, " - "attnotnull, atthasdef, false as attisdropped, null as attislocal, " + "attnotnull, atthasdef, false as attisdropped, false as attislocal, " "(select typname from pg_type where oid = atttypid) as atttypname " "from pg_attribute a " - "where attrelid = '%s'::oid " + "where attrelid = '%u'::oid " "and attnum > 0::int2 " "order by attrelid, attnum", - tbinfo->oid); + tbinfo->dobj.catId.oid); } res = PQexec(g_conn, q->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to get table columns failed: %s", PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, q->data, PGRES_TUPLES_OK); ntups = PQntuples(res); @@ -2533,7 +3475,7 @@ getTableAttrs(TableInfo *tblinfo, int numTables) tbinfo->attislocal = (bool *) malloc(ntups * sizeof(bool)); tbinfo->attisserial = (bool *) malloc(ntups * sizeof(bool)); tbinfo->notnull = (bool *) malloc(ntups * sizeof(bool)); - tbinfo->adef_expr = (char **) malloc(ntups * sizeof(char *)); + tbinfo->attrdefs = (AttrDefInfo **) malloc(ntups * sizeof(AttrDefInfo *)); tbinfo->inhAttrs = (bool *) malloc(ntups * sizeof(bool)); tbinfo->inhAttrDef = (bool *) malloc(ntups * sizeof(bool)); tbinfo->inhNotNull = (bool *) malloc(ntups * sizeof(bool)); @@ -2557,7 +3499,7 @@ getTableAttrs(TableInfo *tblinfo, int numTables) tbinfo->attislocal[j] = (PQgetvalue(res, j, i_attislocal)[0] == 't'); tbinfo->attisserial[j] = false; /* fix below */ tbinfo->notnull[j] = (PQgetvalue(res, j, i_attnotnull)[0] == 't'); - tbinfo->adef_expr[j] = NULL; /* fix below */ + tbinfo->attrdefs[j] = NULL; /* fix below */ if (PQgetvalue(res, j, i_atthasdef)[0] == 't') hasdefaults = true; /* these flags will be set in flagInhAttrs() */ @@ -2568,8 +3510,12 @@ getTableAttrs(TableInfo *tblinfo, int numTables) PQclear(res); + /* + * Get info about column defaults + */ if (hasdefaults) { + AttrDefInfo *attrdefs; int numDefaults; if 
(g_verbose) @@ -2579,48 +3525,192 @@ getTableAttrs(TableInfo *tblinfo, int numTables) resetPQExpBuffer(q); if (g_fout->remoteVersion >= 70300) { - appendPQExpBuffer(q, "SELECT adnum, " - "pg_catalog.pg_get_expr(adbin, adrelid) AS adsrc " - "FROM pg_catalog.pg_attrdef " - "WHERE adrelid = '%s'::pg_catalog.oid", - tbinfo->oid); + appendPQExpBuffer(q, "SELECT tableoid, oid, adnum, " + "pg_catalog.pg_get_expr(adbin, adrelid) AS adsrc " + "FROM pg_catalog.pg_attrdef " + "WHERE adrelid = '%u'::pg_catalog.oid", + tbinfo->dobj.catId.oid); + } + else if (g_fout->remoteVersion >= 70200) + { + /* 7.2 did not have OIDs in pg_attrdef */ + appendPQExpBuffer(q, "SELECT tableoid, 0 as oid, adnum, " + "pg_get_expr(adbin, adrelid) AS adsrc " + "FROM pg_attrdef " + "WHERE adrelid = '%u'::oid", + tbinfo->dobj.catId.oid); + } + else if (g_fout->remoteVersion >= 70100) + { + /* no pg_get_expr, so must rely on adsrc */ + appendPQExpBuffer(q, "SELECT tableoid, oid, adnum, adsrc " + "FROM pg_attrdef " + "WHERE adrelid = '%u'::oid", + tbinfo->dobj.catId.oid); + } + else + { + /* no pg_get_expr, no tableoid either */ + appendPQExpBuffer(q, "SELECT " + "(SELECT oid FROM pg_class WHERE relname = 'pg_attrdef') AS tableoid, " + "oid, adnum, adsrc " + "FROM pg_attrdef " + "WHERE adrelid = '%u'::oid", + tbinfo->dobj.catId.oid); + } + res = PQexec(g_conn, q->data); + check_sql_result(res, g_conn, q->data, PGRES_TUPLES_OK); + + numDefaults = PQntuples(res); + attrdefs = (AttrDefInfo *) malloc(numDefaults * sizeof(AttrDefInfo)); + + for (j = 0; j < numDefaults; j++) + { + int adnum; + + attrdefs[j].dobj.objType = DO_ATTRDEF; + attrdefs[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, 0)); + attrdefs[j].dobj.catId.oid = atooid(PQgetvalue(res, j, 1)); + AssignDumpId(&attrdefs[j].dobj); + attrdefs[j].adtable = tbinfo; + attrdefs[j].adnum = adnum = atoi(PQgetvalue(res, j, 2)); + attrdefs[j].adef_expr = strdup(PQgetvalue(res, j, 3)); + + /* + * Defaults on a VIEW must always be dumped as separate + * ALTER TABLE commands. Defaults on regular tables are + * dumped as part of the CREATE TABLE if possible. To check + * if it's safe, we mark the default as needing to appear + * before the CREATE. 
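+ * That is, the table is made to depend on the default, so the default
+ * sorts ahead of the CREATE TABLE; for a view the dependency runs the
+ * other way and the default is emitted afterwards.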
+ */ + if (tbinfo->relkind == RELKIND_VIEW) + { + attrdefs[j].separate = true; + /* needed in case pre-7.3 DB: */ + addObjectDependency(&attrdefs[j].dobj, + tbinfo->dobj.dumpId); + } + else + { + attrdefs[j].separate = false; + addObjectDependency(&tbinfo->dobj, + attrdefs[j].dobj.dumpId); + } + + if (adnum <= 0 || adnum > ntups) + { + write_msg(NULL, "invalid adnum value %d for table \"%s\"\n", + adnum, tbinfo->relname); + exit_nicely(); + } + tbinfo->attrdefs[adnum - 1] = &attrdefs[j]; + } + PQclear(res); + } + + /* + * Get info about table CHECK constraints + */ + if (tbinfo->ncheck > 0) + { + ConstraintInfo *constrs; + int numConstrs; + + if (g_verbose) + write_msg(NULL, "finding check constraints for table \"%s\"\n", + tbinfo->relname); + + resetPQExpBuffer(q); + if (g_fout->remoteVersion >= 70400) + { + appendPQExpBuffer(q, "SELECT tableoid, oid, conname, " + "pg_catalog.pg_get_constraintdef(oid) AS consrc " + "FROM pg_catalog.pg_constraint " + "WHERE conrelid = '%u'::pg_catalog.oid " + " AND contype = 'c' " + "ORDER BY conname", + tbinfo->dobj.catId.oid); + } + else if (g_fout->remoteVersion >= 70300) + { + /* no pg_get_constraintdef, must use consrc */ + appendPQExpBuffer(q, "SELECT tableoid, oid, conname, " + "'CHECK (' || consrc || ')' AS consrc " + "FROM pg_catalog.pg_constraint " + "WHERE conrelid = '%u'::pg_catalog.oid " + " AND contype = 'c' " + "ORDER BY conname", + tbinfo->dobj.catId.oid); } else if (g_fout->remoteVersion >= 70200) { - appendPQExpBuffer(q, "SELECT adnum, " - "pg_get_expr(adbin, adrelid) AS adsrc " - "FROM pg_attrdef " - "WHERE adrelid = '%s'::oid", - tbinfo->oid); + /* 7.2 did not have OIDs in pg_relcheck */ + appendPQExpBuffer(q, "SELECT tableoid, 0 as oid, " + "rcname AS conname, " + "'CHECK (' || rcsrc || ')' AS consrc " + "FROM pg_relcheck " + "WHERE rcrelid = '%u'::oid " + "ORDER BY rcname", + tbinfo->dobj.catId.oid); + } + else if (g_fout->remoteVersion >= 70100) + { + appendPQExpBuffer(q, "SELECT tableoid, oid, " + "rcname AS conname, " + "'CHECK (' || rcsrc || ')' AS consrc " + "FROM pg_relcheck " + "WHERE rcrelid = '%u'::oid " + "ORDER BY rcname", + tbinfo->dobj.catId.oid); } else { - /* no pg_get_expr, so must rely on adsrc */ - appendPQExpBuffer(q, "SELECT adnum, adsrc FROM pg_attrdef " - "WHERE adrelid = '%s'::oid", - tbinfo->oid); + /* no tableoid in 7.0 */ + appendPQExpBuffer(q, "SELECT " + "(SELECT oid FROM pg_class WHERE relname = 'pg_relcheck') AS tableoid, " + "oid, rcname AS conname, " + "'CHECK (' || rcsrc || ')' AS consrc " + "FROM pg_relcheck " + "WHERE rcrelid = '%u'::oid " + "ORDER BY rcname", + tbinfo->dobj.catId.oid); } res = PQexec(g_conn, q->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) + check_sql_result(res, g_conn, q->data, PGRES_TUPLES_OK); + + numConstrs = PQntuples(res); + if (numConstrs != tbinfo->ncheck) { - write_msg(NULL, "query to get column default values failed: %s", - PQerrorMessage(g_conn)); + write_msg(NULL, "expected %d check constraints on table \"%s\" but found %d\n", + tbinfo->ncheck, tbinfo->relname, numConstrs); + write_msg(NULL, "(The system catalogs might be corrupted.)\n"); exit_nicely(); } - numDefaults = PQntuples(res); - for (j = 0; j < numDefaults; j++) - { - int adnum = atoi(PQgetvalue(res, j, 0)); + constrs = (ConstraintInfo *) malloc(numConstrs * sizeof(ConstraintInfo)); + tbinfo->checkexprs = constrs; - if (adnum <= 0 || adnum > ntups) - { - write_msg(NULL, "invalid adnum value %d for table \"%s\"\n", - adnum, tbinfo->relname); - exit_nicely(); - } - tbinfo->adef_expr[adnum - 1] = 
strdup(PQgetvalue(res, j, 1)); + for (j = 0; j < numConstrs; j++) + { + constrs[j].dobj.objType = DO_CONSTRAINT; + constrs[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, 0)); + constrs[j].dobj.catId.oid = atooid(PQgetvalue(res, j, 1)); + AssignDumpId(&constrs[j].dobj); + constrs[j].contable = tbinfo; + constrs[j].condomain = NULL; + constrs[j].contype = 'c'; + constrs[j].conname = strdup(PQgetvalue(res, j, 2)); + constrs[j].condef = strdup(PQgetvalue(res, j, 3)); + constrs[j].conindex = 0; + constrs[j].coninherited = false; + constrs[j].separate = false; + addObjectDependency(&tbinfo->dobj, + constrs[j].dobj.dumpId); + /* + * If the constraint is inherited, this will be detected + * later. We also detect later if the constraint must be + * split out from the table definition. + */ } PQclear(res); } @@ -2642,14 +3732,14 @@ getTableAttrs(TableInfo *tblinfo, int numTables) if (strcmp(tbinfo->atttypnames[j], "integer") != 0 && strcmp(tbinfo->atttypnames[j], "bigint") != 0) continue; - if (tbinfo->adef_expr[j] == NULL) + if (tbinfo->attrdefs[j] == NULL) continue; for (k = 0; k < numTables; k++) { TableInfo *seqinfo = &tblinfo[k]; - if (seqinfo->owning_tab != NULL && - strcmp(seqinfo->owning_tab, tbinfo->oid) == 0 && + if (OidIsValid(seqinfo->owning_tab) && + seqinfo->owning_tab == tbinfo->dobj.catId.oid && seqinfo->owning_col == j + 1) { /* @@ -2673,21 +3763,17 @@ getTableAttrs(TableInfo *tblinfo, int numTables) * dumpComment -- * * This routine is used to dump any comments associated with the - * oid handed to this routine. The routine takes a constant character + * object handed to this routine. The routine takes a constant character * string for the target part of the comment-creation command, plus * the namespace and owner of the object (for labeling the ArchiveEntry), - * plus OID, class name, and subid which are the lookup key for pg_description. + * plus catalog ID and subid which are the lookup key for pg_description, + * plus the dump ID for the object (for setting a dependency). * If a matching pg_description entry is found, it is dumped. - * Additional dependencies can be passed for the comment, too --- this is - * needed for VIEWs, whose comments are filed under the table OID but - * which are dumped in order by their rule OID. 
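+ * The dump ID is recorded as a dependency of the COMMENT entry, so the
+ * comment sorts after the object it describes.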
*/ - static void dumpComment(Archive *fout, const char *target, const char *namespace, const char *owner, - const char *oid, const char *classname, int subid, - const char *((*deps)[])) + CatalogId catalogId, int subid, DumpId dumpId) { PGresult *res; PQExpBuffer query; @@ -2708,35 +3794,30 @@ dumpComment(Archive *fout, const char *target, if (fout->remoteVersion >= 70300) { - appendPQExpBuffer(query, "SELECT description FROM pg_catalog.pg_description " - "WHERE objoid = '%s'::pg_catalog.oid and classoid = " - "'pg_catalog.%s'::pg_catalog.regclass " - "and objsubid = %d", - oid, classname, subid); + appendPQExpBuffer(query, + "SELECT description FROM pg_catalog.pg_description " + "WHERE classoid = '%u'::pg_catalog.oid and " + "objoid = '%u'::pg_catalog.oid and objsubid = %d", + catalogId.tableoid, catalogId.oid, subid); } else if (fout->remoteVersion >= 70200) { - appendPQExpBuffer(query, "SELECT description FROM pg_description " - "WHERE objoid = '%s'::oid and classoid = " - "(SELECT oid FROM pg_class where relname = '%s') " - "and objsubid = %d", - oid, classname, subid); + appendPQExpBuffer(query, + "SELECT description FROM pg_description " + "WHERE classoid = '%u'::oid and " + "objoid = '%u'::oid and objsubid = %d", + catalogId.tableoid, catalogId.oid, subid); } else { /* Note: this will fail to find attribute comments in pre-7.2... */ - appendPQExpBuffer(query, "SELECT description FROM pg_description WHERE objoid = '%s'::oid", oid); + appendPQExpBuffer(query, "SELECT description FROM pg_description WHERE objoid = '%u'::oid", catalogId.oid); } /* Execute query */ res = PQexec(g_conn, query->data); - if (!res || PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to get comment on OID %s failed: %s", - oid, PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); /* If a comment exists, build COMMENT ON statement */ @@ -2748,9 +3829,11 @@ dumpComment(Archive *fout, const char *target, appendStringLiteral(query, PQgetvalue(res, 0, i_description), false); appendPQExpBuffer(query, ";\n"); - ArchiveEntry(fout, oid, target, namespace, owner, - "COMMENT", deps, - query->data, "", NULL, NULL, NULL); + ArchiveEntry(fout, nilCatalogId, createDumpId(), + target, namespace, owner, + "COMMENT", query->data, "", NULL, + &(dumpId), 1, + NULL, NULL); } PQclear(res); @@ -2765,8 +3848,7 @@ dumpComment(Archive *fout, const char *target, */ static void dumpTableComment(Archive *fout, TableInfo *tbinfo, - const char *reltypename, - const char *((*deps)[])) + const char *reltypename) { PGresult *res; PQExpBuffer query; @@ -2793,34 +3875,31 @@ dumpTableComment(Archive *fout, TableInfo *tbinfo, if (fout->remoteVersion >= 70300) { appendPQExpBuffer(query, "SELECT description, objsubid FROM pg_catalog.pg_description " - "WHERE objoid = '%s'::pg_catalog.oid and classoid = " - "'pg_catalog.pg_class'::pg_catalog.regclass " + "WHERE classoid = '%u'::pg_catalog.oid and " + "objoid = '%u'::pg_catalog.oid " "ORDER BY objoid, classoid, objsubid", - tbinfo->oid); + tbinfo->dobj.catId.tableoid, tbinfo->dobj.catId.oid); } else if (fout->remoteVersion >= 70200) { appendPQExpBuffer(query, "SELECT description, objsubid FROM pg_description " - "WHERE objoid = '%s'::oid and classoid = " - "(SELECT oid FROM pg_class where relname = 'pg_class') " + "WHERE classoid = '%u'::oid and " + "objoid = '%u'::oid " "ORDER BY objoid, classoid, objsubid", - tbinfo->oid); + tbinfo->dobj.catId.tableoid, tbinfo->dobj.catId.oid); } else { /* Note: this will fail to find 
attribute comments in pre-7.2... */ - appendPQExpBuffer(query, "SELECT description, 0 as objsubid FROM pg_description WHERE objoid = '%s'::oid", tbinfo->oid); + appendPQExpBuffer(query, "SELECT description, 0 as objsubid FROM pg_description WHERE objoid = '%u'::oid", + tbinfo->dobj.catId.oid); } /* Execute query */ res = PQexec(g_conn, query->data); - if (!res || PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to get comments on table %s failed: %s", - tbinfo->relname, PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); + i_description = PQfnumber(res, "description"); i_objsubid = PQfnumber(res, "objsubid"); @@ -2843,10 +3922,12 @@ dumpTableComment(Archive *fout, TableInfo *tbinfo, appendStringLiteral(query, descr, false); appendPQExpBuffer(query, ";\n"); - ArchiveEntry(fout, tbinfo->oid, target->data, + ArchiveEntry(fout, nilCatalogId, createDumpId(), + target->data, tbinfo->relnamespace->nspname, tbinfo->usename, - "COMMENT", deps, - query->data, "", NULL, NULL, NULL); + "COMMENT", query->data, "", NULL, + &(tbinfo->dobj.dumpId), 1, + NULL, NULL); } else if (objsubid > 0 && objsubid <= tbinfo->numatts) { @@ -2861,10 +3942,12 @@ dumpTableComment(Archive *fout, TableInfo *tbinfo, appendStringLiteral(query, descr, false); appendPQExpBuffer(query, ";\n"); - ArchiveEntry(fout, tbinfo->oid, target->data, + ArchiveEntry(fout, nilCatalogId, createDumpId(), + target->data, tbinfo->relnamespace->nspname, tbinfo->usename, - "COMMENT", deps, - query->data, "", NULL, NULL, NULL); + "COMMENT", query->data, "", NULL, + &(tbinfo->dobj.dumpId), 1, + NULL, NULL); } } @@ -2874,158 +3957,198 @@ dumpTableComment(Archive *fout, TableInfo *tbinfo, } /* - * dumpDBComment -- + * dumpDumpableObject * - * This routine is used to dump any comments associated with the - * database to which we are currently connected. + * This routine and its subsidiaries are responsible for creating + * ArchiveEntries (TOC objects) for each object to be dumped. 
*/ -void -dumpDBComment(Archive *fout) +static void +dumpDumpableObject(Archive *fout, DumpableObject *dobj) { - PGresult *res; - PQExpBuffer query; - int i_oid; - - /* Make sure we are in proper schema */ - selectSourceSchema("pg_catalog"); - - /* Build query to find comment */ - - query = createPQExpBuffer(); - appendPQExpBuffer(query, "SELECT oid FROM pg_database WHERE datname = "); - appendStringLiteral(query, PQdb(g_conn), true); - - /* Execute query */ - - res = PQexec(g_conn, query->data); - if (!res || PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to get database OID failed: %s", - PQerrorMessage(g_conn)); - exit_nicely(); - } - - /* If a comment exists, build COMMENT ON statement */ - - if (PQntuples(res) != 0) + switch (dobj->objType) { - i_oid = PQfnumber(res, "oid"); - resetPQExpBuffer(query); - appendPQExpBuffer(query, "DATABASE %s", fmtId(PQdb(g_conn))); - dumpComment(fout, query->data, NULL, "", - PQgetvalue(res, 0, i_oid), "pg_database", 0, NULL); + case DO_NAMESPACE: + dumpNamespace(fout, (NamespaceInfo *) dobj); + break; + case DO_TYPE: + dumpType(fout, (TypeInfo *) dobj); + break; + case DO_FUNC: + dumpFunc(fout, (FuncInfo *) dobj); + break; + case DO_AGG: + dumpAgg(fout, (AggInfo *) dobj); + break; + case DO_OPERATOR: + dumpOpr(fout, (OprInfo *) dobj); + break; + case DO_OPCLASS: + dumpOpclass(fout, (OpclassInfo *) dobj); + break; + case DO_CONVERSION: + dumpConversion(fout, (ConvInfo *) dobj); + break; + case DO_TABLE: + dumpTable(fout, (TableInfo *) dobj); + break; + case DO_ATTRDEF: + dumpAttrDef(fout, (AttrDefInfo *) dobj); + break; + case DO_INDEX: + dumpIndex(fout, (IndxInfo *) dobj); + break; + case DO_RULE: + dumpRule(fout, (RuleInfo *) dobj); + break; + case DO_TRIGGER: + dumpTrigger(fout, (TriggerInfo *) dobj); + break; + case DO_CONSTRAINT: + dumpConstraint(fout, (ConstraintInfo *) dobj); + break; + case DO_FK_CONSTRAINT: + dumpConstraint(fout, (ConstraintInfo *) dobj); + break; + case DO_PROCLANG: + dumpProcLang(fout, (ProcLangInfo *) dobj); + break; + case DO_CAST: + dumpCast(fout, (CastInfo *) dobj); + break; + case DO_TABLE_DATA: + dumpTableData(fout, (TableDataInfo *) dobj); + break; } - - PQclear(res); - destroyPQExpBuffer(query); } /* - * dumpNamespaces - * writes out to fout the queries to recreate user-defined namespaces + * dumpNamespace + * writes out to fout the queries to recreate a user-defined namespace */ -void -dumpNamespaces(Archive *fout, NamespaceInfo *nsinfo, int numNamespaces) +static void +dumpNamespace(Archive *fout, NamespaceInfo *nspinfo) { - PQExpBuffer q = createPQExpBuffer(); - PQExpBuffer delq = createPQExpBuffer(); - int i; + PQExpBuffer q; + PQExpBuffer delq; char *qnspname; - for (i = 0; i < numNamespaces; i++) - { - NamespaceInfo *nspinfo = &nsinfo[i]; - - /* skip if not to be dumped */ - if (!nspinfo->dump) - continue; + /* skip if not to be dumped */ + if (!nspinfo->dump || dataOnly) + return; - /* don't dump dummy namespace from pre-7.3 source */ - if (strlen(nspinfo->nspname) == 0) - continue; + /* don't dump dummy namespace from pre-7.3 source */ + if (strlen(nspinfo->nspname) == 0) + return; - qnspname = strdup(fmtId(nspinfo->nspname)); + q = createPQExpBuffer(); + delq = createPQExpBuffer(); - /* - * If it's the PUBLIC namespace, don't emit a CREATE SCHEMA record - * for it, since we expect PUBLIC to exist already in the - * destination database. But do emit ACL in case it's not standard, - * likewise comment. 
- * - * Note that ownership is shown in the AUTHORIZATION clause, - * while the archive entry is listed with empty owner (causing - * it to be emitted with SET SESSION AUTHORIZATION DEFAULT). - * This seems the best way of dealing with schemas owned by - * users without CREATE SCHEMA privilege. Further hacking has - * to be applied for --no-owner mode, though! - */ - if (strcmp(nspinfo->nspname, "public") != 0) - { - resetPQExpBuffer(q); - resetPQExpBuffer(delq); + qnspname = strdup(fmtId(nspinfo->nspname)); - appendPQExpBuffer(delq, "DROP SCHEMA %s;\n", qnspname); + /* + * If it's the PUBLIC namespace, suppress the CREATE SCHEMA record + * for it, since we expect PUBLIC to exist already in the + * destination database. But do emit ACL in case it's not standard, + * likewise comment. + * + * Note that ownership is shown in the AUTHORIZATION clause, + * while the archive entry is listed with empty owner (causing + * it to be emitted with SET SESSION AUTHORIZATION DEFAULT). + * This seems the best way of dealing with schemas owned by + * users without CREATE SCHEMA privilege. Further hacking has + * to be applied for --no-owner mode, though! + */ + if (strcmp(nspinfo->nspname, "public") != 0) + { + appendPQExpBuffer(delq, "DROP SCHEMA %s;\n", qnspname); - appendPQExpBuffer(q, "CREATE SCHEMA %s AUTHORIZATION %s;\n", - qnspname, fmtId(nspinfo->usename)); + appendPQExpBuffer(q, "CREATE SCHEMA %s AUTHORIZATION %s;\n", + qnspname, fmtId(nspinfo->usename)); - ArchiveEntry(fout, nspinfo->oid, nspinfo->nspname, - NULL, "", "SCHEMA", NULL, - q->data, delq->data, NULL, NULL, NULL); - } + ArchiveEntry(fout, nspinfo->dobj.catId, nspinfo->dobj.dumpId, + nspinfo->nspname, + NULL, "", + "SCHEMA", q->data, delq->data, NULL, + nspinfo->dobj.dependencies, nspinfo->dobj.nDeps, + NULL, NULL); + } - /* Dump Schema Comments */ - resetPQExpBuffer(q); - appendPQExpBuffer(q, "SCHEMA %s", qnspname); - dumpComment(fout, q->data, - NULL, nspinfo->usename, - nspinfo->oid, "pg_namespace", 0, NULL); + /* Dump Schema Comments */ + resetPQExpBuffer(q); + appendPQExpBuffer(q, "SCHEMA %s", qnspname); + dumpComment(fout, q->data, + NULL, nspinfo->usename, + nspinfo->dobj.catId, 0, nspinfo->dobj.dumpId); - dumpACL(fout, "SCHEMA", qnspname, nspinfo->nspname, NULL, - nspinfo->usename, nspinfo->nspacl, - nspinfo->oid); + dumpACL(fout, nspinfo->dobj.catId, nspinfo->dobj.dumpId, "SCHEMA", + qnspname, nspinfo->nspname, NULL, + nspinfo->usename, nspinfo->nspacl); - free(qnspname); - } + free(qnspname); destroyPQExpBuffer(q); destroyPQExpBuffer(delq); } /* - * dumpOneBaseType + * dumpType + * writes out to fout the queries to recreate a user-defined type + */ +static void +dumpType(Archive *fout, TypeInfo *tinfo) +{ + /* Dump only types in dumpable namespaces */ + if (!tinfo->typnamespace->dump || dataOnly) + return; + + /* skip complex types, except for standalone composite types */ + if (OidIsValid(tinfo->typrelid) && tinfo->typrelkind != 'c') + return; + + /* skip undefined placeholder types */ + if (!tinfo->isDefined) + return; + + /* skip all array types that start w/ underscore */ + if ((tinfo->typname[0] == '_') && + OidIsValid(tinfo->typelem)) + return; + + /* Dump out in proper style */ + if (tinfo->typtype == 'b') + dumpBaseType(fout, tinfo); + else if (tinfo->typtype == 'd') + dumpDomain(fout, tinfo); + else if (tinfo->typtype == 'c') + dumpCompositeType(fout, tinfo); +} + +/* + * dumpBaseType * writes out to fout the queries to recreate a user-defined base type - * as requested by dumpTypes */ static void 
-dumpOneBaseType(Archive *fout, TypeInfo *tinfo, - FuncInfo *g_finfo, int numFuncs, - TypeInfo *g_tinfo, int numTypes) +dumpBaseType(Archive *fout, TypeInfo *tinfo) { PQExpBuffer q = createPQExpBuffer(); PQExpBuffer delq = createPQExpBuffer(); PQExpBuffer query = createPQExpBuffer(); PGresult *res; int ntups; - int funcInd; char *typlen; char *typinput; char *typoutput; char *typreceive; char *typsend; - char *typinputoid; - char *typoutputoid; - char *typreceiveoid; - char *typsendoid; + Oid typinputoid; + Oid typoutputoid; + Oid typreceiveoid; + Oid typsendoid; char *typdelim; char *typdefault; char *typbyval; char *typalign; char *typstorage; - const char *((*deps)[]); - int depIdx = 0; - - deps = malloc(sizeof(char *) * 10); /* Set proper schema search path so regproc references list correctly */ selectSourceSchema(tinfo->typnamespace->nspname); @@ -3042,8 +4165,8 @@ dumpOneBaseType(Archive *fout, TypeInfo *tinfo, "typdelim, typdefault, typbyval, typalign, " "typstorage " "FROM pg_catalog.pg_type " - "WHERE oid = '%s'::pg_catalog.oid", - tinfo->oid); + "WHERE oid = '%u'::pg_catalog.oid", + tinfo->dobj.catId.oid); } else if (fout->remoteVersion >= 70300) { @@ -3056,8 +4179,8 @@ dumpOneBaseType(Archive *fout, TypeInfo *tinfo, "typdelim, typdefault, typbyval, typalign, " "typstorage " "FROM pg_catalog.pg_type " - "WHERE oid = '%s'::pg_catalog.oid", - tinfo->oid); + "WHERE oid = '%u'::pg_catalog.oid", + tinfo->dobj.catId.oid); } else if (fout->remoteVersion >= 70100) { @@ -3074,8 +4197,8 @@ dumpOneBaseType(Archive *fout, TypeInfo *tinfo, "typdelim, typdefault, typbyval, typalign, " "typstorage " "FROM pg_type " - "WHERE oid = '%s'::oid", - tinfo->oid); + "WHERE oid = '%u'::oid", + tinfo->dobj.catId.oid); } else { @@ -3088,18 +4211,12 @@ dumpOneBaseType(Archive *fout, TypeInfo *tinfo, "typdelim, typdefault, typbyval, typalign, " "'p'::char as typstorage " "FROM pg_type " - "WHERE oid = '%s'::oid", - tinfo->oid); + "WHERE oid = '%u'::oid", + tinfo->dobj.catId.oid); } res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain information on data type \"%s\" failed: %s", - tinfo->typname, PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); /* Expecting a single result only */ ntups = PQntuples(res); @@ -3115,10 +4232,10 @@ dumpOneBaseType(Archive *fout, TypeInfo *tinfo, typoutput = PQgetvalue(res, 0, PQfnumber(res, "typoutput")); typreceive = PQgetvalue(res, 0, PQfnumber(res, "typreceive")); typsend = PQgetvalue(res, 0, PQfnumber(res, "typsend")); - typinputoid = PQgetvalue(res, 0, PQfnumber(res, "typinputoid")); - typoutputoid = PQgetvalue(res, 0, PQfnumber(res, "typoutputoid")); - typreceiveoid = PQgetvalue(res, 0, PQfnumber(res, "typreceiveoid")); - typsendoid = PQgetvalue(res, 0, PQfnumber(res, "typsendoid")); + typinputoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typinputoid"))); + typoutputoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typoutputoid"))); + typreceiveoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typreceiveoid"))); + typsendoid = atooid(PQgetvalue(res, 0, PQfnumber(res, "typsendoid"))); typdelim = PQgetvalue(res, 0, PQfnumber(res, "typdelim")); if (PQgetisnull(res, 0, PQfnumber(res, "typdefault"))) typdefault = NULL; @@ -3128,34 +4245,6 @@ dumpOneBaseType(Archive *fout, TypeInfo *tinfo, typalign = PQgetvalue(res, 0, PQfnumber(res, "typalign")); typstorage = PQgetvalue(res, 0, PQfnumber(res, "typstorage")); - /* - * Before we create a type, we 
need to create the input and output - * functions for it, if they haven't been created already. So make - * sure there are dependency entries for this. But don't include - * dependencies if the functions aren't going to be dumped. - */ - funcInd = findFuncByOid(g_finfo, numFuncs, typinputoid); - if (funcInd >= 0 && g_finfo[funcInd].pronamespace->dump) - (*deps)[depIdx++] = strdup(typinputoid); - - funcInd = findFuncByOid(g_finfo, numFuncs, typoutputoid); - if (funcInd >= 0 && g_finfo[funcInd].pronamespace->dump) - (*deps)[depIdx++] = strdup(typoutputoid); - - if (strcmp(typreceiveoid, "0") != 0) - { - funcInd = findFuncByOid(g_finfo, numFuncs, typreceiveoid); - if (funcInd >= 0 && g_finfo[funcInd].pronamespace->dump) - (*deps)[depIdx++] = strdup(typreceiveoid); - } - - if (strcmp(typsendoid, "0") != 0) - { - funcInd = findFuncByOid(g_finfo, numFuncs, typsendoid); - if (funcInd >= 0 && g_finfo[funcInd].pronamespace->dump) - (*deps)[depIdx++] = strdup(typsendoid); - } - /* * DROP must be fully qualified in case same name appears in * pg_catalog @@ -3176,9 +4265,9 @@ dumpOneBaseType(Archive *fout, TypeInfo *tinfo, /* regproc result is correctly quoted in 7.3 */ appendPQExpBuffer(q, ",\n INPUT = %s", typinput); appendPQExpBuffer(q, ",\n OUTPUT = %s", typoutput); - if (strcmp(typreceiveoid, "0") != 0) + if (OidIsValid(typreceiveoid)) appendPQExpBuffer(q, ",\n RECEIVE = %s", typreceive); - if (strcmp(typsendoid, "0") != 0) + if (OidIsValid(typsendoid)) appendPQExpBuffer(q, ",\n SEND = %s", typsend); } else @@ -3205,8 +4294,6 @@ dumpOneBaseType(Archive *fout, TypeInfo *tinfo, elemType = getFormattedTypeName(tinfo->typelem, zeroAsOpaque); appendPQExpBuffer(q, ",\n ELEMENT = %s", elemType); free(elemType); - - (*deps)[depIdx++] = strdup(tinfo->typelem); } if (typdelim && strcmp(typdelim, ",") != 0) @@ -3238,12 +4325,13 @@ dumpOneBaseType(Archive *fout, TypeInfo *tinfo, appendPQExpBuffer(q, "\n);\n"); - (*deps)[depIdx++] = NULL; /* End of List */ - - ArchiveEntry(fout, tinfo->oid, tinfo->typname, + ArchiveEntry(fout, tinfo->dobj.catId, tinfo->dobj.dumpId, + tinfo->typname, tinfo->typnamespace->nspname, - tinfo->usename, "TYPE", deps, - q->data, delq->data, NULL, NULL, NULL); + tinfo->usename, + "TYPE", q->data, delq->data, NULL, + tinfo->dobj.dependencies, tinfo->dobj.nDeps, + NULL, NULL); /* Dump Type Comments */ resetPQExpBuffer(q); @@ -3251,7 +4339,7 @@ dumpOneBaseType(Archive *fout, TypeInfo *tinfo, appendPQExpBuffer(q, "TYPE %s", fmtId(tinfo->typname)); dumpComment(fout, q->data, tinfo->typnamespace->nspname, tinfo->usename, - tinfo->oid, "pg_type", 0, NULL); + tinfo->dobj.catId, 0, tinfo->dobj.dumpId); PQclear(res); destroyPQExpBuffer(q); @@ -3260,28 +4348,21 @@ dumpOneBaseType(Archive *fout, TypeInfo *tinfo, } /* - * dumpOneDomain + * dumpDomain * writes out to fout the queries to recreate a user-defined domain - * as requested by dumpTypes */ static void -dumpOneDomain(Archive *fout, TypeInfo *tinfo) +dumpDomain(Archive *fout, TypeInfo *tinfo) { PQExpBuffer q = createPQExpBuffer(); PQExpBuffer delq = createPQExpBuffer(); PQExpBuffer query = createPQExpBuffer(); - PQExpBuffer chkquery = createPQExpBuffer(); PGresult *res; int ntups; int i; char *typnotnull; char *typdefn; char *typdefault; - char *typbasetype; - const char *((*deps)[]); - int depIdx = 0; - - deps = malloc(sizeof(char *) * 10); /* Set proper schema search path so type references list correctly */ selectSourceSchema(tinfo->typnamespace->nspname); @@ -3290,18 +4371,13 @@ dumpOneDomain(Archive *fout, TypeInfo *tinfo) /* We assume 
here that remoteVersion must be at least 70300 */ appendPQExpBuffer(query, "SELECT typnotnull, " "pg_catalog.format_type(typbasetype, typtypmod) as typdefn, " - "typdefault, typbasetype " + "typdefault " "FROM pg_catalog.pg_type " - "WHERE oid = '%s'::pg_catalog.oid", - tinfo->oid); + "WHERE oid = '%u'::pg_catalog.oid", + tinfo->dobj.catId.oid); res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain domain information failed: %s", PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); /* Expecting a single result only */ ntups = PQntuples(res); @@ -3318,25 +4394,12 @@ dumpOneDomain(Archive *fout, TypeInfo *tinfo) typdefault = NULL; else typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefault")); - typbasetype = PQgetvalue(res, 0, PQfnumber(res, "typbasetype")); - - /* - * DROP must be fully qualified in case same name appears in - * pg_catalog - */ - appendPQExpBuffer(delq, "DROP DOMAIN %s.", - fmtId(tinfo->typnamespace->nspname)); - appendPQExpBuffer(delq, "%s;\n", - fmtId(tinfo->typname)); appendPQExpBuffer(q, "CREATE DOMAIN %s AS %s", fmtId(tinfo->typname), typdefn); - /* Depends on the base type */ - (*deps)[depIdx++] = strdup(typbasetype); - if (typnotnull[0] == 't') appendPQExpBuffer(q, " NOT NULL"); @@ -3346,50 +4409,35 @@ dumpOneDomain(Archive *fout, TypeInfo *tinfo) PQclear(res); /* - * Fetch and process CHECK constraints for the domain + * Add any CHECK constraints for the domain */ - if (g_fout->remoteVersion >= 70400) - appendPQExpBuffer(chkquery, "SELECT conname, " - "pg_catalog.pg_get_constraintdef(oid) AS consrc " - "FROM pg_catalog.pg_constraint " - "WHERE contypid = '%s'::pg_catalog.oid", - tinfo->oid); - else - appendPQExpBuffer(chkquery, "SELECT conname, 'CHECK (' || consrc || ')' AS consrc " - "FROM pg_catalog.pg_constraint " - "WHERE contypid = '%s'::pg_catalog.oid", - tinfo->oid); - - res = PQexec(g_conn, chkquery->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain domain constraint information failed: %s", - PQerrorMessage(g_conn)); - exit_nicely(); - } - - ntups = PQntuples(res); - for (i = 0; i < ntups; i++) + for (i = 0; i < tinfo->nDomChecks; i++) { - char *conname; - char *consrc; + ConstraintInfo *domcheck = &(tinfo->domChecks[i]); - conname = PQgetvalue(res, i, PQfnumber(res, "conname")); - consrc = PQgetvalue(res, i, PQfnumber(res, "consrc")); - - appendPQExpBuffer(q, "\n\tCONSTRAINT %s %s", - fmtId(conname), consrc); + if (!domcheck->separate) + appendPQExpBuffer(q, "\n\tCONSTRAINT %s %s", + fmtId(domcheck->conname), domcheck->condef); } appendPQExpBuffer(q, ";\n"); - (*deps)[depIdx++] = NULL; /* End of List */ + /* + * DROP must be fully qualified in case same name appears in + * pg_catalog + */ + appendPQExpBuffer(delq, "DROP DOMAIN %s.", + fmtId(tinfo->typnamespace->nspname)); + appendPQExpBuffer(delq, "%s;\n", + fmtId(tinfo->typname)); - ArchiveEntry(fout, tinfo->oid, tinfo->typname, + ArchiveEntry(fout, tinfo->dobj.catId, tinfo->dobj.dumpId, + tinfo->typname, tinfo->typnamespace->nspname, - tinfo->usename, "DOMAIN", deps, - q->data, delq->data, NULL, NULL, NULL); + tinfo->usename, + "DOMAIN", q->data, delq->data, NULL, + tinfo->dobj.dependencies, tinfo->dobj.nDeps, + NULL, NULL); /* Dump Domain Comments */ resetPQExpBuffer(q); @@ -3397,22 +4445,20 @@ dumpOneDomain(Archive *fout, TypeInfo *tinfo) appendPQExpBuffer(q, "DOMAIN %s", fmtId(tinfo->typname)); dumpComment(fout, 
q->data, tinfo->typnamespace->nspname, tinfo->usename, - tinfo->oid, "pg_type", 0, NULL); + tinfo->dobj.catId, 0, tinfo->dobj.dumpId); - PQclear(res); destroyPQExpBuffer(q); destroyPQExpBuffer(delq); destroyPQExpBuffer(query); - destroyPQExpBuffer(chkquery); } /* - * dumpOneCompositeType + * dumpCompositeType * writes out to fout the queries to recreate a user-defined stand-alone - * composite type as requested by dumpTypes + * composite type */ static void -dumpOneCompositeType(Archive *fout, TypeInfo *tinfo) +dumpCompositeType(Archive *fout, TypeInfo *tinfo) { PQExpBuffer q = createPQExpBuffer(); PQExpBuffer delq = createPQExpBuffer(); @@ -3432,19 +4478,14 @@ dumpOneCompositeType(Archive *fout, TypeInfo *tinfo) appendPQExpBuffer(query, "SELECT a.attname, " "pg_catalog.format_type(a.atttypid, a.atttypmod) as atttypdefn " "FROM pg_catalog.pg_type t, pg_catalog.pg_attribute a " - "WHERE t.oid = '%s'::pg_catalog.oid " + "WHERE t.oid = '%u'::pg_catalog.oid " "AND a.attrelid = t.typrelid " "AND NOT a.attisdropped " "ORDER BY a.attnum ", - tinfo->oid); + tinfo->dobj.catId.oid); res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain data type information failed: %s", PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); /* Expecting at least a single result */ ntups = PQntuples(res); @@ -3479,252 +4520,128 @@ dumpOneCompositeType(Archive *fout, TypeInfo *tinfo) * pg_catalog */ appendPQExpBuffer(delq, "DROP TYPE %s.", - fmtId(tinfo->typnamespace->nspname)); - appendPQExpBuffer(delq, "%s;\n", - fmtId(tinfo->typname)); - - ArchiveEntry(fout, tinfo->oid, tinfo->typname, - tinfo->typnamespace->nspname, - tinfo->usename, "TYPE", NULL, - q->data, delq->data, NULL, NULL, NULL); - - - /* Dump Type Comments */ - resetPQExpBuffer(q); - - appendPQExpBuffer(q, "TYPE %s", fmtId(tinfo->typname)); - dumpComment(fout, q->data, - tinfo->typnamespace->nspname, tinfo->usename, - tinfo->oid, "pg_type", 0, NULL); - - PQclear(res); - destroyPQExpBuffer(q); - destroyPQExpBuffer(delq); - destroyPQExpBuffer(query); -} - -/* - * dumpTypes - * writes out to fout the queries to recreate all the user-defined types - */ -void -dumpTypes(Archive *fout, FuncInfo *finfo, int numFuncs, - TypeInfo *tinfo, int numTypes) -{ - int i; - - for (i = 0; i < numTypes; i++) - { - /* Dump only types in dumpable namespaces */ - if (!tinfo[i].typnamespace->dump) - continue; - - /* skip complex types, except for standalone composite types */ - if (atooid(tinfo[i].typrelid) != 0 && tinfo[i].typrelkind != 'c') - continue; - - /* skip undefined placeholder types */ - if (!tinfo[i].isDefined) - continue; - - /* skip all array types that start w/ underscore */ - if ((tinfo[i].typname[0] == '_') && - atooid(tinfo[i].typelem) != 0) - continue; - - /* Dump out in proper style */ - if (tinfo[i].typtype == 'b') - dumpOneBaseType(fout, &tinfo[i], - finfo, numFuncs, tinfo, numTypes); - else if (tinfo[i].typtype == 'd') - dumpOneDomain(fout, &tinfo[i]); - else if (tinfo[i].typtype == 'c') - dumpOneCompositeType(fout, &tinfo[i]); - } -} - -/* - * dumpProcLangs - * writes out to fout the queries to recreate user-defined procedural languages - */ -void -dumpProcLangs(Archive *fout, FuncInfo finfo[], int numFuncs) -{ - PGresult *res; - PQExpBuffer query = createPQExpBuffer(); - PQExpBuffer defqry = createPQExpBuffer(); - PQExpBuffer delqry = createPQExpBuffer(); - int ntups; - int i_oid; - int i_lanname; - int i_lanpltrusted; - int 
i_lanplcallfoid; - int i_lanvalidator = -1; - int i_lanacl = -1; - char *lanoid; - char *lanname; - char *lanacl; - const char *lanplcallfoid; - const char *lanvalidator; - const char *((*deps)[]); - int depIdx; - int i, - fidx, - vidx = -1; - - /* Make sure we are in proper schema */ - selectSourceSchema("pg_catalog"); - - appendPQExpBuffer(query, "SELECT oid, * FROM pg_language " - "WHERE lanispl " - "ORDER BY oid"); - res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain list of procedural languages failed: %s", - PQerrorMessage(g_conn)); - exit_nicely(); - } - ntups = PQntuples(res); - - i_lanname = PQfnumber(res, "lanname"); - i_lanpltrusted = PQfnumber(res, "lanpltrusted"); - i_lanplcallfoid = PQfnumber(res, "lanplcallfoid"); - i_oid = PQfnumber(res, "oid"); - if (fout->remoteVersion >= 70300) - { - i_lanvalidator = PQfnumber(res, "lanvalidator"); - i_lanacl = PQfnumber(res, "lanacl"); - } - - for (i = 0; i < ntups; i++) - { - lanoid = PQgetvalue(res, i, i_oid); - lanplcallfoid = PQgetvalue(res, i, i_lanplcallfoid); - lanname = PQgetvalue(res, i, i_lanname); - if (fout->remoteVersion >= 70300) - { - lanvalidator = PQgetvalue(res, i, i_lanvalidator); - lanacl = PQgetvalue(res, i, i_lanacl); - } - else - { - lanvalidator = "0"; - lanacl = "{=U}"; - } + fmtId(tinfo->typnamespace->nspname)); + appendPQExpBuffer(delq, "%s;\n", + fmtId(tinfo->typname)); - /* - * Current theory is to dump PLs iff their underlying functions - * will be dumped (are in a dumpable namespace, or have a - * non-system OID in pre-7.3 databases). Actually, we treat the - * PL itself as being in the underlying function's namespace, - * though it isn't really. This avoids searchpath problems for - * the HANDLER clause. - * - * If the underlying function is in the pg_catalog namespace, - * we won't have loaded it into finfo[] at all; therefore, - * treat failure to find it in finfo[] as indicating we shouldn't - * dump it, not as an error condition. Ditto for the validator. 
- */ + ArchiveEntry(fout, tinfo->dobj.catId, tinfo->dobj.dumpId, + tinfo->typname, + tinfo->typnamespace->nspname, + tinfo->usename, + "TYPE", q->data, delq->data, NULL, + tinfo->dobj.dependencies, tinfo->dobj.nDeps, + NULL, NULL); - fidx = findFuncByOid(finfo, numFuncs, lanplcallfoid); - if (fidx < 0) - continue; - if (!finfo[fidx].pronamespace->dump) - continue; + /* Dump Type Comments */ + resetPQExpBuffer(q); - if (strcmp(lanvalidator, "0") != 0) - { - vidx = findFuncByOid(finfo, numFuncs, lanvalidator); - if (vidx < 0) - continue; - } + appendPQExpBuffer(q, "TYPE %s", fmtId(tinfo->typname)); + dumpComment(fout, q->data, + tinfo->typnamespace->nspname, tinfo->usename, + tinfo->dobj.catId, 0, tinfo->dobj.dumpId); - resetPQExpBuffer(defqry); - resetPQExpBuffer(delqry); + PQclear(res); + destroyPQExpBuffer(q); + destroyPQExpBuffer(delq); + destroyPQExpBuffer(query); +} - /* Make a dependency to ensure function is dumped first */ - deps = malloc(sizeof(char *) * 10); - depIdx = 0; +/* + * dumpProcLang + * writes out to fout the queries to recreate a user-defined + * procedural language + */ +static void +dumpProcLang(Archive *fout, ProcLangInfo *plang) +{ + PQExpBuffer defqry; + PQExpBuffer delqry; + char *qlanname; + FuncInfo *funcInfo; + FuncInfo *validatorInfo = NULL; - (*deps)[depIdx++] = strdup(lanplcallfoid); + if (dataOnly) + return; - appendPQExpBuffer(delqry, "DROP PROCEDURAL LANGUAGE %s;\n", - fmtId(lanname)); + /* + * Current theory is to dump PLs iff their underlying functions + * will be dumped (are in a dumpable namespace, or have a + * non-system OID in pre-7.3 databases). Actually, we treat the + * PL itself as being in the underlying function's namespace, + * though it isn't really. This avoids searchpath problems for + * the HANDLER clause. + * + * If the underlying function is in the pg_catalog namespace, + * we won't have loaded it into finfo[] at all; therefore, + * treat failure to find it in finfo[] as indicating we shouldn't + * dump it, not as an error condition. Ditto for the validator. + */ - appendPQExpBuffer(defqry, "CREATE %sPROCEDURAL LANGUAGE %s", - (PQgetvalue(res, i, i_lanpltrusted)[0] == 't') ? 
- "TRUSTED " : "", - fmtId(lanname)); - appendPQExpBuffer(defqry, " HANDLER %s", - fmtId(finfo[fidx].proname)); - if (strcmp(lanvalidator, "0") != 0) - { - appendPQExpBuffer(defqry, " VALIDATOR "); - /* Cope with possibility that validator is in different schema */ - if (finfo[vidx].pronamespace != finfo[fidx].pronamespace) - appendPQExpBuffer(defqry, "%s.", - fmtId(finfo[vidx].pronamespace->nspname)); - appendPQExpBuffer(defqry, "%s", - fmtId(finfo[vidx].proname)); - (*deps)[depIdx++] = strdup(lanvalidator); - } - appendPQExpBuffer(defqry, ";\n"); + funcInfo = findFuncByOid(plang->lanplcallfoid); + if (funcInfo == NULL) + return; - (*deps)[depIdx++] = NULL; /* End of List */ + if (!funcInfo->pronamespace->dump) + return; - ArchiveEntry(fout, lanoid, lanname, - finfo[fidx].pronamespace->nspname, "", - "PROCEDURAL LANGUAGE", deps, - defqry->data, delqry->data, NULL, NULL, NULL); + if (OidIsValid(plang->lanvalidator)) + { + validatorInfo = findFuncByOid(plang->lanvalidator); + if (validatorInfo == NULL) + return; + } - if (!aclsSkip) - { - char *tmp = strdup(fmtId(lanname)); + defqry = createPQExpBuffer(); + delqry = createPQExpBuffer(); - dumpACL(fout, "ACL LANGUAGE", tmp, lanname, - finfo[fidx].pronamespace->nspname, - NULL, lanacl, lanoid); - free(tmp); - } + qlanname = strdup(fmtId(plang->lanname)); - /* Dump Proc Lang Comments */ - resetPQExpBuffer(defqry); + appendPQExpBuffer(delqry, "DROP PROCEDURAL LANGUAGE %s;\n", + qlanname); - appendPQExpBuffer(defqry, "LANGUAGE %s", fmtId(lanname)); - dumpComment(fout, defqry->data, - NULL, "", - lanoid, "pg_language", 0, NULL); + appendPQExpBuffer(defqry, "CREATE %sPROCEDURAL LANGUAGE %s", + plang->lanpltrusted ? "TRUSTED " : "", + qlanname); + appendPQExpBuffer(defqry, " HANDLER %s", + fmtId(funcInfo->proname)); + if (OidIsValid(plang->lanvalidator)) + { + appendPQExpBuffer(defqry, " VALIDATOR "); + /* Cope with possibility that validator is in different schema */ + if (validatorInfo->pronamespace != funcInfo->pronamespace) + appendPQExpBuffer(defqry, "%s.", + fmtId(validatorInfo->pronamespace->nspname)); + appendPQExpBuffer(defqry, "%s", + fmtId(validatorInfo->proname)); } + appendPQExpBuffer(defqry, ";\n"); - PQclear(res); + ArchiveEntry(fout, plang->dobj.catId, plang->dobj.dumpId, + plang->lanname, + funcInfo->pronamespace->nspname, "", + "PROCEDURAL LANGUAGE", + defqry->data, delqry->data, NULL, + plang->dobj.dependencies, plang->dobj.nDeps, + NULL, NULL); - destroyPQExpBuffer(query); - destroyPQExpBuffer(defqry); - destroyPQExpBuffer(delqry); -} + /* Dump Proc Lang Comments */ + resetPQExpBuffer(defqry); -/* - * dumpFuncs - * writes out to fout the queries to recreate all the user-defined functions - */ -void -dumpFuncs(Archive *fout, FuncInfo finfo[], int numFuncs) -{ - int i; + appendPQExpBuffer(defqry, "LANGUAGE %s", qlanname); + dumpComment(fout, defqry->data, + NULL, "", + plang->dobj.catId, 0, plang->dobj.dumpId); - for (i = 0; i < numFuncs; i++) - { - /* Dump only funcs in dumpable namespaces */ - if (!finfo[i].pronamespace->dump) - continue; + dumpACL(fout, plang->dobj.catId, plang->dobj.dumpId, "LANGUAGE", + qlanname, plang->lanname, + funcInfo->pronamespace->nspname, + NULL, plang->lanacl); - dumpOneFunc(fout, &finfo[i]); - if (!aclsSkip) - dumpFuncACL(fout, &finfo[i]); - } + free(qlanname); + + destroyPQExpBuffer(defqry); + destroyPQExpBuffer(delqry); } /* @@ -3759,36 +4676,20 @@ format_function_signature(FuncInfo *finfo, bool honor_quotes) } -static void -dumpFuncACL(Archive *fout, FuncInfo *finfo) -{ - char *funcsig, - 
*funcsig_tag; - - funcsig = format_function_signature(finfo, true); - funcsig_tag = format_function_signature(finfo, false); - dumpACL(fout, "FUNCTION", funcsig, funcsig_tag, - finfo->pronamespace->nspname, - finfo->usename, finfo->proacl, finfo->oid); - free(funcsig); - free(funcsig_tag); -} - - /* - * dumpOneFunc: - * dump out only one function + * dumpFunc: + * dump out one function */ static void -dumpOneFunc(Archive *fout, FuncInfo *finfo) +dumpFunc(Archive *fout, FuncInfo *finfo) { - PQExpBuffer query = createPQExpBuffer(); - PQExpBuffer q = createPQExpBuffer(); - PQExpBuffer delqry = createPQExpBuffer(); - PQExpBuffer asPart = createPQExpBuffer(); - PGresult *res = NULL; - char *funcsig = NULL; - char *funcsig_tag = NULL; + PQExpBuffer query; + PQExpBuffer q; + PQExpBuffer delqry; + PQExpBuffer asPart; + PGresult *res; + char *funcsig; + char *funcsig_tag; int ntups; char *proretset; char *prosrc; @@ -3798,13 +4699,15 @@ dumpOneFunc(Archive *fout, FuncInfo *finfo) char *prosecdef; char *lanname; char *rettypename; - char *funcproclang; /* Boolean : is this function a PLang - * handler ? */ - if (finfo->dumped) - goto done; + /* Dump only funcs in dumpable namespaces */ + if (!finfo->pronamespace->dump || dataOnly) + return; - finfo->dumped = true; + query = createPQExpBuffer(); + q = createPQExpBuffer(); + delqry = createPQExpBuffer(); + asPart = createPQExpBuffer(); /* Set proper schema search path so type references list correctly */ selectSourceSchema(finfo->pronamespace->nspname); @@ -3815,11 +4718,10 @@ dumpOneFunc(Archive *fout, FuncInfo *finfo) appendPQExpBuffer(query, "SELECT proretset, prosrc, probin, " "provolatile, proisstrict, prosecdef, " - "(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) as lanname, " - "exists (SELECT 'x' FROM pg_catalog.pg_language WHERE lanplcallfoid = pg_catalog.pg_proc.oid) as funcproclang " + "(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) as lanname " "FROM pg_catalog.pg_proc " - "WHERE oid = '%s'::pg_catalog.oid", - finfo->oid); + "WHERE oid = '%u'::pg_catalog.oid", + finfo->dobj.catId.oid); } else if (g_fout->remoteVersion >= 70100) { @@ -3828,11 +4730,10 @@ dumpOneFunc(Archive *fout, FuncInfo *finfo) "case when proiscachable then 'i' else 'v' end as provolatile, " "proisstrict, " "'f'::boolean as prosecdef, " - "(SELECT lanname FROM pg_language WHERE oid = prolang) as lanname, " - "exists (SELECT 'x' FROM pg_language WHERE lanplcallfoid = pg_proc.oid) as funcproclang " + "(SELECT lanname FROM pg_language WHERE oid = prolang) as lanname " "FROM pg_proc " - "WHERE oid = '%s'::oid", - finfo->oid); + "WHERE oid = '%u'::oid", + finfo->dobj.catId.oid); } else { @@ -3841,21 +4742,14 @@ dumpOneFunc(Archive *fout, FuncInfo *finfo) "case when proiscachable then 'i' else 'v' end as provolatile, " "'f'::boolean as proisstrict, " "'f'::boolean as prosecdef, " - "(SELECT lanname FROM pg_language WHERE oid = prolang) as lanname, " - "exists (SELECT 'x' FROM pg_language WHERE lanplcallfoid = pg_proc.oid) as funcproclang " + "(SELECT lanname FROM pg_language WHERE oid = prolang) as lanname " "FROM pg_proc " - "WHERE oid = '%s'::oid", - finfo->oid); + "WHERE oid = '%u'::oid", + finfo->dobj.catId.oid); } res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain information on function \"%s\" failed: %s", - finfo->proname, PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); /* Expecting a single 
result only */ ntups = PQntuples(res); @@ -3873,7 +4767,6 @@ dumpOneFunc(Archive *fout, FuncInfo *finfo) proisstrict = PQgetvalue(res, 0, PQfnumber(res, "proisstrict")); prosecdef = PQgetvalue(res, 0, PQfnumber(res, "prosecdef")); lanname = PQgetvalue(res, 0, PQfnumber(res, "lanname")); - funcproclang = PQgetvalue(res, 0, PQfnumber(res, "funcproclang")); /* * See backend/commands/define.c for details of how the 'AS' clause is @@ -3942,21 +4835,26 @@ dumpOneFunc(Archive *fout, FuncInfo *finfo) appendPQExpBuffer(q, ";\n"); - ArchiveEntry(fout, finfo->oid, funcsig_tag, + ArchiveEntry(fout, finfo->dobj.catId, finfo->dobj.dumpId, + funcsig_tag, finfo->pronamespace->nspname, - finfo->usename, strcmp(funcproclang, "t") ? "FUNCTION" : "FUNC PROCEDURAL LANGUAGE", NULL, - q->data, delqry->data, - NULL, NULL, NULL); + finfo->usename, + "FUNCTION", q->data, delqry->data, NULL, + finfo->dobj.dependencies, finfo->dobj.nDeps, + NULL, NULL); /* Dump Function Comments */ - resetPQExpBuffer(q); appendPQExpBuffer(q, "FUNCTION %s", funcsig); dumpComment(fout, q->data, finfo->pronamespace->nspname, finfo->usename, - finfo->oid, "pg_proc", 0, NULL); + finfo->dobj.catId, 0, finfo->dobj.dumpId); + + dumpACL(fout, finfo->dobj.catId, finfo->dobj.dumpId, "FUNCTION", + funcsig, funcsig_tag, + finfo->pronamespace->nspname, + finfo->usename, finfo->proacl); -done: PQclear(res); destroyPQExpBuffer(query); @@ -3969,201 +4867,136 @@ done: /* - * Dump all casts + * Dump a user-defined cast */ -void -dumpCasts(Archive *fout, - FuncInfo *finfo, int numFuncs, - TypeInfo *tinfo, int numTypes) +static void +dumpCast(Archive *fout, CastInfo *cast) { - PGresult *res; - PQExpBuffer query = createPQExpBuffer(); - PQExpBuffer defqry = createPQExpBuffer(); - PQExpBuffer delqry = createPQExpBuffer(); - PQExpBuffer castsig = createPQExpBuffer(); - int ntups; - int i; - - /* Make sure we are in proper schema */ - selectSourceSchema("pg_catalog"); + PQExpBuffer defqry; + PQExpBuffer delqry; + PQExpBuffer castsig; + FuncInfo *funcInfo = NULL; + TypeInfo *sourceInfo; + TypeInfo *targetInfo; - if (fout->remoteVersion >= 70300) - appendPQExpBuffer(query, "SELECT oid, castsource, casttarget, castfunc, castcontext FROM pg_cast ORDER BY 1,2,3;"); - else - appendPQExpBuffer(query, "SELECT p.oid, t1.oid, t2.oid, p.oid, true FROM pg_type t1, pg_type t2, pg_proc p WHERE p.pronargs = 1 AND p.proargtypes[0] = t1.oid AND p.prorettype = t2.oid AND p.proname = t2.typname ORDER BY 1,2,3;"); + if (dataOnly) + return; - res = PQexec(g_conn, query->data); - if (!res || PQresultStatus(res) != PGRES_TUPLES_OK) + if (OidIsValid(cast->castfunc)) { - write_msg(NULL, "query to obtain list of casts failed: %s", - PQerrorMessage(g_conn)); - exit_nicely(); + funcInfo = findFuncByOid(cast->castfunc); + if (funcInfo == NULL) + return; } - ntups = PQntuples(res); - - for (i = 0; i < ntups; i++) - { - char *castoid = PQgetvalue(res, i, 0); - char *castsource = PQgetvalue(res, i, 1); - char *casttarget = PQgetvalue(res, i, 2); - char *castfunc = PQgetvalue(res, i, 3); - char *castcontext = PQgetvalue(res, i, 4); - int fidx = -1; - const char *((*deps)[]); - int source_idx; - int target_idx; - - if (strcmp(castfunc, "0") != 0) - fidx = findFuncByOid(finfo, numFuncs, castfunc); - - /* - * As per discussion we dump casts if one or more of the underlying - * objects (the conversion function and the two data types) are not - * builtin AND if all of the non-builtin objects namespaces are - * included in the dump. 
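For reference, the cast dump-or-skip rule described in this comment (and restated in the new dumpCast() below) reduces to a single predicate over the three underlying objects. A minimal sketch, reusing the strncmp-on-"pg_" convention and the FuncInfo/TypeInfo fields shown in the hunk, and assuming the pg_dump.h/c.h definitions for bool and the info structs; the helper name castIsDumpable is hypothetical and not part of the patch:

    static bool
    castIsDumpable(FuncInfo *funcInfo, TypeInfo *sourceInfo, TypeInfo *targetInfo)
    {
        /* treat a missing cast function (WITHOUT FUNCTION) as builtin here */
        bool    func_builtin = (funcInfo == NULL ||
                                strncmp(funcInfo->pronamespace->nspname, "pg_", 3) == 0);
        bool    src_builtin = strncmp(sourceInfo->typnamespace->nspname, "pg_", 3) == 0;
        bool    tgt_builtin = strncmp(targetInfo->typnamespace->nspname, "pg_", 3) == 0;

        /* skip when every underlying object is builtin */
        if (func_builtin && src_builtin && tgt_builtin)
            return false;
        /* skip when any non-builtin object sits in a namespace not being dumped */
        if (!func_builtin && !funcInfo->pronamespace->dump)
            return false;
        if (!src_builtin && !sourceInfo->typnamespace->dump)
            return false;
        if (!tgt_builtin && !targetInfo->typnamespace->dump)
            return false;
        return true;
    }

dumpCast() applies the same tests inline and simply returns early whenever this predicate would be false.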
Builtin meaning, the namespace name does - * not start with "pg_". - */ - source_idx = findTypeByOid(tinfo, numTypes, castsource); - target_idx = findTypeByOid(tinfo, numTypes, casttarget); - - /* - * Skip this cast if all objects are from pg_ - */ - if ((fidx < 0 || strncmp(finfo[fidx].pronamespace->nspname, "pg_", 3) == 0) && - strncmp(tinfo[source_idx].typnamespace->nspname, "pg_", 3) == 0 && - strncmp(tinfo[target_idx].typnamespace->nspname, "pg_", 3) == 0) - continue; - - /* - * Skip cast if function isn't from pg_ and that namespace is - * not dumped. - */ - if (fidx >= 0 && - strncmp(finfo[fidx].pronamespace->nspname, "pg_", 3) != 0 && - !finfo[fidx].pronamespace->dump) - continue; - - /* - * Same for the Source type - */ - if (strncmp(tinfo[source_idx].typnamespace->nspname, "pg_", 3) != 0 && - !tinfo[source_idx].typnamespace->dump) - continue; - - /* - * and the target type. - */ - if (strncmp(tinfo[target_idx].typnamespace->nspname, "pg_", 3) != 0 && - !tinfo[target_idx].typnamespace->dump) - continue; - - /* Make a dependency to ensure function is dumped first */ - if (fidx >= 0) - { - deps = malloc(sizeof(char *) * 2); - (*deps)[0] = strdup(castfunc); - (*deps)[1] = NULL; /* End of List */ - } - else - deps = NULL; + /* + * As per discussion we dump casts if one or more of the underlying + * objects (the conversion function and the two data types) are not + * builtin AND if all of the non-builtin objects namespaces are + * included in the dump. Builtin meaning, the namespace name does + * not start with "pg_". + */ + sourceInfo = findTypeByOid(cast->castsource); + targetInfo = findTypeByOid(cast->casttarget); - resetPQExpBuffer(defqry); - resetPQExpBuffer(delqry); - resetPQExpBuffer(castsig); + if (sourceInfo == NULL || targetInfo == NULL) + return; - appendPQExpBuffer(delqry, "DROP CAST (%s AS %s);\n", - getFormattedTypeName(castsource, zeroAsNone), - getFormattedTypeName(casttarget, zeroAsNone)); + /* + * Skip this cast if all objects are from pg_ + */ + if ((funcInfo == NULL || strncmp(funcInfo->pronamespace->nspname, "pg_", 3) == 0) && + strncmp(sourceInfo->typnamespace->nspname, "pg_", 3) == 0 && + strncmp(targetInfo->typnamespace->nspname, "pg_", 3) == 0) + return; - appendPQExpBuffer(defqry, "CREATE CAST (%s AS %s) ", - getFormattedTypeName(castsource, zeroAsNone), - getFormattedTypeName(casttarget, zeroAsNone)); + /* + * Skip cast if function isn't from pg_ and that namespace is + * not dumped. + */ + if (funcInfo && + strncmp(funcInfo->pronamespace->nspname, "pg_", 3) != 0 && + !funcInfo->pronamespace->dump) + return; - if (strcmp(castfunc, "0") == 0) - appendPQExpBuffer(defqry, "WITHOUT FUNCTION"); - else - appendPQExpBuffer(defqry, "WITH FUNCTION %s", - format_function_signature(&finfo[fidx], true)); + /* + * Same for the Source type + */ + if (strncmp(sourceInfo->typnamespace->nspname, "pg_", 3) != 0 && + !sourceInfo->typnamespace->dump) + return; - if (strcmp(castcontext, "a") == 0) - appendPQExpBuffer(defqry, " AS ASSIGNMENT"); - else if (strcmp(castcontext, "i") == 0) - appendPQExpBuffer(defqry, " AS IMPLICIT"); - appendPQExpBuffer(defqry, ";\n"); + /* + * and the target type. 
+ */ + if (strncmp(targetInfo->typnamespace->nspname, "pg_", 3) != 0 && + !targetInfo->typnamespace->dump) + return; - appendPQExpBuffer(castsig, "CAST (%s AS %s)", - getFormattedTypeName(castsource, zeroAsNone), - getFormattedTypeName(casttarget, zeroAsNone)); + /* Make sure we are in proper schema (needed for getFormattedTypeName) */ + selectSourceSchema("pg_catalog"); - ArchiveEntry(fout, castoid, - castsig->data, - tinfo[source_idx].typnamespace->nspname, "", - "CAST", deps, - defqry->data, delqry->data, - NULL, NULL, NULL); + defqry = createPQExpBuffer(); + delqry = createPQExpBuffer(); + castsig = createPQExpBuffer(); - /* Dump Cast Comments */ - resetPQExpBuffer(defqry); - appendPQExpBuffer(defqry, "CAST (%s AS %s)", - getFormattedTypeName(castsource, zeroAsNone), - getFormattedTypeName(casttarget, zeroAsNone)); - dumpComment(fout, defqry->data, - NULL, "", - castoid, "pg_cast", 0, NULL); + appendPQExpBuffer(delqry, "DROP CAST (%s AS %s);\n", + getFormattedTypeName(cast->castsource, zeroAsNone), + getFormattedTypeName(cast->casttarget, zeroAsNone)); - } + appendPQExpBuffer(defqry, "CREATE CAST (%s AS %s) ", + getFormattedTypeName(cast->castsource, zeroAsNone), + getFormattedTypeName(cast->casttarget, zeroAsNone)); - PQclear(res); + if (!OidIsValid(cast->castfunc)) + appendPQExpBuffer(defqry, "WITHOUT FUNCTION"); + else + appendPQExpBuffer(defqry, "WITH FUNCTION %s", + format_function_signature(funcInfo, true)); + + if (cast->castcontext == 'a') + appendPQExpBuffer(defqry, " AS ASSIGNMENT"); + else if (cast->castcontext == 'i') + appendPQExpBuffer(defqry, " AS IMPLICIT"); + appendPQExpBuffer(defqry, ";\n"); + + appendPQExpBuffer(castsig, "CAST (%s AS %s)", + getFormattedTypeName(cast->castsource, zeroAsNone), + getFormattedTypeName(cast->casttarget, zeroAsNone)); + + ArchiveEntry(fout, cast->dobj.catId, cast->dobj.dumpId, + castsig->data, + sourceInfo->typnamespace->nspname, "", + "CAST", defqry->data, delqry->data, NULL, + cast->dobj.dependencies, cast->dobj.nDeps, + NULL, NULL); + + /* Dump Cast Comments */ + resetPQExpBuffer(defqry); + appendPQExpBuffer(defqry, "CAST (%s AS %s)", + getFormattedTypeName(cast->castsource, zeroAsNone), + getFormattedTypeName(cast->casttarget, zeroAsNone)); + dumpComment(fout, defqry->data, + NULL, "", + cast->dobj.catId, 0, cast->dobj.dumpId); - destroyPQExpBuffer(query); destroyPQExpBuffer(defqry); destroyPQExpBuffer(delqry); destroyPQExpBuffer(castsig); } - -/* - * dumpOprs - * writes out to fout the queries to recreate all the user-defined operators - */ -void -dumpOprs(Archive *fout, OprInfo *oprinfo, int numOperators) -{ - int i; - - for (i = 0; i < numOperators; i++) - { - /* Dump only operators in dumpable namespaces */ - if (!oprinfo[i].oprnamespace->dump) - continue; - - /* - * some operators are invalid because they were the result of user - * defining operators before commutators exist - */ - if (strcmp(oprinfo[i].oprcode, "0") == 0) - continue; - - /* OK, dump it */ - dumpOneOpr(fout, &oprinfo[i], - oprinfo, numOperators); - } -} - /* - * dumpOneOpr + * dumpOpr * write out a single operator definition */ static void -dumpOneOpr(Archive *fout, OprInfo *oprinfo, - OprInfo *g_oprinfo, int numOperators) +dumpOpr(Archive *fout, OprInfo *oprinfo) { - PQExpBuffer query = createPQExpBuffer(); - PQExpBuffer q = createPQExpBuffer(); - PQExpBuffer delq = createPQExpBuffer(); - PQExpBuffer oprid = createPQExpBuffer(); - PQExpBuffer details = createPQExpBuffer(); + PQExpBuffer query; + PQExpBuffer q; + PQExpBuffer delq; + PQExpBuffer oprid; + 
PQExpBuffer details; const char *name; PGresult *res; int ntups; @@ -4194,6 +5027,23 @@ dumpOneOpr(Archive *fout, OprInfo *oprinfo, char *oprltcmpop; char *oprgtcmpop; + /* Dump only operators in dumpable namespaces */ + if (!oprinfo->oprnamespace->dump || dataOnly) + return; + + /* + * some operators are invalid because they were the result of user + * defining operators before commutators exist + */ + if (!OidIsValid(oprinfo->oprcode)) + return; + + query = createPQExpBuffer(); + q = createPQExpBuffer(); + delq = createPQExpBuffer(); + oprid = createPQExpBuffer(); + details = createPQExpBuffer(); + /* Make sure we are in proper schema so regoperator works correctly */ selectSourceSchema(oprinfo->oprnamespace->nspname); @@ -4213,8 +5063,8 @@ dumpOneOpr(Archive *fout, OprInfo *oprinfo, "oprltcmpop::pg_catalog.regoperator, " "oprgtcmpop::pg_catalog.regoperator " "from pg_catalog.pg_operator " - "where oid = '%s'::pg_catalog.oid", - oprinfo->oid); + "where oid = '%u'::pg_catalog.oid", + oprinfo->dobj.catId.oid); } else if (g_fout->remoteVersion >= 70100) { @@ -4227,8 +5077,8 @@ dumpOneOpr(Archive *fout, OprInfo *oprinfo, "oprcanhash, oprlsortop, oprrsortop, " "0 as oprltcmpop, 0 as oprgtcmpop " "from pg_operator " - "where oid = '%s'::oid", - oprinfo->oid); + "where oid = '%u'::oid", + oprinfo->dobj.catId.oid); } else { @@ -4241,17 +5091,12 @@ dumpOneOpr(Archive *fout, OprInfo *oprinfo, "oprcanhash, oprlsortop, oprrsortop, " "0 as oprltcmpop, 0 as oprgtcmpop " "from pg_operator " - "where oid = '%s'::oid", - oprinfo->oid); + "where oid = '%u'::oid", + oprinfo->dobj.catId.oid); } res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain list of operators failed: %s", PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); /* Expecting a single result only */ ntups = PQntuples(res); @@ -4326,11 +5171,11 @@ dumpOneOpr(Archive *fout, OprInfo *oprinfo, else appendPQExpBuffer(oprid, ", NONE)"); - name = convertOperatorReference(oprcom, g_oprinfo, numOperators); + name = convertOperatorReference(oprcom); if (name) appendPQExpBuffer(details, ",\n COMMUTATOR = %s", name); - name = convertOperatorReference(oprnegate, g_oprinfo, numOperators); + name = convertOperatorReference(oprnegate); if (name) appendPQExpBuffer(details, ",\n NEGATOR = %s", name); @@ -4345,19 +5190,19 @@ dumpOneOpr(Archive *fout, OprInfo *oprinfo, if (name) appendPQExpBuffer(details, ",\n JOIN = %s", name); - name = convertOperatorReference(oprlsortop, g_oprinfo, numOperators); + name = convertOperatorReference(oprlsortop); if (name) appendPQExpBuffer(details, ",\n SORT1 = %s", name); - name = convertOperatorReference(oprrsortop, g_oprinfo, numOperators); + name = convertOperatorReference(oprrsortop); if (name) appendPQExpBuffer(details, ",\n SORT2 = %s", name); - name = convertOperatorReference(oprltcmpop, g_oprinfo, numOperators); + name = convertOperatorReference(oprltcmpop); if (name) appendPQExpBuffer(details, ",\n LTCMP = %s", name); - name = convertOperatorReference(oprgtcmpop, g_oprinfo, numOperators); + name = convertOperatorReference(oprgtcmpop); if (name) appendPQExpBuffer(details, ",\n GTCMP = %s", name); @@ -4372,19 +5217,19 @@ dumpOneOpr(Archive *fout, OprInfo *oprinfo, appendPQExpBuffer(q, "CREATE OPERATOR %s (\n%s\n);\n", oprinfo->oprname, details->data); - ArchiveEntry(fout, oprinfo->oid, oprinfo->oprname, + ArchiveEntry(fout, oprinfo->dobj.catId, oprinfo->dobj.dumpId, + oprinfo->oprname, 
oprinfo->oprnamespace->nspname, oprinfo->usename, - "OPERATOR", NULL, - q->data, delq->data, - NULL, NULL, NULL); + "OPERATOR", q->data, delq->data, NULL, + oprinfo->dobj.dependencies, oprinfo->dobj.nDeps, + NULL, NULL); /* Dump Operator Comments */ - resetPQExpBuffer(q); appendPQExpBuffer(q, "OPERATOR %s", oprid->data); dumpComment(fout, q->data, oprinfo->oprnamespace->nspname, oprinfo->usename, - oprinfo->oid, "pg_operator", 0, NULL); + oprinfo->dobj.catId, 0, oprinfo->dobj.dumpId); PQclear(res); @@ -4446,10 +5291,9 @@ convertRegProcReference(const char *proc) * numeric OID, which we search our operator list for. */ static const char * -convertOperatorReference(const char *opr, - OprInfo *g_oprinfo, int numOperators) +convertOperatorReference(const char *opr) { - char *name; + OprInfo *oprInfo; /* In all cases "0" means a null reference */ if (strcmp(opr, "0") == 0) @@ -4457,6 +5301,7 @@ convertOperatorReference(const char *opr, if (g_fout->remoteVersion >= 70300) { + char *name; char *paren; bool inquote; @@ -4476,45 +5321,26 @@ convertOperatorReference(const char *opr, return name; } - name = findOprByOid(g_oprinfo, numOperators, opr); - if (name == NULL) + oprInfo = findOprByOid(atooid(opr)); + if (oprInfo == NULL) + { write_msg(NULL, "WARNING: could not find operator with OID %s\n", opr); - return name; -} - - -/* - * dumpOpclasses - * writes out to fout the queries to recreate all the user-defined - * operator classes - */ -void -dumpOpclasses(Archive *fout, OpclassInfo *opcinfo, int numOpclasses) -{ - int i; - - for (i = 0; i < numOpclasses; i++) - { - /* Dump only opclasses in dumpable namespaces */ - if (!opcinfo[i].opcnamespace->dump) - continue; - - /* OK, dump it */ - dumpOneOpclass(fout, &opcinfo[i]); + return NULL; } + return oprInfo->oprname; } /* - * dumpOneOpclass + * dumpOpclass * write out a single operator class definition */ static void -dumpOneOpclass(Archive *fout, OpclassInfo *opcinfo) +dumpOpclass(Archive *fout, OpclassInfo *opcinfo) { - PQExpBuffer query = createPQExpBuffer(); - PQExpBuffer q = createPQExpBuffer(); - PQExpBuffer delq = createPQExpBuffer(); + PQExpBuffer query; + PQExpBuffer q; + PQExpBuffer delq; PGresult *res; int ntups; int i_opcintype; @@ -4538,6 +5364,10 @@ dumpOneOpclass(Archive *fout, OpclassInfo *opcinfo) bool needComma; int i; + /* Dump only opclasses in dumpable namespaces */ + if (!opcinfo->opcnamespace->dump || dataOnly) + return; + /* * XXX currently we do not implement dumping of operator classes from * pre-7.3 databases. 
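Most hunks in this area also replace the hand-rolled "if (!res || PQresultStatus(res) != ...)" blocks with check_sql_result() (and do_sql_command() for utility statements). Those helpers' bodies are not part of this excerpt, so the following is only a rough sketch of the pattern they stand for, written against the libpq calls the removed code already used; the name sketch_check_sql_result marks it as illustrative:

    static void
    sketch_check_sql_result(PGresult *res, PGconn *conn,
                            const char *query, ExecStatusType expected)
    {
        /* the same test the removed inline blocks performed */
        if (res == NULL || PQresultStatus(res) != expected)
        {
            write_msg(NULL, "query failed: %s", PQerrorMessage(conn));
            write_msg(NULL, "The command was: %s\n", query);
            exit_nicely();
        }
    }

Whatever the real helpers print, the effect in these hunks is that every PQexec() is followed by one call instead of a repeated multi-line error block.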
This could be done but it seems not worth the @@ -4546,6 +5376,10 @@ dumpOneOpclass(Archive *fout, OpclassInfo *opcinfo) if (g_fout->remoteVersion < 70300) return; + query = createPQExpBuffer(); + q = createPQExpBuffer(); + delq = createPQExpBuffer(); + /* Make sure we are in proper schema so regoperator works correctly */ selectSourceSchema(opcinfo->opcnamespace->nspname); @@ -4555,16 +5389,11 @@ dumpOneOpclass(Archive *fout, OpclassInfo *opcinfo) "opcdefault, " "(SELECT amname FROM pg_catalog.pg_am WHERE oid = opcamid) AS amname " "FROM pg_catalog.pg_opclass " - "WHERE oid = '%s'::pg_catalog.oid", - opcinfo->oid); + "WHERE oid = '%u'::pg_catalog.oid", + opcinfo->dobj.catId.oid); res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain operator class details failed: %s", PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); /* Expecting a single result only */ ntups = PQntuples(res); @@ -4625,17 +5454,12 @@ dumpOneOpclass(Archive *fout, OpclassInfo *opcinfo) appendPQExpBuffer(query, "SELECT amopstrategy, amopreqcheck, " "amopopr::pg_catalog.regoperator " "FROM pg_catalog.pg_amop " - "WHERE amopclaid = '%s'::pg_catalog.oid " + "WHERE amopclaid = '%u'::pg_catalog.oid " "ORDER BY amopstrategy", - opcinfo->oid); + opcinfo->dobj.catId.oid); res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain operator class operators failed: %s", PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); ntups = PQntuples(res); @@ -4670,17 +5494,12 @@ dumpOneOpclass(Archive *fout, OpclassInfo *opcinfo) appendPQExpBuffer(query, "SELECT amprocnum, " "amproc::pg_catalog.regprocedure " "FROM pg_catalog.pg_amproc " - "WHERE amopclaid = '%s'::pg_catalog.oid " + "WHERE amopclaid = '%u'::pg_catalog.oid " "ORDER BY amprocnum", - opcinfo->oid); + opcinfo->dobj.catId.oid); res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain operator class functions failed: %s", PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); ntups = PQntuples(res); @@ -4705,11 +5524,12 @@ dumpOneOpclass(Archive *fout, OpclassInfo *opcinfo) appendPQExpBuffer(q, ";\n"); - ArchiveEntry(fout, opcinfo->oid, opcinfo->opcname, + ArchiveEntry(fout, opcinfo->dobj.catId, opcinfo->dobj.dumpId, + opcinfo->opcname, opcinfo->opcnamespace->nspname, opcinfo->usename, - "OPERATOR CLASS", NULL, - q->data, delq->data, - NULL, NULL, NULL); + "OPERATOR CLASS", q->data, delq->data, NULL, + opcinfo->dobj.dependencies, opcinfo->dobj.nDeps, + NULL, NULL); /* Dump Operator Class Comments */ resetPQExpBuffer(q); @@ -4719,7 +5539,7 @@ dumpOneOpclass(Archive *fout, OpclassInfo *opcinfo) fmtId(amname)); dumpComment(fout, q->data, NULL, opcinfo->usename, - opcinfo->oid, "pg_opclass", 0, NULL); + opcinfo->dobj.catId, 0, opcinfo->dobj.dumpId); free(amname); destroyPQExpBuffer(query); @@ -4728,35 +5548,16 @@ dumpOneOpclass(Archive *fout, OpclassInfo *opcinfo) } /* - * dumpConversions - * writes out to fout the queries to create all the user-defined conversions - */ -void -dumpConversions(Archive *fout, ConvInfo convinfo[], int numConvs) -{ - int i; - - for (i = 0; i < numConvs; i++) - { - /* Dump only conversions in dumpable namespaces */ - if (!convinfo[i].connamespace->dump) - continue; - - 
dumpOneConversion(fout, &convinfo[i]); - } -} - -/* - * dumpOneConversion + * dumpConversion * write out a single conversion definition */ static void -dumpOneConversion(Archive *fout, ConvInfo *convinfo) +dumpConversion(Archive *fout, ConvInfo *convinfo) { - PQExpBuffer query = createPQExpBuffer(); - PQExpBuffer q = createPQExpBuffer(); - PQExpBuffer delq = createPQExpBuffer(); - PQExpBuffer details = createPQExpBuffer(); + PQExpBuffer query; + PQExpBuffer q; + PQExpBuffer delq; + PQExpBuffer details; PGresult *res; int ntups; int i_conname; @@ -4770,6 +5571,15 @@ dumpOneConversion(Archive *fout, ConvInfo *convinfo) const char *conproc; bool condefault; + /* Dump only conversions in dumpable namespaces */ + if (!convinfo->connamespace->dump || dataOnly) + return; + + query = createPQExpBuffer(); + q = createPQExpBuffer(); + delq = createPQExpBuffer(); + details = createPQExpBuffer(); + /* Make sure we are in proper schema */ selectSourceSchema(convinfo->connamespace->nspname); @@ -4779,17 +5589,11 @@ dumpOneConversion(Archive *fout, ConvInfo *convinfo) "pg_catalog.pg_encoding_to_char(contoencoding) AS contoencoding, " "conproc, condefault " "FROM pg_catalog.pg_conversion c " - "WHERE c.oid = '%s'::pg_catalog.oid", - convinfo->oid); + "WHERE c.oid = '%u'::pg_catalog.oid", + convinfo->dobj.catId.oid); res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain conversion failed: %s", - PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); /* Expecting a single result only */ ntups = PQntuples(res); @@ -4830,49 +5634,28 @@ dumpOneConversion(Archive *fout, ConvInfo *convinfo) /* regproc is automatically quoted in 7.3 and above */ appendPQExpBuffer(q, " FROM %s;\n", conproc); - ArchiveEntry(fout, convinfo->oid, convinfo->conname, + ArchiveEntry(fout, convinfo->dobj.catId, convinfo->dobj.dumpId, + convinfo->conname, convinfo->connamespace->nspname, convinfo->usename, - "CONVERSION", NULL, - q->data, delq->data, - NULL, NULL, NULL); + "CONVERSION", q->data, delq->data, NULL, + convinfo->dobj.dependencies, convinfo->dobj.nDeps, + NULL, NULL); /* Dump Conversion Comments */ resetPQExpBuffer(q); appendPQExpBuffer(q, "CONVERSION %s", fmtId(convinfo->conname)); dumpComment(fout, q->data, - convinfo->connamespace->nspname, convinfo->usename, - convinfo->oid, "pg_conversion", 0, NULL); - - PQclear(res); - - destroyPQExpBuffer(query); - destroyPQExpBuffer(q); - destroyPQExpBuffer(delq); - destroyPQExpBuffer(details); -} - -/* - * dumpAggs - * writes out to fout the queries to create all the user-defined aggregates - */ -void -dumpAggs(Archive *fout, AggInfo agginfo[], int numAggs) -{ - int i; + convinfo->connamespace->nspname, convinfo->usename, + convinfo->dobj.catId, 0, convinfo->dobj.dumpId); - for (i = 0; i < numAggs; i++) - { - /* Dump only aggs in dumpable namespaces */ - if (!agginfo[i].aggnamespace->dump) - continue; + PQclear(res); - dumpOneAgg(fout, &agginfo[i]); - if (!aclsSkip) - dumpAggACL(fout, &agginfo[i]); - } + destroyPQExpBuffer(query); + destroyPQExpBuffer(q); + destroyPQExpBuffer(delq); + destroyPQExpBuffer(details); } - /* * format_aggregate_signature: generate aggregate name and argument list * @@ -4887,9 +5670,9 @@ format_aggregate_signature(AggInfo *agginfo, Archive *fout, bool honor_quotes) initPQExpBuffer(&buf); if (honor_quotes) appendPQExpBuffer(&buf, "%s", - fmtId(agginfo->aggname)); + fmtId(agginfo->aggfn.proname)); else - appendPQExpBuffer(&buf, 
"%s", agginfo->aggname); + appendPQExpBuffer(&buf, "%s", agginfo->aggfn.proname); /* If using regtype or format_type, fmtbasetype is already quoted */ if (fout->remoteVersion >= 70100) @@ -4911,34 +5694,17 @@ format_aggregate_signature(AggInfo *agginfo, Archive *fout, bool honor_quotes) return buf.data; } - -static void -dumpAggACL(Archive *fout, AggInfo *finfo) -{ - char *aggsig, - *aggsig_tag; - - aggsig = format_aggregate_signature(finfo, fout, true); - aggsig_tag = format_aggregate_signature(finfo, fout, false); - dumpACL(fout, "FUNCTION", aggsig, aggsig_tag, - finfo->aggnamespace->nspname, - finfo->usename, finfo->aggacl, finfo->oid); - free(aggsig); - free(aggsig_tag); -} - - /* - * dumpOneAgg + * dumpAgg * write out a single aggregate definition */ static void -dumpOneAgg(Archive *fout, AggInfo *agginfo) +dumpAgg(Archive *fout, AggInfo *agginfo) { - PQExpBuffer query = createPQExpBuffer(); - PQExpBuffer q = createPQExpBuffer(); - PQExpBuffer delq = createPQExpBuffer(); - PQExpBuffer details = createPQExpBuffer(); + PQExpBuffer query; + PQExpBuffer q; + PQExpBuffer delq; + PQExpBuffer details; char *aggsig; char *aggsig_tag; PGresult *res; @@ -4956,8 +5722,17 @@ dumpOneAgg(Archive *fout, AggInfo *agginfo) const char *agginitval; bool convertok; + /* Dump only aggs in dumpable namespaces */ + if (!agginfo->aggfn.pronamespace->dump || dataOnly) + return; + + query = createPQExpBuffer(); + q = createPQExpBuffer(); + delq = createPQExpBuffer(); + details = createPQExpBuffer(); + /* Make sure we are in proper schema */ - selectSourceSchema(agginfo->aggnamespace->nspname); + selectSourceSchema(agginfo->aggfn.pronamespace->nspname); /* Get aggregate-specific details */ if (g_fout->remoteVersion >= 70300) @@ -4970,8 +5745,8 @@ dumpOneAgg(Archive *fout, AggInfo *agginfo) "'t'::boolean as convertok " "from pg_catalog.pg_aggregate a, pg_catalog.pg_proc p " "where a.aggfnoid = p.oid " - "and p.oid = '%s'::pg_catalog.oid", - agginfo->oid); + "and p.oid = '%u'::pg_catalog.oid", + agginfo->aggfn.dobj.catId.oid); } else if (g_fout->remoteVersion >= 70100) { @@ -4983,8 +5758,8 @@ dumpOneAgg(Archive *fout, AggInfo *agginfo) "ELSE format_type(aggbasetype, NULL) END as fmtbasetype, " "'t'::boolean as convertok " "from pg_aggregate " - "where oid = '%s'::oid", - agginfo->oid); + "where oid = '%u'::oid", + agginfo->aggfn.dobj.catId.oid); } else { @@ -4996,18 +5771,12 @@ dumpOneAgg(Archive *fout, AggInfo *agginfo) "(select typname from pg_type where oid = aggbasetype) as fmtbasetype, " "(aggtransfn2 = 0 and aggtranstype2 = 0 and agginitval2 is null) as convertok " "from pg_aggregate " - "where oid = '%s'::oid", - agginfo->oid); + "where oid = '%u'::oid", + agginfo->aggfn.dobj.catId.oid); } res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain list of aggregate functions failed: %s", - PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); /* Expecting a single result only */ ntups = PQntuples(res); @@ -5030,27 +5799,19 @@ dumpOneAgg(Archive *fout, AggInfo *agginfo) aggfinalfn = PQgetvalue(res, 0, i_aggfinalfn); aggtranstype = PQgetvalue(res, 0, i_aggtranstype); agginitval = PQgetvalue(res, 0, i_agginitval); - /* we save anybasetype so that dumpAggACL can use it later */ + /* we save anybasetype for format_aggregate_signature */ agginfo->anybasetype = (PQgetvalue(res, 0, i_anybasetype)[0] == 't'); - /* we save fmtbasetype so that dumpAggACL can use it later */ + /* we save 
fmtbasetype for format_aggregate_signature */ agginfo->fmtbasetype = strdup(PQgetvalue(res, 0, i_fmtbasetype)); convertok = (PQgetvalue(res, 0, i_convertok)[0] == 't'); - aggsig = format_aggregate_signature(agginfo, g_fout, true); - aggsig_tag = format_aggregate_signature(agginfo, g_fout, false); + aggsig = format_aggregate_signature(agginfo, fout, true); + aggsig_tag = format_aggregate_signature(agginfo, fout, false); if (!convertok) { write_msg(NULL, "WARNING: aggregate function %s could not be dumped correctly for this database version; ignored\n", aggsig); - - appendPQExpBuffer(q, "-- WARNING: aggregate function %s could not be dumped correctly for this database version; ignored\n", - aggsig); - ArchiveEntry(fout, agginfo->oid, aggsig_tag, - agginfo->aggnamespace->nspname, agginfo->usename, - "WARNING", NULL, - q->data, "" /* Del */ , - NULL, NULL, NULL); return; } @@ -5101,31 +5862,46 @@ dumpOneAgg(Archive *fout, AggInfo *agginfo) * pg_catalog */ appendPQExpBuffer(delq, "DROP AGGREGATE %s.%s;\n", - fmtId(agginfo->aggnamespace->nspname), + fmtId(agginfo->aggfn.pronamespace->nspname), aggsig); appendPQExpBuffer(q, "CREATE AGGREGATE %s (\n%s\n);\n", - fmtId(agginfo->aggname), + fmtId(agginfo->aggfn.proname), details->data); - ArchiveEntry(fout, agginfo->oid, aggsig_tag, - agginfo->aggnamespace->nspname, agginfo->usename, - "AGGREGATE", NULL, - q->data, delq->data, - NULL, NULL, NULL); + ArchiveEntry(fout, agginfo->aggfn.dobj.catId, agginfo->aggfn.dobj.dumpId, + aggsig_tag, + agginfo->aggfn.pronamespace->nspname, agginfo->aggfn.usename, + "AGGREGATE", q->data, delq->data, NULL, + agginfo->aggfn.dobj.dependencies, agginfo->aggfn.dobj.nDeps, + NULL, NULL); /* Dump Aggregate Comments */ - resetPQExpBuffer(q); appendPQExpBuffer(q, "AGGREGATE %s", aggsig); - if (g_fout->remoteVersion >= 70300) - dumpComment(fout, q->data, - agginfo->aggnamespace->nspname, agginfo->usename, - agginfo->oid, "pg_proc", 0, NULL); - else - dumpComment(fout, q->data, - agginfo->aggnamespace->nspname, agginfo->usename, - agginfo->oid, "pg_aggregate", 0, NULL); + dumpComment(fout, q->data, + agginfo->aggfn.pronamespace->nspname, agginfo->aggfn.usename, + agginfo->aggfn.dobj.catId, 0, agginfo->aggfn.dobj.dumpId); + + /* + * Since there is no GRANT ON AGGREGATE syntax, we have to make the + * ACL command look like a function's GRANT; in particular this affects + * the syntax for aggregates on ANY. + */ + free(aggsig); + free(aggsig_tag); + + aggsig = format_function_signature(&agginfo->aggfn, true); + aggsig_tag = format_function_signature(&agginfo->aggfn, false); + + dumpACL(fout, agginfo->aggfn.dobj.catId, agginfo->aggfn.dobj.dumpId, + "FUNCTION", + aggsig, aggsig_tag, + agginfo->aggfn.pronamespace->nspname, + agginfo->aggfn.usename, agginfo->aggfn.proacl); + + free(aggsig); + free(aggsig_tag); PQclear(res); @@ -5133,14 +5909,14 @@ dumpOneAgg(Archive *fout, AggInfo *agginfo) destroyPQExpBuffer(q); destroyPQExpBuffer(delq); destroyPQExpBuffer(details); - free(aggsig); - free(aggsig_tag); } /*---------- * Write out grant/revoke information * + * 'objCatId' is the catalog ID of the underlying object. + * 'objDumpId' is the dump ID of the underlying object. * 'type' must be TABLE, FUNCTION, LANGUAGE, or SCHEMA. * 'name' is the formatted name of the object. Must be quoted etc. already. * 'tag' is the tag for the archive entry (typ. unquoted name of object). @@ -5148,28 +5924,19 @@ dumpOneAgg(Archive *fout, AggInfo *agginfo) * 'owner' is the owner, NULL if there is no owner (for languages). 
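Under the new scheme an ACL is its own archive entry rather than being sorted by a borrowed OID: it gets a fresh dump ID from createDumpId(), no catalog ID (nilCatalogId), and exactly one dependency, on the object whose privileges it grants. A minimal sketch of that registration using the ArchiveEntry() signature seen in these hunks; the object name, schema, owner, and GRANT text are placeholders:

    static void
    sketch_register_acl(Archive *fout, DumpId objDumpId)
    {
        PQExpBuffer sql = createPQExpBuffer();

        /* placeholder GRANT text; dumpACL() builds this from the stored ACL string */
        appendPQExpBuffer(sql, "GRANT SELECT ON TABLE mytab TO PUBLIC;\n");

        ArchiveEntry(fout, nilCatalogId, createDumpId(),
                     "mytab", "public", "postgres",
                     "ACL", sql->data, "", NULL,
                     &objDumpId, 1,        /* single dependency: the owning object */
                     NULL, NULL);

        destroyPQExpBuffer(sql);
    }

Because restore order is now driven by these dependencies, the dumpTableACL() sort-OID trick removed just below (view rule OID, owning table OID, or the table's own OID) is no longer needed.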
* 'acls' is the string read out of the fooacl system catalog field; * it will be parsed here. - * 'objoid' is the OID of the object for purposes of ordering. *---------- */ static void -dumpACL(Archive *fout, const char *type, const char *name, +dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId, + const char *type, const char *name, const char *tag, const char *nspname, const char *owner, - const char *acls, const char *objoid) + const char *acls) { PQExpBuffer sql; - /* - * acl_lang is a flag only true if we are dumping language's ACL, so - * we can set 'type' to a value that is suitable to build SQL requests - * as for other types. - */ - bool acl_lang = false; - - if (!strcmp(type, "ACL LANGUAGE")) - { - type = "LANGUAGE"; - acl_lang = true; - } + /* Do nothing if ACL dump is not enabled */ + if (dataOnly || aclsSkip) + return; sql = createPQExpBuffer(); @@ -5181,126 +5948,58 @@ dumpACL(Archive *fout, const char *type, const char *name, } if (sql->len > 0) - ArchiveEntry(fout, objoid, tag, nspname, + ArchiveEntry(fout, nilCatalogId, createDumpId(), + tag, nspname, owner ? owner : "", - acl_lang ? "ACL LANGUAGE" : "ACL", - NULL, sql->data, "", NULL, NULL, NULL); + "ACL", sql->data, "", NULL, + &(objDumpId), 1, + NULL, NULL); destroyPQExpBuffer(sql); } - -static void -dumpTableACL(Archive *fout, TableInfo *tbinfo) -{ - char *namecopy = strdup(fmtId(tbinfo->relname)); - char *dumpoid; - - /* - * Choose OID to use for sorting ACL into position. For a view, sort - * by the view OID; for a serial sequence, sort by the owning table's - * OID; otherwise use the table's own OID. - */ - if (tbinfo->viewoid != NULL) - dumpoid = tbinfo->viewoid; - else if (tbinfo->owning_tab != NULL) - dumpoid = tbinfo->owning_tab; - else - dumpoid = tbinfo->oid; - - dumpACL(fout, "TABLE", namecopy, tbinfo->relname, - tbinfo->relnamespace->nspname, tbinfo->usename, tbinfo->relacl, - dumpoid); - - free(namecopy); -} - - /* - * dumpTables: - * write out to fout the declarations (not data) of all user-defined tables + * dumpTable + * write out to fout the declarations (not data) of a user-defined table */ -void -dumpTables(Archive *fout, TableInfo tblinfo[], int numTables, - const bool aclsSkip, const bool schemaOnly, const bool dataOnly) +static void +dumpTable(Archive *fout, TableInfo *tbinfo) { - int i; - - /* - * Dump non-serial sequences first, in case they are referenced in - * table defn's - */ - for (i = 0; i < numTables; i++) - { - TableInfo *tbinfo = &tblinfo[i]; - - if (tbinfo->relkind != RELKIND_SEQUENCE) - continue; - - if (tbinfo->dump && tbinfo->owning_tab == NULL) - { - dumpOneSequence(fout, tbinfo, schemaOnly, dataOnly); - if (!dataOnly && !aclsSkip) - dumpTableACL(fout, tbinfo); - } - } - - if (!dataOnly) - { - for (i = 0; i < numTables; i++) - { - TableInfo *tbinfo = &tblinfo[i]; - - if (tbinfo->relkind == RELKIND_SEQUENCE) /* already dumped */ - continue; - - if (tbinfo->dump) - { - dumpOneTable(fout, tbinfo, tblinfo); - if (!aclsSkip) - dumpTableACL(fout, tbinfo); - } - } - } + char *namecopy; - /* - * Dump serial sequences last (we will not emit any CREATE commands, - * but we do have to think about ACLs and setval operations). 
- */ - for (i = 0; i < numTables; i++) + if (tbinfo->dump) { - TableInfo *tbinfo = &tblinfo[i]; - - if (tbinfo->relkind != RELKIND_SEQUENCE) - continue; - - if (tbinfo->dump && tbinfo->owning_tab != NULL) - { - dumpOneSequence(fout, tbinfo, schemaOnly, dataOnly); - if (!dataOnly && !aclsSkip) - dumpTableACL(fout, tbinfo); - } + if (tbinfo->relkind == RELKIND_SEQUENCE) + dumpSequence(fout, tbinfo); + else if (!dataOnly) + dumpTableSchema(fout, tbinfo); + + /* Handle the ACL here */ + namecopy = strdup(fmtId(tbinfo->relname)); + dumpACL(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId, "TABLE", + namecopy, tbinfo->relname, + tbinfo->relnamespace->nspname, tbinfo->usename, + tbinfo->relacl); + free(namecopy); } } /* - * dumpOneTable + * dumpTableSchema * write the declaration (not data) of one user-defined table or view */ static void -dumpOneTable(Archive *fout, TableInfo *tbinfo, TableInfo *g_tblinfo) +dumpTableSchema(Archive *fout, TableInfo *tbinfo) { PQExpBuffer query = createPQExpBuffer(); PQExpBuffer q = createPQExpBuffer(); PQExpBuffer delq = createPQExpBuffer(); PGresult *res; int numParents; - int *parentIndexes; + TableInfo **parents; int actual_atts; /* number of attrs in this CREATE statment */ char *reltypename; char *storage; - char *objoid; - const char *((*commentDeps)[]); int j, k; @@ -5317,31 +6016,21 @@ dumpOneTable(Archive *fout, TableInfo *tbinfo, TableInfo *g_tblinfo) /* Fetch the view definition */ if (g_fout->remoteVersion >= 70300) { - /* Beginning in 7.3, viewname is not unique; use OID */ - appendPQExpBuffer(query, "SELECT pg_catalog.pg_get_viewdef(ev_class) as viewdef, " - "oid as view_oid" - " from pg_catalog.pg_rewrite where" - " ev_class = '%s'::pg_catalog.oid and" - " rulename = '_RETURN';", - tbinfo->oid); + /* Beginning in 7.3, viewname is not unique; rely on OID */ + appendPQExpBuffer(query, + "SELECT pg_catalog.pg_get_viewdef('%u'::pg_catalog.oid) as viewdef", + tbinfo->dobj.catId.oid); } else { - appendPQExpBuffer(query, "SELECT definition as viewdef, " - "(select oid from pg_rewrite where " - " rulename=('_RET' || viewname)::name) as view_oid" + appendPQExpBuffer(query, "SELECT definition as viewdef " " from pg_views where viewname = "); appendStringLiteral(query, tbinfo->relname, true); appendPQExpBuffer(query, ";"); } res = PQexec(g_conn, query->data); - if (!res || PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain definition of view \"%s\" failed: %s", - tbinfo->relname, PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); if (PQntuples(res) != 1) { @@ -5354,13 +6043,6 @@ dumpOneTable(Archive *fout, TableInfo *tbinfo, TableInfo *g_tblinfo) exit_nicely(); } - if (PQgetisnull(res, 0, 1)) - { - write_msg(NULL, "query to obtain definition of view \"%s\" returned null OID\n", - tbinfo->relname); - exit_nicely(); - } - viewdef = PQgetvalue(res, 0, 0); if (strlen(viewdef) == 0) @@ -5370,11 +6052,6 @@ dumpOneTable(Archive *fout, TableInfo *tbinfo, TableInfo *g_tblinfo) exit_nicely(); } - /* We use the OID of the view rule as the object OID */ - objoid = strdup(PQgetvalue(res, 0, 1)); - /* Save it for use by dumpACL, too */ - tbinfo->viewoid = objoid; - /* * DROP must be fully qualified in case same name appears in * pg_catalog @@ -5388,35 +6065,12 @@ dumpOneTable(Archive *fout, TableInfo *tbinfo, TableInfo *g_tblinfo) fmtId(tbinfo->relname), viewdef); PQclear(res); - - /* - * Views can have default values -- however, they must be - * specified in an ALTER TABLE command after the 
view has been - * created, not in the view definition itself. - */ - for (j = 0; j < tbinfo->numatts; j++) - { - if (tbinfo->adef_expr[j] != NULL && !tbinfo->inhAttrDef[j]) - { - appendPQExpBuffer(q, "ALTER TABLE %s ", - fmtId(tbinfo->relname)); - appendPQExpBuffer(q, "ALTER COLUMN %s SET DEFAULT %s;\n", - fmtId(tbinfo->attnames[j]), - tbinfo->adef_expr[j]); - } - } - - commentDeps = malloc(sizeof(char *) * 2); - (*commentDeps)[0] = strdup(objoid); - (*commentDeps)[1] = NULL; /* end of list */ } else { reltypename = "TABLE"; - objoid = tbinfo->oid; - commentDeps = NULL; numParents = tbinfo->numParents; - parentIndexes = tbinfo->parentIndexes; + parents = tbinfo->parents; /* * DROP must be fully qualified in case same name appears in @@ -5466,12 +6120,16 @@ dumpOneTable(Archive *fout, TableInfo *tbinfo, TableInfo *g_tblinfo) tbinfo->atttypmod[j])); } - /* Default value --- suppress if inherited or serial */ - if (tbinfo->adef_expr[j] != NULL && + /* + * Default value --- suppress if inherited, serial, + * or to be printed separately. + */ + if (tbinfo->attrdefs[j] != NULL && !tbinfo->inhAttrDef[j] && - !tbinfo->attisserial[j]) + !tbinfo->attisserial[j] && + !tbinfo->attrdefs[j]->separate) appendPQExpBuffer(q, " DEFAULT %s", - tbinfo->adef_expr[j]); + tbinfo->attrdefs[j]->adef_expr); /* * Not Null constraint --- suppress if inherited @@ -5490,123 +6148,25 @@ dumpOneTable(Archive *fout, TableInfo *tbinfo, TableInfo *g_tblinfo) } /* - * Add non-inherited CHECK constraints, if any. If a constraint - * matches by name and condition with a constraint belonging to a - * parent class (OR conditions match and both names start with - * '$'), we assume it was inherited. + * Add non-inherited CHECK constraints, if any. */ - if (tbinfo->ncheck > 0) + for (j = 0; j < tbinfo->ncheck; j++) { - PGresult *res2; - int i_conname, - i_consrc; - int ntups2; - - if (g_verbose) - write_msg(NULL, "finding check constraints for table \"%s\"\n", - tbinfo->relname); - - resetPQExpBuffer(query); - if (g_fout->remoteVersion >= 70400) - appendPQExpBuffer(query, "SELECT conname, " - " pg_catalog.pg_get_constraintdef(c1.oid) AS consrc " - " from pg_catalog.pg_constraint c1 " - " where conrelid = '%s'::pg_catalog.oid " - " and contype = 'c' " - " and not exists " - " (select 1 from " - " pg_catalog.pg_constraint c2, " - " pg_catalog.pg_inherits i " - " where i.inhrelid = c1.conrelid " - " and (c2.conname = c1.conname " - " or (c2.conname[0] = '$' " - " and c1.conname[0] = '$')" - " )" - " and pg_catalog.pg_get_constraintdef(c2.oid) " - " = pg_catalog.pg_get_constraintdef(c1.oid) " - " and c2.conrelid = i.inhparent) " - " order by conname ", - tbinfo->oid); - else if (g_fout->remoteVersion >= 70300) - appendPQExpBuffer(query, "SELECT conname, " - " 'CHECK (' || consrc || ')' AS consrc" - " from pg_catalog.pg_constraint c1" - " where conrelid = '%s'::pg_catalog.oid " - " and contype = 'c' " - " and not exists " - " (select 1 from " - " pg_catalog.pg_constraint c2, " - " pg_catalog.pg_inherits i " - " where i.inhrelid = c1.conrelid " - " and (c2.conname = c1.conname " - " or (c2.conname[0] = '$' " - " and c1.conname[0] = '$')" - " )" - " and c2.consrc = c1.consrc " - " and c2.conrelid = i.inhparent) " - " order by conname ", - tbinfo->oid); - else - appendPQExpBuffer(query, "SELECT rcname as conname," - " 'CHECK (' || rcsrc || ')' as consrc" - " from pg_relcheck c1" - " where rcrelid = '%s'::oid " - " and not exists " - " (select 1 from pg_relcheck c2, " - " pg_inherits i " - " where i.inhrelid = c1.rcrelid " - " and 
(c2.rcname = c1.rcname " - " or (c2.rcname[0] = '$' " - " and c1.rcname[0] = '$')" - " )" - " and c2.rcsrc = c1.rcsrc " - " and c2.rcrelid = i.inhparent) " - " order by rcname ", - tbinfo->oid); - res2 = PQexec(g_conn, query->data); - if (!res2 || - PQresultStatus(res2) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain check constraints failed: %s", PQerrorMessage(g_conn)); - exit_nicely(); - } - ntups2 = PQntuples(res2); - if (ntups2 > tbinfo->ncheck) - { - write_msg(NULL, "expected %d check constraints on table \"%s\" but found %d\n", - tbinfo->ncheck, tbinfo->relname, ntups2); - write_msg(NULL, "(The system catalogs might be corrupted.)\n"); - exit_nicely(); - } + ConstraintInfo *constr = &(tbinfo->checkexprs[j]); - i_conname = PQfnumber(res2, "conname"); - i_consrc = PQfnumber(res2, "consrc"); + if (constr->coninherited || constr->separate) + continue; - for (j = 0; j < ntups2; j++) - { - const char *name = PQgetvalue(res2, j, i_conname); - const char *expr = PQgetvalue(res2, j, i_consrc); + if (actual_atts > 0) + appendPQExpBuffer(q, ",\n "); - if (actual_atts + j > 0) - appendPQExpBuffer(q, ",\n "); + appendPQExpBuffer(q, "CONSTRAINT %s ", + fmtId(constr->conname)); + appendPQExpBuffer(q, "%s", constr->condef); - appendPQExpBuffer(q, "CONSTRAINT %s ", - fmtId(name)); - appendPQExpBuffer(q, "%s", expr); - } - PQclear(res2); + actual_atts++; } - /* - * Primary Key: In versions of PostgreSQL prior to 7.2, we needed - * to include the primary key in the table definition. However, - * this is not ideal because it creates an index on the table, - * which makes COPY slower. As of release 7.2, we can add primary - * keys to a table after it has been created, using ALTER TABLE; - * see dumpIndexes() for more information. Therefore, we ignore - * primary keys in this function. - */ - appendPQExpBuffer(q, "\n)"); if (numParents > 0) @@ -5614,7 +6174,7 @@ dumpOneTable(Archive *fout, TableInfo *tbinfo, TableInfo *g_tblinfo) appendPQExpBuffer(q, "\nINHERITS ("); for (k = 0; k < numParents; k++) { - TableInfo *parentRel = &g_tblinfo[parentIndexes[k]]; + TableInfo *parentRel = parents[k]; if (k > 0) appendPQExpBuffer(q, ", "); @@ -5692,21 +6252,71 @@ dumpOneTable(Archive *fout, TableInfo *tbinfo, TableInfo *g_tblinfo) } } - ArchiveEntry(fout, objoid, tbinfo->relname, + ArchiveEntry(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId, + tbinfo->relname, tbinfo->relnamespace->nspname, tbinfo->usename, - reltypename, NULL, q->data, delq->data, - NULL, NULL, NULL); + reltypename, q->data, delq->data, NULL, + tbinfo->dobj.dependencies, tbinfo->dobj.nDeps, + NULL, NULL); /* Dump Table Comments */ - dumpTableComment(fout, tbinfo, reltypename, commentDeps); - - /* commentDeps now belongs to the archive entry ... don't free it! 
*/ + dumpTableComment(fout, tbinfo, reltypename); destroyPQExpBuffer(query); destroyPQExpBuffer(q); destroyPQExpBuffer(delq); } +/* + * dumpAttrDef --- dump an attribute's default-value declaration + */ +static void +dumpAttrDef(Archive *fout, AttrDefInfo *adinfo) +{ + TableInfo *tbinfo = adinfo->adtable; + int adnum = adinfo->adnum; + PQExpBuffer q; + PQExpBuffer delq; + + /* Only print it if "separate" mode is selected */ + if (!tbinfo->dump || !adinfo->separate || dataOnly) + return; + + /* Don't print inherited or serial defaults, either */ + if (tbinfo->inhAttrDef[adnum-1] || tbinfo->attisserial[adnum-1]) + return; + + q = createPQExpBuffer(); + delq = createPQExpBuffer(); + + appendPQExpBuffer(q, "ALTER TABLE %s ", + fmtId(tbinfo->relname)); + appendPQExpBuffer(q, "ALTER COLUMN %s SET DEFAULT %s;\n", + fmtId(tbinfo->attnames[adnum - 1]), + adinfo->adef_expr); + + /* + * DROP must be fully qualified in case same name appears + * in pg_catalog + */ + appendPQExpBuffer(delq, "ALTER TABLE %s.", + fmtId(tbinfo->relnamespace->nspname)); + appendPQExpBuffer(delq, "%s ", + fmtId(tbinfo->relname)); + appendPQExpBuffer(delq, "ALTER COLUMN %s DROP DEFAULT;\n", + fmtId(tbinfo->attnames[adnum - 1])); + + ArchiveEntry(fout, adinfo->dobj.catId, adinfo->dobj.dumpId, + tbinfo->attnames[adnum - 1], + tbinfo->relnamespace->nspname, tbinfo->usename, + "DEFAULT", q->data, delq->data, NULL, + adinfo->dobj.dependencies, adinfo->dobj.nDeps, + NULL, NULL); + + destroyPQExpBuffer(q); + destroyPQExpBuffer(delq); +} + /* * getAttrName: extract the correct name for an attribute * @@ -5743,245 +6353,274 @@ getAttrName(int attrnum, TableInfo *tblInfo) } /* - * dumpIndexes: - * write out to fout all the user-defined indexes for dumpable tables + * dumpIndex + * write out to fout a user-defined index */ -void -dumpIndexes(Archive *fout, TableInfo *tblinfo, int numTables) +static void +dumpIndex(Archive *fout, IndxInfo *indxinfo) { - int i, - j; - PQExpBuffer query = createPQExpBuffer(); - PQExpBuffer q = createPQExpBuffer(); - PQExpBuffer delq = createPQExpBuffer(); - PGresult *res; - int ntups; - int i_indexreloid; - int i_indexrelname; - int i_indexdef; - int i_contype; - int i_conoid; - int i_indkey; - int i_indisclustered; - int i_indnkeys; + TableInfo *tbinfo = indxinfo->indextable; + PQExpBuffer q; + PQExpBuffer delq; - for (i = 0; i < numTables; i++) - { - TableInfo *tbinfo = &tblinfo[i]; + if (dataOnly) + return; - /* Only plain tables have indexes */ - if (tbinfo->relkind != RELKIND_RELATION || !tbinfo->hasindex) - continue; + q = createPQExpBuffer(); + delq = createPQExpBuffer(); - if (!tbinfo->dump) - continue; + /* + * If there's an associated constraint, don't dump the index per se, + * but do dump any comment for it. + */ + if (indxinfo->indexconstraint == 0) + { + /* Plain secondary index */ + appendPQExpBuffer(q, "%s;\n", indxinfo->indexdef); - /* Make sure we are in proper schema so indexdef is right */ - selectSourceSchema(tbinfo->relnamespace->nspname); + /* If the index is clustered, we need to record that. */ + if (indxinfo->indisclustered) + { + appendPQExpBuffer(q, "\nALTER TABLE %s CLUSTER", + fmtId(tbinfo->relname)); + appendPQExpBuffer(q, " ON %s;\n", + fmtId(indxinfo->indexname)); + } /* - * The point of the messy-looking outer join is to find a - * constraint that is related by an internal dependency link to - * the index. If we find one, we emit an ADD CONSTRAINT command - * instead of a CREATE INDEX command. We assume an index won't - * have more than one internal dependency. 
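With the dependency information collected up front, the old outer join against pg_constraint disappears: each IndxInfo now records the dump ID of its owning constraint, so the decision this removed comment describes becomes a simple field test. A sketch of the resulting split, assuming the index-gathering code has filled in indexconstraint; the wrapper name dumpIndexOrConstraint is hypothetical, since in the patch the two objects are dumped independently from the sorted object list:

    static void
    dumpIndexOrConstraint(Archive *fout, IndxInfo *indxinfo)
    {
        if (indxinfo->indexconstraint == 0)
        {
            /* plain secondary index: its CREATE INDEX text is emitted as-is */
            dumpIndex(fout, indxinfo);
        }
        else
        {
            /*
             * constraint-backed index: the ALTER TABLE ... ADD CONSTRAINT
             * comes from the associated ConstraintInfo instead
             */
            ConstraintInfo *coninfo = (ConstraintInfo *)
                findObjectByDumpId(indxinfo->indexconstraint);

            if (coninfo)
                dumpConstraint(fout, coninfo);
        }
    }

(The real dumpIndex() above additionally emits the index's comment in either case.)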
+ * DROP must be fully qualified in case same name appears + * in pg_catalog */ - resetPQExpBuffer(query); - if (g_fout->remoteVersion >= 70300) - appendPQExpBuffer(query, - "SELECT i.indexrelid as indexreloid, " - "coalesce(c.conname, t.relname) as indexrelname, " - "pg_catalog.pg_get_indexdef(i.indexrelid) as indexdef, " - "i.indkey, i.indisclustered, " - "t.relnatts as indnkeys, " - "coalesce(c.contype, '0') as contype, " - "coalesce(c.oid, '0') as conoid " - "FROM pg_catalog.pg_index i " - "JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) " - "LEFT JOIN pg_catalog.pg_depend d " - "ON (d.classid = t.tableoid " - "AND d.objid = t.oid " - "AND d.deptype = 'i') " - "LEFT JOIN pg_catalog.pg_constraint c " - "ON (d.refclassid = c.tableoid " - "AND d.refobjid = c.oid) " - "WHERE i.indrelid = '%s'::pg_catalog.oid " - "ORDER BY indexrelname", - tbinfo->oid); - else - appendPQExpBuffer(query, - "SELECT i.indexrelid as indexreloid, " - "t.relname as indexrelname, " - "pg_get_indexdef(i.indexrelid) as indexdef, " - "i.indkey, false as indisclustered, " - "t.relnatts as indnkeys, " - "CASE WHEN i.indisprimary THEN 'p'::char " - "ELSE '0'::char END as contype, " - "0::oid as conoid " - "FROM pg_index i, pg_class t " - "WHERE t.oid = i.indexrelid " - "AND i.indrelid = '%s'::oid " - "ORDER BY indexrelname", - tbinfo->oid); + appendPQExpBuffer(delq, "DROP INDEX %s.", + fmtId(tbinfo->relnamespace->nspname)); + appendPQExpBuffer(delq, "%s;\n", + fmtId(indxinfo->indexname)); - res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain list of indexes failed: %s", PQerrorMessage(g_conn)); - exit_nicely(); - } + ArchiveEntry(fout, indxinfo->dobj.catId, indxinfo->dobj.dumpId, + indxinfo->indexname, + tbinfo->relnamespace->nspname, + tbinfo->usename, + "INDEX", q->data, delq->data, NULL, + indxinfo->dobj.dependencies, indxinfo->dobj.nDeps, + NULL, NULL); + } - ntups = PQntuples(res); + /* Dump Index Comments */ + resetPQExpBuffer(q); + appendPQExpBuffer(q, "INDEX %s", + fmtId(indxinfo->indexname)); + dumpComment(fout, q->data, + tbinfo->relnamespace->nspname, + tbinfo->usename, + indxinfo->dobj.catId, 0, indxinfo->dobj.dumpId); - i_indexreloid = PQfnumber(res, "indexreloid"); - i_indexrelname = PQfnumber(res, "indexrelname"); - i_indexdef = PQfnumber(res, "indexdef"); - i_contype = PQfnumber(res, "contype"); - i_conoid = PQfnumber(res, "conoid"); - i_indkey = PQfnumber(res, "indkey"); - i_indisclustered = PQfnumber(res, "indisclustered"); - i_indnkeys = PQfnumber(res, "indnkeys"); + destroyPQExpBuffer(q); + destroyPQExpBuffer(delq); +} + +/* + * dumpConstraint + * write out to fout a user-defined constraint + */ +static void +dumpConstraint(Archive *fout, ConstraintInfo *coninfo) +{ + TableInfo *tbinfo = coninfo->contable; + PQExpBuffer q; + PQExpBuffer delq; + + if (dataOnly) + return; + if (tbinfo && !tbinfo->dump) + return; + + q = createPQExpBuffer(); + delq = createPQExpBuffer(); + + if (coninfo->contype == 'p' || coninfo->contype == 'u') + { + /* Index-related constraint */ + IndxInfo *indxinfo; + int k; + + indxinfo = (IndxInfo *) findObjectByDumpId(coninfo->conindex); + + if (indxinfo == NULL) + { + write_msg(NULL, "missing index for constraint %s\n", + coninfo->conname); + exit_nicely(); + } - for (j = 0; j < ntups; j++) + appendPQExpBuffer(q, "ALTER TABLE ONLY %s\n", + fmtId(tbinfo->relname)); + appendPQExpBuffer(q, " ADD CONSTRAINT %s %s (", + fmtId(coninfo->conname), + coninfo->contype == 'p' ? 
"PRIMARY KEY" : "UNIQUE"); + + for (k = 0; k < indxinfo->indnkeys; k++) { - const char *indexreloid = PQgetvalue(res, j, i_indexreloid); - const char *indexrelname = PQgetvalue(res, j, i_indexrelname); - const char *indexdef = PQgetvalue(res, j, i_indexdef); - char contype = *(PQgetvalue(res, j, i_contype)); - const char *conoid = PQgetvalue(res, j, i_conoid); - bool indisclustered = (PQgetvalue(res, j, i_indisclustered)[0] == 't'); + int indkey = (int) indxinfo->indkeys[k]; + const char *attname; - resetPQExpBuffer(q); - resetPQExpBuffer(delq); + if (indkey == InvalidAttrNumber) + break; + attname = getAttrName(indkey, tbinfo); - if (contype == 'p' || contype == 'u') - { - /* - * If we found a constraint matching the index, emit ADD - * CONSTRAINT not CREATE INDEX. - * - * In a pre-7.3 database, we take this path iff the index was - * marked indisprimary. - */ - int indnkeys = atoi(PQgetvalue(res, j, i_indnkeys)); - char **indkeys = (char **) malloc(indnkeys * sizeof(char *)); - int k; + appendPQExpBuffer(q, "%s%s", + (k == 0) ? "" : ", ", + fmtId(attname)); + } - parseNumericArray(PQgetvalue(res, j, i_indkey), - indkeys, indnkeys); + appendPQExpBuffer(q, ");\n"); - appendPQExpBuffer(q, "ALTER TABLE ONLY %s\n", - fmtId(tbinfo->relname)); - appendPQExpBuffer(q, " ADD CONSTRAINT %s %s (", - fmtId(indexrelname), - contype == 'p' ? "PRIMARY KEY" : "UNIQUE"); + /* If the index is clustered, we need to record that. */ + if (indxinfo->indisclustered) + { + appendPQExpBuffer(q, "\nALTER TABLE %s CLUSTER", + fmtId(tbinfo->relname)); + appendPQExpBuffer(q, " ON %s;\n", + fmtId(indxinfo->indexname)); + } - for (k = 0; k < indnkeys; k++) - { - int indkey = atoi(indkeys[k]); - const char *attname; + /* + * DROP must be fully qualified in case same name appears + * in pg_catalog + */ + appendPQExpBuffer(delq, "ALTER TABLE ONLY %s.", + fmtId(tbinfo->relnamespace->nspname)); + appendPQExpBuffer(delq, "%s ", + fmtId(tbinfo->relname)); + appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n", + fmtId(coninfo->conname)); - if (indkey == InvalidAttrNumber) - break; - attname = getAttrName(indkey, tbinfo); + ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId, + coninfo->conname, + tbinfo->relnamespace->nspname, + tbinfo->usename, + "CONSTRAINT", q->data, delq->data, NULL, + coninfo->dobj.dependencies, coninfo->dobj.nDeps, + NULL, NULL); + } + else if (coninfo->contype == 'f') + { + /* + * XXX Potentially wrap in a 'SET CONSTRAINTS OFF' block so that + * the current table data is not processed + */ + appendPQExpBuffer(q, "ALTER TABLE ONLY %s\n", + fmtId(tbinfo->relname)); + appendPQExpBuffer(q, " ADD CONSTRAINT %s %s;\n", + fmtId(coninfo->conname), + coninfo->condef); - appendPQExpBuffer(q, "%s%s", - (k == 0) ? "" : ", ", - fmtId(attname)); - } + /* + * DROP must be fully qualified in case same name appears in + * pg_catalog + */ + appendPQExpBuffer(delq, "ALTER TABLE ONLY %s.", + fmtId(tbinfo->relnamespace->nspname)); + appendPQExpBuffer(delq, "%s ", + fmtId(tbinfo->relname)); + appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n", + fmtId(coninfo->conname)); - appendPQExpBuffer(q, ");\n"); + ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId, + coninfo->conname, + tbinfo->relnamespace->nspname, + tbinfo->usename, + "FK CONSTRAINT", q->data, delq->data, NULL, + coninfo->dobj.dependencies, coninfo->dobj.nDeps, + NULL, NULL); + } + else if (coninfo->contype == 'c' && tbinfo) + { + /* CHECK constraint on a table */ - /* If the index is clustered, we need to record that. 
*/ - if (indisclustered) - { - appendPQExpBuffer(q, "\nALTER TABLE %s CLUSTER", - fmtId(tbinfo->relname)); - appendPQExpBuffer(q, " ON %s;\n", - fmtId(indexrelname)); - } + /* Ignore if not to be dumped separately */ + if (coninfo->separate) + { + /* not ONLY since we want it to propagate to children */ + appendPQExpBuffer(q, "ALTER TABLE %s\n", + fmtId(tbinfo->relname)); + appendPQExpBuffer(q, " ADD CONSTRAINT %s %s;\n", + fmtId(coninfo->conname), + coninfo->condef); - /* - * DROP must be fully qualified in case same name appears - * in pg_catalog - */ - appendPQExpBuffer(delq, "ALTER TABLE ONLY %s.", - fmtId(tbinfo->relnamespace->nspname)); - appendPQExpBuffer(delq, "%s ", - fmtId(tbinfo->relname)); - appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n", - fmtId(indexrelname)); - - ArchiveEntry(fout, indexreloid, - indexrelname, - tbinfo->relnamespace->nspname, - tbinfo->usename, - "CONSTRAINT", NULL, - q->data, delq->data, - NULL, NULL, NULL); - - for (k = 0; k < indnkeys; k++) - free(indkeys[k]); - free(indkeys); - - /* Dump Constraint Comments */ - resetPQExpBuffer(q); - appendPQExpBuffer(q, "CONSTRAINT %s ", - fmtId(indexrelname)); - appendPQExpBuffer(q, "ON %s", - fmtId(tbinfo->relname)); - dumpComment(fout, q->data, - tbinfo->relnamespace->nspname, - tbinfo->usename, - conoid, "pg_constraint", 0, NULL); - } - else - { - /* Plain secondary index */ - appendPQExpBuffer(q, "%s;\n", indexdef); + /* + * DROP must be fully qualified in case same name appears in + * pg_catalog + */ + appendPQExpBuffer(delq, "ALTER TABLE %s.", + fmtId(tbinfo->relnamespace->nspname)); + appendPQExpBuffer(delq, "%s ", + fmtId(tbinfo->relname)); + appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n", + fmtId(coninfo->conname)); - /* If the index is clustered, we need to record that. 
*/ - if (indisclustered) - { - appendPQExpBuffer(q, "\nALTER TABLE %s CLUSTER", - fmtId(tbinfo->relname)); - appendPQExpBuffer(q, " ON %s;\n", - fmtId(indexrelname)); - } + ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId, + coninfo->conname, + tbinfo->relnamespace->nspname, + tbinfo->usename, + "CHECK CONSTRAINT", q->data, delq->data, NULL, + coninfo->dobj.dependencies, coninfo->dobj.nDeps, + NULL, NULL); + } + } + else if (coninfo->contype == 'c' && tbinfo == NULL) + { + /* CHECK constraint on a domain */ + TypeInfo *tinfo = coninfo->condomain; - /* - * DROP must be fully qualified in case same name appears - * in pg_catalog - */ - appendPQExpBuffer(delq, "DROP INDEX %s.", - fmtId(tbinfo->relnamespace->nspname)); - appendPQExpBuffer(delq, "%s;\n", - fmtId(indexrelname)); - - ArchiveEntry(fout, indexreloid, - indexrelname, - tbinfo->relnamespace->nspname, - tbinfo->usename, - "INDEX", NULL, - q->data, delq->data, - NULL, NULL, NULL); - } + /* Ignore if not to be dumped separately, or if not dumping domain */ + if (coninfo->separate && tinfo->typnamespace->dump) + { + appendPQExpBuffer(q, "ALTER DOMAIN %s\n", + fmtId(tinfo->typname)); + appendPQExpBuffer(q, " ADD CONSTRAINT %s %s;\n", + fmtId(coninfo->conname), + coninfo->condef); - /* Dump Index Comments */ - resetPQExpBuffer(q); - appendPQExpBuffer(q, "INDEX %s", - fmtId(indexrelname)); - dumpComment(fout, q->data, - tbinfo->relnamespace->nspname, - tbinfo->usename, - indexreloid, "pg_class", 0, NULL); + /* + * DROP must be fully qualified in case same name appears in + * pg_catalog + */ + appendPQExpBuffer(delq, "ALTER DOMAIN %s.", + fmtId(tinfo->typnamespace->nspname)); + appendPQExpBuffer(delq, "%s ", + fmtId(tinfo->typname)); + appendPQExpBuffer(delq, "DROP CONSTRAINT %s;\n", + fmtId(coninfo->conname)); + + ArchiveEntry(fout, coninfo->dobj.catId, coninfo->dobj.dumpId, + coninfo->conname, + tinfo->typnamespace->nspname, + tinfo->usename, + "CHECK CONSTRAINT", q->data, delq->data, NULL, + coninfo->dobj.dependencies, coninfo->dobj.nDeps, + NULL, NULL); } + } + else + { + write_msg(NULL, "unexpected constraint type\n"); + exit_nicely(); + } - PQclear(res); + /* Dump Constraint Comments --- only works for table constraints */ + if (tbinfo) + { + resetPQExpBuffer(q); + appendPQExpBuffer(q, "CONSTRAINT %s ", + fmtId(coninfo->conname)); + appendPQExpBuffer(q, "ON %s", + fmtId(tbinfo->relname)); + dumpComment(fout, q->data, + tbinfo->relnamespace->nspname, + tbinfo->usename, + coninfo->dobj.catId, 0, coninfo->dobj.dumpId); } - destroyPQExpBuffer(query); destroyPQExpBuffer(q); destroyPQExpBuffer(delq); } @@ -5998,21 +6637,11 @@ setMaxOid(Archive *fout) Oid max_oid; char sql[1024]; - res = PQexec(g_conn, "CREATE TEMPORARY TABLE pgdump_oid (dummy integer)"); - if (!res || - PQresultStatus(res) != PGRES_COMMAND_OK) - { - write_msg(NULL, "could not create pgdump_oid table: %s", PQerrorMessage(g_conn)); - exit_nicely(); - } - PQclear(res); + do_sql_command(g_conn, + "CREATE TEMPORARY TABLE pgdump_oid (dummy integer)"); res = PQexec(g_conn, "INSERT INTO pgdump_oid VALUES (0)"); - if (!res || - PQresultStatus(res) != PGRES_COMMAND_OK) - { - write_msg(NULL, "could not insert into pgdump_oid table: %s", PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, "INSERT INTO pgdump_oid VALUES (0)", + PGRES_COMMAND_OK); max_oid = PQoidValue(res); if (max_oid == 0) { @@ -6020,14 +6649,7 @@ setMaxOid(Archive *fout) exit_nicely(); } PQclear(res); - res = PQexec(g_conn, "DROP TABLE pgdump_oid;"); - if (!res || - 
PQresultStatus(res) != PGRES_COMMAND_OK) - { - write_msg(NULL, "could not drop pgdump_oid table: %s", PQerrorMessage(g_conn)); - exit_nicely(); - } - PQclear(res); + do_sql_command(g_conn, "DROP TABLE pgdump_oid;"); if (g_verbose) write_msg(NULL, "maximum system OID is %u\n", max_oid); snprintf(sql, sizeof(sql), @@ -6038,10 +6660,11 @@ setMaxOid(Archive *fout) "DROP TABLE pgdump_oid;\n", max_oid); - ArchiveEntry(fout, "0", "Max OID", NULL, "", - "", NULL, - sql, "", - NULL, NULL, NULL); + ArchiveEntry(fout, nilCatalogId, createDumpId(), + "Max OID", NULL, "", + "", sql, "", NULL, + NULL, 0, + NULL, NULL); } /* @@ -6064,12 +6687,8 @@ findLastBuiltinOid_V71(const char *dbname) appendStringLiteral(query, dbname, true); res = PQexec(g_conn, query->data); - if (res == NULL || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "error in finding the last system OID: %s", PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); + ntups = PQntuples(res); if (ntups < 1) { @@ -6104,21 +6723,18 @@ findLastBuiltinOid_V70(void) res = PQexec(g_conn, "SELECT oid FROM pg_class WHERE relname = 'pg_indexes'"); - if (res == NULL || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "error in finding the template1 database: %s", PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, + "SELECT oid FROM pg_class WHERE relname = 'pg_indexes'", + PGRES_TUPLES_OK); ntups = PQntuples(res); if (ntups < 1) { - write_msg(NULL, "could not find entry for database template1 in table pg_database\n"); + write_msg(NULL, "could not find entry for pg_indexes in pg_class\n"); exit_nicely(); } if (ntups > 1) { - write_msg(NULL, "found more than one entry for database template1 in table pg_database\n"); + write_msg(NULL, "found more than one entry for pg_indexes in pg_class\n"); exit_nicely(); } last_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "oid"))); @@ -6127,8 +6743,7 @@ findLastBuiltinOid_V70(void) } static void -dumpOneSequence(Archive *fout, TableInfo *tbinfo, - const bool schemaOnly, const bool dataOnly) +dumpSequence(Archive *fout, TableInfo *tbinfo) { PGresult *res; char *last, @@ -6164,11 +6779,7 @@ dumpOneSequence(Archive *fout, TableInfo *tbinfo, fmtId(tbinfo->relname)); res = PQexec(g_conn, query->data); - if (!res || PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to get data of sequence \"%s\" failed: %s", tbinfo->relname, PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); if (PQntuples(res) != 1) { @@ -6208,7 +6819,7 @@ dumpOneSequence(Archive *fout, TableInfo *tbinfo, * data. We do this for serial sequences too. */ - if (!dataOnly && tbinfo->owning_tab == NULL) + if (!dataOnly && !OidIsValid(tbinfo->owning_tab)) { resetPQExpBuffer(delqry); @@ -6245,11 +6856,12 @@ dumpOneSequence(Archive *fout, TableInfo *tbinfo, " CACHE %s%s;\n", cache, (cycled ? "\n CYCLE" : "")); - ArchiveEntry(fout, tbinfo->oid, tbinfo->relname, + ArchiveEntry(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId, + tbinfo->relname, tbinfo->relnamespace->nspname, tbinfo->usename, - "SEQUENCE", NULL, - query->data, delqry->data, - NULL, NULL, NULL); + "SEQUENCE", query->data, delqry->data, NULL, + tbinfo->dobj.dependencies, tbinfo->dobj.nDeps, + NULL, NULL); } if (!schemaOnly) @@ -6260,22 +6872,22 @@ dumpOneSequence(Archive *fout, TableInfo *tbinfo, appendPQExpBuffer(query, ", %s, %s);\n", last, (called ? 
"true" : "false")); - ArchiveEntry(fout, tbinfo->oid, tbinfo->relname, + ArchiveEntry(fout, nilCatalogId, createDumpId(), + tbinfo->relname, tbinfo->relnamespace->nspname, tbinfo->usename, - "SEQUENCE SET", NULL, - query->data, "" /* Del */ , - NULL, NULL, NULL); + "SEQUENCE SET", query->data, "", NULL, + &(tbinfo->dobj.dumpId), 1, + NULL, NULL); } if (!dataOnly) { /* Dump Sequence Comments */ - resetPQExpBuffer(query); appendPQExpBuffer(query, "SEQUENCE %s", fmtId(tbinfo->relname)); dumpComment(fout, query->data, tbinfo->relnamespace->nspname, tbinfo->usename, - tbinfo->oid, "pg_class", 0, NULL); + tbinfo->dobj.catId, 0, tbinfo->dobj.dumpId); } PQclear(res); @@ -6284,540 +6896,371 @@ dumpOneSequence(Archive *fout, TableInfo *tbinfo, destroyPQExpBuffer(delqry); } -/* - * dumpConstraints - * - * Dump out constraints after all table creation statements in - * an alter table format. Currently handles foreign keys only. - * (Unique and primary key constraints are handled with indexes, - * while check constraints are merged into the table definition.) - * - * XXX Potentially wrap in a 'SET CONSTRAINTS OFF' block so that - * the current table data is not processed - */ static void -dumpConstraints(Archive *fout, TableInfo *tblinfo, int numTables) +dumpTrigger(Archive *fout, TriggerInfo *tginfo) { - int i, - j; + TableInfo *tbinfo = tginfo->tgtable; PQExpBuffer query; PQExpBuffer delqry; - PGresult *res; - int i_condef, - i_conoid, - i_conname; - int ntups; + const char *p; + int findx; - /* pg_constraint was created in 7.3, so nothing to do if older */ - if (g_fout->remoteVersion < 70300) + if (dataOnly) return; - query = createPQExpBuffer(); - delqry = createPQExpBuffer(); - - for (i = 0; i < numTables; i++) - { - TableInfo *tbinfo = &tblinfo[i]; - - if (tbinfo->ntrig == 0 || !tbinfo->dump) - continue; - - if (g_verbose) - write_msg(NULL, "dumping foreign key constraints for table \"%s\"\n", - tbinfo->relname); - - /* - * select table schema to ensure regproc name is qualified if - * needed - */ - selectSourceSchema(tbinfo->relnamespace->nspname); - - resetPQExpBuffer(query); - appendPQExpBuffer(query, - "SELECT oid, conname, " - "pg_catalog.pg_get_constraintdef(oid) as condef " - "FROM pg_catalog.pg_constraint " - "WHERE conrelid = '%s'::pg_catalog.oid " - "AND contype = 'f'", - tbinfo->oid); - res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain list of foreign key definitions failed: %s", PQerrorMessage(g_conn)); - exit_nicely(); - } - ntups = PQntuples(res); - - i_conoid = PQfnumber(res, "oid"); - i_conname = PQfnumber(res, "conname"); - i_condef = PQfnumber(res, "condef"); - - for (j = 0; j < ntups; j++) - { - const char *conOid = PQgetvalue(res, j, i_conoid); - const char *conName = PQgetvalue(res, j, i_conname); - const char *conDef = PQgetvalue(res, j, i_condef); - - resetPQExpBuffer(query); - appendPQExpBuffer(query, "ALTER TABLE ONLY %s\n", - fmtId(tbinfo->relname)); - appendPQExpBuffer(query, " ADD CONSTRAINT %s %s;\n", - fmtId(conName), - conDef); - - /* - * DROP must be fully qualified in case same name appears in - * pg_catalog - */ - resetPQExpBuffer(delqry); - appendPQExpBuffer(delqry, "ALTER TABLE ONLY %s.", - fmtId(tbinfo->relnamespace->nspname)); - appendPQExpBuffer(delqry, "%s ", - fmtId(tbinfo->relname)); - appendPQExpBuffer(delqry, "DROP CONSTRAINT %s;\n", - fmtId(conName)); - - ArchiveEntry(fout, conOid, - conName, - tbinfo->relnamespace->nspname, - tbinfo->usename, - "FK CONSTRAINT", NULL, - 
query->data, delqry->data, - NULL, NULL, NULL); - - resetPQExpBuffer(query); - appendPQExpBuffer(query, "CONSTRAINT %s ", - fmtId(conName)); - appendPQExpBuffer(query, "ON %s", - fmtId(tbinfo->relname)); - - dumpComment(fout, query->data, - tbinfo->relnamespace->nspname, tbinfo->usename, - conOid, "pg_constraint", 0, NULL); - } - - PQclear(res); - } - - destroyPQExpBuffer(query); - destroyPQExpBuffer(delqry); -} - -static void -dumpTriggers(Archive *fout, TableInfo *tblinfo, int numTables) -{ - int i, - j; - PQExpBuffer query = createPQExpBuffer(); - PQExpBuffer delqry = createPQExpBuffer(); - PGresult *res; - int i_tgoid, - i_tgname, - i_tgfname, - i_tgtype, - i_tgnargs, - i_tgargs, - i_tgisconstraint, - i_tgconstrname, - i_tgdeferrable, - i_tgconstrrelid, - i_tgconstrrelname, - i_tginitdeferred; - int ntups; - - for (i = 0; i < numTables; i++) - { - TableInfo *tbinfo = &tblinfo[i]; - - if (tbinfo->ntrig == 0 || !tbinfo->dump) - continue; - - if (g_verbose) - write_msg(NULL, "dumping triggers for table \"%s\"\n", - tbinfo->relname); - - /* - * select table schema to ensure regproc name is qualified if - * needed - */ - selectSourceSchema(tbinfo->relnamespace->nspname); - - resetPQExpBuffer(query); - if (g_fout->remoteVersion >= 70300) - { - /* - * We ignore triggers that are tied to a foreign-key - * constraint - */ - appendPQExpBuffer(query, - "SELECT tgname, " - "tgfoid::pg_catalog.regproc as tgfname, " - "tgtype, tgnargs, tgargs, " - "tgisconstraint, tgconstrname, tgdeferrable, " - "tgconstrrelid, tginitdeferred, oid, " - "tgconstrrelid::pg_catalog.regclass as tgconstrrelname " - "from pg_catalog.pg_trigger t " - "where tgrelid = '%s'::pg_catalog.oid " - "and (not tgisconstraint " - " OR NOT EXISTS" - " (SELECT 1 FROM pg_catalog.pg_depend d " - " JOIN pg_catalog.pg_constraint c ON (d.refclassid = c.tableoid AND d.refobjid = c.oid) " - " WHERE d.classid = t.tableoid AND d.objid = t.oid AND d.deptype = 'i' AND c.contype = 'f'))", - tbinfo->oid); - } - else - { - appendPQExpBuffer(query, - "SELECT tgname, tgfoid::regproc as tgfname, " - "tgtype, tgnargs, tgargs, " - "tgisconstraint, tgconstrname, tgdeferrable, " - "tgconstrrelid, tginitdeferred, oid, " - "(select relname from pg_class where oid = tgconstrrelid) " - " as tgconstrrelname " - "from pg_trigger " - "where tgrelid = '%s'::oid", - tbinfo->oid); - } - res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain list of triggers failed: %s", PQerrorMessage(g_conn)); - exit_nicely(); - } - ntups = PQntuples(res); + query = createPQExpBuffer(); + delqry = createPQExpBuffer(); - /* - * We may have less triggers than recorded due to constraint - * triggers which are dumped by dumpConstraints - */ - if (ntups > tbinfo->ntrig) - { - write_msg(NULL, "expected %d triggers on table \"%s\" but found %d\n", - tbinfo->ntrig, tbinfo->relname, ntups); - exit_nicely(); - } - i_tgname = PQfnumber(res, "tgname"); - i_tgfname = PQfnumber(res, "tgfname"); - i_tgtype = PQfnumber(res, "tgtype"); - i_tgnargs = PQfnumber(res, "tgnargs"); - i_tgargs = PQfnumber(res, "tgargs"); - i_tgoid = PQfnumber(res, "oid"); - i_tgisconstraint = PQfnumber(res, "tgisconstraint"); - i_tgconstrname = PQfnumber(res, "tgconstrname"); - i_tgdeferrable = PQfnumber(res, "tgdeferrable"); - i_tgconstrrelid = PQfnumber(res, "tgconstrrelid"); - i_tgconstrrelname = PQfnumber(res, "tgconstrrelname"); - i_tginitdeferred = PQfnumber(res, "tginitdeferred"); + /* + * DROP must be fully qualified in case same name 
appears in + * pg_catalog + */ + appendPQExpBuffer(delqry, "DROP TRIGGER %s ", + fmtId(tginfo->tgname)); + appendPQExpBuffer(delqry, "ON %s.", + fmtId(tbinfo->relnamespace->nspname)); + appendPQExpBuffer(delqry, "%s;\n", + fmtId(tbinfo->relname)); - for (j = 0; j < ntups; j++) - { - const char *tgoid = PQgetvalue(res, j, i_tgoid); - char *tgname = PQgetvalue(res, j, i_tgname); - const char *tgfname = PQgetvalue(res, j, i_tgfname); - int2 tgtype = atoi(PQgetvalue(res, j, i_tgtype)); - int tgnargs = atoi(PQgetvalue(res, j, i_tgnargs)); - const char *tgargs = PQgetvalue(res, j, i_tgargs); - int tgisconstraint; - int tgdeferrable; - int tginitdeferred; - char *tgconstrrelid; - const char *p; - int findx; - - if (strcmp(PQgetvalue(res, j, i_tgisconstraint), "f") == 0) - tgisconstraint = 0; - else - tgisconstraint = 1; + if (tginfo->tgisconstraint) + { + appendPQExpBuffer(query, "CREATE CONSTRAINT TRIGGER "); + appendPQExpBuffer(query, fmtId(tginfo->tgconstrname)); + } + else + { + appendPQExpBuffer(query, "CREATE TRIGGER "); + appendPQExpBuffer(query, fmtId(tginfo->tgname)); + } + appendPQExpBuffer(query, "\n "); - if (strcmp(PQgetvalue(res, j, i_tgdeferrable), "f") == 0) - tgdeferrable = 0; - else - tgdeferrable = 1; + /* Trigger type */ + findx = 0; + if (TRIGGER_FOR_BEFORE(tginfo->tgtype)) + appendPQExpBuffer(query, "BEFORE"); + else + appendPQExpBuffer(query, "AFTER"); + if (TRIGGER_FOR_INSERT(tginfo->tgtype)) + { + appendPQExpBuffer(query, " INSERT"); + findx++; + } + if (TRIGGER_FOR_DELETE(tginfo->tgtype)) + { + if (findx > 0) + appendPQExpBuffer(query, " OR DELETE"); + else + appendPQExpBuffer(query, " DELETE"); + findx++; + } + if (TRIGGER_FOR_UPDATE(tginfo->tgtype)) + { + if (findx > 0) + appendPQExpBuffer(query, " OR UPDATE"); + else + appendPQExpBuffer(query, " UPDATE"); + } + appendPQExpBuffer(query, " ON %s\n", + fmtId(tbinfo->relname)); - if (strcmp(PQgetvalue(res, j, i_tginitdeferred), "f") == 0) - tginitdeferred = 0; + if (tginfo->tgisconstraint) + { + if (OidIsValid(tginfo->tgconstrrelid)) + { + /* If we are using regclass, name is already quoted */ + if (g_fout->remoteVersion >= 70300) + appendPQExpBuffer(query, " FROM %s\n ", + tginfo->tgconstrrelname); else - tginitdeferred = 1; + appendPQExpBuffer(query, " FROM %s\n ", + fmtId(tginfo->tgconstrrelname)); + } + if (!tginfo->tgdeferrable) + appendPQExpBuffer(query, "NOT "); + appendPQExpBuffer(query, "DEFERRABLE INITIALLY "); + if (tginfo->tginitdeferred) + appendPQExpBuffer(query, "DEFERRED\n"); + else + appendPQExpBuffer(query, "IMMEDIATE\n"); + } - resetPQExpBuffer(delqry); + if (TRIGGER_FOR_ROW(tginfo->tgtype)) + appendPQExpBuffer(query, " FOR EACH ROW\n "); + else + appendPQExpBuffer(query, " FOR EACH STATEMENT\n "); - /* - * DROP must be fully qualified in case same name appears in - * pg_catalog - */ - appendPQExpBuffer(delqry, "DROP TRIGGER %s ", - fmtId(tgname)); - appendPQExpBuffer(delqry, "ON %s.", - fmtId(tbinfo->relnamespace->nspname)); - appendPQExpBuffer(delqry, "%s;\n", - fmtId(tbinfo->relname)); + /* In 7.3, result of regproc is already quoted */ + if (g_fout->remoteVersion >= 70300) + appendPQExpBuffer(query, "EXECUTE PROCEDURE %s(", + tginfo->tgfname); + else + appendPQExpBuffer(query, "EXECUTE PROCEDURE %s(", + fmtId(tginfo->tgfname)); - resetPQExpBuffer(query); - if (tgisconstraint) - { - appendPQExpBuffer(query, "CREATE CONSTRAINT TRIGGER "); - appendPQExpBuffer(query, fmtId(PQgetvalue(res, j, i_tgconstrname))); - } - else - { - appendPQExpBuffer(query, "CREATE TRIGGER "); - appendPQExpBuffer(query, 
fmtId(tgname)); - } - appendPQExpBuffer(query, "\n "); - /* Trigger type */ - findx = 0; - if (TRIGGER_FOR_BEFORE(tgtype)) - appendPQExpBuffer(query, "BEFORE"); - else - appendPQExpBuffer(query, "AFTER"); - if (TRIGGER_FOR_INSERT(tgtype)) - { - appendPQExpBuffer(query, " INSERT"); - findx++; - } - if (TRIGGER_FOR_DELETE(tgtype)) + p = tginfo->tgargs; + for (findx = 0; findx < tginfo->tgnargs; findx++) + { + const char *s = p; + + for (;;) + { + p = strchr(p, '\\'); + if (p == NULL) { - if (findx > 0) - appendPQExpBuffer(query, " OR DELETE"); - else - appendPQExpBuffer(query, " DELETE"); - findx++; + write_msg(NULL, "invalid argument string (%s) for trigger \"%s\" on table \"%s\"\n", + tginfo->tgargs, + tginfo->tgname, + tbinfo->relname); + exit_nicely(); } - if (TRIGGER_FOR_UPDATE(tgtype)) + p++; + if (*p == '\\') { - if (findx > 0) - appendPQExpBuffer(query, " OR UPDATE"); - else - appendPQExpBuffer(query, " UPDATE"); + p++; + continue; } - appendPQExpBuffer(query, " ON %s\n", - fmtId(tbinfo->relname)); + if (p[0] == '0' && p[1] == '0' && p[2] == '0') + break; + } + p--; + appendPQExpBufferChar(query, '\''); + while (s < p) + { + if (*s == '\'') + appendPQExpBufferChar(query, '\\'); + appendPQExpBufferChar(query, *s++); + } + appendPQExpBufferChar(query, '\''); + appendPQExpBuffer(query, + (findx < tginfo->tgnargs - 1) ? ", " : ""); + p = p + 4; + } + appendPQExpBuffer(query, ");\n"); - if (tgisconstraint) - { - tgconstrrelid = PQgetvalue(res, j, i_tgconstrrelid); + ArchiveEntry(fout, tginfo->dobj.catId, tginfo->dobj.dumpId, + tginfo->tgname, + tbinfo->relnamespace->nspname, + tbinfo->usename, + "TRIGGER", query->data, delqry->data, NULL, + tginfo->dobj.dependencies, tginfo->dobj.nDeps, + NULL, NULL); - if (strcmp(tgconstrrelid, "0") != 0) - { + resetPQExpBuffer(query); + appendPQExpBuffer(query, "TRIGGER %s ", + fmtId(tginfo->tgname)); + appendPQExpBuffer(query, "ON %s", + fmtId(tbinfo->relname)); - if (PQgetisnull(res, j, i_tgconstrrelname)) - { - write_msg(NULL, "query produced null referenced table name for foreign key trigger \"%s\" on table \"%s\" (OID of table: %s)\n", - tgname, tbinfo->relname, tgconstrrelid); - exit_nicely(); - } + dumpComment(fout, query->data, + tbinfo->relnamespace->nspname, tbinfo->usename, + tginfo->dobj.catId, 0, tginfo->dobj.dumpId); - /* If we are using regclass, name is already quoted */ - if (g_fout->remoteVersion >= 70300) - appendPQExpBuffer(query, " FROM %s\n ", - PQgetvalue(res, j, i_tgconstrrelname)); - else - appendPQExpBuffer(query, " FROM %s\n ", - fmtId(PQgetvalue(res, j, i_tgconstrrelname))); - } - if (!tgdeferrable) - appendPQExpBuffer(query, "NOT "); - appendPQExpBuffer(query, "DEFERRABLE INITIALLY "); - if (tginitdeferred) - appendPQExpBuffer(query, "DEFERRED\n"); - else - appendPQExpBuffer(query, "IMMEDIATE\n"); + destroyPQExpBuffer(query); + destroyPQExpBuffer(delqry); +} - } +/* + * dumpRule + * Dump a rule + */ +static void +dumpRule(Archive *fout, RuleInfo *rinfo) +{ + TableInfo *tbinfo = rinfo->ruletable; + PQExpBuffer query; + PQExpBuffer cmd; + PQExpBuffer delcmd; + PGresult *res; - if (TRIGGER_FOR_ROW(tgtype)) - appendPQExpBuffer(query, " FOR EACH ROW\n "); - else - appendPQExpBuffer(query, " FOR EACH STATEMENT\n "); + /* + * Ignore rules for not-to-be-dumped tables + */ + if (tbinfo == NULL || !tbinfo->dump || dataOnly) + return; - /* In 7.3, result of regproc is already quoted */ - if (g_fout->remoteVersion >= 70300) - appendPQExpBuffer(query, "EXECUTE PROCEDURE %s(", - tgfname); - else - appendPQExpBuffer(query, "EXECUTE 
PROCEDURE %s(", - fmtId(tgfname)); - for (findx = 0; findx < tgnargs; findx++) - { - const char *s; + /* + * If it is an ON SELECT rule, we do not need to dump it because + * it will be handled via CREATE VIEW for the table. + */ + if (rinfo->ev_type == '1' && rinfo->is_instead) + return; - for (p = tgargs;;) - { - p = strchr(p, '\\'); - if (p == NULL) - { - write_msg(NULL, "invalid argument string (%s) for trigger \"%s\" on table \"%s\"\n", - PQgetvalue(res, j, i_tgargs), - tgname, - tbinfo->relname); - exit_nicely(); - } - p++; - if (*p == '\\') - { - p++; - continue; - } - if (p[0] == '0' && p[1] == '0' && p[2] == '0') - break; - } - p--; - appendPQExpBufferChar(query, '\''); - for (s = tgargs; s < p;) - { - if (*s == '\'') - appendPQExpBufferChar(query, '\\'); - appendPQExpBufferChar(query, *s++); - } - appendPQExpBufferChar(query, '\''); - appendPQExpBuffer(query, (findx < tgnargs - 1) ? ", " : ""); - tgargs = p + 4; - } - appendPQExpBuffer(query, ");\n"); + /* + * Make sure we are in proper schema. + */ + selectSourceSchema(tbinfo->relnamespace->nspname); - ArchiveEntry(fout, tgoid, - tgname, - tbinfo->relnamespace->nspname, - tbinfo->usename, - "TRIGGER", NULL, - query->data, delqry->data, - NULL, NULL, NULL); + query = createPQExpBuffer(); + cmd = createPQExpBuffer(); + delcmd = createPQExpBuffer(); - resetPQExpBuffer(query); - appendPQExpBuffer(query, "TRIGGER %s ", - fmtId(tgname)); - appendPQExpBuffer(query, "ON %s", - fmtId(tbinfo->relname)); + if (g_fout->remoteVersion >= 70300) + { + appendPQExpBuffer(query, + "SELECT pg_catalog.pg_get_ruledef('%u'::pg_catalog.oid) AS definition", + rinfo->dobj.catId.oid); + } + else + { + /* Rule name was unique before 7.3 ... */ + appendPQExpBuffer(query, + "SELECT pg_get_ruledef('%s') AS definition", + rinfo->rulename); + } - dumpComment(fout, query->data, - tbinfo->relnamespace->nspname, tbinfo->usename, - tgoid, "pg_trigger", 0, NULL); - } + res = PQexec(g_conn, query->data); + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); - PQclear(res); + if (PQntuples(res) != 1) + { + write_msg(NULL, "query to get rule \"%s\" for table \"%s\" failed: wrong number of rows returned", + rinfo->rulename, tbinfo->relname); + exit_nicely(); } + printfPQExpBuffer(cmd, "%s\n", PQgetvalue(res, 0, 0)); + + /* + * DROP must be fully qualified in case same name appears in + * pg_catalog + */ + appendPQExpBuffer(delcmd, "DROP RULE %s ", + fmtId(rinfo->rulename)); + appendPQExpBuffer(delcmd, "ON %s.", + fmtId(tbinfo->relnamespace->nspname)); + appendPQExpBuffer(delcmd, "%s;\n", + fmtId(tbinfo->relname)); + + ArchiveEntry(fout, rinfo->dobj.catId, rinfo->dobj.dumpId, + rinfo->rulename, + tbinfo->relnamespace->nspname, + tbinfo->usename, + "RULE", cmd->data, delcmd->data, NULL, + rinfo->dobj.dependencies, rinfo->dobj.nDeps, + NULL, NULL); + + /* Dump rule comments */ + resetPQExpBuffer(query); + appendPQExpBuffer(query, "RULE %s", + fmtId(rinfo->rulename)); + appendPQExpBuffer(query, " ON %s", + fmtId(tbinfo->relname)); + dumpComment(fout, query->data, + tbinfo->relnamespace->nspname, + tbinfo->usename, + rinfo->dobj.catId, 0, rinfo->dobj.dumpId); + + PQclear(res); + destroyPQExpBuffer(query); - destroyPQExpBuffer(delqry); + destroyPQExpBuffer(cmd); + destroyPQExpBuffer(delcmd); } - +/* + * getDependencies --- obtain available dependency data + */ static void -dumpRules(Archive *fout, TableInfo *tblinfo, int numTables) +getDependencies(void) { + PQExpBuffer query; PGresult *res; - int nrules; - int i, - t; - PQExpBuffer query = createPQExpBuffer(); - 
PQExpBuffer cmd = createPQExpBuffer(); - int i_definition; - int i_oid; - int i_rulename; + int ntups, + i; + int i_classid, + i_objid, + i_refclassid, + i_refobjid, + i_deptype; + DumpableObject *dobj, + *refdobj; + + /* No dependency info available before 7.3 */ + if (g_fout->remoteVersion < 70300) + return; if (g_verbose) - write_msg(NULL, "dumping out rules\n"); + write_msg(NULL, "fetching dependency data\n"); + + /* Make sure we are in proper schema */ + selectSourceSchema("pg_catalog"); + + query = createPQExpBuffer(); + + appendPQExpBuffer(query, "SELECT " + "classid, objid, refclassid, refobjid, deptype " + "FROM pg_depend " + "WHERE deptype != 'p' " + "ORDER BY 1,2"); + + res = PQexec(g_conn, query->data); + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); + + ntups = PQntuples(res); + + i_classid = PQfnumber(res, "classid"); + i_objid = PQfnumber(res, "objid"); + i_refclassid = PQfnumber(res, "refclassid"); + i_refobjid = PQfnumber(res, "refobjid"); + i_deptype = PQfnumber(res, "deptype"); /* - * For each table we dump + * Since we ordered the SELECT by referencing ID, we can expect that + * multiple entries for the same object will appear together; this + * saves on searches. */ - for (t = 0; t < numTables; t++) + dobj = NULL; + + for (i = 0; i < ntups; i++) { - TableInfo *tbinfo = &tblinfo[t]; + CatalogId objId; + CatalogId refobjId; + char deptype; - if (!tbinfo->hasrules || !tbinfo->dump) - continue; + objId.tableoid = atooid(PQgetvalue(res, i, i_classid)); + objId.oid = atooid(PQgetvalue(res, i, i_objid)); + refobjId.tableoid = atooid(PQgetvalue(res, i, i_refclassid)); + refobjId.oid = atooid(PQgetvalue(res, i, i_refobjid)); + deptype = *(PQgetvalue(res, i, i_deptype)); - /* Make sure we are in proper schema */ - selectSourceSchema(tbinfo->relnamespace->nspname); + if (dobj == NULL || + dobj->catId.tableoid != objId.tableoid || + dobj->catId.oid != objId.oid) + dobj = findObjectByCatalogId(objId); /* - * Get all rules defined for this table, except view select rules + * Failure to find objects mentioned in pg_depend is not unexpected, + * since for example we don't collect info about TOAST tables. */ - resetPQExpBuffer(query); - - if (g_fout->remoteVersion >= 70300) - { - appendPQExpBuffer(query, - "SELECT pg_catalog.pg_get_ruledef(oid) AS definition," - " oid, rulename " - "FROM pg_catalog.pg_rewrite " - "WHERE ev_class = '%s'::pg_catalog.oid " - "AND rulename != '_RETURN' " - "ORDER BY oid", - tbinfo->oid); - } - else - { - /* - * We include pg_rules in the cross since it filters out all - * view rules (pjw 15-Sep-2000). 
- */ - appendPQExpBuffer(query, "SELECT definition," - " pg_rewrite.oid, pg_rewrite.rulename " - "FROM pg_rewrite, pg_class, pg_rules " - "WHERE pg_class.relname = "); - appendStringLiteral(query, tbinfo->relname, true); - appendPQExpBuffer(query, - " AND pg_rewrite.ev_class = pg_class.oid " - " AND pg_rules.tablename = pg_class.relname " - " AND pg_rules.rulename = pg_rewrite.rulename " - "ORDER BY pg_rewrite.oid"); - } - - res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) + if (dobj == NULL) { - write_msg(NULL, "query to get rules associated with table \"%s\" failed: %s", - tbinfo->relname, PQerrorMessage(g_conn)); - exit_nicely(); +#ifdef NOT_USED + fprintf(stderr, "no referencing object %u %u\n", + objId.tableoid, objId.oid); +#endif + continue; } - nrules = PQntuples(res); - i_definition = PQfnumber(res, "definition"); - i_oid = PQfnumber(res, "oid"); - i_rulename = PQfnumber(res, "rulename"); - - /* - * Dump them out - */ + refdobj = findObjectByCatalogId(refobjId); - for (i = 0; i < nrules; i++) + if (refdobj == NULL) { - printfPQExpBuffer(cmd, "%s\n", PQgetvalue(res, i, i_definition)); - ArchiveEntry(fout, PQgetvalue(res, i, i_oid), - PQgetvalue(res, i, i_rulename), - tbinfo->relnamespace->nspname, - tbinfo->usename, - "RULE", NULL, - cmd->data, - "", /* Del */ - NULL, NULL, NULL); - - /* Dump rule comments */ - - resetPQExpBuffer(query); - appendPQExpBuffer(query, "RULE %s", - fmtId(PQgetvalue(res, i, i_rulename))); - appendPQExpBuffer(query, " ON %s", - fmtId(tbinfo->relname)); - dumpComment(fout, query->data, - tbinfo->relnamespace->nspname, - tbinfo->usename, - PQgetvalue(res, i, i_oid), "pg_rewrite", 0, NULL); - +#ifdef NOT_USED + fprintf(stderr, "no referenced object %u %u\n", + refobjId.tableoid, refobjId.oid); +#endif + continue; } - PQclear(res); + addObjectDependency(dobj, refdobj->dumpId); } + PQclear(res); + destroyPQExpBuffer(query); - destroyPQExpBuffer(cmd); } + /* * selectSourceSchema - make the specified schema the active search path * in the source database. @@ -6835,7 +7278,6 @@ selectSourceSchema(const char *schemaName) { static char *curSchemaName = NULL; PQExpBuffer query; - PGresult *res; /* Not relevant if fetching from pre-7.3 DB */ if (g_fout->remoteVersion < 70300) @@ -6852,17 +7294,10 @@ selectSourceSchema(const char *schemaName) fmtId(schemaName)); if (strcmp(schemaName, "pg_catalog") != 0) appendPQExpBuffer(query, ", pg_catalog"); - res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_COMMAND_OK) - { - write_msg(NULL, "command to set search_path failed: %s", - PQerrorMessage(g_conn)); - exit_nicely(); - } - PQclear(res); - destroyPQExpBuffer(query); + do_sql_command(g_conn, query->data); + + destroyPQExpBuffer(query); if (curSchemaName) free(curSchemaName); curSchemaName = strdup(schemaName); @@ -6876,14 +7311,14 @@ selectSourceSchema(const char *schemaName) * schema; this is why we don't try to cache the names. 
*/ static char * -getFormattedTypeName(const char *oid, OidOptions opts) +getFormattedTypeName(Oid oid, OidOptions opts) { char *result; PQExpBuffer query; PGresult *res; int ntups; - if (atooid(oid) == 0) + if (oid == 0) { if ((opts & zeroAsOpaque) != 0) return strdup(g_opaque_type); @@ -6898,30 +7333,24 @@ getFormattedTypeName(const char *oid, OidOptions opts) query = createPQExpBuffer(); if (g_fout->remoteVersion >= 70300) { - appendPQExpBuffer(query, "SELECT pg_catalog.format_type('%s'::pg_catalog.oid, NULL)", + appendPQExpBuffer(query, "SELECT pg_catalog.format_type('%u'::pg_catalog.oid, NULL)", oid); } else if (g_fout->remoteVersion >= 70100) { - appendPQExpBuffer(query, "SELECT format_type('%s'::oid, NULL)", + appendPQExpBuffer(query, "SELECT format_type('%u'::oid, NULL)", oid); } else { appendPQExpBuffer(query, "SELECT typname " "FROM pg_type " - "WHERE oid = '%s'::oid", + "WHERE oid = '%u'::oid", oid); } res = PQexec(g_conn, query->data); - if (!res || - PQresultStatus(res) != PGRES_TUPLES_OK) - { - write_msg(NULL, "query to obtain name of data type %s failed: %s", - oid, PQerrorMessage(g_conn)); - exit_nicely(); - } + check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK); /* Expecting a single result only */ ntups = PQntuples(res); @@ -7074,3 +7503,40 @@ fmtCopyColumnList(const TableInfo *ti) appendPQExpBuffer(q, ")"); return q->data; } + +/* + * Convenience subroutine to execute a SQL command and check for + * COMMAND_OK status. + */ +static void +do_sql_command(PGconn *conn, const char *query) +{ + PGresult *res; + + res = PQexec(conn, query); + check_sql_result(res, conn, query, PGRES_COMMAND_OK); + PQclear(res); +} + +/* + * Convenience subroutine to verify a SQL command succeeded, + * and exit with a useful error message if not. + */ +static void +check_sql_result(PGresult *res, PGconn *conn, const char *query, + ExecStatusType expected) +{ + const char *err; + + if (res && PQresultStatus(res) == expected) + return; /* A-OK */ + + write_msg(NULL, "SQL command failed\n"); + if (res) + err = PQresultErrorMessage(res); + else + err = PQerrorMessage(conn); + write_msg(NULL, "Error message from server: %s", err); + write_msg(NULL, "The command was: %s\n", query); + exit_nicely(); +} diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h index 1a3949c52a..c46ca661e0 100644 --- a/src/bin/pg_dump/pg_dump.h +++ b/src/bin/pg_dump/pg_dump.h @@ -6,7 +6,7 @@ * Portions Copyright (c) 1996-2003, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.h,v 1.106 2003/11/29 22:40:46 pgsql Exp $ + * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.h,v 1.107 2003/12/06 03:00:16 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -14,10 +14,37 @@ #ifndef PG_DUMP_H #define PG_DUMP_H -#include "pg_backup.h" +#include "postgres_fe.h" + + +/* + * pg_dump uses two different mechanisms for identifying database objects: + * + * CatalogId represents an object by the tableoid and oid of its defining + * entry in the system catalogs. We need this to interpret pg_depend entries, + * for instance. + * + * DumpId is a simple sequential integer counter assigned as dumpable objects + * are identified during a pg_dump run. We use DumpId internally in preference + * to CatalogId for two reasons: it's more compact, and we can assign DumpIds + * to "objects" that don't have a separate CatalogId. 
For example, it is + * convenient to consider a table, its data, and its ACL as three separate + * dumpable "objects" with distinct DumpIds --- this lets us reason about the + * order in which to dump these things. + */ + +typedef struct +{ + Oid tableoid; + Oid oid; +} CatalogId; + +typedef int DumpId; + /* - * The data structures used to store system catalog information + * The data structures used to store system catalog information. Every + * dumpable object is a subclass of DumpableObject. * * NOTE: the structures described here live for the entire pg_dump run; * and in most cases we make a struct for every object we can find in the @@ -25,12 +52,46 @@ * best to store a minimal amount of per-object info in these structs, * and retrieve additional per-object info when and if we dump a specific * object. In particular, try to avoid retrieving expensive-to-compute - * information until it's known to be needed. + * information until it's known to be needed. We do, however, have to + * store enough info to determine whether an object should be dumped and + * what order to dump in. */ +typedef enum +{ + /* When modifying this enum, update priority table in pg_dump_sort.c! */ + DO_NAMESPACE, + DO_TYPE, + DO_FUNC, + DO_AGG, + DO_OPERATOR, + DO_OPCLASS, + DO_CONVERSION, + DO_TABLE, + DO_ATTRDEF, + DO_INDEX, + DO_RULE, + DO_TRIGGER, + DO_CONSTRAINT, + DO_FK_CONSTRAINT, /* see note for ConstraintInfo */ + DO_PROCLANG, + DO_CAST, + DO_TABLE_DATA +} DumpableObjectType; + +typedef struct _dumpableObject +{ + DumpableObjectType objType; + CatalogId catId; /* zero if not a cataloged object */ + DumpId dumpId; /* assigned by AssignDumpId() */ + DumpId *dependencies; /* dumpIds of objects this one depends on */ + int nDeps; /* number of valid dependencies */ + int allocDeps; /* allocated size of dependencies[] */ +} DumpableObject; + typedef struct _namespaceInfo { - char *oid; + DumpableObject dobj; char *nspname; char *usename; /* name of owner, or empty string */ char *nspacl; @@ -39,57 +100,56 @@ typedef struct _namespaceInfo typedef struct _typeInfo { - char *oid; + DumpableObject dobj; char *typname; /* name as seen in catalog */ /* Note: format_type might produce something different than typname */ NamespaceInfo *typnamespace; /* link to containing namespace */ char *usename; /* name of owner, or empty string */ - char *typelem; /* OID */ - char *typrelid; /* OID */ + Oid typinput; + Oid typelem; + Oid typrelid; char typrelkind; /* 'r', 'v', 'c', etc */ char typtype; /* 'b', 'c', etc */ bool isArray; /* true if user-defined array type */ bool isDefined; /* true if typisdefined */ + /* If it's a domain, we store links to its constraints here: */ + int nDomChecks; + struct _constraintInfo *domChecks; } TypeInfo; typedef struct _funcInfo { - char *oid; + DumpableObject dobj; char *proname; NamespaceInfo *pronamespace; /* link to containing namespace */ char *usename; /* name of owner, or empty string */ Oid lang; int nargs; - char **argtypes; /* OIDs */ - char *prorettype; /* OID */ + Oid *argtypes; + Oid prorettype; char *proacl; - bool dumped; /* true if already dumped */ } FuncInfo; +/* AggInfo is a superset of FuncInfo */ typedef struct _aggInfo { - char *oid; - char *aggname; - char *aggbasetype; /* OID */ - NamespaceInfo *aggnamespace; /* link to containing namespace */ - char *usename; - char *aggacl; + FuncInfo aggfn; bool anybasetype; /* is the basetype "any"? 
*/ char *fmtbasetype; /* formatted type name */ } AggInfo; typedef struct _oprInfo { - char *oid; + DumpableObject dobj; char *oprname; NamespaceInfo *oprnamespace; /* link to containing namespace */ char *usename; - char *oprcode; /* as OID, not regproc name */ + Oid oprcode; } OprInfo; typedef struct _opclassInfo { - char *oid; + DumpableObject dobj; char *opcname; NamespaceInfo *opcnamespace; /* link to containing namespace */ char *usename; @@ -97,7 +157,7 @@ typedef struct _opclassInfo typedef struct _convInfo { - char *oid; + DumpableObject dobj; char *conname; NamespaceInfo *connamespace; /* link to containing namespace */ char *usename; @@ -108,7 +168,7 @@ typedef struct _tableInfo /* * These fields are collected for every table in the database. */ - char *oid; + DumpableObject dobj; char *relname; NamespaceInfo *relnamespace; /* link to containing namespace */ char *usename; /* name of owner, or empty string */ @@ -120,7 +180,7 @@ typedef struct _tableInfo int ncheck; /* # of CHECK expressions */ int ntrig; /* # of triggers */ /* these two are set only if table is a SERIAL column's sequence: */ - char *owning_tab; /* OID of table owning sequence */ + Oid owning_tab; /* OID of table owning sequence */ int owning_col; /* attr # of column owning sequence */ bool interesting; /* true if need to collect more data */ @@ -143,40 +203,127 @@ typedef struct _tableInfo bool *attisserial; /* true if attr is serial or bigserial */ /* - * Note: we need to store per-attribute notnull and default stuff for - * all interesting tables so that we can tell which constraints were - * inherited. + * Note: we need to store per-attribute notnull, default, and constraint + * stuff for all interesting tables so that we can tell which constraints + * were inherited. */ bool *notnull; /* Not null constraints on attributes */ - char **adef_expr; /* DEFAULT expressions */ + struct _attrDefInfo **attrdefs; /* DEFAULT expressions */ bool *inhAttrs; /* true if each attribute is inherited */ bool *inhAttrDef; /* true if attr's default is inherited */ bool *inhNotNull; /* true if NOT NULL is inherited */ + struct _constraintInfo *checkexprs; /* CHECK constraints */ /* * Stuff computed only for dumpable tables. */ int numParents; /* number of (immediate) parent tables */ - int *parentIndexes; /* TableInfo indexes of immediate parents */ - - char *viewoid; /* OID of view - should be >= oid of table - * important because views may be - * constructed manually from rules, and - * rule may ref things created after the - * base table was created. */ + struct _tableInfo **parents; /* TableInfos of immediate parents */ } TableInfo; +typedef struct _attrDefInfo +{ + DumpableObject dobj; + TableInfo *adtable; /* link to table of attribute */ + int adnum; + char *adef_expr; /* decompiled DEFAULT expression */ + bool separate; /* TRUE if must dump as separate item */ +} AttrDefInfo; + +typedef struct _tableDataInfo +{ + DumpableObject dobj; + TableInfo *tdtable; /* link to table to dump */ + bool oids; /* include OIDs in data? 
*/ +} TableDataInfo; + +typedef struct _indxInfo +{ + DumpableObject dobj; + char *indexname; + TableInfo *indextable; /* link to table the index is for */ + char *indexdef; + int indnkeys; + Oid *indkeys; + bool indisclustered; + /* if there is an associated constraint object, its dumpId: */ + DumpId indexconstraint; +} IndxInfo; + +typedef struct _ruleInfo +{ + DumpableObject dobj; + char *rulename; + TableInfo *ruletable; /* link to table the rule is for */ + char ev_type; + bool is_instead; +} RuleInfo; + +typedef struct _triggerInfo +{ + DumpableObject dobj; + TableInfo *tgtable; /* link to table the trigger is for */ + char *tgname; + char *tgfname; + int tgtype; + int tgnargs; + char *tgargs; + bool tgisconstraint; + char *tgconstrname; + Oid tgconstrrelid; + char *tgconstrrelname; + bool tgdeferrable; + bool tginitdeferred; +} TriggerInfo; + +/* + * struct ConstraintInfo is used for all constraint types. However we + * use a different objType for foreign key constraints, to make it easier + * to sort them the way we want. + */ +typedef struct _constraintInfo +{ + DumpableObject dobj; + char *conname; + TableInfo *contable; /* NULL if domain constraint */ + TypeInfo *condomain; /* NULL if table constraint */ + char contype; + char *condef; /* definition, if CHECK or FOREIGN KEY */ + DumpId conindex; /* identifies associated index if any */ + bool coninherited; /* TRUE if appears to be inherited */ + bool separate; /* TRUE if must dump as separate item */ +} ConstraintInfo; + +typedef struct _procLangInfo +{ + DumpableObject dobj; + char *lanname; + bool lanpltrusted; + Oid lanplcallfoid; + Oid lanvalidator; + char *lanacl; +} ProcLangInfo; + +typedef struct _castInfo +{ + DumpableObject dobj; + Oid castsource; + Oid casttarget; + Oid castfunc; + char castcontext; +} CastInfo; + +/* InhInfo isn't a DumpableObject, just temporary state */ typedef struct _inhInfo { - char *inhrelid; /* OID of a child table */ - char *inhparent; /* OID of its parent */ + Oid inhrelid; /* OID of a child table */ + Oid inhparent; /* OID of its parent */ } InhInfo; /* global decls */ extern bool force_quotes; /* double-quotes for identifiers flag */ extern bool g_verbose; /* verbose flag */ -extern Archive *g_fout; /* the script file */ /* placeholders for comment starting and ending delimiters */ extern char g_comment_start[10]; @@ -188,9 +335,7 @@ extern char g_opaque_type[10]; /* name for the opaque type */ * common utility functions */ -extern TableInfo *dumpSchema(Archive *fout, - int *numTablesPtr, - const bool aclsSkip, +extern TableInfo *getSchemaData(int *numTablesPtr, const bool schemaOnly, const bool dataOnly); @@ -202,15 +347,28 @@ typedef enum _OidOptions zeroAsNone = 8 } OidOptions; -extern int findTableByOid(TableInfo *tbinfo, int numTables, const char *oid); -extern char *findOprByOid(OprInfo *oprinfo, int numOprs, const char *oid); -extern int findFuncByOid(FuncInfo *finfo, int numFuncs, const char *oid); -extern int findTypeByOid(TypeInfo *tinfo, int numTypes, const char *oid); +extern void AssignDumpId(DumpableObject *dobj); +extern DumpId createDumpId(void); +extern DumpId getMaxDumpId(void); +extern DumpableObject *findObjectByDumpId(DumpId dumpId); +extern DumpableObject *findObjectByCatalogId(CatalogId catalogId); +extern void getDumpableObjects(DumpableObject ***objs, int *numObjs); + +extern void addObjectDependency(DumpableObject *dobj, DumpId refId); +extern void removeObjectDependency(DumpableObject *dobj, DumpId refId); + +extern TableInfo *findTableByOid(Oid oid); +extern 
TypeInfo *findTypeByOid(Oid oid); +extern FuncInfo *findFuncByOid(Oid oid); +extern OprInfo *findOprByOid(Oid oid); extern void check_conn_and_db(void); extern void exit_nicely(void); -extern void parseNumericArray(const char *str, char **array, int arraysize); +extern void parseOidArray(const char *str, Oid *array, int arraysize); + +extern void sortDumpableObjects(DumpableObject **objs, int numObjs); +extern void sortDumpableObjectsByType(DumpableObject **objs, int numObjs); /* * version specific routines @@ -224,26 +382,12 @@ extern OpclassInfo *getOpclasses(int *numOpclasses); extern ConvInfo *getConversions(int *numConversions); extern TableInfo *getTables(int *numTables); extern InhInfo *getInherits(int *numInherits); - +extern void getIndexes(TableInfo tblinfo[], int numTables); +extern void getConstraints(TableInfo tblinfo[], int numTables); +extern RuleInfo *getRules(int *numRules); +extern void getTriggers(TableInfo tblinfo[], int numTables); +extern ProcLangInfo *getProcLangs(int *numProcLangs); +extern CastInfo *getCasts(int *numCasts); extern void getTableAttrs(TableInfo *tbinfo, int numTables); -extern void dumpDBComment(Archive *outfile); -extern void dumpNamespaces(Archive *fout, - NamespaceInfo *nsinfo, int numNamespaces); -extern void dumpTypes(Archive *fout, FuncInfo *finfo, int numFuncs, - TypeInfo *tinfo, int numTypes); -extern void dumpProcLangs(Archive *fout, FuncInfo finfo[], int numFuncs); -extern void dumpFuncs(Archive *fout, FuncInfo finfo[], int numFuncs); -extern void dumpCasts(Archive *fout, FuncInfo *finfo, int numFuncs, - TypeInfo *tinfo, int numTypes); -extern void dumpAggs(Archive *fout, AggInfo agginfo[], int numAggregates); -extern void dumpOprs(Archive *fout, OprInfo *oprinfo, int numOperators); -extern void dumpOpclasses(Archive *fout, - OpclassInfo *opcinfo, int numOpclasses); -extern void dumpConversions(Archive *fout, - ConvInfo *coninfo, int numConversions); -extern void dumpTables(Archive *fout, TableInfo tblinfo[], int numTables, - const bool aclsSkip, - const bool schemaOnly, const bool dataOnly); -extern void dumpIndexes(Archive *fout, TableInfo *tbinfo, int numTables); #endif /* PG_DUMP_H */ diff --git a/src/bin/pg_dump/pg_dump_sort.c b/src/bin/pg_dump/pg_dump_sort.c new file mode 100644 index 0000000000..12be66dadf --- /dev/null +++ b/src/bin/pg_dump/pg_dump_sort.c @@ -0,0 +1,727 @@ +/*------------------------------------------------------------------------- + * + * pg_dump_sort.c + * Sort the items of a dump into a safe order for dumping + * + * + * Portions Copyright (c) 1996-2003, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump_sort.c,v 1.1 2003/12/06 03:00:16 tgl Exp $ + * + *------------------------------------------------------------------------- + */ +#include "pg_dump.h" +#include "pg_backup_archiver.h" + + +static char *modulename = gettext_noop("sorter"); + +/* + * Sort priority for object types. Objects are sorted by priority, + * and within an equal priority level by OID. (This is a relatively + * crude hack to provide semi-reasonable behavior for old databases + * without full dependency info.) 
+ */ +static const int objectTypePriority[] = +{ + 1, /* DO_NAMESPACE */ + 2, /* DO_TYPE */ + 2, /* DO_FUNC */ + 2, /* DO_AGG */ + 3, /* DO_OPERATOR */ + 4, /* DO_OPCLASS */ + 5, /* DO_CONVERSION */ + 6, /* DO_TABLE */ + 7, /* DO_ATTRDEF */ + 10, /* DO_INDEX */ + 11, /* DO_RULE */ + 12, /* DO_TRIGGER */ + 9, /* DO_CONSTRAINT */ + 13, /* DO_FK_CONSTRAINT */ + 2, /* DO_PROCLANG */ + 2, /* DO_CAST */ + 8 /* DO_TABLE_DATA */ +}; + + +static int DOTypeCompare(const void *p1, const void *p2); +static bool TopoSort(DumpableObject **objs, + int numObjs, + DumpableObject **ordering, + int *nOrdering); +static bool findLoop(DumpableObject *obj, + int depth, + DumpableObject **ordering, + int *nOrdering); +static void repairDependencyLoop(DumpableObject **loop, + int nLoop); +static void describeDumpableObject(DumpableObject *obj, + char *buf, int bufsize); + + +/* + * Sort the given objects into a type/OID-based ordering + * + * Normally this is just the starting point for the dependency-based + * ordering. + */ +void +sortDumpableObjectsByType(DumpableObject **objs, int numObjs) +{ + if (numObjs > 1) + qsort((void *) objs, numObjs, sizeof(DumpableObject *), DOTypeCompare); +} + +static int +DOTypeCompare(const void *p1, const void *p2) +{ + DumpableObject *obj1 = *(DumpableObject **) p1; + DumpableObject *obj2 = *(DumpableObject **) p2; + int cmpval; + + cmpval = objectTypePriority[obj1->objType] - + objectTypePriority[obj2->objType]; + + if (cmpval != 0) + return cmpval; + + return oidcmp(obj1->catId.oid, obj2->catId.oid); +} + + +/* + * Sort the given objects into a safe dump order using dependency + * information (to the extent we have it available). + */ +void +sortDumpableObjects(DumpableObject **objs, int numObjs) +{ + DumpableObject **ordering; + int nOrdering; + + ordering = (DumpableObject **) malloc(numObjs * sizeof(DumpableObject *)); + if (ordering == NULL) + exit_horribly(NULL, modulename, "out of memory\n"); + + while (!TopoSort(objs, numObjs, ordering, &nOrdering)) + repairDependencyLoop(ordering, nOrdering); + + memcpy(objs, ordering, numObjs * sizeof(DumpableObject *)); + + free(ordering); +} + +/* + * TopoSort -- topological sort of a dump list + * + * Generate a re-ordering of the dump list that satisfies all the dependency + * constraints shown in the dump list. (Each such constraint is a fact of a + * partial ordering.) Minimize rearrangement of the list not needed to + * achieve the partial ordering. + * + * This is a lot simpler and slower than, for example, the topological sort + * algorithm shown in Knuth's Volume 1. However, Knuth's method doesn't + * try to minimize the damage to the existing order. + * + * Returns TRUE if able to build an ordering that satisfies all the + * constraints, FALSE if not (there are contradictory constraints). + * + * On success (TRUE result), ordering[] is filled with an array of + * DumpableObject pointers, of length equal to the input list length. + * + * On failure (FALSE result), ordering[] is filled with an array of + * DumpableObject pointers of length *nOrdering, representing a circular set + * of dependency constraints. (If there is more than one cycle in the given + * constraints, one is picked at random to return.) + * + * The caller is responsible for allocating sufficient space at *ordering. 
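+ *
+ * As a small illustration of the counting scheme used below: if items B
+ * and C each depend on item A, then A starts with two before-constraints
+ * while B and C start with none, so B and C are eligible to fill the rear
+ * slots of ordering[] first; each such placement decrements A's count,
+ * and A ends up in the earliest remaining slot, i.e. first in dump order.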
+ */ +static bool +TopoSort(DumpableObject **objs, + int numObjs, + DumpableObject **ordering, /* output argument */ + int *nOrdering) /* output argument */ +{ + DumpId maxDumpId = getMaxDumpId(); + bool result = true; + DumpableObject **topoItems; + DumpableObject *obj; + int *beforeConstraints; + int i, + j, + k, + last; + + /* First, create work array with the dump items in their current order */ + topoItems = (DumpableObject **) malloc(numObjs * sizeof(DumpableObject *)); + if (topoItems == NULL) + exit_horribly(NULL, modulename, "out of memory\n"); + memcpy(topoItems, objs, numObjs * sizeof(DumpableObject *)); + + *nOrdering = numObjs; /* for success return */ + + /* + * Scan the constraints, and for each item in the array, generate a + * count of the number of constraints that say it must be before + * something else. The count for the item with dumpId j is + * stored in beforeConstraints[j]. + */ + beforeConstraints = (int *) malloc((maxDumpId + 1) * sizeof(int)); + if (beforeConstraints == NULL) + exit_horribly(NULL, modulename, "out of memory\n"); + memset(beforeConstraints, 0, (maxDumpId + 1) * sizeof(int)); + for (i = 0; i < numObjs; i++) + { + obj = topoItems[i]; + for (j = 0; j < obj->nDeps; j++) + { + k = obj->dependencies[j]; + if (k <= 0 || k > maxDumpId) + exit_horribly(NULL, modulename, "invalid dependency %d\n", k); + beforeConstraints[k]++; + } + } + + /*-------------------- + * Now scan the topoItems array backwards. At each step, output the + * last item that has no remaining before-constraints, and decrease + * the beforeConstraints count of each of the items it was constrained + * against. + * i = index of ordering[] entry we want to output this time + * j = search index for topoItems[] + * k = temp for scanning constraint list for item j + * last = last non-null index in topoItems (avoid redundant searches) + *-------------------- + */ + last = numObjs - 1; + for (i = numObjs; --i >= 0;) + { + /* Find next candidate to output */ + while (topoItems[last] == NULL) + last--; + for (j = last; j >= 0; j--) + { + obj = topoItems[j]; + if (obj != NULL && beforeConstraints[obj->dumpId] == 0) + break; + } + /* If no available candidate, topological sort fails */ + if (j < 0) + { + result = false; + break; + } + /* Output candidate, and mark it done by zeroing topoItems[] entry */ + ordering[i] = obj = topoItems[j]; + topoItems[j] = NULL; + /* Update beforeConstraints counts of its predecessors */ + for (k = 0; k < obj->nDeps; k++) + beforeConstraints[obj->dependencies[k]]--; + } + + /* + * If we failed, report one of the circular constraint sets + */ + if (!result) + { + for (j = last; j >= 0; j--) + { + ordering[0] = obj = topoItems[j]; + if (obj && findLoop(obj, 1, ordering, nOrdering)) + break; + } + if (j < 0) + exit_horribly(NULL, modulename, + "could not find dependency loop\n"); + } + + /* Done */ + free(topoItems); + free(beforeConstraints); + + return result; +} + +/* + * Recursively search for a circular dependency loop + */ +static bool +findLoop(DumpableObject *obj, + int depth, + DumpableObject **ordering, /* output argument */ + int *nOrdering) /* output argument */ +{ + DumpId startPoint = ordering[0]->dumpId; + int j; + int k; + + /* See if we've found a loop back to the starting point */ + for (j = 0; j < obj->nDeps; j++) + { + if (obj->dependencies[j] == startPoint) + { + *nOrdering = depth; + return true; + } + } + /* Try each outgoing branch */ + for (j = 0; j < obj->nDeps; j++) + { + DumpableObject *nextobj = findObjectByDumpId(obj->dependencies[j]); + 
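+		/*
+		 * Depth-first step: dependencies that do not map to a dumpable
+		 * object, or that are already on the current path, are skipped;
+		 * anything else is appended to ordering[] and searched one level
+		 * deeper for a route back to the starting object.
+		 */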
+ if (!nextobj) + continue; /* ignore dependencies on undumped objects */ + for (k = 0; k < depth; k++) + { + if (ordering[k] == nextobj) + break; + } + if (k < depth) + continue; /* ignore loops not including start point */ + ordering[depth] = nextobj; + if (findLoop(nextobj, + depth + 1, + ordering, + nOrdering)) + return true; + } + + return false; +} + +/* + * A user-defined datatype will have a dependency loop with each of its + * I/O functions (since those have the datatype as input or output). + * We want the dump ordering to be the input function, then any other + * I/O functions, then the datatype. So we break the circularity in + * favor of the functions, and add a dependency from any non-input + * function to the input function. + */ +static void +repairTypeFuncLoop(DumpableObject *typeobj, DumpableObject *funcobj) +{ + TypeInfo *typeInfo = (TypeInfo *) typeobj; + FuncInfo *inputFuncInfo; + + /* remove function's dependency on type */ + removeObjectDependency(funcobj, typeobj->dumpId); + + /* if this isn't the input function, make it depend on same */ + if (funcobj->catId.oid == typeInfo->typinput) + return; /* it is the input function */ + inputFuncInfo = findFuncByOid(typeInfo->typinput); + if (inputFuncInfo == NULL) + return; + addObjectDependency(funcobj, inputFuncInfo->dobj.dumpId); + /* + * Make sure the input function's dependency on type gets removed too; + * if it hasn't been done yet, we'd end up with loops involving the + * type and two or more functions, which repairDependencyLoop() is not + * smart enough to handle. + */ + removeObjectDependency(&inputFuncInfo->dobj, typeobj->dumpId); +} + +/* + * Because we force a view to depend on its ON SELECT rule, while there + * will be an implicit dependency in the other direction, we need to break + * the loop. We can always do this by removing the implicit dependency. + */ +static void +repairViewRuleLoop(DumpableObject *viewobj, + DumpableObject *ruleobj) +{ + /* remove rule's dependency on view */ + removeObjectDependency(ruleobj, viewobj->dumpId); +} + +/* + * Because we make tables depend on their CHECK constraints, while there + * will be an automatic dependency in the other direction, we need to break + * the loop. If there are no other objects in the loop then we can remove + * the automatic dependency and leave the CHECK constraint non-separate. + */ +static void +repairTableConstraintLoop(DumpableObject *tableobj, + DumpableObject *constraintobj) +{ + /* remove constraint's dependency on table */ + removeObjectDependency(constraintobj, tableobj->dumpId); +} + +/* + * However, if there are other objects in the loop, we must break the loop + * by making the CHECK constraint a separately-dumped object. + * + * Because findLoop() finds shorter cycles before longer ones, it's likely + * that we will have previously fired repairTableConstraintLoop() and + * removed the constraint's dependency on the table. Put it back to ensure + * the constraint won't be emitted before the table... + */ +static void +repairTableConstraintMultiLoop(DumpableObject *tableobj, + DumpableObject *constraintobj) +{ + /* remove table's dependency on constraint */ + removeObjectDependency(tableobj, constraintobj->dumpId); + /* mark constraint as needing its own dump */ + ((ConstraintInfo *) constraintobj)->separate = true; + /* put back constraint's dependency on table */ + addObjectDependency(constraintobj, tableobj->dumpId); +} + +/* + * Attribute defaults behave exactly the same as CHECK constraints... 
+ */ +static void +repairTableAttrDefLoop(DumpableObject *tableobj, + DumpableObject *attrdefobj) +{ + /* remove attrdef's dependency on table */ + removeObjectDependency(attrdefobj, tableobj->dumpId); +} + +static void +repairTableAttrDefMultiLoop(DumpableObject *tableobj, + DumpableObject *attrdefobj) +{ + /* remove table's dependency on attrdef */ + removeObjectDependency(tableobj, attrdefobj->dumpId); + /* mark attrdef as needing its own dump */ + ((AttrDefInfo *) attrdefobj)->separate = true; + /* put back attrdef's dependency on table */ + addObjectDependency(attrdefobj, tableobj->dumpId); +} + +/* + * CHECK constraints on domains work just like those on tables ... + */ +static void +repairDomainConstraintLoop(DumpableObject *domainobj, + DumpableObject *constraintobj) +{ + /* remove constraint's dependency on domain */ + removeObjectDependency(constraintobj, domainobj->dumpId); +} + +static void +repairDomainConstraintMultiLoop(DumpableObject *domainobj, + DumpableObject *constraintobj) +{ + /* remove domain's dependency on constraint */ + removeObjectDependency(domainobj, constraintobj->dumpId); + /* mark constraint as needing its own dump */ + ((ConstraintInfo *) constraintobj)->separate = true; + /* put back constraint's dependency on domain */ + addObjectDependency(constraintobj, domainobj->dumpId); +} + +/* + * Fix a dependency loop, or die trying ... + * + * This routine is mainly concerned with reducing the multiple ways that + * a loop might appear to common cases, which it passes off to the + * "fixer" routines above. + */ +static void +repairDependencyLoop(DumpableObject **loop, + int nLoop) +{ + int i, + j; + + /* Datatype and one of its I/O functions */ + if (nLoop == 2 && + loop[0]->objType == DO_TYPE && + loop[1]->objType == DO_FUNC) + { + repairTypeFuncLoop(loop[0], loop[1]); + return; + } + if (nLoop == 2 && + loop[1]->objType == DO_TYPE && + loop[0]->objType == DO_FUNC) + { + repairTypeFuncLoop(loop[1], loop[0]); + return; + } + + /* View and its ON SELECT rule */ + if (nLoop == 2 && + loop[0]->objType == DO_TABLE && + loop[1]->objType == DO_RULE && + ((RuleInfo *) loop[1])->ev_type == '1' && + ((RuleInfo *) loop[1])->is_instead) + { + repairViewRuleLoop(loop[0], loop[1]); + return; + } + if (nLoop == 2 && + loop[1]->objType == DO_TABLE && + loop[0]->objType == DO_RULE && + ((RuleInfo *) loop[0])->ev_type == '1' && + ((RuleInfo *) loop[0])->is_instead) + { + repairViewRuleLoop(loop[1], loop[0]); + return; + } + + /* Table and CHECK constraint */ + if (nLoop == 2 && + loop[0]->objType == DO_TABLE && + loop[1]->objType == DO_CONSTRAINT && + ((ConstraintInfo *) loop[1])->contype == 'c' && + ((ConstraintInfo *) loop[1])->contable == (TableInfo *) loop[0]) + { + repairTableConstraintLoop(loop[0], loop[1]); + return; + } + if (nLoop == 2 && + loop[1]->objType == DO_TABLE && + loop[0]->objType == DO_CONSTRAINT && + ((ConstraintInfo *) loop[0])->contype == 'c' && + ((ConstraintInfo *) loop[0])->contable == (TableInfo *) loop[1]) + { + repairTableConstraintLoop(loop[1], loop[0]); + return; + } + + /* Indirect loop involving table and CHECK constraint */ + if (nLoop > 2) + { + for (i = 0; i < nLoop; i++) + { + if (loop[i]->objType == DO_TABLE) + { + for (j = 0; j < nLoop; j++) + { + if (loop[j]->objType == DO_CONSTRAINT && + ((ConstraintInfo *) loop[j])->contype == 'c' && + ((ConstraintInfo *) loop[j])->contable == (TableInfo *) loop[i]) + { + repairTableConstraintMultiLoop(loop[i], loop[j]); + return; + } + } + } + } + } + + /* Table and attribute default */ + if (nLoop == 2 
&& + loop[0]->objType == DO_TABLE && + loop[1]->objType == DO_ATTRDEF && + ((AttrDefInfo *) loop[1])->adtable == (TableInfo *) loop[0]) + { + repairTableAttrDefLoop(loop[0], loop[1]); + return; + } + if (nLoop == 2 && + loop[1]->objType == DO_TABLE && + loop[0]->objType == DO_ATTRDEF && + ((AttrDefInfo *) loop[0])->adtable == (TableInfo *) loop[1]) + { + repairTableAttrDefLoop(loop[1], loop[0]); + return; + } + + /* Indirect loop involving table and attribute default */ + if (nLoop > 2) + { + for (i = 0; i < nLoop; i++) + { + if (loop[i]->objType == DO_TABLE) + { + for (j = 0; j < nLoop; j++) + { + if (loop[j]->objType == DO_ATTRDEF && + ((AttrDefInfo *) loop[j])->adtable == (TableInfo *) loop[i]) + { + repairTableAttrDefMultiLoop(loop[i], loop[j]); + return; + } + } + } + } + } + + /* Domain and CHECK constraint */ + if (nLoop == 2 && + loop[0]->objType == DO_TYPE && + loop[1]->objType == DO_CONSTRAINT && + ((ConstraintInfo *) loop[1])->contype == 'c' && + ((ConstraintInfo *) loop[1])->condomain == (TypeInfo *) loop[0]) + { + repairDomainConstraintLoop(loop[0], loop[1]); + return; + } + if (nLoop == 2 && + loop[1]->objType == DO_TYPE && + loop[0]->objType == DO_CONSTRAINT && + ((ConstraintInfo *) loop[0])->contype == 'c' && + ((ConstraintInfo *) loop[0])->condomain == (TypeInfo *) loop[1]) + { + repairDomainConstraintLoop(loop[1], loop[0]); + return; + } + + /* Indirect loop involving domain and CHECK constraint */ + if (nLoop > 2) + { + for (i = 0; i < nLoop; i++) + { + if (loop[i]->objType == DO_TYPE) + { + for (j = 0; j < nLoop; j++) + { + if (loop[j]->objType == DO_CONSTRAINT && + ((ConstraintInfo *) loop[j])->contype == 'c' && + ((ConstraintInfo *) loop[j])->condomain == (TypeInfo *) loop[i]) + { + repairDomainConstraintMultiLoop(loop[i], loop[j]); + return; + } + } + } + } + } + + /* + * If we can't find a principled way to break the loop, complain and + * break it in an arbitrary fashion. + */ + write_msg(modulename, "WARNING: could not resolve dependency loop among these items:\n"); + for (i = 0; i < nLoop; i++) + { + char buf[1024]; + + describeDumpableObject(loop[i], buf, sizeof(buf)); + write_msg(modulename, " %s\n", buf); + } + removeObjectDependency(loop[0], loop[1]->dumpId); +} + +/* + * Describe a dumpable object usefully for errors + * + * This should probably go somewhere else... 
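+ *
+ * The result is a one-line summary giving the object's type, name, dump ID,
+ * and catalog OID, formatted into the caller-supplied buffer; it is what the
+ * dependency-loop warning above prints for each member of the loop.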
+ */ +static void +describeDumpableObject(DumpableObject *obj, char *buf, int bufsize) +{ + switch (obj->objType) + { + case DO_NAMESPACE: + snprintf(buf, bufsize, + "SCHEMA %s (ID %d OID %u)", + ((NamespaceInfo *) obj)->nspname, + obj->dumpId, obj->catId.oid); + return; + case DO_TYPE: + snprintf(buf, bufsize, + "TYPE %s (ID %d OID %u)", + ((TypeInfo *) obj)->typname, + obj->dumpId, obj->catId.oid); + return; + case DO_FUNC: + snprintf(buf, bufsize, + "FUNCTION %s (ID %d OID %u)", + ((FuncInfo *) obj)->proname, + obj->dumpId, obj->catId.oid); + return; + case DO_AGG: + snprintf(buf, bufsize, + "AGGREGATE %s (ID %d OID %u)", + ((AggInfo *) obj)->aggfn.proname, + obj->dumpId, obj->catId.oid); + return; + case DO_OPERATOR: + snprintf(buf, bufsize, + "OPERATOR %s (ID %d OID %u)", + ((OprInfo *) obj)->oprname, + obj->dumpId, obj->catId.oid); + return; + case DO_OPCLASS: + snprintf(buf, bufsize, + "OPERATOR CLASS %s (ID %d OID %u)", + ((OpclassInfo *) obj)->opcname, + obj->dumpId, obj->catId.oid); + return; + case DO_CONVERSION: + snprintf(buf, bufsize, + "CONVERSION %s (ID %d OID %u)", + ((ConvInfo *) obj)->conname, + obj->dumpId, obj->catId.oid); + return; + case DO_TABLE: + snprintf(buf, bufsize, + "TABLE %s (ID %d OID %u)", + ((TableInfo *) obj)->relname, + obj->dumpId, obj->catId.oid); + return; + case DO_ATTRDEF: + snprintf(buf, bufsize, + "ATTRDEF %s.%s (ID %d OID %u)", + ((AttrDefInfo *) obj)->adtable->relname, + ((AttrDefInfo *) obj)->adtable->attnames[((AttrDefInfo *) obj)->adnum - 1], + obj->dumpId, obj->catId.oid); + return; + case DO_INDEX: + snprintf(buf, bufsize, + "INDEX %s (ID %d OID %u)", + ((IndxInfo *) obj)->indexname, + obj->dumpId, obj->catId.oid); + return; + case DO_RULE: + snprintf(buf, bufsize, + "RULE %s (ID %d OID %u)", + ((RuleInfo *) obj)->rulename, + obj->dumpId, obj->catId.oid); + return; + case DO_TRIGGER: + snprintf(buf, bufsize, + "TRIGGER %s (ID %d OID %u)", + ((TriggerInfo *) obj)->tgname, + obj->dumpId, obj->catId.oid); + return; + case DO_CONSTRAINT: + snprintf(buf, bufsize, + "CONSTRAINT %s (ID %d OID %u)", + ((ConstraintInfo *) obj)->conname, + obj->dumpId, obj->catId.oid); + return; + case DO_FK_CONSTRAINT: + snprintf(buf, bufsize, + "FK CONSTRAINT %s (ID %d OID %u)", + ((ConstraintInfo *) obj)->conname, + obj->dumpId, obj->catId.oid); + return; + case DO_PROCLANG: + snprintf(buf, bufsize, + "PROCEDURAL LANGUAGE %s (ID %d OID %u)", + ((ProcLangInfo *) obj)->lanname, + obj->dumpId, obj->catId.oid); + return; + case DO_CAST: + snprintf(buf, bufsize, + "CAST %u to %u (ID %d OID %u)", + ((CastInfo *) obj)->castsource, + ((CastInfo *) obj)->casttarget, + obj->dumpId, obj->catId.oid); + return; + case DO_TABLE_DATA: + snprintf(buf, bufsize, + "TABLE DATA %s (ID %d OID %u)", + ((TableDataInfo *) obj)->tdtable->relname, + obj->dumpId, obj->catId.oid); + return; + } + /* shouldn't get here */ + snprintf(buf, bufsize, + "object type %d (ID %d OID %u)", + (int) obj->objType, + obj->dumpId, obj->catId.oid); +} diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c index b52ba6f94d..8e051104c3 100644 --- a/src/bin/pg_dump/pg_restore.c +++ b/src/bin/pg_dump/pg_restore.c @@ -34,7 +34,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/bin/pg_dump/pg_restore.c,v 1.54 2003/11/29 19:52:05 pgsql Exp $ + * $PostgreSQL: pgsql/src/bin/pg_dump/pg_restore.c,v 1.55 2003/12/06 03:00:16 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -101,10 +101,7 @@ main(int argc, char **argv) {"no-owner", 0, NULL, 'O'}, 
{"no-reconnect", 0, NULL, 'R'}, {"port", 1, NULL, 'p'}, - {"oid-order", 0, NULL, 'o'}, - {"orig-order", 0, NULL, 'N'}, {"password", 0, NULL, 'W'}, - {"rearrange", 0, NULL, 'r'}, {"schema-only", 0, NULL, 's'}, {"superuser", 1, NULL, 'S'}, {"table", 1, NULL, 't'}, @@ -147,7 +144,7 @@ main(int argc, char **argv) } } - while ((c = getopt_long(argc, argv, "acCd:f:F:h:iI:lL:NoOp:P:rRsS:t:T:uU:vWxX:", + while ((c = getopt_long(argc, argv, "acCd:f:F:h:iI:lL:Op:P:RsS:t:T:uU:vWxX:", cmdopts, NULL)) != -1) { switch (c) @@ -188,12 +185,6 @@ main(int argc, char **argv) opts->tocFile = strdup(optarg); break; - case 'N': - opts->origOrder = 1; - break; - case 'o': - opts->oidOrder = 1; - break; case 'O': opts->noOwner = 1; break; @@ -201,9 +192,6 @@ main(int argc, char **argv) if (strlen(optarg) != 0) opts->pgport = strdup(optarg); break; - case 'r': - opts->rearrange = 1; - break; case 'R': /* no-op, still accepted for backwards compatibility */ break; @@ -338,19 +326,6 @@ main(int argc, char **argv) if (opts->tocFile) SortTocFromFile(AH, opts); - if (opts->oidOrder) - SortTocByOID(AH); - else if (opts->origOrder) - SortTocByID(AH); - - if (opts->rearrange) - SortTocByObjectType(AH); - else - { - /* Database MUST be at start (see also SortTocByObjectType) */ - MoveToStart(AH, "DATABASE"); - } - if (opts->tocSummary) PrintTOCSummary(AH, opts); else @@ -385,12 +360,9 @@ usage(const char *progname) printf(_(" -I, --index=NAME restore named index\n")); printf(_(" -L, --use-list=FILENAME use specified table of contents for ordering\n" " output from this file\n")); - printf(_(" -N, --orig-order restore in original dump order\n")); - printf(_(" -o, --oid-order restore in OID order\n")); printf(_(" -O, --no-owner do not output commands to set object ownership\n")); printf(_(" -P, --function=NAME(args)\n" " restore named function\n")); - printf(_(" -r, --rearrange rearrange output to put indexes etc. at end\n")); printf(_(" -s, --schema-only restore only the schema, no data\n")); printf(_(" -S, --superuser=NAME specify the superuser user name to use for\n" " disabling triggers\n")); -- GitLab