diff --git a/gpMgmt/bin/gpexpand b/gpMgmt/bin/gpexpand
index 9187a0522956b92d7d1cbec2a08012adc687f53f..4d97807d0ec7ed78c20ef4fd217752ca1dcf6448 100755
--- a/gpMgmt/bin/gpexpand
+++ b/gpMgmt/bin/gpexpand
@@ -120,8 +120,6 @@ def parseargs():
                       help='remove the expansion schema.')
     parser.add_option('-r', '--rollback', action='store_true',
                       help='rollback failed expansion setup.')
-    parser.add_option('-V', '--novacuum', action='store_true',
-                      help='Do not vacuum catalog tables before creating schema copy.')
     parser.add_option('-a', '--analyze', action='store_true',
                       help='Analyze the expanded table after redistribution.')
     parser.add_option('-d', '--duration', type='duration', metavar='[h][:m[:s]]',
@@ -634,13 +632,12 @@ class SegmentTemplate:
 
     def __init__(self, logger, statusLogger, pool, gparray, masterDataDirectory,
-                 dburl, conn, noVacuumCatalog, tempDir, batch_size,
+                 dburl, conn, tempDir, batch_size,
                  segTarDir='.', schemaTarFile='gpexpand_schema.tar'):
         self.logger = logger
         self.statusLogger = statusLogger
         self.pool = pool
         self.gparray = gparray
-        self.noVacuumCatalog = noVacuumCatalog
         self.tempDir = tempDir
         self.batch_size = batch_size
         self.dburl = dburl
@@ -702,10 +699,6 @@ class SegmentTemplate:
         """Creates the schema template that is used by new segments"""
         self.logger.info('Creating segment template')
 
-        if not self.noVacuumCatalog:
-            self.logger.info('VACUUM FULL on the catalog tables')
-            catalog.vacuum_catalog(self.dburl, self.conn, full=True, utility=True)
-
         MakeDirectory.local('gpexpand create temp dir', self.tempDir)
 
         self._select_src_segment()
@@ -1337,7 +1330,6 @@ Set PGDATABASE or use the -D option to specify the correct database to use.""" %
                                        masterDataDirectory=self.options.master_data_directory,
                                        dburl=self.dburl,
                                        conn=self.conn,
-                                       noVacuumCatalog=self.options.novacuum,
                                        tempDir=self.tempDir,
                                        segTarDir=self.options.tardir,
                                        batch_size=self.options.batch_size)
diff --git a/gpMgmt/bin/gppylib/db/catalog.py b/gpMgmt/bin/gppylib/db/catalog.py
index fcaebc47bd3de50c1a7779b4bcc9ba1cf06c8d20..291d04a017b9efb1306d7172e5fa5cfab77fe7c1 100644
--- a/gpMgmt/bin/gppylib/db/catalog.py
+++ b/gpMgmt/bin/gppylib/db/catalog.py
@@ -88,50 +88,3 @@ def get_catalogtable_list(conn):
     finally:
         if cursor:
             cursor.close()
-
-
-def vacuum_catalog(dburl,conn,full=False,utility=False):
-    """ Will use the provided connection to enumerate the list of databases
-        and then connect to each one in turn and vacuum full all of the
-        catalog files
-
-        TODO: There are a few tables that are cluster-wide that strictly speaking
-              don't need to be vacuumed for each database.  These are most likely
-              small and so perhaps isn't worth the added complexity to optimize them.
-
-        WARNING: doing a vacuum full on the catalog requires that
-        there aren't any users idle in a transaction as they typically
-        hold catalog share locks.  The result is this vacuum will wait forever on
-        getting the lock.  This method is best called when no one else
-        is connected to the system.  Our own connections are typically idle
-        in transactions and so are especially bad.
-    """
-    dblist = getDatabaseList(conn)
-    catlist = get_catalogtable_list(conn)
-    conn.commit()
-
-    for db in dblist:
-        test_url = copy.deepcopy(dburl)
-        test_url.pgdb = db[0]
-
-        if db[0] == 'template0' or db[0] == 'postgres':
-            continue
-
-        vac_conn = dbconn.connect(test_url,utility)
-        vac_curs = vac_conn.cursor()
-        vac_curs.execute("COMMIT")
-        vac_curs.execute("SET CLIENT_MIN_MESSAGES='ERROR'")
-        for table in catlist:
-            logger.debug('Vacuuming %s %s' % (db[0],table[0]) )
-
-            if full:
-                sql = "VACUUM FULL %s" % table[0]
-            else:
-                sql = "VACUUM %s" % table[0]
-
-            vac_curs.execute(sql)
-
-
-        vac_curs.execute(sql)
-        vac_conn.commit()
-        vac_conn.close()
diff --git a/gpMgmt/bin/gppylib/db/test/regress/test_regress_catalog.py b/gpMgmt/bin/gppylib/db/test/regress/test_regress_catalog.py
index 9d9e7b08e254d9284be2e688c23c632815da3e81..3a8509ffa4af29b6bf56bcb0047597e77c572eac 100644
--- a/gpMgmt/bin/gppylib/db/test/regress/test_regress_catalog.py
+++ b/gpMgmt/bin/gppylib/db/test/regress/test_regress_catalog.py
@@ -30,11 +30,6 @@ class catalogTestCase(unittest.TestCase):
         self.conn.close()
         pass
 
-    def test_vacuumcatalog(self):
-        logger.info("test_vacuumcatalog")
-        catalog.vacuum_catalog(self.dburl,self.conn)
-        catalog.vacuum_catalog(self.dburl,self.conn,full=True)
-
 #------------------------------- Mainline --------------------------------
 if __name__ == '__main__':
     unittest.main()
diff --git a/gpMgmt/bin/gppylib/db/test/test_catalog.py b/gpMgmt/bin/gppylib/db/test/test_catalog.py
index 121244cf06170fa78030e3ae0cddff4d59b238e3..b2296174c10f472766a62ef60504f5b1f4943733 100644
--- a/gpMgmt/bin/gppylib/db/test/test_catalog.py
+++ b/gpMgmt/bin/gppylib/db/test/test_catalog.py
@@ -30,11 +30,6 @@ class catalogTestCase(unittest.TestCase):
         self.conn.close()
         pass
 
-    def test_vacuumcatalog(self):
-        logger.info("test_vacuumcatalog")
-        catalog.vacuum_catalog(self.dburl,self.conn)
-        catalog.vacuum_catalog(self.dburl,self.conn,full=True)
-
 #------------------------------- Mainline --------------------------------
 if __name__ == '__main__':
     unittest.main()
diff --git a/gpdb-doc/dita/utility_guide/admin_utilities/gpexpand.xml b/gpdb-doc/dita/utility_guide/admin_utilities/gpexpand.xml
index ff834bdcf1aadb7cd63b66bade3e761d18360663..447d84b3bd12832319f3e1c27438ffe4b79c9d86 100644
--- a/gpdb-doc/dita/utility_guide/admin_utilities/gpexpand.xml
+++ b/gpdb-doc/dita/utility_guide/admin_utilities/gpexpand.xml
@@ -8,7 +8,7 @@
 Synopsis
 gpexpand [{-f|--hosts-file} hosts_file]
-       | {-i|--input} input_file [-B batch_size] [-V|--novacuum]
+       | {-i|--input} input_file [-B batch_size]
        | {{-d | --duration} hh:mm:ss | {-e|--end} 'YYYY-MM-DD hh:mm:ss'} [-a|--analyze]
          [-n parallel_processes]
@@ -187,10 +187,6 @@
 --version
 Display the utility's version number and exit.
-
--V | --novacuum
-Do not vacuum catalog tables before creating schema copy.
-
 -? | -h | --help
 Displays the online help.
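
Note: with this patch, gpexpand no longer issues VACUUM FULL on the catalog tables before building the schema template, and catalog.vacuum_catalog() is removed entirely. An administrator who still wants the old pre-expansion vacuum can run it by hand first. The script below is a minimal standalone sketch of the removed routine, not part of this patch; it assumes the gppylib helpers the old code relied on (dbconn.connect, getDatabaseList, get_catalogtable_list) are unchanged, and that dbconn.DbURL() (a gppylib helper not shown in this diff) resolves the master host, port, and user from the environment.

#!/usr/bin/env python
# Hypothetical standalone stand-in for the removed catalog.vacuum_catalog().
# Assumes gppylib is on PYTHONPATH; dbconn.DbURL() is an assumption here,
# carried over from general gpMgmt usage rather than from this patch.
import copy

from gppylib.db import dbconn
from gppylib.db.catalog import getDatabaseList, get_catalogtable_list


def vacuum_all_catalogs(dburl, full=True):
    # As the removed docstring warned: VACUUM FULL on the catalog blocks
    # behind idle-in-transaction sessions holding catalog share locks, so
    # run this only while nothing else is connected to the system.
    conn = dbconn.connect(dburl, False)
    try:
        dblist = getDatabaseList(conn)
        catlist = get_catalogtable_list(conn)
    finally:
        conn.close()

    for db in dblist:
        if db[0] in ('template0', 'postgres'):
            continue
        db_url = copy.deepcopy(dburl)
        db_url.pgdb = db[0]
        vac_conn = dbconn.connect(db_url, False)
        try:
            curs = vac_conn.cursor()
            curs.execute("COMMIT")  # VACUUM cannot run inside a transaction
            curs.execute("SET CLIENT_MIN_MESSAGES='ERROR'")
            for table in catlist:
                curs.execute("VACUUM %s%s" % ("FULL " if full else "", table[0]))
        finally:
            vac_conn.close()


if __name__ == '__main__':
    vacuum_all_catalogs(dbconn.DbURL())

Unlike the removed function, this sketch drops the stray duplicate execute(sql) that ran after the inner loop in the original, and skips template0 and postgres the same way the old code did.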